// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sysfs.h>

#include "base.h"
#include "power/power.h"

#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif

/* Device links support. */

#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx)
{
	srcu_read_unlock(&device_links_srcu, idx);
}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}
#endif /* !CONFIG_SRCU */

/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc). Return 1 if that is the case or 0 otherwise.
 */
static int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (dev == target)
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_reorder_to_tail(link->consumer, NULL);

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the devices_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If the DL_FLAG_AUTOREMOVE_CONSUMER flag is set, the link will be removed
 * automatically when the consumer device driver unbinds from it.
 * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
 * set is invalid and will cause NULL to be returned.
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case. The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	if (!consumer || !supplier ||
	    ((flags & DL_FLAG_STATELESS) &&
	     (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
		return NULL;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse dependency between the consumer and the supplier already in
	 * the graph, return NULL.
	 */
	if (!device_pm_initialized(supplier)
	    || device_is_dependent(consumer, supplier)) {
		link = NULL;
		goto out;
	}

	list_for_each_entry(link, &supplier->links.consumers, s_node)
		if (link->consumer == consumer) {
			kref_get(&link->kref);
			goto out;
		}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE) {
			if (pm_runtime_get_sync(supplier) < 0) {
				pm_runtime_put_noidle(supplier);
				kfree(link);
				link = NULL;
				goto out;
			}
			link->rpm_active = true;
		}
		pm_runtime_new_link(consumer);
		/*
		 * If the link is being added by the consumer driver at probe
		 * time, balance the decrementation of the supplier's runtime PM
		 * usage counter after consumer probe in driver_probe_device().
		 */
		if (consumer->links.status == DL_DEV_PROBING)
			pm_runtime_get_noresume(supplier);
	}
	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS) {
		link->status = DL_STATE_NONE;
	} else {
		switch (supplier->links.status) {
		case DL_DEV_DRIVER_BOUND:
			switch (consumer->links.status) {
			case DL_DEV_PROBING:
				/*
				 * Some callers expect the link creation during
				 * consumer driver probe to resume the supplier
				 * even without DL_FLAG_RPM_ACTIVE.
				 */
				if (flags & DL_FLAG_PM_RUNTIME)
					pm_runtime_resume(supplier);

				link->status = DL_STATE_CONSUMER_PROBE;
				break;
			case DL_DEV_DRIVER_BOUND:
				link->status = DL_STATE_ACTIVE;
				break;
			default:
				link->status = DL_STATE_AVAILABLE;
				break;
			}
			break;
		case DL_DEV_UNBINDING:
			link->status = DL_STATE_SUPPLIER_UNBIND;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
	}

	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

 out:
	device_pm_unlock();
	device_links_write_unlock();
	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
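
/*
 * Illustrative sketch (not part of the driver core): a consumer driver's
 * probe() path might record its dependency on an already registered
 * supplier like this. The "consumer" and "supplier" pointers are assumed
 * to be valid struct device pointers obtained by the caller:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
 *	if (!link)
 *		return -EINVAL;
 *
 * With DL_FLAG_AUTOREMOVE_CONSUMER set, the link is dropped automatically
 * when the consumer unbinds, so no matching device_link_del() is needed.
 */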

static void device_link_free(struct device_link *link)
{
	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

#ifdef CONFIG_SRCU
static void __device_link_free_srcu(struct rcu_head *rhead)
{
	device_link_free(container_of(rhead, struct device_link, rcu_head));
}

static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	if (link->flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_drop_link(link->consumer);

	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else /* !CONFIG_SRCU */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	if (link->flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_drop_link(link->consumer);

	list_del(&link->s_node);
	list_del(&link->c_node);
	device_link_free(link);
}
#endif /* !CONFIG_SRCU */

/**
 * device_link_del - Delete a link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM. If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_pm_lock();
	kref_put(&link->kref, __device_link_del);
	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/**
 * device_link_remove - remove a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();
	device_pm_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			kref_put(&link->kref, __device_link_del);
			break;
		}
	}

	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);

static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node)
		if (link->status == DL_STATE_CONSUMER_PROBE)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}

/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers. Walk the list of the device's
 * links to suppliers and see if all of them are available. If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here. It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers. This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}

/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
			kref_put(&link->kref, __device_link_del);
		else if (link->status != DL_STATE_SUPPLIER_UNBIND)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

void device_links_no_driver(struct device *dev)
{
	device_links_write_lock();
	__device_links_no_driver(dev);
	device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			kref_put(&link->kref, __device_link_del);

		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present). Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}

/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state. If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (link->flags & DL_FLAG_STATELESS)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}

/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}

/* Device links support end. */

int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

static DEFINE_MUTEX(device_hotplug_lock);

void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}

#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif

/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a driver. If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to. If it is not attached to a bus either, an empty
 * string will be returned.
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/* dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it. dev->bus and dev->class should
	 * never change once they are set, so they don't need special care.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name :
			(dev->bus ? dev->bus->name :
			(dev->class ? dev->class->name : ""));
}
EXPORT_SYMBOL(dev_driver_string);
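
/*
 * Illustrative sketch: dev_driver_string() is handy in diagnostics when a
 * device may or may not be bound yet, e.g. (hypothetical message text):
 *
 *	dev_info(dev, "handled by %s\n", dev_driver_string(dev));
 */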

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
		       dev_attr->show);
	}
	return ret;
}

static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show = dev_attr_show,
	.store = dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)

ssize_t device_store_ulong(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	char *end;
	unsigned long new = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EINVAL;
	*(unsigned long *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);

ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);

ssize_t device_store_int(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	char *end;
	long new = simple_strtol(buf, &end, 0);
	if (end == buf || new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_int);

ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);

ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
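
/*
 * Illustrative sketch (not part of this file): a driver can hook the generic
 * device_show_ulong()/device_store_ulong() helpers above to one of its own
 * variables through struct dev_ext_attribute, most conveniently via the
 * DEVICE_ULONG_ATTR() macro from <linux/device.h>. The "poll_interval"
 * name and variable below are made up for the example:
 *
 *	static unsigned long poll_interval;
 *	static DEVICE_ULONG_ATTR(poll_interval, 0644, poll_interval);
 *
 *	// in probe(), hypothetical error handling omitted:
 *	device_create_file(dev, &dev_attr_poll_interval.attr);
 */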

/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired. Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() "
			"function, it is broken and must be fixed.\n",
			dev_name(dev));
	kfree(p);
}

static const void *device_namespace(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

static struct kobj_type device_ktype = {
	.release = device_release,
	.sysfs_ops = &dev_sysfs_ops,
	.namespace = device_namespace,
	.get_ownership = device_get_ownership,
};


static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		struct device *dev = kobj_to_dev(kobj);
		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}

static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter = dev_uevent_filter,
	.name = dev_uevent_name,
	.uevent = dev_uevent,
};

static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	size_t count = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		count += sprintf(&buf[count], "%s\n", env->envp[i]);
out:
	kfree(env);
	return count;
}

static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	if (kobject_synth_uevent(&dev->kobj, buf, count))
		dev_err(dev, "uevent: failed to send synthetic uevent\n");

	return count;
}
static DEVICE_ATTR_RW(uevent);

static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sprintf(buf, "%u\n", val);
}

static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}

/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev: The device to create the group for
 * @grp: The attribute group to create
 *
 * This function creates a group for the first time. It will explicitly
 * warn and error if any of the attribute files being created already exist.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
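
/*
 * Illustrative sketch: a driver's probe() can register a group whose
 * lifetime is tied to the device via devres. The attribute names below
 * ("foo", "bar") and the group are hypothetical:
 *
 *	static struct attribute *mydrv_attrs[] = {
 *		&dev_attr_foo.attr,
 *		&dev_attr_bar.attr,
 *		NULL,
 *	};
 *	static const struct attribute_group mydrv_group = {
 *		.attrs = mydrv_attrs,
 *	};
 *
 *	ret = devm_device_add_group(dev, &mydrv_group);
 *	if (ret)
 *		return ret;
 *
 * The group is removed automatically when the device is unbound, so no
 * explicit devm_device_remove_group() call is needed on the error path.
 */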

/**
 * devm_device_remove_group - remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * This function removes a group of attributes from a device. The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);

/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev: The device to create the group for
 * @groups: The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups. If an error
 * occurs when creating a group, all previously created groups will be
 * removed, unwinding everything back to the original state when this
 * function was called. It will explicitly warn and error if any of the
 * attribute files being created already exist.
 *
 * Returns 0 on success or error code from sysfs_create_group on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);
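
/*
 * Illustrative sketch: the plural form takes a NULL-terminated array of
 * groups; the ATTRIBUTE_GROUPS() helper from <linux/sysfs.h> can generate
 * it from an attribute array. "mydrv" is a hypothetical name:
 *
 *	ATTRIBUTE_GROUPS(mydrv);	// defines mydrv_groups[] from mydrv_attrs[]
 *
 *	ret = devm_device_add_groups(dev, mydrv_groups);
 */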

/**
 * devm_device_remove_groups - remove a list of managed groups
 *
 * @dev: The device for the groups to be removed from
 * @groups: NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);

static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	return 0;

 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}

static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}

static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);

/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
			"Attribute %s: write permission without 'store'\n",
			attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
			"Attribute %s: read permission without 'show'\n",
			attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
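
/*
 * Illustrative sketch: a read-only attribute built with DEVICE_ATTR_RO()
 * and created with device_create_file(). The "revision" attribute and the
 * my_chip structure are made up for the example:
 *
 *	static ssize_t revision_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct my_chip *chip = dev_get_drvdata(dev);
 *
 *		return sprintf(buf, "%u\n", chip->revision);
 *	}
 *	static DEVICE_ATTR_RO(revision);
 *
 *	// in probe():
 *	ret = device_create_file(dev, &dev_attr_revision);
 *
 * Pairing it with device_remove_file(dev, &dev_attr_revision) on remove
 * keeps sysfs consistent; attribute groups are usually preferred, though.
 */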

/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);

/**
 * device_remove_file_self - remove sysfs attribute file from its own method.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * See kernfs_remove_self() for details.
 */
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr)
{
	if (dev)
		return sysfs_remove_file_self(&dev->kobj, &attr->attr);
	else
		return false;
}
EXPORT_SYMBOL_GPL(device_remove_file_self);

/**
 * device_create_bin_file - create sysfs binary attribute file for device.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
int device_create_bin_file(struct device *dev,
			   const struct bin_attribute *attr)
{
	int error = -EINVAL;
	if (dev)
		error = sysfs_create_bin_file(&dev->kobj, attr);
	return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);

/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);

static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}

/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields. In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value. The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
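
/*
 * Illustrative sketch of the two-step pattern enabled by
 * device_initialize(): the caller may take references and set fields
 * before the device becomes visible via device_add(). All names except
 * the driver-core calls are hypothetical:
 *
 *	struct my_widget *w = kzalloc(sizeof(*w), GFP_KERNEL);
 *
 *	if (!w)
 *		return -ENOMEM;
 *	device_initialize(&w->dev);
 *	w->dev.parent = parent;
 *	w->dev.release = my_widget_release;
 *	dev_set_name(&w->dev, "widget%d", id);
 *	err = device_add(&w->dev);
 *	if (err) {
 *		put_device(&w->dev);	// never kfree() directly here
 *		return err;
 *	}
 *
 * The put_device() on the error path is what ends up calling
 * my_widget_release(), which is where the containing structure is freed.
 */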

struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}

struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release = class_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.child_ns_type = class_dir_child_ns_type
};

static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}

static DEFINE_MUTEX(gdp_mutex);

static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}

static inline bool live_in_glue_dir(struct kobject *kobj,
				    struct device *dev)
{
	if (!kobj || !dev->class ||
	    kobj->kset != &dev->class->p->glue_dirs)
		return false;
	return true;
}

static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}

/*
 * make sure cleaning up dir as the last step, we need to make
 * sure .release handler of kobject is run with holding the
 * global lock
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	if (!kobject_has_children(glue_dir))
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}

static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n", error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}

static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}

/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);

/**
 * device_to_dev_kobj - select a /sys/dev/ directory for the device
 * @dev: device
 *
 * By default we select char/ for new entries. Setting class->dev_kobj
 * to NULL prevents an entry from being created. class->dev_kobj must
 * be set (or cleared) before any devices are registered to the class
 * otherwise device_create_sys_dev_entry() and
 * device_remove_sys_dev_entry() will disagree about the presence of
 * the link.
 */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
	struct kobject *kobj;

	if (dev->class)
		kobj = dev->class->dev_kobj;
	else
		kobj = sysfs_dev_char_kobj;

	return kobj;
}

static int device_create_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	int error = 0;
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
	}

	return error;
}

static void device_remove_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		sysfs_remove_link(kobj, devt_str);
	}
}

static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}

/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called
 * separately _iff_ device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
 * to the global and sibling lists for the device, then
 * adds it to the other relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for
 * any device structure. The driver model core is not designed to work
 * with devices that get unregistered and then spring back to life.
 * (Among other things, it's very hard to guarantee that all references
 * to the previous incarnation of @dev have been dropped.) Allocate
 * and register a fresh new struct device instead.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name. We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	if (platform_notify)
		platform_notify(dev);

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition. This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);

/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if
 * you have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
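
/*
 * Illustrative sketch: device_register() collapses the two steps shown
 * above. Note the error handling demanded by the NOTE in the kerneldoc;
 * the surrounding names are hypothetical:
 *
 *	err = device_register(&w->dev);
 *	if (err) {
 *		put_device(&w->dev);
 *		return err;
 *	}
 *	...
 *	device_unregister(&w->dev);	// device_del() + put_device()
 */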
2037 * 2038 * NOTE: this should be called manually _iff_ device_add() was 2039 * also called manually. 2040 */ 2041 void device_del(struct device *dev) 2042 { 2043 struct device *parent = dev->parent; 2044 struct kobject *glue_dir = NULL; 2045 struct class_interface *class_intf; 2046 2047 /* Notify clients of device removal. This call must come 2048 * before dpm_sysfs_remove(). 2049 */ 2050 if (dev->bus) 2051 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2052 BUS_NOTIFY_DEL_DEVICE, dev); 2053 2054 dpm_sysfs_remove(dev); 2055 if (parent) 2056 klist_del(&dev->p->knode_parent); 2057 if (MAJOR(dev->devt)) { 2058 devtmpfs_delete_node(dev); 2059 device_remove_sys_dev_entry(dev); 2060 device_remove_file(dev, &dev_attr_dev); 2061 } 2062 if (dev->class) { 2063 device_remove_class_symlinks(dev); 2064 2065 mutex_lock(&dev->class->p->mutex); 2066 /* notify any interfaces that the device is now gone */ 2067 list_for_each_entry(class_intf, 2068 &dev->class->p->interfaces, node) 2069 if (class_intf->remove_dev) 2070 class_intf->remove_dev(dev, class_intf); 2071 /* remove the device from the class list */ 2072 klist_del(&dev->knode_class); 2073 mutex_unlock(&dev->class->p->mutex); 2074 } 2075 device_remove_file(dev, &dev_attr_uevent); 2076 device_remove_attrs(dev); 2077 bus_remove_device(dev); 2078 device_pm_remove(dev); 2079 driver_deferred_probe_del(dev); 2080 device_remove_properties(dev); 2081 device_links_purge(dev); 2082 2083 /* Notify the platform of the removal, in case they 2084 * need to do anything... 2085 */ 2086 if (platform_notify_remove) 2087 platform_notify_remove(dev); 2088 if (dev->bus) 2089 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2090 BUS_NOTIFY_REMOVED_DEVICE, dev); 2091 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 2092 glue_dir = get_glue_dir(dev); 2093 kobject_del(&dev->kobj); 2094 cleanup_glue_dir(dev, glue_dir); 2095 put_device(parent); 2096 } 2097 EXPORT_SYMBOL_GPL(device_del); 2098 2099 /** 2100 * device_unregister - unregister device from system. 2101 * @dev: device going away. 2102 * 2103 * We do this in two parts, like we do device_register(). First, 2104 * we remove it from all the subsystems with device_del(), then 2105 * we decrement the reference count via put_device(). If that 2106 * is the final reference count, the device will be cleaned up 2107 * via device_release() above. Otherwise, the structure will 2108 * stick around until the final reference to the device is dropped. 
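 *
 * A minimal usage sketch (illustrative only; "foo" is a made-up structure
 * that embeds a struct device and is not taken from this file):
 *
 *	err = device_register(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);
 *		return err;
 *	}
 *	...
 *	device_unregister(&foo->dev);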
2109 */ 2110 void device_unregister(struct device *dev) 2111 { 2112 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2113 device_del(dev); 2114 put_device(dev); 2115 } 2116 EXPORT_SYMBOL_GPL(device_unregister); 2117 2118 static struct device *prev_device(struct klist_iter *i) 2119 { 2120 struct klist_node *n = klist_prev(i); 2121 struct device *dev = NULL; 2122 struct device_private *p; 2123 2124 if (n) { 2125 p = to_device_private_parent(n); 2126 dev = p->device; 2127 } 2128 return dev; 2129 } 2130 2131 static struct device *next_device(struct klist_iter *i) 2132 { 2133 struct klist_node *n = klist_next(i); 2134 struct device *dev = NULL; 2135 struct device_private *p; 2136 2137 if (n) { 2138 p = to_device_private_parent(n); 2139 dev = p->device; 2140 } 2141 return dev; 2142 } 2143 2144 /** 2145 * device_get_devnode - path of device node file 2146 * @dev: device 2147 * @mode: returned file access mode 2148 * @uid: returned file owner 2149 * @gid: returned file group 2150 * @tmp: possibly allocated string 2151 * 2152 * Return the relative path of a possible device node. 2153 * Non-default names may need to allocate a memory to compose 2154 * a name. This memory is returned in tmp and needs to be 2155 * freed by the caller. 2156 */ 2157 const char *device_get_devnode(struct device *dev, 2158 umode_t *mode, kuid_t *uid, kgid_t *gid, 2159 const char **tmp) 2160 { 2161 char *s; 2162 2163 *tmp = NULL; 2164 2165 /* the device type may provide a specific name */ 2166 if (dev->type && dev->type->devnode) 2167 *tmp = dev->type->devnode(dev, mode, uid, gid); 2168 if (*tmp) 2169 return *tmp; 2170 2171 /* the class may provide a specific name */ 2172 if (dev->class && dev->class->devnode) 2173 *tmp = dev->class->devnode(dev, mode); 2174 if (*tmp) 2175 return *tmp; 2176 2177 /* return name without allocation, tmp == NULL */ 2178 if (strchr(dev_name(dev), '!') == NULL) 2179 return dev_name(dev); 2180 2181 /* replace '!' in the name with '/' */ 2182 s = kstrdup(dev_name(dev), GFP_KERNEL); 2183 if (!s) 2184 return NULL; 2185 strreplace(s, '!', '/'); 2186 return *tmp = s; 2187 } 2188 2189 /** 2190 * device_for_each_child - device child iterator. 2191 * @parent: parent struct device. 2192 * @fn: function to be called for each device. 2193 * @data: data for the callback. 2194 * 2195 * Iterate over @parent's child devices, and call @fn for each, 2196 * passing it @data. 2197 * 2198 * We check the return of @fn each time. If it returns anything 2199 * other than 0, we break out and return that value. 2200 */ 2201 int device_for_each_child(struct device *parent, void *data, 2202 int (*fn)(struct device *dev, void *data)) 2203 { 2204 struct klist_iter i; 2205 struct device *child; 2206 int error = 0; 2207 2208 if (!parent->p) 2209 return 0; 2210 2211 klist_iter_init(&parent->p->klist_children, &i); 2212 while (!error && (child = next_device(&i))) 2213 error = fn(child, data); 2214 klist_iter_exit(&i); 2215 return error; 2216 } 2217 EXPORT_SYMBOL_GPL(device_for_each_child); 2218 2219 /** 2220 * device_for_each_child_reverse - device child iterator in reversed order. 2221 * @parent: parent struct device. 2222 * @fn: function to be called for each device. 2223 * @data: data for the callback. 2224 * 2225 * Iterate over @parent's child devices, and call @fn for each, 2226 * passing it @data. 2227 * 2228 * We check the return of @fn each time. If it returns anything 2229 * other than 0, we break out and return that value. 
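 *
 * The callback contract is the same as for device_for_each_child(). As an
 * illustrative sketch (the helper below is hypothetical, not part of this
 * file), a caller could count the children of @parent like this:
 *
 *	static int count_child(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	device_for_each_child_reverse(parent, &n, count_child);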
2230 */ 2231 int device_for_each_child_reverse(struct device *parent, void *data, 2232 int (*fn)(struct device *dev, void *data)) 2233 { 2234 struct klist_iter i; 2235 struct device *child; 2236 int error = 0; 2237 2238 if (!parent->p) 2239 return 0; 2240 2241 klist_iter_init(&parent->p->klist_children, &i); 2242 while ((child = prev_device(&i)) && !error) 2243 error = fn(child, data); 2244 klist_iter_exit(&i); 2245 return error; 2246 } 2247 EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 2248 2249 /** 2250 * device_find_child - device iterator for locating a particular device. 2251 * @parent: parent struct device 2252 * @match: Callback function to check device 2253 * @data: Data to pass to match function 2254 * 2255 * This is similar to the device_for_each_child() function above, but it 2256 * returns a reference to a device that is 'found' for later use, as 2257 * determined by the @match callback. 2258 * 2259 * The callback should return 0 if the device doesn't match and non-zero 2260 * if it does. If the callback returns non-zero and a reference to the 2261 * current device can be obtained, this function will return to the caller 2262 * and not iterate over any more devices. 2263 * 2264 * NOTE: you will need to drop the reference with put_device() after use. 2265 */ 2266 struct device *device_find_child(struct device *parent, void *data, 2267 int (*match)(struct device *dev, void *data)) 2268 { 2269 struct klist_iter i; 2270 struct device *child; 2271 2272 if (!parent) 2273 return NULL; 2274 2275 klist_iter_init(&parent->p->klist_children, &i); 2276 while ((child = next_device(&i))) 2277 if (match(child, data) && get_device(child)) 2278 break; 2279 klist_iter_exit(&i); 2280 return child; 2281 } 2282 EXPORT_SYMBOL_GPL(device_find_child); 2283 2284 int __init devices_init(void) 2285 { 2286 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); 2287 if (!devices_kset) 2288 return -ENOMEM; 2289 dev_kobj = kobject_create_and_add("dev", NULL); 2290 if (!dev_kobj) 2291 goto dev_kobj_err; 2292 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); 2293 if (!sysfs_dev_block_kobj) 2294 goto block_kobj_err; 2295 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); 2296 if (!sysfs_dev_char_kobj) 2297 goto char_kobj_err; 2298 2299 return 0; 2300 2301 char_kobj_err: 2302 kobject_put(sysfs_dev_block_kobj); 2303 block_kobj_err: 2304 kobject_put(dev_kobj); 2305 dev_kobj_err: 2306 kset_unregister(devices_kset); 2307 return -ENOMEM; 2308 } 2309 2310 static int device_check_offline(struct device *dev, void *not_used) 2311 { 2312 int ret; 2313 2314 ret = device_for_each_child(dev, NULL, device_check_offline); 2315 if (ret) 2316 return ret; 2317 2318 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; 2319 } 2320 2321 /** 2322 * device_offline - Prepare the device for hot-removal. 2323 * @dev: Device to be put offline. 2324 * 2325 * Execute the device bus type's .offline() callback, if present, to prepare 2326 * the device for a subsequent hot-removal. If that succeeds, the device must 2327 * not be used until either it is removed or its bus type's .online() callback 2328 * is executed. 2329 * 2330 * Call under device_hotplug_lock. 
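 * A return value of 1 means the device was already offline, and a negative
 * value indicates an error. For instance (illustrative sketch):
 *
 *	lock_device_hotplug();
 *	ret = device_offline(dev);
 *	unlock_device_hotplug();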
2331 */ 2332 int device_offline(struct device *dev) 2333 { 2334 int ret; 2335 2336 if (dev->offline_disabled) 2337 return -EPERM; 2338 2339 ret = device_for_each_child(dev, NULL, device_check_offline); 2340 if (ret) 2341 return ret; 2342 2343 device_lock(dev); 2344 if (device_supports_offline(dev)) { 2345 if (dev->offline) { 2346 ret = 1; 2347 } else { 2348 ret = dev->bus->offline(dev); 2349 if (!ret) { 2350 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 2351 dev->offline = true; 2352 } 2353 } 2354 } 2355 device_unlock(dev); 2356 2357 return ret; 2358 } 2359 2360 /** 2361 * device_online - Put the device back online after successful device_offline(). 2362 * @dev: Device to be put back online. 2363 * 2364 * If device_offline() has been successfully executed for @dev, but the device 2365 * has not been removed subsequently, execute its bus type's .online() callback 2366 * to indicate that the device can be used again. 2367 * 2368 * Call under device_hotplug_lock. 2369 */ 2370 int device_online(struct device *dev) 2371 { 2372 int ret = 0; 2373 2374 device_lock(dev); 2375 if (device_supports_offline(dev)) { 2376 if (dev->offline) { 2377 ret = dev->bus->online(dev); 2378 if (!ret) { 2379 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2380 dev->offline = false; 2381 } 2382 } else { 2383 ret = 1; 2384 } 2385 } 2386 device_unlock(dev); 2387 2388 return ret; 2389 } 2390 2391 struct root_device { 2392 struct device dev; 2393 struct module *owner; 2394 }; 2395 2396 static inline struct root_device *to_root_device(struct device *d) 2397 { 2398 return container_of(d, struct root_device, dev); 2399 } 2400 2401 static void root_device_release(struct device *dev) 2402 { 2403 kfree(to_root_device(dev)); 2404 } 2405 2406 /** 2407 * __root_device_register - allocate and register a root device 2408 * @name: root device name 2409 * @owner: owner module of the root device, usually THIS_MODULE 2410 * 2411 * This function allocates a root device and registers it 2412 * using device_register(). In order to free the returned 2413 * device, use root_device_unregister(). 2414 * 2415 * Root devices are dummy devices which allow other devices 2416 * to be grouped under /sys/devices. Use this function to 2417 * allocate a root device and then use it as the parent of 2418 * any device which should appear under /sys/devices/{name} 2419 * 2420 * The /sys/devices/{name} directory will also contain a 2421 * 'module' symlink which points to the @owner directory 2422 * in sysfs. 2423 * 2424 * Returns &struct device pointer on success, or ERR_PTR() on error. 2425 * 2426 * Note: You probably want to use root_device_register(). 
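 * For example (illustrative sketch; "my_root" is just a placeholder name):
 *
 *	root = root_device_register("my_root");
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);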
2427 */ 2428 struct device *__root_device_register(const char *name, struct module *owner) 2429 { 2430 struct root_device *root; 2431 int err = -ENOMEM; 2432 2433 root = kzalloc(sizeof(struct root_device), GFP_KERNEL); 2434 if (!root) 2435 return ERR_PTR(err); 2436 2437 err = dev_set_name(&root->dev, "%s", name); 2438 if (err) { 2439 kfree(root); 2440 return ERR_PTR(err); 2441 } 2442 2443 root->dev.release = root_device_release; 2444 2445 err = device_register(&root->dev); 2446 if (err) { 2447 put_device(&root->dev); 2448 return ERR_PTR(err); 2449 } 2450 2451 #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ 2452 if (owner) { 2453 struct module_kobject *mk = &owner->mkobj; 2454 2455 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); 2456 if (err) { 2457 device_unregister(&root->dev); 2458 return ERR_PTR(err); 2459 } 2460 root->owner = owner; 2461 } 2462 #endif 2463 2464 return &root->dev; 2465 } 2466 EXPORT_SYMBOL_GPL(__root_device_register); 2467 2468 /** 2469 * root_device_unregister - unregister and free a root device 2470 * @dev: device going away 2471 * 2472 * This function unregisters and cleans up a device that was created by 2473 * root_device_register(). 2474 */ 2475 void root_device_unregister(struct device *dev) 2476 { 2477 struct root_device *root = to_root_device(dev); 2478 2479 if (root->owner) 2480 sysfs_remove_link(&root->dev.kobj, "module"); 2481 2482 device_unregister(dev); 2483 } 2484 EXPORT_SYMBOL_GPL(root_device_unregister); 2485 2486 2487 static void device_create_release(struct device *dev) 2488 { 2489 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2490 kfree(dev); 2491 } 2492 2493 static __printf(6, 0) struct device * 2494 device_create_groups_vargs(struct class *class, struct device *parent, 2495 dev_t devt, void *drvdata, 2496 const struct attribute_group **groups, 2497 const char *fmt, va_list args) 2498 { 2499 struct device *dev = NULL; 2500 int retval = -ENODEV; 2501 2502 if (class == NULL || IS_ERR(class)) 2503 goto error; 2504 2505 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2506 if (!dev) { 2507 retval = -ENOMEM; 2508 goto error; 2509 } 2510 2511 device_initialize(dev); 2512 dev->devt = devt; 2513 dev->class = class; 2514 dev->parent = parent; 2515 dev->groups = groups; 2516 dev->release = device_create_release; 2517 dev_set_drvdata(dev, drvdata); 2518 2519 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 2520 if (retval) 2521 goto error; 2522 2523 retval = device_add(dev); 2524 if (retval) 2525 goto error; 2526 2527 return dev; 2528 2529 error: 2530 put_device(dev); 2531 return ERR_PTR(retval); 2532 } 2533 2534 /** 2535 * device_create_vargs - creates a device and registers it with sysfs 2536 * @class: pointer to the struct class that this device should be registered to 2537 * @parent: pointer to the parent struct device of this new device, if any 2538 * @devt: the dev_t for the char device to be added 2539 * @drvdata: the data to be added to the device for callbacks 2540 * @fmt: string for the device's name 2541 * @args: va_list for the device's name 2542 * 2543 * This function can be used by char device classes. A struct device 2544 * will be created in sysfs, registered to the specified class. 2545 * 2546 * A "dev" file will be created, showing the dev_t for the device, if 2547 * the dev_t is not 0,0. 2548 * If a pointer to a parent struct device is passed in, the newly created 2549 * struct device will be a child of that device in sysfs. 2550 * The pointer to the struct device will be returned from the call. 
2551 * Any further sysfs files that might be required can be created using this 2552 * pointer. 2553 * 2554 * Returns &struct device pointer on success, or ERR_PTR() on error. 2555 * 2556 * Note: the struct class passed to this function must have previously 2557 * been created with a call to class_create(). 2558 */ 2559 struct device *device_create_vargs(struct class *class, struct device *parent, 2560 dev_t devt, void *drvdata, const char *fmt, 2561 va_list args) 2562 { 2563 return device_create_groups_vargs(class, parent, devt, drvdata, NULL, 2564 fmt, args); 2565 } 2566 EXPORT_SYMBOL_GPL(device_create_vargs); 2567 2568 /** 2569 * device_create - creates a device and registers it with sysfs 2570 * @class: pointer to the struct class that this device should be registered to 2571 * @parent: pointer to the parent struct device of this new device, if any 2572 * @devt: the dev_t for the char device to be added 2573 * @drvdata: the data to be added to the device for callbacks 2574 * @fmt: string for the device's name 2575 * 2576 * This function can be used by char device classes. A struct device 2577 * will be created in sysfs, registered to the specified class. 2578 * 2579 * A "dev" file will be created, showing the dev_t for the device, if 2580 * the dev_t is not 0,0. 2581 * If a pointer to a parent struct device is passed in, the newly created 2582 * struct device will be a child of that device in sysfs. 2583 * The pointer to the struct device will be returned from the call. 2584 * Any further sysfs files that might be required can be created using this 2585 * pointer. 2586 * 2587 * Returns &struct device pointer on success, or ERR_PTR() on error. 2588 * 2589 * Note: the struct class passed to this function must have previously 2590 * been created with a call to class_create(). 2591 */ 2592 struct device *device_create(struct class *class, struct device *parent, 2593 dev_t devt, void *drvdata, const char *fmt, ...) 2594 { 2595 va_list vargs; 2596 struct device *dev; 2597 2598 va_start(vargs, fmt); 2599 dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs); 2600 va_end(vargs); 2601 return dev; 2602 } 2603 EXPORT_SYMBOL_GPL(device_create); 2604 2605 /** 2606 * device_create_with_groups - creates a device and registers it with sysfs 2607 * @class: pointer to the struct class that this device should be registered to 2608 * @parent: pointer to the parent struct device of this new device, if any 2609 * @devt: the dev_t for the char device to be added 2610 * @drvdata: the data to be added to the device for callbacks 2611 * @groups: NULL-terminated list of attribute groups to be created 2612 * @fmt: string for the device's name 2613 * 2614 * This function can be used by char device classes. A struct device 2615 * will be created in sysfs, registered to the specified class. 2616 * Additional attributes specified in the groups parameter will also 2617 * be created automatically. 2618 * 2619 * A "dev" file will be created, showing the dev_t for the device, if 2620 * the dev_t is not 0,0. 2621 * If a pointer to a parent struct device is passed in, the newly created 2622 * struct device will be a child of that device in sysfs. 2623 * The pointer to the struct device will be returned from the call. 2624 * Any further sysfs files that might be required can be created using this 2625 * pointer. 2626 * 2627 * Returns &struct device pointer on success, or ERR_PTR() on error. 
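 *
 * Example (illustrative sketch; foo_class, foo_groups, foo and the name
 * format are assumptions, not taken from this file):
 *
 *	dev = device_create_with_groups(foo_class, parent, devt, foo,
 *					foo_groups, "foo%d", foo->id);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);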
2628 * 2629 * Note: the struct class passed to this function must have previously 2630 * been created with a call to class_create(). 2631 */ 2632 struct device *device_create_with_groups(struct class *class, 2633 struct device *parent, dev_t devt, 2634 void *drvdata, 2635 const struct attribute_group **groups, 2636 const char *fmt, ...) 2637 { 2638 va_list vargs; 2639 struct device *dev; 2640 2641 va_start(vargs, fmt); 2642 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, 2643 fmt, vargs); 2644 va_end(vargs); 2645 return dev; 2646 } 2647 EXPORT_SYMBOL_GPL(device_create_with_groups); 2648 2649 static int __match_devt(struct device *dev, const void *data) 2650 { 2651 const dev_t *devt = data; 2652 2653 return dev->devt == *devt; 2654 } 2655 2656 /** 2657 * device_destroy - removes a device that was created with device_create() 2658 * @class: pointer to the struct class that this device was registered with 2659 * @devt: the dev_t of the device that was previously registered 2660 * 2661 * This call unregisters and cleans up a device that was created with a 2662 * call to device_create(). 2663 */ 2664 void device_destroy(struct class *class, dev_t devt) 2665 { 2666 struct device *dev; 2667 2668 dev = class_find_device(class, NULL, &devt, __match_devt); 2669 if (dev) { 2670 put_device(dev); 2671 device_unregister(dev); 2672 } 2673 } 2674 EXPORT_SYMBOL_GPL(device_destroy); 2675 2676 /** 2677 * device_rename - renames a device 2678 * @dev: the pointer to the struct device to be renamed 2679 * @new_name: the new name of the device 2680 * 2681 * It is the responsibility of the caller to provide mutual 2682 * exclusion between two different calls of device_rename 2683 * on the same device to ensure that new_name is valid and 2684 * won't conflict with other devices. 2685 * 2686 * Note: Don't call this function. Currently, the networking layer calls this 2687 * function, but that will change. The following text from Kay Sievers offers 2688 * some insight: 2689 * 2690 * Renaming devices is racy at many levels, symlinks and other stuff are not 2691 * replaced atomically, and you get a "move" uevent, but it's not easy to 2692 * connect the event to the old and new device. Device nodes are not renamed at 2693 * all, there isn't even support for that in the kernel now. 2694 * 2695 * In the meantime, during renaming, your target name might be taken by another 2696 * driver, creating conflicts. Or the old name is taken directly after you 2697 * renamed it -- then you get events for the same DEVPATH, before you even see 2698 * the "move" event. It's just a mess, and nothing new should ever rely on 2699 * kernel device renaming. Besides that, it's not even implemented now for 2700 * other things than (driver-core wise very simple) network devices. 2701 * 2702 * We are currently about to change network renaming in udev to completely 2703 * disallow renaming of devices in the same namespace as the kernel uses, 2704 * because we can't solve the problems properly, that arise with swapping names 2705 * of multiple interfaces without races. Means, renaming of eth[0-9]* will only 2706 * be allowed to some other name than eth[0-9]*, for the aforementioned 2707 * reasons. 2708 * 2709 * Make up a "real" name in the driver before you register anything, or add 2710 * some other attributes for userspace to find the device, or use udev to add 2711 * symlinks -- but never rename kernel devices later, it's a complete mess. 
We 2712 * don't even want to get into that and try to implement the missing pieces in 2713 * the core. We really have other pieces to fix in the driver core mess. :) 2714 */ 2715 int device_rename(struct device *dev, const char *new_name) 2716 { 2717 struct kobject *kobj = &dev->kobj; 2718 char *old_device_name = NULL; 2719 int error; 2720 2721 dev = get_device(dev); 2722 if (!dev) 2723 return -EINVAL; 2724 2725 dev_dbg(dev, "renaming to %s\n", new_name); 2726 2727 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); 2728 if (!old_device_name) { 2729 error = -ENOMEM; 2730 goto out; 2731 } 2732 2733 if (dev->class) { 2734 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, 2735 kobj, old_device_name, 2736 new_name, kobject_namespace(kobj)); 2737 if (error) 2738 goto out; 2739 } 2740 2741 error = kobject_rename(kobj, new_name); 2742 if (error) 2743 goto out; 2744 2745 out: 2746 put_device(dev); 2747 2748 kfree(old_device_name); 2749 2750 return error; 2751 } 2752 EXPORT_SYMBOL_GPL(device_rename); 2753 2754 static int device_move_class_links(struct device *dev, 2755 struct device *old_parent, 2756 struct device *new_parent) 2757 { 2758 int error = 0; 2759 2760 if (old_parent) 2761 sysfs_remove_link(&dev->kobj, "device"); 2762 if (new_parent) 2763 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, 2764 "device"); 2765 return error; 2766 } 2767 2768 /** 2769 * device_move - moves a device to a new parent 2770 * @dev: the pointer to the struct device to be moved 2771 * @new_parent: the new parent of the device (can be NULL) 2772 * @dpm_order: how to reorder the dpm_list 2773 */ 2774 int device_move(struct device *dev, struct device *new_parent, 2775 enum dpm_order dpm_order) 2776 { 2777 int error; 2778 struct device *old_parent; 2779 struct kobject *new_parent_kobj; 2780 2781 dev = get_device(dev); 2782 if (!dev) 2783 return -EINVAL; 2784 2785 device_pm_lock(); 2786 new_parent = get_device(new_parent); 2787 new_parent_kobj = get_device_parent(dev, new_parent); 2788 if (IS_ERR(new_parent_kobj)) { 2789 error = PTR_ERR(new_parent_kobj); 2790 put_device(new_parent); 2791 goto out; 2792 } 2793 2794 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), 2795 __func__, new_parent ? dev_name(new_parent) : "<NULL>"); 2796 error = kobject_move(&dev->kobj, new_parent_kobj); 2797 if (error) { 2798 cleanup_glue_dir(dev, new_parent_kobj); 2799 put_device(new_parent); 2800 goto out; 2801 } 2802 old_parent = dev->parent; 2803 dev->parent = new_parent; 2804 if (old_parent) 2805 klist_remove(&dev->p->knode_parent); 2806 if (new_parent) { 2807 klist_add_tail(&dev->p->knode_parent, 2808 &new_parent->p->klist_children); 2809 set_dev_node(dev, dev_to_node(new_parent)); 2810 } 2811 2812 if (dev->class) { 2813 error = device_move_class_links(dev, old_parent, new_parent); 2814 if (error) { 2815 /* We ignore errors on cleanup since we're hosed anyway... 
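			 * The calls below are best effort: they try to restore
			 * the old class links, kobject position and parent
			 * linkage so the device is left in a consistent state
			 * before bailing out.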
*/ 2816 device_move_class_links(dev, new_parent, old_parent); 2817 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 2818 if (new_parent) 2819 klist_remove(&dev->p->knode_parent); 2820 dev->parent = old_parent; 2821 if (old_parent) { 2822 klist_add_tail(&dev->p->knode_parent, 2823 &old_parent->p->klist_children); 2824 set_dev_node(dev, dev_to_node(old_parent)); 2825 } 2826 } 2827 cleanup_glue_dir(dev, new_parent_kobj); 2828 put_device(new_parent); 2829 goto out; 2830 } 2831 } 2832 switch (dpm_order) { 2833 case DPM_ORDER_NONE: 2834 break; 2835 case DPM_ORDER_DEV_AFTER_PARENT: 2836 device_pm_move_after(dev, new_parent); 2837 devices_kset_move_after(dev, new_parent); 2838 break; 2839 case DPM_ORDER_PARENT_BEFORE_DEV: 2840 device_pm_move_before(new_parent, dev); 2841 devices_kset_move_before(new_parent, dev); 2842 break; 2843 case DPM_ORDER_DEV_LAST: 2844 device_pm_move_last(dev); 2845 devices_kset_move_last(dev); 2846 break; 2847 } 2848 2849 put_device(old_parent); 2850 out: 2851 device_pm_unlock(); 2852 put_device(dev); 2853 return error; 2854 } 2855 EXPORT_SYMBOL_GPL(device_move); 2856 2857 /** 2858 * device_shutdown - call ->shutdown() on each device to shutdown. 2859 */ 2860 void device_shutdown(void) 2861 { 2862 struct device *dev, *parent; 2863 2864 wait_for_device_probe(); 2865 device_block_probing(); 2866 2867 spin_lock(&devices_kset->list_lock); 2868 /* 2869 * Walk the devices list backward, shutting down each in turn. 2870 * Beware that device unplug events may also start pulling 2871 * devices offline, even as the system is shutting down. 2872 */ 2873 while (!list_empty(&devices_kset->list)) { 2874 dev = list_entry(devices_kset->list.prev, struct device, 2875 kobj.entry); 2876 2877 /* 2878 * hold reference count of device's parent to 2879 * prevent it from being freed because parent's 2880 * lock is to be held 2881 */ 2882 parent = get_device(dev->parent); 2883 get_device(dev); 2884 /* 2885 * Make sure the device is off the kset list, in the 2886 * event that dev->*->shutdown() doesn't remove it. 
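		 * Otherwise the loop above would pick up the same device
		 * again and never terminate.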
2887 */ 2888 list_del_init(&dev->kobj.entry); 2889 spin_unlock(&devices_kset->list_lock); 2890 2891 /* hold lock to avoid race with probe/release */ 2892 if (parent) 2893 device_lock(parent); 2894 device_lock(dev); 2895 2896 /* Don't allow any more runtime suspends */ 2897 pm_runtime_get_noresume(dev); 2898 pm_runtime_barrier(dev); 2899 2900 if (dev->class && dev->class->shutdown_pre) { 2901 if (initcall_debug) 2902 dev_info(dev, "shutdown_pre\n"); 2903 dev->class->shutdown_pre(dev); 2904 } 2905 if (dev->bus && dev->bus->shutdown) { 2906 if (initcall_debug) 2907 dev_info(dev, "shutdown\n"); 2908 dev->bus->shutdown(dev); 2909 } else if (dev->driver && dev->driver->shutdown) { 2910 if (initcall_debug) 2911 dev_info(dev, "shutdown\n"); 2912 dev->driver->shutdown(dev); 2913 } 2914 2915 device_unlock(dev); 2916 if (parent) 2917 device_unlock(parent); 2918 2919 put_device(dev); 2920 put_device(parent); 2921 2922 spin_lock(&devices_kset->list_lock); 2923 } 2924 spin_unlock(&devices_kset->list_lock); 2925 } 2926 2927 /* 2928 * Device logging functions 2929 */ 2930 2931 #ifdef CONFIG_PRINTK 2932 static int 2933 create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen) 2934 { 2935 const char *subsys; 2936 size_t pos = 0; 2937 2938 if (dev->class) 2939 subsys = dev->class->name; 2940 else if (dev->bus) 2941 subsys = dev->bus->name; 2942 else 2943 return 0; 2944 2945 pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys); 2946 if (pos >= hdrlen) 2947 goto overflow; 2948 2949 /* 2950 * Add device identifier DEVICE=: 2951 * b12:8 block dev_t 2952 * c127:3 char dev_t 2953 * n8 netdev ifindex 2954 * +sound:card0 subsystem:devname 2955 */ 2956 if (MAJOR(dev->devt)) { 2957 char c; 2958 2959 if (strcmp(subsys, "block") == 0) 2960 c = 'b'; 2961 else 2962 c = 'c'; 2963 pos++; 2964 pos += snprintf(hdr + pos, hdrlen - pos, 2965 "DEVICE=%c%u:%u", 2966 c, MAJOR(dev->devt), MINOR(dev->devt)); 2967 } else if (strcmp(subsys, "net") == 0) { 2968 struct net_device *net = to_net_dev(dev); 2969 2970 pos++; 2971 pos += snprintf(hdr + pos, hdrlen - pos, 2972 "DEVICE=n%u", net->ifindex); 2973 } else { 2974 pos++; 2975 pos += snprintf(hdr + pos, hdrlen - pos, 2976 "DEVICE=+%s:%s", subsys, dev_name(dev)); 2977 } 2978 2979 if (pos >= hdrlen) 2980 goto overflow; 2981 2982 return pos; 2983 2984 overflow: 2985 dev_WARN(dev, "device/subsystem name too long"); 2986 return 0; 2987 } 2988 2989 int dev_vprintk_emit(int level, const struct device *dev, 2990 const char *fmt, va_list args) 2991 { 2992 char hdr[128]; 2993 size_t hdrlen; 2994 2995 hdrlen = create_syslog_header(dev, hdr, sizeof(hdr)); 2996 2997 return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args); 2998 } 2999 EXPORT_SYMBOL(dev_vprintk_emit); 3000 3001 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 3002 { 3003 va_list args; 3004 int r; 3005 3006 va_start(args, fmt); 3007 3008 r = dev_vprintk_emit(level, dev, fmt, args); 3009 3010 va_end(args); 3011 3012 return r; 3013 } 3014 EXPORT_SYMBOL(dev_printk_emit); 3015 3016 static void __dev_printk(const char *level, const struct device *dev, 3017 struct va_format *vaf) 3018 { 3019 if (dev) 3020 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", 3021 dev_driver_string(dev), dev_name(dev), vaf); 3022 else 3023 printk("%s(NULL device *): %pV", level, vaf); 3024 } 3025 3026 void dev_printk(const char *level, const struct device *dev, 3027 const char *fmt, ...) 
3028 { 3029 struct va_format vaf; 3030 va_list args; 3031 3032 va_start(args, fmt); 3033 3034 vaf.fmt = fmt; 3035 vaf.va = &args; 3036 3037 __dev_printk(level, dev, &vaf); 3038 3039 va_end(args); 3040 } 3041 EXPORT_SYMBOL(dev_printk); 3042 3043 #define define_dev_printk_level(func, kern_level) \ 3044 void func(const struct device *dev, const char *fmt, ...) \ 3045 { \ 3046 struct va_format vaf; \ 3047 va_list args; \ 3048 \ 3049 va_start(args, fmt); \ 3050 \ 3051 vaf.fmt = fmt; \ 3052 vaf.va = &args; \ 3053 \ 3054 __dev_printk(kern_level, dev, &vaf); \ 3055 \ 3056 va_end(args); \ 3057 } \ 3058 EXPORT_SYMBOL(func); 3059 3060 define_dev_printk_level(_dev_emerg, KERN_EMERG); 3061 define_dev_printk_level(_dev_alert, KERN_ALERT); 3062 define_dev_printk_level(_dev_crit, KERN_CRIT); 3063 define_dev_printk_level(_dev_err, KERN_ERR); 3064 define_dev_printk_level(_dev_warn, KERN_WARNING); 3065 define_dev_printk_level(_dev_notice, KERN_NOTICE); 3066 define_dev_printk_level(_dev_info, KERN_INFO); 3067 3068 #endif 3069 3070 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) 3071 { 3072 return fwnode && !IS_ERR(fwnode->secondary); 3073 } 3074 3075 /** 3076 * set_primary_fwnode - Change the primary firmware node of a given device. 3077 * @dev: Device to handle. 3078 * @fwnode: New primary firmware node of the device. 3079 * 3080 * Set the device's firmware node pointer to @fwnode, but if a secondary 3081 * firmware node of the device is present, preserve it. 3082 */ 3083 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3084 { 3085 if (fwnode) { 3086 struct fwnode_handle *fn = dev->fwnode; 3087 3088 if (fwnode_is_primary(fn)) 3089 fn = fn->secondary; 3090 3091 if (fn) { 3092 WARN_ON(fwnode->secondary); 3093 fwnode->secondary = fn; 3094 } 3095 dev->fwnode = fwnode; 3096 } else { 3097 dev->fwnode = fwnode_is_primary(dev->fwnode) ? 3098 dev->fwnode->secondary : NULL; 3099 } 3100 } 3101 EXPORT_SYMBOL_GPL(set_primary_fwnode); 3102 3103 /** 3104 * set_secondary_fwnode - Change the secondary firmware node of a given device. 3105 * @dev: Device to handle. 3106 * @fwnode: New secondary firmware node of the device. 3107 * 3108 * If a primary firmware node of the device is present, set its secondary 3109 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to 3110 * @fwnode. 3111 */ 3112 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3113 { 3114 if (fwnode) 3115 fwnode->secondary = ERR_PTR(-ENODEV); 3116 3117 if (fwnode_is_primary(dev->fwnode)) 3118 dev->fwnode->secondary = fwnode; 3119 else 3120 dev->fwnode = fwnode; 3121 } 3122 3123 /** 3124 * device_set_of_node_from_dev - reuse device-tree node of another device 3125 * @dev: device whose device-tree node is being set 3126 * @dev2: device whose device-tree node is being reused 3127 * 3128 * Takes another reference to the new device-tree node after first dropping 3129 * any reference held to the old node. 3130 */ 3131 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) 3132 { 3133 of_node_put(dev->of_node); 3134 dev->of_node = of_node_get(dev2->of_node); 3135 dev->of_node_reused = true; 3136 } 3137 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); 3138
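/*
 * Example use of device_set_of_node_from_dev() (illustrative sketch; the
 * parent/child relationship and the probe function below are assumptions,
 * not taken from this file). A sub-device driver may reuse its parent's
 * device-tree node like this:
 *
 *	static int foo_child_probe(struct platform_device *pdev)
 *	{
 *		device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
 *		return 0;
 *	}
 */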