1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * drivers/base/core.c - core driver model code (device registration, etc) 4 * 5 * Copyright (c) 2002-3 Patrick Mochel 6 * Copyright (c) 2002-3 Open Source Development Labs 7 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de> 8 * Copyright (c) 2006 Novell, Inc. 9 */ 10 11 #include <linux/acpi.h> 12 #include <linux/device.h> 13 #include <linux/err.h> 14 #include <linux/fwnode.h> 15 #include <linux/init.h> 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/string.h> 19 #include <linux/kdev_t.h> 20 #include <linux/notifier.h> 21 #include <linux/of.h> 22 #include <linux/of_device.h> 23 #include <linux/genhd.h> 24 #include <linux/mutex.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/netdevice.h> 27 #include <linux/sched/signal.h> 28 #include <linux/sysfs.h> 29 30 #include "base.h" 31 #include "power/power.h" 32 33 #ifdef CONFIG_SYSFS_DEPRECATED 34 #ifdef CONFIG_SYSFS_DEPRECATED_V2 35 long sysfs_deprecated = 1; 36 #else 37 long sysfs_deprecated = 0; 38 #endif 39 static int __init sysfs_deprecated_setup(char *arg) 40 { 41 return kstrtol(arg, 10, &sysfs_deprecated); 42 } 43 early_param("sysfs.deprecated", sysfs_deprecated_setup); 44 #endif 45 46 /* Device links support. */ 47 48 #ifdef CONFIG_SRCU 49 static DEFINE_MUTEX(device_links_lock); 50 DEFINE_STATIC_SRCU(device_links_srcu); 51 52 static inline void device_links_write_lock(void) 53 { 54 mutex_lock(&device_links_lock); 55 } 56 57 static inline void device_links_write_unlock(void) 58 { 59 mutex_unlock(&device_links_lock); 60 } 61 62 int device_links_read_lock(void) 63 { 64 return srcu_read_lock(&device_links_srcu); 65 } 66 67 void device_links_read_unlock(int idx) 68 { 69 srcu_read_unlock(&device_links_srcu, idx); 70 } 71 #else /* !CONFIG_SRCU */ 72 static DECLARE_RWSEM(device_links_lock); 73 74 static inline void device_links_write_lock(void) 75 { 76 down_write(&device_links_lock); 77 } 78 79 static inline void device_links_write_unlock(void) 80 { 81 up_write(&device_links_lock); 82 } 83 84 int device_links_read_lock(void) 85 { 86 down_read(&device_links_lock); 87 return 0; 88 } 89 90 void device_links_read_unlock(int not_used) 91 { 92 up_read(&device_links_lock); 93 } 94 #endif /* !CONFIG_SRCU */ 95 96 /** 97 * device_is_dependent - Check if one device depends on another one 98 * @dev: Device to check dependencies for. 99 * @target: Device to check against. 100 * 101 * Check if @target depends on @dev or any device dependent on it (its child or 102 * its consumer etc). Return 1 if that is the case or 0 otherwise. 103 */ 104 static int device_is_dependent(struct device *dev, void *target) 105 { 106 struct device_link *link; 107 int ret; 108 109 if (dev == target) 110 return 1; 111 112 ret = device_for_each_child(dev, target, device_is_dependent); 113 if (ret) 114 return ret; 115 116 list_for_each_entry(link, &dev->links.consumers, s_node) { 117 if (link->consumer == target) 118 return 1; 119 120 ret = device_is_dependent(link->consumer, target); 121 if (ret) 122 break; 123 } 124 return ret; 125 } 126 127 static int device_reorder_to_tail(struct device *dev, void *not_used) 128 { 129 struct device_link *link; 130 131 /* 132 * Devices that have not been registered yet will be put to the ends 133 * of the lists during the registration, so skip them here. 
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_reorder_to_tail(link->consumer, NULL);

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier device will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by
 * the driver core and, in particular, the caller of this function is expected
 * to drop the reference to the link acquired by it directly.
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present. In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted. Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point. If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
 * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
 * will cause NULL to be returned upfront.
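 *
 * As an illustrative sketch (not part of this file; pdev and supplier_dev
 * stand for a consumer platform device and its already-registered supplier),
 * a consumer driver that wants runtime PM to follow the link and the link to
 * go away when it unbinds could do, from its probe path:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(&pdev->dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
 *	if (!link)
 *		return -ENODEV;
 *
 * Such a managed link is dropped by the driver core when the consumer driver
 * unbinds; a DL_FLAG_STATELESS link, by contrast, must be dropped explicitly
 * with device_link_del() or device_link_remove().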
207 * 208 * A side effect of the link creation is re-ordering of dpm_list and the 209 * devices_kset list by moving the consumer device and all devices depending 210 * on it to the ends of these lists (that does not happen to devices that have 211 * not been registered when this function is called). 212 * 213 * The supplier device is required to be registered when this function is called 214 * and NULL will be returned if that is not the case. The consumer device need 215 * not be registered, however. 216 */ 217 struct device_link *device_link_add(struct device *consumer, 218 struct device *supplier, u32 flags) 219 { 220 struct device_link *link; 221 222 if (!consumer || !supplier || 223 (flags & DL_FLAG_STATELESS && 224 flags & (DL_FLAG_AUTOREMOVE_CONSUMER | 225 DL_FLAG_AUTOREMOVE_SUPPLIER | 226 DL_FLAG_AUTOPROBE_CONSUMER)) || 227 (flags & DL_FLAG_AUTOPROBE_CONSUMER && 228 flags & (DL_FLAG_AUTOREMOVE_CONSUMER | 229 DL_FLAG_AUTOREMOVE_SUPPLIER))) 230 return NULL; 231 232 if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { 233 if (pm_runtime_get_sync(supplier) < 0) { 234 pm_runtime_put_noidle(supplier); 235 return NULL; 236 } 237 } 238 239 device_links_write_lock(); 240 device_pm_lock(); 241 242 /* 243 * If the supplier has not been fully registered yet or there is a 244 * reverse dependency between the consumer and the supplier already in 245 * the graph, return NULL. 246 */ 247 if (!device_pm_initialized(supplier) 248 || device_is_dependent(consumer, supplier)) { 249 link = NULL; 250 goto out; 251 } 252 253 /* 254 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed 255 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both 256 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. 257 */ 258 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 259 flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; 260 261 list_for_each_entry(link, &supplier->links.consumers, s_node) { 262 if (link->consumer != consumer) 263 continue; 264 265 /* 266 * Don't return a stateless link if the caller wants a stateful 267 * one and vice versa. 268 */ 269 if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) { 270 link = NULL; 271 goto out; 272 } 273 274 if (flags & DL_FLAG_PM_RUNTIME) { 275 if (!(link->flags & DL_FLAG_PM_RUNTIME)) { 276 pm_runtime_new_link(consumer); 277 link->flags |= DL_FLAG_PM_RUNTIME; 278 } 279 if (flags & DL_FLAG_RPM_ACTIVE) 280 refcount_inc(&link->rpm_active); 281 } 282 283 if (flags & DL_FLAG_STATELESS) { 284 kref_get(&link->kref); 285 goto out; 286 } 287 288 /* 289 * If the life time of the link following from the new flags is 290 * longer than indicated by the flags of the existing link, 291 * update the existing link to stay around longer. 
292 */ 293 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { 294 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { 295 link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; 296 link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; 297 } 298 } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { 299 link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | 300 DL_FLAG_AUTOREMOVE_SUPPLIER); 301 } 302 goto out; 303 } 304 305 link = kzalloc(sizeof(*link), GFP_KERNEL); 306 if (!link) 307 goto out; 308 309 refcount_set(&link->rpm_active, 1); 310 311 if (flags & DL_FLAG_PM_RUNTIME) { 312 if (flags & DL_FLAG_RPM_ACTIVE) 313 refcount_inc(&link->rpm_active); 314 315 pm_runtime_new_link(consumer); 316 } 317 318 get_device(supplier); 319 link->supplier = supplier; 320 INIT_LIST_HEAD(&link->s_node); 321 get_device(consumer); 322 link->consumer = consumer; 323 INIT_LIST_HEAD(&link->c_node); 324 link->flags = flags; 325 kref_init(&link->kref); 326 327 /* Determine the initial link state. */ 328 if (flags & DL_FLAG_STATELESS) { 329 link->status = DL_STATE_NONE; 330 } else { 331 switch (supplier->links.status) { 332 case DL_DEV_PROBING: 333 switch (consumer->links.status) { 334 case DL_DEV_PROBING: 335 /* 336 * A consumer driver can create a link to a 337 * supplier that has not completed its probing 338 * yet as long as it knows that the supplier is 339 * already functional (for example, it has just 340 * acquired some resources from the supplier). 341 */ 342 link->status = DL_STATE_CONSUMER_PROBE; 343 break; 344 default: 345 link->status = DL_STATE_DORMANT; 346 break; 347 } 348 break; 349 case DL_DEV_DRIVER_BOUND: 350 switch (consumer->links.status) { 351 case DL_DEV_PROBING: 352 link->status = DL_STATE_CONSUMER_PROBE; 353 break; 354 case DL_DEV_DRIVER_BOUND: 355 link->status = DL_STATE_ACTIVE; 356 break; 357 default: 358 link->status = DL_STATE_AVAILABLE; 359 break; 360 } 361 break; 362 case DL_DEV_UNBINDING: 363 link->status = DL_STATE_SUPPLIER_UNBIND; 364 break; 365 default: 366 link->status = DL_STATE_DORMANT; 367 break; 368 } 369 } 370 371 /* 372 * Some callers expect the link creation during consumer driver probe to 373 * resume the supplier even without DL_FLAG_RPM_ACTIVE. 374 */ 375 if (link->status == DL_STATE_CONSUMER_PROBE && 376 flags & DL_FLAG_PM_RUNTIME) 377 pm_runtime_resume(supplier); 378 379 /* 380 * Move the consumer and all of the devices depending on it to the end 381 * of dpm_list and the devices_kset list. 382 * 383 * It is necessary to hold dpm_list locked throughout all that or else 384 * we may end up suspending with a wrong ordering of it. 
385 */ 386 device_reorder_to_tail(consumer, NULL); 387 388 list_add_tail_rcu(&link->s_node, &supplier->links.consumers); 389 list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); 390 391 dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); 392 393 out: 394 device_pm_unlock(); 395 device_links_write_unlock(); 396 397 if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) 398 pm_runtime_put(supplier); 399 400 return link; 401 } 402 EXPORT_SYMBOL_GPL(device_link_add); 403 404 static void device_link_free(struct device_link *link) 405 { 406 while (refcount_dec_not_one(&link->rpm_active)) 407 pm_runtime_put(link->supplier); 408 409 put_device(link->consumer); 410 put_device(link->supplier); 411 kfree(link); 412 } 413 414 #ifdef CONFIG_SRCU 415 static void __device_link_free_srcu(struct rcu_head *rhead) 416 { 417 device_link_free(container_of(rhead, struct device_link, rcu_head)); 418 } 419 420 static void __device_link_del(struct kref *kref) 421 { 422 struct device_link *link = container_of(kref, struct device_link, kref); 423 424 dev_dbg(link->consumer, "Dropping the link to %s\n", 425 dev_name(link->supplier)); 426 427 if (link->flags & DL_FLAG_PM_RUNTIME) 428 pm_runtime_drop_link(link->consumer); 429 430 list_del_rcu(&link->s_node); 431 list_del_rcu(&link->c_node); 432 call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); 433 } 434 #else /* !CONFIG_SRCU */ 435 static void __device_link_del(struct kref *kref) 436 { 437 struct device_link *link = container_of(kref, struct device_link, kref); 438 439 dev_info(link->consumer, "Dropping the link to %s\n", 440 dev_name(link->supplier)); 441 442 if (link->flags & DL_FLAG_PM_RUNTIME) 443 pm_runtime_drop_link(link->consumer); 444 445 list_del(&link->s_node); 446 list_del(&link->c_node); 447 device_link_free(link); 448 } 449 #endif /* !CONFIG_SRCU */ 450 451 static void device_link_put_kref(struct device_link *link) 452 { 453 if (link->flags & DL_FLAG_STATELESS) 454 kref_put(&link->kref, __device_link_del); 455 else 456 WARN(1, "Unable to drop a managed device link reference\n"); 457 } 458 459 /** 460 * device_link_del - Delete a stateless link between two devices. 461 * @link: Device link to delete. 462 * 463 * The caller must ensure proper synchronization of this function with runtime 464 * PM. If the link was added multiple times, it needs to be deleted as often. 465 * Care is required for hotplugged devices: Their links are purged on removal 466 * and calling device_link_del() is then no longer allowed. 467 */ 468 void device_link_del(struct device_link *link) 469 { 470 device_links_write_lock(); 471 device_pm_lock(); 472 device_link_put_kref(link); 473 device_pm_unlock(); 474 device_links_write_unlock(); 475 } 476 EXPORT_SYMBOL_GPL(device_link_del); 477 478 /** 479 * device_link_remove - Delete a stateless link between two devices. 480 * @consumer: Consumer end of the link. 481 * @supplier: Supplier end of the link. 482 * 483 * The caller must ensure proper synchronization of this function with runtime 484 * PM. 
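 *
 * A minimal usage sketch (consumer_dev and supplier_dev are assumed to be
 * the same pointers that were previously passed to device_link_add()):
 *
 *	device_link_remove(consumer_dev, supplier_dev);
 *
 * This looks up the link from consumer_dev to supplier_dev and drops one
 * reference to it, which is convenient when the caller did not keep the
 * struct device_link pointer returned by device_link_add().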
485 */ 486 void device_link_remove(void *consumer, struct device *supplier) 487 { 488 struct device_link *link; 489 490 if (WARN_ON(consumer == supplier)) 491 return; 492 493 device_links_write_lock(); 494 device_pm_lock(); 495 496 list_for_each_entry(link, &supplier->links.consumers, s_node) { 497 if (link->consumer == consumer) { 498 device_link_put_kref(link); 499 break; 500 } 501 } 502 503 device_pm_unlock(); 504 device_links_write_unlock(); 505 } 506 EXPORT_SYMBOL_GPL(device_link_remove); 507 508 static void device_links_missing_supplier(struct device *dev) 509 { 510 struct device_link *link; 511 512 list_for_each_entry(link, &dev->links.suppliers, c_node) 513 if (link->status == DL_STATE_CONSUMER_PROBE) 514 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 515 } 516 517 /** 518 * device_links_check_suppliers - Check presence of supplier drivers. 519 * @dev: Consumer device. 520 * 521 * Check links from this device to any suppliers. Walk the list of the device's 522 * links to suppliers and see if all of them are available. If not, simply 523 * return -EPROBE_DEFER. 524 * 525 * We need to guarantee that the supplier will not go away after the check has 526 * been positive here. It only can go away in __device_release_driver() and 527 * that function checks the device's links to consumers. This means we need to 528 * mark the link as "consumer probe in progress" to make the supplier removal 529 * wait for us to complete (or bad things may happen). 530 * 531 * Links with the DL_FLAG_STATELESS flag set are ignored. 532 */ 533 int device_links_check_suppliers(struct device *dev) 534 { 535 struct device_link *link; 536 int ret = 0; 537 538 device_links_write_lock(); 539 540 list_for_each_entry(link, &dev->links.suppliers, c_node) { 541 if (link->flags & DL_FLAG_STATELESS) 542 continue; 543 544 if (link->status != DL_STATE_AVAILABLE) { 545 device_links_missing_supplier(dev); 546 ret = -EPROBE_DEFER; 547 break; 548 } 549 WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); 550 } 551 dev->links.status = DL_DEV_PROBING; 552 553 device_links_write_unlock(); 554 return ret; 555 } 556 557 /** 558 * device_links_driver_bound - Update device links after probing its driver. 559 * @dev: Device to update the links for. 560 * 561 * The probe has been successful, so update links from this device to any 562 * consumers by changing their status to "available". 563 * 564 * Also change the status of @dev's links to suppliers to "active". 565 * 566 * Links with the DL_FLAG_STATELESS flag set are ignored. 567 */ 568 void device_links_driver_bound(struct device *dev) 569 { 570 struct device_link *link; 571 572 device_links_write_lock(); 573 574 list_for_each_entry(link, &dev->links.consumers, s_node) { 575 if (link->flags & DL_FLAG_STATELESS) 576 continue; 577 578 /* 579 * Links created during consumer probe may be in the "consumer 580 * probe" state to start with if the supplier is still probing 581 * when they are created and they may become "active" if the 582 * consumer probe returns first. Skip them here. 
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
			__device_link_del(&link->kref);
		else if (link->status == DL_STATE_CONSUMER_PROBE ||
			 link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
684 */ 685 void device_links_driver_cleanup(struct device *dev) 686 { 687 struct device_link *link, *ln; 688 689 device_links_write_lock(); 690 691 list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { 692 if (link->flags & DL_FLAG_STATELESS) 693 continue; 694 695 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); 696 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); 697 698 /* 699 * autoremove the links between this @dev and its consumer 700 * devices that are not active, i.e. where the link state 701 * has moved to DL_STATE_SUPPLIER_UNBIND. 702 */ 703 if (link->status == DL_STATE_SUPPLIER_UNBIND && 704 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 705 __device_link_del(&link->kref); 706 707 WRITE_ONCE(link->status, DL_STATE_DORMANT); 708 } 709 710 __device_links_no_driver(dev); 711 712 device_links_write_unlock(); 713 } 714 715 /** 716 * device_links_busy - Check if there are any busy links to consumers. 717 * @dev: Device to check. 718 * 719 * Check each consumer of the device and return 'true' if its link's status 720 * is one of "consumer probe" or "active" (meaning that the given consumer is 721 * probing right now or its driver is present). Otherwise, change the link 722 * state to "supplier unbind" to prevent the consumer from being probed 723 * successfully going forward. 724 * 725 * Return 'false' if there are no probing or active consumers. 726 * 727 * Links with the DL_FLAG_STATELESS flag set are ignored. 728 */ 729 bool device_links_busy(struct device *dev) 730 { 731 struct device_link *link; 732 bool ret = false; 733 734 device_links_write_lock(); 735 736 list_for_each_entry(link, &dev->links.consumers, s_node) { 737 if (link->flags & DL_FLAG_STATELESS) 738 continue; 739 740 if (link->status == DL_STATE_CONSUMER_PROBE 741 || link->status == DL_STATE_ACTIVE) { 742 ret = true; 743 break; 744 } 745 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 746 } 747 748 dev->links.status = DL_DEV_UNBINDING; 749 750 device_links_write_unlock(); 751 return ret; 752 } 753 754 /** 755 * device_links_unbind_consumers - Force unbind consumers of the given device. 756 * @dev: Device to unbind the consumers of. 757 * 758 * Walk the list of links to consumers for @dev and if any of them is in the 759 * "consumer probe" state, wait for all device probes in progress to complete 760 * and start over. 761 * 762 * If that's not the case, change the status of the link to "supplier unbind" 763 * and check if the link was in the "active" state. If so, force the consumer 764 * driver to unbind and start over (the consumer will not re-probe as we have 765 * changed the state of the link already). 766 * 767 * Links with the DL_FLAG_STATELESS flag set are ignored. 
768 */ 769 void device_links_unbind_consumers(struct device *dev) 770 { 771 struct device_link *link; 772 773 start: 774 device_links_write_lock(); 775 776 list_for_each_entry(link, &dev->links.consumers, s_node) { 777 enum device_link_state status; 778 779 if (link->flags & DL_FLAG_STATELESS) 780 continue; 781 782 status = link->status; 783 if (status == DL_STATE_CONSUMER_PROBE) { 784 device_links_write_unlock(); 785 786 wait_for_device_probe(); 787 goto start; 788 } 789 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 790 if (status == DL_STATE_ACTIVE) { 791 struct device *consumer = link->consumer; 792 793 get_device(consumer); 794 795 device_links_write_unlock(); 796 797 device_release_driver_internal(consumer, NULL, 798 consumer->parent); 799 put_device(consumer); 800 goto start; 801 } 802 } 803 804 device_links_write_unlock(); 805 } 806 807 /** 808 * device_links_purge - Delete existing links to other devices. 809 * @dev: Target device. 810 */ 811 static void device_links_purge(struct device *dev) 812 { 813 struct device_link *link, *ln; 814 815 /* 816 * Delete all of the remaining links from this device to any other 817 * devices (either consumers or suppliers). 818 */ 819 device_links_write_lock(); 820 821 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { 822 WARN_ON(link->status == DL_STATE_ACTIVE); 823 __device_link_del(&link->kref); 824 } 825 826 list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { 827 WARN_ON(link->status != DL_STATE_DORMANT && 828 link->status != DL_STATE_NONE); 829 __device_link_del(&link->kref); 830 } 831 832 device_links_write_unlock(); 833 } 834 835 /* Device links support end. */ 836 837 int (*platform_notify)(struct device *dev) = NULL; 838 int (*platform_notify_remove)(struct device *dev) = NULL; 839 static struct kobject *dev_kobj; 840 struct kobject *sysfs_dev_char_kobj; 841 struct kobject *sysfs_dev_block_kobj; 842 843 static DEFINE_MUTEX(device_hotplug_lock); 844 845 void lock_device_hotplug(void) 846 { 847 mutex_lock(&device_hotplug_lock); 848 } 849 850 void unlock_device_hotplug(void) 851 { 852 mutex_unlock(&device_hotplug_lock); 853 } 854 855 int lock_device_hotplug_sysfs(void) 856 { 857 if (mutex_trylock(&device_hotplug_lock)) 858 return 0; 859 860 /* Avoid busy looping (5 ms of sleep should do). */ 861 msleep(5); 862 return restart_syscall(); 863 } 864 865 #ifdef CONFIG_BLOCK 866 static inline int device_is_not_partition(struct device *dev) 867 { 868 return !(dev->type == &part_type); 869 } 870 #else 871 static inline int device_is_not_partition(struct device *dev) 872 { 873 return 1; 874 } 875 #endif 876 877 static int 878 device_platform_notify(struct device *dev, enum kobject_action action) 879 { 880 int ret; 881 882 ret = acpi_platform_notify(dev, action); 883 if (ret) 884 return ret; 885 886 ret = software_node_notify(dev, action); 887 if (ret) 888 return ret; 889 890 if (platform_notify && action == KOBJ_ADD) 891 platform_notify(dev); 892 else if (platform_notify_remove && action == KOBJ_REMOVE) 893 platform_notify_remove(dev); 894 return 0; 895 } 896 897 /** 898 * dev_driver_string - Return a device's driver name, if at all possible 899 * @dev: struct device to get the name of 900 * 901 * Will return the device's driver's name if it is bound to a device. If 902 * the device is not bound to a driver, it will return the name of the bus 903 * it is attached to. If it is not attached to a bus either, an empty 904 * string will be returned. 
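 *
 * This is mostly useful for log messages; a hypothetical caller might do:
 *
 *	pr_info("%s %s: resetting\n", dev_driver_string(dev), dev_name(dev));
 *
 * which prints the bound driver's name, or falls back to the bus or class
 * name as described above.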
905 */ 906 const char *dev_driver_string(const struct device *dev) 907 { 908 struct device_driver *drv; 909 910 /* dev->driver can change to NULL underneath us because of unbinding, 911 * so be careful about accessing it. dev->bus and dev->class should 912 * never change once they are set, so they don't need special care. 913 */ 914 drv = READ_ONCE(dev->driver); 915 return drv ? drv->name : 916 (dev->bus ? dev->bus->name : 917 (dev->class ? dev->class->name : "")); 918 } 919 EXPORT_SYMBOL(dev_driver_string); 920 921 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) 922 923 static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, 924 char *buf) 925 { 926 struct device_attribute *dev_attr = to_dev_attr(attr); 927 struct device *dev = kobj_to_dev(kobj); 928 ssize_t ret = -EIO; 929 930 if (dev_attr->show) 931 ret = dev_attr->show(dev, dev_attr, buf); 932 if (ret >= (ssize_t)PAGE_SIZE) { 933 printk("dev_attr_show: %pS returned bad count\n", 934 dev_attr->show); 935 } 936 return ret; 937 } 938 939 static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, 940 const char *buf, size_t count) 941 { 942 struct device_attribute *dev_attr = to_dev_attr(attr); 943 struct device *dev = kobj_to_dev(kobj); 944 ssize_t ret = -EIO; 945 946 if (dev_attr->store) 947 ret = dev_attr->store(dev, dev_attr, buf, count); 948 return ret; 949 } 950 951 static const struct sysfs_ops dev_sysfs_ops = { 952 .show = dev_attr_show, 953 .store = dev_attr_store, 954 }; 955 956 #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) 957 958 ssize_t device_store_ulong(struct device *dev, 959 struct device_attribute *attr, 960 const char *buf, size_t size) 961 { 962 struct dev_ext_attribute *ea = to_ext_attr(attr); 963 int ret; 964 unsigned long new; 965 966 ret = kstrtoul(buf, 0, &new); 967 if (ret) 968 return ret; 969 *(unsigned long *)(ea->var) = new; 970 /* Always return full write size even if we didn't consume all */ 971 return size; 972 } 973 EXPORT_SYMBOL_GPL(device_store_ulong); 974 975 ssize_t device_show_ulong(struct device *dev, 976 struct device_attribute *attr, 977 char *buf) 978 { 979 struct dev_ext_attribute *ea = to_ext_attr(attr); 980 return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var)); 981 } 982 EXPORT_SYMBOL_GPL(device_show_ulong); 983 984 ssize_t device_store_int(struct device *dev, 985 struct device_attribute *attr, 986 const char *buf, size_t size) 987 { 988 struct dev_ext_attribute *ea = to_ext_attr(attr); 989 int ret; 990 long new; 991 992 ret = kstrtol(buf, 0, &new); 993 if (ret) 994 return ret; 995 996 if (new > INT_MAX || new < INT_MIN) 997 return -EINVAL; 998 *(int *)(ea->var) = new; 999 /* Always return full write size even if we didn't consume all */ 1000 return size; 1001 } 1002 EXPORT_SYMBOL_GPL(device_store_int); 1003 1004 ssize_t device_show_int(struct device *dev, 1005 struct device_attribute *attr, 1006 char *buf) 1007 { 1008 struct dev_ext_attribute *ea = to_ext_attr(attr); 1009 1010 return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var)); 1011 } 1012 EXPORT_SYMBOL_GPL(device_show_int); 1013 1014 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 1015 const char *buf, size_t size) 1016 { 1017 struct dev_ext_attribute *ea = to_ext_attr(attr); 1018 1019 if (strtobool(buf, ea->var) < 0) 1020 return -EINVAL; 1021 1022 return size; 1023 } 1024 EXPORT_SYMBOL_GPL(device_store_bool); 1025 1026 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 1027 
char *buf) 1028 { 1029 struct dev_ext_attribute *ea = to_ext_attr(attr); 1030 1031 return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var)); 1032 } 1033 EXPORT_SYMBOL_GPL(device_show_bool); 1034 1035 /** 1036 * device_release - free device structure. 1037 * @kobj: device's kobject. 1038 * 1039 * This is called once the reference count for the object 1040 * reaches 0. We forward the call to the device's release 1041 * method, which should handle actually freeing the structure. 1042 */ 1043 static void device_release(struct kobject *kobj) 1044 { 1045 struct device *dev = kobj_to_dev(kobj); 1046 struct device_private *p = dev->p; 1047 1048 /* 1049 * Some platform devices are driven without driver attached 1050 * and managed resources may have been acquired. Make sure 1051 * all resources are released. 1052 * 1053 * Drivers still can add resources into device after device 1054 * is deleted but alive, so release devres here to avoid 1055 * possible memory leak. 1056 */ 1057 devres_release_all(dev); 1058 1059 if (dev->release) 1060 dev->release(dev); 1061 else if (dev->type && dev->type->release) 1062 dev->type->release(dev); 1063 else if (dev->class && dev->class->dev_release) 1064 dev->class->dev_release(dev); 1065 else 1066 WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n", 1067 dev_name(dev)); 1068 kfree(p); 1069 } 1070 1071 static const void *device_namespace(struct kobject *kobj) 1072 { 1073 struct device *dev = kobj_to_dev(kobj); 1074 const void *ns = NULL; 1075 1076 if (dev->class && dev->class->ns_type) 1077 ns = dev->class->namespace(dev); 1078 1079 return ns; 1080 } 1081 1082 static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) 1083 { 1084 struct device *dev = kobj_to_dev(kobj); 1085 1086 if (dev->class && dev->class->get_ownership) 1087 dev->class->get_ownership(dev, uid, gid); 1088 } 1089 1090 static struct kobj_type device_ktype = { 1091 .release = device_release, 1092 .sysfs_ops = &dev_sysfs_ops, 1093 .namespace = device_namespace, 1094 .get_ownership = device_get_ownership, 1095 }; 1096 1097 1098 static int dev_uevent_filter(struct kset *kset, struct kobject *kobj) 1099 { 1100 struct kobj_type *ktype = get_ktype(kobj); 1101 1102 if (ktype == &device_ktype) { 1103 struct device *dev = kobj_to_dev(kobj); 1104 if (dev->bus) 1105 return 1; 1106 if (dev->class) 1107 return 1; 1108 } 1109 return 0; 1110 } 1111 1112 static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) 1113 { 1114 struct device *dev = kobj_to_dev(kobj); 1115 1116 if (dev->bus) 1117 return dev->bus->name; 1118 if (dev->class) 1119 return dev->class->name; 1120 return NULL; 1121 } 1122 1123 static int dev_uevent(struct kset *kset, struct kobject *kobj, 1124 struct kobj_uevent_env *env) 1125 { 1126 struct device *dev = kobj_to_dev(kobj); 1127 int retval = 0; 1128 1129 /* add device node properties if present */ 1130 if (MAJOR(dev->devt)) { 1131 const char *tmp; 1132 const char *name; 1133 umode_t mode = 0; 1134 kuid_t uid = GLOBAL_ROOT_UID; 1135 kgid_t gid = GLOBAL_ROOT_GID; 1136 1137 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); 1138 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); 1139 name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); 1140 if (name) { 1141 add_uevent_var(env, "DEVNAME=%s", name); 1142 if (mode) 1143 add_uevent_var(env, "DEVMODE=%#o", mode & 0777); 1144 if (!uid_eq(uid, GLOBAL_ROOT_UID)) 1145 add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid)); 
1146 if (!gid_eq(gid, GLOBAL_ROOT_GID)) 1147 add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid)); 1148 kfree(tmp); 1149 } 1150 } 1151 1152 if (dev->type && dev->type->name) 1153 add_uevent_var(env, "DEVTYPE=%s", dev->type->name); 1154 1155 if (dev->driver) 1156 add_uevent_var(env, "DRIVER=%s", dev->driver->name); 1157 1158 /* Add common DT information about the device */ 1159 of_device_uevent(dev, env); 1160 1161 /* have the bus specific function add its stuff */ 1162 if (dev->bus && dev->bus->uevent) { 1163 retval = dev->bus->uevent(dev, env); 1164 if (retval) 1165 pr_debug("device: '%s': %s: bus uevent() returned %d\n", 1166 dev_name(dev), __func__, retval); 1167 } 1168 1169 /* have the class specific function add its stuff */ 1170 if (dev->class && dev->class->dev_uevent) { 1171 retval = dev->class->dev_uevent(dev, env); 1172 if (retval) 1173 pr_debug("device: '%s': %s: class uevent() " 1174 "returned %d\n", dev_name(dev), 1175 __func__, retval); 1176 } 1177 1178 /* have the device type specific function add its stuff */ 1179 if (dev->type && dev->type->uevent) { 1180 retval = dev->type->uevent(dev, env); 1181 if (retval) 1182 pr_debug("device: '%s': %s: dev_type uevent() " 1183 "returned %d\n", dev_name(dev), 1184 __func__, retval); 1185 } 1186 1187 return retval; 1188 } 1189 1190 static const struct kset_uevent_ops device_uevent_ops = { 1191 .filter = dev_uevent_filter, 1192 .name = dev_uevent_name, 1193 .uevent = dev_uevent, 1194 }; 1195 1196 static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, 1197 char *buf) 1198 { 1199 struct kobject *top_kobj; 1200 struct kset *kset; 1201 struct kobj_uevent_env *env = NULL; 1202 int i; 1203 size_t count = 0; 1204 int retval; 1205 1206 /* search the kset, the device belongs to */ 1207 top_kobj = &dev->kobj; 1208 while (!top_kobj->kset && top_kobj->parent) 1209 top_kobj = top_kobj->parent; 1210 if (!top_kobj->kset) 1211 goto out; 1212 1213 kset = top_kobj->kset; 1214 if (!kset->uevent_ops || !kset->uevent_ops->uevent) 1215 goto out; 1216 1217 /* respect filter */ 1218 if (kset->uevent_ops && kset->uevent_ops->filter) 1219 if (!kset->uevent_ops->filter(kset, &dev->kobj)) 1220 goto out; 1221 1222 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); 1223 if (!env) 1224 return -ENOMEM; 1225 1226 /* let the kset specific function add its keys */ 1227 retval = kset->uevent_ops->uevent(kset, &dev->kobj, env); 1228 if (retval) 1229 goto out; 1230 1231 /* copy keys to file */ 1232 for (i = 0; i < env->envp_idx; i++) 1233 count += sprintf(&buf[count], "%s\n", env->envp[i]); 1234 out: 1235 kfree(env); 1236 return count; 1237 } 1238 1239 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, 1240 const char *buf, size_t count) 1241 { 1242 int rc; 1243 1244 rc = kobject_synth_uevent(&dev->kobj, buf, count); 1245 1246 if (rc) { 1247 dev_err(dev, "uevent: failed to send synthetic uevent\n"); 1248 return rc; 1249 } 1250 1251 return count; 1252 } 1253 static DEVICE_ATTR_RW(uevent); 1254 1255 static ssize_t online_show(struct device *dev, struct device_attribute *attr, 1256 char *buf) 1257 { 1258 bool val; 1259 1260 device_lock(dev); 1261 val = !dev->offline; 1262 device_unlock(dev); 1263 return sprintf(buf, "%u\n", val); 1264 } 1265 1266 static ssize_t online_store(struct device *dev, struct device_attribute *attr, 1267 const char *buf, size_t count) 1268 { 1269 bool val; 1270 int ret; 1271 1272 ret = strtobool(buf, &val); 1273 if (ret < 0) 1274 return ret; 1275 1276 ret = 
		lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}

/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev: The device to create the group for
 * @grp: The attribute group to create
 *
 * This function creates a group for the first time. It will explicitly
 * warn and error if any of the attribute files being created already exist.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);

/**
 * devm_device_remove_group - remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * This function removes a group of attributes from a device. The attributes
 * must previously have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);

/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev: The device to create the group for
 * @groups: The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups.
If an error 1382 * occurs when creating a group, all previously created groups will be 1383 * removed, unwinding everything back to the original state when this 1384 * function was called. It will explicitly warn and error if any of the 1385 * attribute files being created already exist. 1386 * 1387 * Returns 0 on success or error code from sysfs_create_group on failure. 1388 */ 1389 int devm_device_add_groups(struct device *dev, 1390 const struct attribute_group **groups) 1391 { 1392 union device_attr_group_devres *devres; 1393 int error; 1394 1395 devres = devres_alloc(devm_attr_groups_remove, 1396 sizeof(*devres), GFP_KERNEL); 1397 if (!devres) 1398 return -ENOMEM; 1399 1400 error = sysfs_create_groups(&dev->kobj, groups); 1401 if (error) { 1402 devres_free(devres); 1403 return error; 1404 } 1405 1406 devres->groups = groups; 1407 devres_add(dev, devres); 1408 return 0; 1409 } 1410 EXPORT_SYMBOL_GPL(devm_device_add_groups); 1411 1412 /** 1413 * devm_device_remove_groups - remove a list of managed groups 1414 * 1415 * @dev: The device for the groups to be removed from 1416 * @groups: NULL terminated list of groups to be removed 1417 * 1418 * If groups is not NULL, remove the specified groups from the device. 1419 */ 1420 void devm_device_remove_groups(struct device *dev, 1421 const struct attribute_group **groups) 1422 { 1423 WARN_ON(devres_release(dev, devm_attr_groups_remove, 1424 devm_attr_group_match, 1425 /* cast away const */ (void *)groups)); 1426 } 1427 EXPORT_SYMBOL_GPL(devm_device_remove_groups); 1428 1429 static int device_add_attrs(struct device *dev) 1430 { 1431 struct class *class = dev->class; 1432 const struct device_type *type = dev->type; 1433 int error; 1434 1435 if (class) { 1436 error = device_add_groups(dev, class->dev_groups); 1437 if (error) 1438 return error; 1439 } 1440 1441 if (type) { 1442 error = device_add_groups(dev, type->groups); 1443 if (error) 1444 goto err_remove_class_groups; 1445 } 1446 1447 error = device_add_groups(dev, dev->groups); 1448 if (error) 1449 goto err_remove_type_groups; 1450 1451 if (device_supports_offline(dev) && !dev->offline_disabled) { 1452 error = device_create_file(dev, &dev_attr_online); 1453 if (error) 1454 goto err_remove_dev_groups; 1455 } 1456 1457 return 0; 1458 1459 err_remove_dev_groups: 1460 device_remove_groups(dev, dev->groups); 1461 err_remove_type_groups: 1462 if (type) 1463 device_remove_groups(dev, type->groups); 1464 err_remove_class_groups: 1465 if (class) 1466 device_remove_groups(dev, class->dev_groups); 1467 1468 return error; 1469 } 1470 1471 static void device_remove_attrs(struct device *dev) 1472 { 1473 struct class *class = dev->class; 1474 const struct device_type *type = dev->type; 1475 1476 device_remove_file(dev, &dev_attr_online); 1477 device_remove_groups(dev, dev->groups); 1478 1479 if (type) 1480 device_remove_groups(dev, type->groups); 1481 1482 if (class) 1483 device_remove_groups(dev, class->dev_groups); 1484 } 1485 1486 static ssize_t dev_show(struct device *dev, struct device_attribute *attr, 1487 char *buf) 1488 { 1489 return print_dev_t(buf, dev->devt); 1490 } 1491 static DEVICE_ATTR_RO(dev); 1492 1493 /* /sys/devices/ */ 1494 struct kset *devices_kset; 1495 1496 /** 1497 * devices_kset_move_before - Move device in the devices_kset's list. 1498 * @deva: Device to move. 1499 * @devb: Device @deva should come before. 
1500 */ 1501 static void devices_kset_move_before(struct device *deva, struct device *devb) 1502 { 1503 if (!devices_kset) 1504 return; 1505 pr_debug("devices_kset: Moving %s before %s\n", 1506 dev_name(deva), dev_name(devb)); 1507 spin_lock(&devices_kset->list_lock); 1508 list_move_tail(&deva->kobj.entry, &devb->kobj.entry); 1509 spin_unlock(&devices_kset->list_lock); 1510 } 1511 1512 /** 1513 * devices_kset_move_after - Move device in the devices_kset's list. 1514 * @deva: Device to move 1515 * @devb: Device @deva should come after. 1516 */ 1517 static void devices_kset_move_after(struct device *deva, struct device *devb) 1518 { 1519 if (!devices_kset) 1520 return; 1521 pr_debug("devices_kset: Moving %s after %s\n", 1522 dev_name(deva), dev_name(devb)); 1523 spin_lock(&devices_kset->list_lock); 1524 list_move(&deva->kobj.entry, &devb->kobj.entry); 1525 spin_unlock(&devices_kset->list_lock); 1526 } 1527 1528 /** 1529 * devices_kset_move_last - move the device to the end of devices_kset's list. 1530 * @dev: device to move 1531 */ 1532 void devices_kset_move_last(struct device *dev) 1533 { 1534 if (!devices_kset) 1535 return; 1536 pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); 1537 spin_lock(&devices_kset->list_lock); 1538 list_move_tail(&dev->kobj.entry, &devices_kset->list); 1539 spin_unlock(&devices_kset->list_lock); 1540 } 1541 1542 /** 1543 * device_create_file - create sysfs attribute file for device. 1544 * @dev: device. 1545 * @attr: device attribute descriptor. 1546 */ 1547 int device_create_file(struct device *dev, 1548 const struct device_attribute *attr) 1549 { 1550 int error = 0; 1551 1552 if (dev) { 1553 WARN(((attr->attr.mode & S_IWUGO) && !attr->store), 1554 "Attribute %s: write permission without 'store'\n", 1555 attr->attr.name); 1556 WARN(((attr->attr.mode & S_IRUGO) && !attr->show), 1557 "Attribute %s: read permission without 'show'\n", 1558 attr->attr.name); 1559 error = sysfs_create_file(&dev->kobj, &attr->attr); 1560 } 1561 1562 return error; 1563 } 1564 EXPORT_SYMBOL_GPL(device_create_file); 1565 1566 /** 1567 * device_remove_file - remove sysfs attribute file. 1568 * @dev: device. 1569 * @attr: device attribute descriptor. 1570 */ 1571 void device_remove_file(struct device *dev, 1572 const struct device_attribute *attr) 1573 { 1574 if (dev) 1575 sysfs_remove_file(&dev->kobj, &attr->attr); 1576 } 1577 EXPORT_SYMBOL_GPL(device_remove_file); 1578 1579 /** 1580 * device_remove_file_self - remove sysfs attribute file from its own method. 1581 * @dev: device. 1582 * @attr: device attribute descriptor. 1583 * 1584 * See kernfs_remove_self() for details. 1585 */ 1586 bool device_remove_file_self(struct device *dev, 1587 const struct device_attribute *attr) 1588 { 1589 if (dev) 1590 return sysfs_remove_file_self(&dev->kobj, &attr->attr); 1591 else 1592 return false; 1593 } 1594 EXPORT_SYMBOL_GPL(device_remove_file_self); 1595 1596 /** 1597 * device_create_bin_file - create sysfs binary attribute file for device. 1598 * @dev: device. 1599 * @attr: device binary attribute descriptor. 1600 */ 1601 int device_create_bin_file(struct device *dev, 1602 const struct bin_attribute *attr) 1603 { 1604 int error = -EINVAL; 1605 if (dev) 1606 error = sysfs_create_bin_file(&dev->kobj, attr); 1607 return error; 1608 } 1609 EXPORT_SYMBOL_GPL(device_create_bin_file); 1610 1611 /** 1612 * device_remove_bin_file - remove sysfs binary attribute file 1613 * @dev: device. 1614 * @attr: device binary attribute descriptor. 
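 *
 * An illustrative sketch of the create/remove pairing (eeprom_attr and
 * eeprom_read are hypothetical, driver-provided names, not part of this
 * file):
 *
 *	static struct bin_attribute eeprom_attr = {
 *		.attr = { .name = "eeprom", .mode = 0444 },
 *		.size = 256,
 *		.read = eeprom_read,
 *	};
 *
 *	// on probe:
 *	error = device_create_bin_file(dev, &eeprom_attr);
 *
 *	// on remove, drop the file again:
 *	device_remove_bin_file(dev, &eeprom_attr);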
1615 */ 1616 void device_remove_bin_file(struct device *dev, 1617 const struct bin_attribute *attr) 1618 { 1619 if (dev) 1620 sysfs_remove_bin_file(&dev->kobj, attr); 1621 } 1622 EXPORT_SYMBOL_GPL(device_remove_bin_file); 1623 1624 static void klist_children_get(struct klist_node *n) 1625 { 1626 struct device_private *p = to_device_private_parent(n); 1627 struct device *dev = p->device; 1628 1629 get_device(dev); 1630 } 1631 1632 static void klist_children_put(struct klist_node *n) 1633 { 1634 struct device_private *p = to_device_private_parent(n); 1635 struct device *dev = p->device; 1636 1637 put_device(dev); 1638 } 1639 1640 /** 1641 * device_initialize - init device structure. 1642 * @dev: device. 1643 * 1644 * This prepares the device for use by other layers by initializing 1645 * its fields. 1646 * It is the first half of device_register(), if called by 1647 * that function, though it can also be called separately, so one 1648 * may use @dev's fields. In particular, get_device()/put_device() 1649 * may be used for reference counting of @dev after calling this 1650 * function. 1651 * 1652 * All fields in @dev must be initialized by the caller to 0, except 1653 * for those explicitly set to some other value. The simplest 1654 * approach is to use kzalloc() to allocate the structure containing 1655 * @dev. 1656 * 1657 * NOTE: Use put_device() to give up your reference instead of freeing 1658 * @dev directly once you have called this function. 1659 */ 1660 void device_initialize(struct device *dev) 1661 { 1662 dev->kobj.kset = devices_kset; 1663 kobject_init(&dev->kobj, &device_ktype); 1664 INIT_LIST_HEAD(&dev->dma_pools); 1665 mutex_init(&dev->mutex); 1666 #ifdef CONFIG_PROVE_LOCKING 1667 mutex_init(&dev->lockdep_mutex); 1668 #endif 1669 lockdep_set_novalidate_class(&dev->mutex); 1670 spin_lock_init(&dev->devres_lock); 1671 INIT_LIST_HEAD(&dev->devres_head); 1672 device_pm_init(dev); 1673 set_dev_node(dev, -1); 1674 #ifdef CONFIG_GENERIC_MSI_IRQ 1675 INIT_LIST_HEAD(&dev->msi_list); 1676 #endif 1677 INIT_LIST_HEAD(&dev->links.consumers); 1678 INIT_LIST_HEAD(&dev->links.suppliers); 1679 dev->links.status = DL_DEV_NO_DRIVER; 1680 } 1681 EXPORT_SYMBOL_GPL(device_initialize); 1682 1683 struct kobject *virtual_device_parent(struct device *dev) 1684 { 1685 static struct kobject *virtual_dir = NULL; 1686 1687 if (!virtual_dir) 1688 virtual_dir = kobject_create_and_add("virtual", 1689 &devices_kset->kobj); 1690 1691 return virtual_dir; 1692 } 1693 1694 struct class_dir { 1695 struct kobject kobj; 1696 struct class *class; 1697 }; 1698 1699 #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) 1700 1701 static void class_dir_release(struct kobject *kobj) 1702 { 1703 struct class_dir *dir = to_class_dir(kobj); 1704 kfree(dir); 1705 } 1706 1707 static const 1708 struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) 1709 { 1710 struct class_dir *dir = to_class_dir(kobj); 1711 return dir->class->ns_type; 1712 } 1713 1714 static struct kobj_type class_dir_ktype = { 1715 .release = class_dir_release, 1716 .sysfs_ops = &kobj_sysfs_ops, 1717 .child_ns_type = class_dir_child_ns_type 1718 }; 1719 1720 static struct kobject * 1721 class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) 1722 { 1723 struct class_dir *dir; 1724 int retval; 1725 1726 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 1727 if (!dir) 1728 return ERR_PTR(-ENOMEM); 1729 1730 dir->class = class; 1731 kobject_init(&dir->kobj, &class_dir_ktype); 1732 1733 dir->kobj.kset = 
&class->p->glue_dirs; 1734 1735 retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); 1736 if (retval < 0) { 1737 kobject_put(&dir->kobj); 1738 return ERR_PTR(retval); 1739 } 1740 return &dir->kobj; 1741 } 1742 1743 static DEFINE_MUTEX(gdp_mutex); 1744 1745 static struct kobject *get_device_parent(struct device *dev, 1746 struct device *parent) 1747 { 1748 if (dev->class) { 1749 struct kobject *kobj = NULL; 1750 struct kobject *parent_kobj; 1751 struct kobject *k; 1752 1753 #ifdef CONFIG_BLOCK 1754 /* block disks show up in /sys/block */ 1755 if (sysfs_deprecated && dev->class == &block_class) { 1756 if (parent && parent->class == &block_class) 1757 return &parent->kobj; 1758 return &block_class.p->subsys.kobj; 1759 } 1760 #endif 1761 1762 /* 1763 * If we have no parent, we live in "virtual". 1764 * Class-devices with a non class-device as parent, live 1765 * in a "glue" directory to prevent namespace collisions. 1766 */ 1767 if (parent == NULL) 1768 parent_kobj = virtual_device_parent(dev); 1769 else if (parent->class && !dev->class->ns_type) 1770 return &parent->kobj; 1771 else 1772 parent_kobj = &parent->kobj; 1773 1774 mutex_lock(&gdp_mutex); 1775 1776 /* find our class-directory at the parent and reference it */ 1777 spin_lock(&dev->class->p->glue_dirs.list_lock); 1778 list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) 1779 if (k->parent == parent_kobj) { 1780 kobj = kobject_get(k); 1781 break; 1782 } 1783 spin_unlock(&dev->class->p->glue_dirs.list_lock); 1784 if (kobj) { 1785 mutex_unlock(&gdp_mutex); 1786 return kobj; 1787 } 1788 1789 /* or create a new class-directory at the parent device */ 1790 k = class_dir_create_and_add(dev->class, parent_kobj); 1791 /* do not emit an uevent for this simple "glue" directory */ 1792 mutex_unlock(&gdp_mutex); 1793 return k; 1794 } 1795 1796 /* subsystems can specify a default root directory for their devices */ 1797 if (!parent && dev->bus && dev->bus->dev_root) 1798 return &dev->bus->dev_root->kobj; 1799 1800 if (parent) 1801 return &parent->kobj; 1802 return NULL; 1803 } 1804 1805 static inline bool live_in_glue_dir(struct kobject *kobj, 1806 struct device *dev) 1807 { 1808 if (!kobj || !dev->class || 1809 kobj->kset != &dev->class->p->glue_dirs) 1810 return false; 1811 return true; 1812 } 1813 1814 static inline struct kobject *get_glue_dir(struct device *dev) 1815 { 1816 return dev->kobj.parent; 1817 } 1818 1819 /* 1820 * make sure cleaning up dir as the last step, we need to make 1821 * sure .release handler of kobject is run with holding the 1822 * global lock 1823 */ 1824 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) 1825 { 1826 unsigned int ref; 1827 1828 /* see if we live in a "glue" directory */ 1829 if (!live_in_glue_dir(glue_dir, dev)) 1830 return; 1831 1832 mutex_lock(&gdp_mutex); 1833 /** 1834 * There is a race condition between removing glue directory 1835 * and adding a new device under the glue directory. 
	 *
	 * CPU1:                                         CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                                               device_add()
	 *                                                 get_device_parent()
	 *                                                   kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                                               kobject_add()
	 *                                                 kobject_add_internal()
	 *                                                   create_dir() // in glue_dir
	 *                                                     sysfs_create_dir_ns()
	 *                                                       kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                                       // sd is freed
	 *                                                       kernfs_new_node(sd)
	 *                                                         kernfs_get(glue_dir)
	 *                                                         kernfs_add_one()
	 *                                                           kernfs_put()
	 *
	 * Before CPU1 removes the last child device under the glue dir, if
	 * CPU2 adds a new device under the glue dir, the glue_dir kobject
	 * reference count will be increased to 2 in kobject_get(k), and CPU2
	 * will already have called kernfs_create_dir_ns(). Meanwhile, CPU1
	 * calls sysfs_remove_dir() and sysfs_put(), which frees glue_dir->sd.
	 *
	 * CPU2 will then see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we also should make sure that the
	 * kernfs_node for glue_dir is released on CPU1 only when the refcount
	 * of the glue_dir kobj is 1.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}

static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n", error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}

static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}

/**
set a device name 1960 * @dev: device 1961 * @fmt: format string for the device's name 1962 */ 1963 int dev_set_name(struct device *dev, const char *fmt, ...) 1964 { 1965 va_list vargs; 1966 int err; 1967 1968 va_start(vargs, fmt); 1969 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); 1970 va_end(vargs); 1971 return err; 1972 } 1973 EXPORT_SYMBOL_GPL(dev_set_name); 1974 1975 /** 1976 * device_to_dev_kobj - select a /sys/dev/ directory for the device 1977 * @dev: device 1978 * 1979 * By default we select char/ for new entries. Setting class->dev_obj 1980 * to NULL prevents an entry from being created. class->dev_kobj must 1981 * be set (or cleared) before any devices are registered to the class 1982 * otherwise device_create_sys_dev_entry() and 1983 * device_remove_sys_dev_entry() will disagree about the presence of 1984 * the link. 1985 */ 1986 static struct kobject *device_to_dev_kobj(struct device *dev) 1987 { 1988 struct kobject *kobj; 1989 1990 if (dev->class) 1991 kobj = dev->class->dev_kobj; 1992 else 1993 kobj = sysfs_dev_char_kobj; 1994 1995 return kobj; 1996 } 1997 1998 static int device_create_sys_dev_entry(struct device *dev) 1999 { 2000 struct kobject *kobj = device_to_dev_kobj(dev); 2001 int error = 0; 2002 char devt_str[15]; 2003 2004 if (kobj) { 2005 format_dev_t(devt_str, dev->devt); 2006 error = sysfs_create_link(kobj, &dev->kobj, devt_str); 2007 } 2008 2009 return error; 2010 } 2011 2012 static void device_remove_sys_dev_entry(struct device *dev) 2013 { 2014 struct kobject *kobj = device_to_dev_kobj(dev); 2015 char devt_str[15]; 2016 2017 if (kobj) { 2018 format_dev_t(devt_str, dev->devt); 2019 sysfs_remove_link(kobj, devt_str); 2020 } 2021 } 2022 2023 static int device_private_init(struct device *dev) 2024 { 2025 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); 2026 if (!dev->p) 2027 return -ENOMEM; 2028 dev->p->device = dev; 2029 klist_init(&dev->p->klist_children, klist_children_get, 2030 klist_children_put); 2031 INIT_LIST_HEAD(&dev->p->deferred_probe); 2032 return 0; 2033 } 2034 2035 /** 2036 * device_add - add device to device hierarchy. 2037 * @dev: device. 2038 * 2039 * This is part 2 of device_register(), though may be called 2040 * separately _iff_ device_initialize() has been called separately. 2041 * 2042 * This adds @dev to the kobject hierarchy via kobject_add(), adds it 2043 * to the global and sibling lists for the device, then 2044 * adds it to the other relevant subsystems of the driver model. 2045 * 2046 * Do not call this routine or device_register() more than once for 2047 * any device structure. The driver model core is not designed to work 2048 * with devices that get unregistered and then spring back to life. 2049 * (Among other things, it's very hard to guarantee that all references 2050 * to the previous incarnation of @dev have been dropped.) Allocate 2051 * and register a fresh new struct device instead. 2052 * 2053 * NOTE: _Never_ directly free @dev after calling this function, even 2054 * if it returned an error! Always use put_device() to give up your 2055 * reference instead. 2056 * 2057 * Rule of thumb is: if device_add() succeeds, you should call 2058 * device_del() when you want to get rid of it. If device_add() has 2059 * *not* succeeded, use *only* put_device() to drop the reference 2060 * count. 
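 *
 * A minimal usage sketch (hypothetical caller, not part of this file; the
 * foo_bus_type, foo_release and id names are assumptions for illustration
 * only):
 *
 *	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *	if (!dev)
 *		return -ENOMEM;
 *	device_initialize(dev);
 *	dev->parent = parent;
 *	dev->bus = &foo_bus_type;
 *	dev->release = foo_release;
 *	dev_set_name(dev, "foo%u", id);
 *	err = device_add(dev);
 *	if (err) {
 *		put_device(dev);	// never kfree(dev) directly here
 *		return err;
 *	}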
2061 */ 2062 int device_add(struct device *dev) 2063 { 2064 struct device *parent; 2065 struct kobject *kobj; 2066 struct class_interface *class_intf; 2067 int error = -EINVAL; 2068 struct kobject *glue_dir = NULL; 2069 2070 dev = get_device(dev); 2071 if (!dev) 2072 goto done; 2073 2074 if (!dev->p) { 2075 error = device_private_init(dev); 2076 if (error) 2077 goto done; 2078 } 2079 2080 /* 2081 * for statically allocated devices, which should all be converted 2082 * some day, we need to initialize the name. We prevent reading back 2083 * the name, and force the use of dev_name() 2084 */ 2085 if (dev->init_name) { 2086 dev_set_name(dev, "%s", dev->init_name); 2087 dev->init_name = NULL; 2088 } 2089 2090 /* subsystems can specify simple device enumeration */ 2091 if (!dev_name(dev) && dev->bus && dev->bus->dev_name) 2092 dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); 2093 2094 if (!dev_name(dev)) { 2095 error = -EINVAL; 2096 goto name_error; 2097 } 2098 2099 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2100 2101 parent = get_device(dev->parent); 2102 kobj = get_device_parent(dev, parent); 2103 if (IS_ERR(kobj)) { 2104 error = PTR_ERR(kobj); 2105 goto parent_error; 2106 } 2107 if (kobj) 2108 dev->kobj.parent = kobj; 2109 2110 /* use parent numa_node */ 2111 if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) 2112 set_dev_node(dev, dev_to_node(parent)); 2113 2114 /* first, register with generic layer. */ 2115 /* we require the name to be set before, and pass NULL */ 2116 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); 2117 if (error) { 2118 glue_dir = get_glue_dir(dev); 2119 goto Error; 2120 } 2121 2122 /* notify platform of device entry */ 2123 error = device_platform_notify(dev, KOBJ_ADD); 2124 if (error) 2125 goto platform_error; 2126 2127 error = device_create_file(dev, &dev_attr_uevent); 2128 if (error) 2129 goto attrError; 2130 2131 error = device_add_class_symlinks(dev); 2132 if (error) 2133 goto SymlinkError; 2134 error = device_add_attrs(dev); 2135 if (error) 2136 goto AttrsError; 2137 error = bus_add_device(dev); 2138 if (error) 2139 goto BusError; 2140 error = dpm_sysfs_add(dev); 2141 if (error) 2142 goto DPMError; 2143 device_pm_add(dev); 2144 2145 if (MAJOR(dev->devt)) { 2146 error = device_create_file(dev, &dev_attr_dev); 2147 if (error) 2148 goto DevAttrError; 2149 2150 error = device_create_sys_dev_entry(dev); 2151 if (error) 2152 goto SysEntryError; 2153 2154 devtmpfs_create_node(dev); 2155 } 2156 2157 /* Notify clients of device addition. This call must come 2158 * after dpm_sysfs_add() and before kobject_uevent(). 
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);

/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if
 * you have a clearly defined need to use and refcount the
 * device before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);

/**
 * get_device - increment reference count for device.
 * @dev: device.
 *
 * This simply forwards the call to kobject_get(), though
 * we do take care to provide for the case that we get a NULL
 * pointer passed in.
 */
struct device *get_device(struct device *dev)
{
	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);

/**
 * put_device - decrement reference count.
 * @dev: device in question.
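 *
 * A typical (hypothetical) pairing, shown only to illustrate that every
 * successful get_device() must be balanced by put_device():
 *
 *	dev = get_device(&foo->dev);
 *	if (dev) {
 *		foo_do_something(dev);	// foo_do_something() is illustrative
 *		put_device(dev);
 *	}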
2259 */ 2260 void put_device(struct device *dev) 2261 { 2262 /* might_sleep(); */ 2263 if (dev) 2264 kobject_put(&dev->kobj); 2265 } 2266 EXPORT_SYMBOL_GPL(put_device); 2267 2268 bool kill_device(struct device *dev) 2269 { 2270 /* 2271 * Require the device lock and set the "dead" flag to guarantee that 2272 * the update behavior is consistent with the other bitfields near 2273 * it and that we cannot have an asynchronous probe routine trying 2274 * to run while we are tearing out the bus/class/sysfs from 2275 * underneath the device. 2276 */ 2277 lockdep_assert_held(&dev->mutex); 2278 2279 if (dev->p->dead) 2280 return false; 2281 dev->p->dead = true; 2282 return true; 2283 } 2284 EXPORT_SYMBOL_GPL(kill_device); 2285 2286 /** 2287 * device_del - delete device from system. 2288 * @dev: device. 2289 * 2290 * This is the first part of the device unregistration 2291 * sequence. This removes the device from the lists we control 2292 * from here, has it removed from the other driver model 2293 * subsystems it was added to in device_add(), and removes it 2294 * from the kobject hierarchy. 2295 * 2296 * NOTE: this should be called manually _iff_ device_add() was 2297 * also called manually. 2298 */ 2299 void device_del(struct device *dev) 2300 { 2301 struct device *parent = dev->parent; 2302 struct kobject *glue_dir = NULL; 2303 struct class_interface *class_intf; 2304 2305 device_lock(dev); 2306 kill_device(dev); 2307 device_unlock(dev); 2308 2309 /* Notify clients of device removal. This call must come 2310 * before dpm_sysfs_remove(). 2311 */ 2312 if (dev->bus) 2313 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2314 BUS_NOTIFY_DEL_DEVICE, dev); 2315 2316 dpm_sysfs_remove(dev); 2317 if (parent) 2318 klist_del(&dev->p->knode_parent); 2319 if (MAJOR(dev->devt)) { 2320 devtmpfs_delete_node(dev); 2321 device_remove_sys_dev_entry(dev); 2322 device_remove_file(dev, &dev_attr_dev); 2323 } 2324 if (dev->class) { 2325 device_remove_class_symlinks(dev); 2326 2327 mutex_lock(&dev->class->p->mutex); 2328 /* notify any interfaces that the device is now gone */ 2329 list_for_each_entry(class_intf, 2330 &dev->class->p->interfaces, node) 2331 if (class_intf->remove_dev) 2332 class_intf->remove_dev(dev, class_intf); 2333 /* remove the device from the class list */ 2334 klist_del(&dev->p->knode_class); 2335 mutex_unlock(&dev->class->p->mutex); 2336 } 2337 device_remove_file(dev, &dev_attr_uevent); 2338 device_remove_attrs(dev); 2339 bus_remove_device(dev); 2340 device_pm_remove(dev); 2341 driver_deferred_probe_del(dev); 2342 device_platform_notify(dev, KOBJ_REMOVE); 2343 device_remove_properties(dev); 2344 device_links_purge(dev); 2345 2346 if (dev->bus) 2347 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2348 BUS_NOTIFY_REMOVED_DEVICE, dev); 2349 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 2350 glue_dir = get_glue_dir(dev); 2351 kobject_del(&dev->kobj); 2352 cleanup_glue_dir(dev, glue_dir); 2353 put_device(parent); 2354 } 2355 EXPORT_SYMBOL_GPL(device_del); 2356 2357 /** 2358 * device_unregister - unregister device from system. 2359 * @dev: device going away. 2360 * 2361 * We do this in two parts, like we do device_register(). First, 2362 * we remove it from all the subsystems with device_del(), then 2363 * we decrement the reference count via put_device(). If that 2364 * is the final reference count, the device will be cleaned up 2365 * via device_release() above. Otherwise, the structure will 2366 * stick around until the final reference to the device is dropped. 
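 *
 * Hypothetical teardown sketch (foo is an illustrative structure that embeds
 * a struct device and is freed from its ->release() callback):
 *
 *	device_unregister(&foo->dev);
 *	// do not touch foo past this point unless an extra reference is held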
2367 */ 2368 void device_unregister(struct device *dev) 2369 { 2370 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2371 device_del(dev); 2372 put_device(dev); 2373 } 2374 EXPORT_SYMBOL_GPL(device_unregister); 2375 2376 static struct device *prev_device(struct klist_iter *i) 2377 { 2378 struct klist_node *n = klist_prev(i); 2379 struct device *dev = NULL; 2380 struct device_private *p; 2381 2382 if (n) { 2383 p = to_device_private_parent(n); 2384 dev = p->device; 2385 } 2386 return dev; 2387 } 2388 2389 static struct device *next_device(struct klist_iter *i) 2390 { 2391 struct klist_node *n = klist_next(i); 2392 struct device *dev = NULL; 2393 struct device_private *p; 2394 2395 if (n) { 2396 p = to_device_private_parent(n); 2397 dev = p->device; 2398 } 2399 return dev; 2400 } 2401 2402 /** 2403 * device_get_devnode - path of device node file 2404 * @dev: device 2405 * @mode: returned file access mode 2406 * @uid: returned file owner 2407 * @gid: returned file group 2408 * @tmp: possibly allocated string 2409 * 2410 * Return the relative path of a possible device node. 2411 * Non-default names may need to allocate a memory to compose 2412 * a name. This memory is returned in tmp and needs to be 2413 * freed by the caller. 2414 */ 2415 const char *device_get_devnode(struct device *dev, 2416 umode_t *mode, kuid_t *uid, kgid_t *gid, 2417 const char **tmp) 2418 { 2419 char *s; 2420 2421 *tmp = NULL; 2422 2423 /* the device type may provide a specific name */ 2424 if (dev->type && dev->type->devnode) 2425 *tmp = dev->type->devnode(dev, mode, uid, gid); 2426 if (*tmp) 2427 return *tmp; 2428 2429 /* the class may provide a specific name */ 2430 if (dev->class && dev->class->devnode) 2431 *tmp = dev->class->devnode(dev, mode); 2432 if (*tmp) 2433 return *tmp; 2434 2435 /* return name without allocation, tmp == NULL */ 2436 if (strchr(dev_name(dev), '!') == NULL) 2437 return dev_name(dev); 2438 2439 /* replace '!' in the name with '/' */ 2440 s = kstrdup(dev_name(dev), GFP_KERNEL); 2441 if (!s) 2442 return NULL; 2443 strreplace(s, '!', '/'); 2444 return *tmp = s; 2445 } 2446 2447 /** 2448 * device_for_each_child - device child iterator. 2449 * @parent: parent struct device. 2450 * @fn: function to be called for each device. 2451 * @data: data for the callback. 2452 * 2453 * Iterate over @parent's child devices, and call @fn for each, 2454 * passing it @data. 2455 * 2456 * We check the return of @fn each time. If it returns anything 2457 * other than 0, we break out and return that value. 2458 */ 2459 int device_for_each_child(struct device *parent, void *data, 2460 int (*fn)(struct device *dev, void *data)) 2461 { 2462 struct klist_iter i; 2463 struct device *child; 2464 int error = 0; 2465 2466 if (!parent->p) 2467 return 0; 2468 2469 klist_iter_init(&parent->p->klist_children, &i); 2470 while (!error && (child = next_device(&i))) 2471 error = fn(child, data); 2472 klist_iter_exit(&i); 2473 return error; 2474 } 2475 EXPORT_SYMBOL_GPL(device_for_each_child); 2476 2477 /** 2478 * device_for_each_child_reverse - device child iterator in reversed order. 2479 * @parent: parent struct device. 2480 * @fn: function to be called for each device. 2481 * @data: data for the callback. 2482 * 2483 * Iterate over @parent's child devices, and call @fn for each, 2484 * passing it @data. 2485 * 2486 * We check the return of @fn each time. If it returns anything 2487 * other than 0, we break out and return that value. 
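 *
 * A hypothetical callback sketch (foo_quiesce_child() and foo_quiesce() are
 * illustrative names):
 *
 *	static int foo_quiesce_child(struct device *dev, void *data)
 *	{
 *		foo_quiesce(dev);
 *		return 0;	// 0 means keep iterating
 *	}
 *
 *	device_for_each_child_reverse(parent, NULL, foo_quiesce_child);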
2488 */ 2489 int device_for_each_child_reverse(struct device *parent, void *data, 2490 int (*fn)(struct device *dev, void *data)) 2491 { 2492 struct klist_iter i; 2493 struct device *child; 2494 int error = 0; 2495 2496 if (!parent->p) 2497 return 0; 2498 2499 klist_iter_init(&parent->p->klist_children, &i); 2500 while ((child = prev_device(&i)) && !error) 2501 error = fn(child, data); 2502 klist_iter_exit(&i); 2503 return error; 2504 } 2505 EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 2506 2507 /** 2508 * device_find_child - device iterator for locating a particular device. 2509 * @parent: parent struct device 2510 * @match: Callback function to check device 2511 * @data: Data to pass to match function 2512 * 2513 * This is similar to the device_for_each_child() function above, but it 2514 * returns a reference to a device that is 'found' for later use, as 2515 * determined by the @match callback. 2516 * 2517 * The callback should return 0 if the device doesn't match and non-zero 2518 * if it does. If the callback returns non-zero and a reference to the 2519 * current device can be obtained, this function will return to the caller 2520 * and not iterate over any more devices. 2521 * 2522 * NOTE: you will need to drop the reference with put_device() after use. 2523 */ 2524 struct device *device_find_child(struct device *parent, void *data, 2525 int (*match)(struct device *dev, void *data)) 2526 { 2527 struct klist_iter i; 2528 struct device *child; 2529 2530 if (!parent) 2531 return NULL; 2532 2533 klist_iter_init(&parent->p->klist_children, &i); 2534 while ((child = next_device(&i))) 2535 if (match(child, data) && get_device(child)) 2536 break; 2537 klist_iter_exit(&i); 2538 return child; 2539 } 2540 EXPORT_SYMBOL_GPL(device_find_child); 2541 2542 /** 2543 * device_find_child_by_name - device iterator for locating a child device. 2544 * @parent: parent struct device 2545 * @name: name of the child device 2546 * 2547 * This is similar to the device_find_child() function above, but it 2548 * returns a reference to a device that has the name @name. 2549 * 2550 * NOTE: you will need to drop the reference with put_device() after use. 
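 *
 * For example (hypothetical; "port0" is an arbitrary child name):
 *
 *	child = device_find_child_by_name(parent, "port0");
 *	if (child) {
 *		// ... use child ...
 *		put_device(child);
 *	}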
2551 */ 2552 struct device *device_find_child_by_name(struct device *parent, 2553 const char *name) 2554 { 2555 struct klist_iter i; 2556 struct device *child; 2557 2558 if (!parent) 2559 return NULL; 2560 2561 klist_iter_init(&parent->p->klist_children, &i); 2562 while ((child = next_device(&i))) 2563 if (!strcmp(dev_name(child), name) && get_device(child)) 2564 break; 2565 klist_iter_exit(&i); 2566 return child; 2567 } 2568 EXPORT_SYMBOL_GPL(device_find_child_by_name); 2569 2570 int __init devices_init(void) 2571 { 2572 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); 2573 if (!devices_kset) 2574 return -ENOMEM; 2575 dev_kobj = kobject_create_and_add("dev", NULL); 2576 if (!dev_kobj) 2577 goto dev_kobj_err; 2578 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); 2579 if (!sysfs_dev_block_kobj) 2580 goto block_kobj_err; 2581 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); 2582 if (!sysfs_dev_char_kobj) 2583 goto char_kobj_err; 2584 2585 return 0; 2586 2587 char_kobj_err: 2588 kobject_put(sysfs_dev_block_kobj); 2589 block_kobj_err: 2590 kobject_put(dev_kobj); 2591 dev_kobj_err: 2592 kset_unregister(devices_kset); 2593 return -ENOMEM; 2594 } 2595 2596 static int device_check_offline(struct device *dev, void *not_used) 2597 { 2598 int ret; 2599 2600 ret = device_for_each_child(dev, NULL, device_check_offline); 2601 if (ret) 2602 return ret; 2603 2604 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; 2605 } 2606 2607 /** 2608 * device_offline - Prepare the device for hot-removal. 2609 * @dev: Device to be put offline. 2610 * 2611 * Execute the device bus type's .offline() callback, if present, to prepare 2612 * the device for a subsequent hot-removal. If that succeeds, the device must 2613 * not be used until either it is removed or its bus type's .online() callback 2614 * is executed. 2615 * 2616 * Call under device_hotplug_lock. 2617 */ 2618 int device_offline(struct device *dev) 2619 { 2620 int ret; 2621 2622 if (dev->offline_disabled) 2623 return -EPERM; 2624 2625 ret = device_for_each_child(dev, NULL, device_check_offline); 2626 if (ret) 2627 return ret; 2628 2629 device_lock(dev); 2630 if (device_supports_offline(dev)) { 2631 if (dev->offline) { 2632 ret = 1; 2633 } else { 2634 ret = dev->bus->offline(dev); 2635 if (!ret) { 2636 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 2637 dev->offline = true; 2638 } 2639 } 2640 } 2641 device_unlock(dev); 2642 2643 return ret; 2644 } 2645 2646 /** 2647 * device_online - Put the device back online after successful device_offline(). 2648 * @dev: Device to be put back online. 2649 * 2650 * If device_offline() has been successfully executed for @dev, but the device 2651 * has not been removed subsequently, execute its bus type's .online() callback 2652 * to indicate that the device can be used again. 2653 * 2654 * Call under device_hotplug_lock. 
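 *
 * A minimal caller sketch, taking device_hotplug_lock as required:
 *
 *	lock_device_hotplug();
 *	ret = device_online(dev);
 *	unlock_device_hotplug();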
2655 */ 2656 int device_online(struct device *dev) 2657 { 2658 int ret = 0; 2659 2660 device_lock(dev); 2661 if (device_supports_offline(dev)) { 2662 if (dev->offline) { 2663 ret = dev->bus->online(dev); 2664 if (!ret) { 2665 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2666 dev->offline = false; 2667 } 2668 } else { 2669 ret = 1; 2670 } 2671 } 2672 device_unlock(dev); 2673 2674 return ret; 2675 } 2676 2677 struct root_device { 2678 struct device dev; 2679 struct module *owner; 2680 }; 2681 2682 static inline struct root_device *to_root_device(struct device *d) 2683 { 2684 return container_of(d, struct root_device, dev); 2685 } 2686 2687 static void root_device_release(struct device *dev) 2688 { 2689 kfree(to_root_device(dev)); 2690 } 2691 2692 /** 2693 * __root_device_register - allocate and register a root device 2694 * @name: root device name 2695 * @owner: owner module of the root device, usually THIS_MODULE 2696 * 2697 * This function allocates a root device and registers it 2698 * using device_register(). In order to free the returned 2699 * device, use root_device_unregister(). 2700 * 2701 * Root devices are dummy devices which allow other devices 2702 * to be grouped under /sys/devices. Use this function to 2703 * allocate a root device and then use it as the parent of 2704 * any device which should appear under /sys/devices/{name} 2705 * 2706 * The /sys/devices/{name} directory will also contain a 2707 * 'module' symlink which points to the @owner directory 2708 * in sysfs. 2709 * 2710 * Returns &struct device pointer on success, or ERR_PTR() on error. 2711 * 2712 * Note: You probably want to use root_device_register(). 2713 */ 2714 struct device *__root_device_register(const char *name, struct module *owner) 2715 { 2716 struct root_device *root; 2717 int err = -ENOMEM; 2718 2719 root = kzalloc(sizeof(struct root_device), GFP_KERNEL); 2720 if (!root) 2721 return ERR_PTR(err); 2722 2723 err = dev_set_name(&root->dev, "%s", name); 2724 if (err) { 2725 kfree(root); 2726 return ERR_PTR(err); 2727 } 2728 2729 root->dev.release = root_device_release; 2730 2731 err = device_register(&root->dev); 2732 if (err) { 2733 put_device(&root->dev); 2734 return ERR_PTR(err); 2735 } 2736 2737 #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ 2738 if (owner) { 2739 struct module_kobject *mk = &owner->mkobj; 2740 2741 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); 2742 if (err) { 2743 device_unregister(&root->dev); 2744 return ERR_PTR(err); 2745 } 2746 root->owner = owner; 2747 } 2748 #endif 2749 2750 return &root->dev; 2751 } 2752 EXPORT_SYMBOL_GPL(__root_device_register); 2753 2754 /** 2755 * root_device_unregister - unregister and free a root device 2756 * @dev: device going away 2757 * 2758 * This function unregisters and cleans up a device that was created by 2759 * root_device_register(). 
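 *
 * Typical pairing (hypothetical "foo" subsystem name):
 *
 *	root = root_device_register("foo");
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	// register child devices with root as their parent ...
 *	root_device_unregister(root);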
2760 */ 2761 void root_device_unregister(struct device *dev) 2762 { 2763 struct root_device *root = to_root_device(dev); 2764 2765 if (root->owner) 2766 sysfs_remove_link(&root->dev.kobj, "module"); 2767 2768 device_unregister(dev); 2769 } 2770 EXPORT_SYMBOL_GPL(root_device_unregister); 2771 2772 2773 static void device_create_release(struct device *dev) 2774 { 2775 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2776 kfree(dev); 2777 } 2778 2779 static __printf(6, 0) struct device * 2780 device_create_groups_vargs(struct class *class, struct device *parent, 2781 dev_t devt, void *drvdata, 2782 const struct attribute_group **groups, 2783 const char *fmt, va_list args) 2784 { 2785 struct device *dev = NULL; 2786 int retval = -ENODEV; 2787 2788 if (class == NULL || IS_ERR(class)) 2789 goto error; 2790 2791 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2792 if (!dev) { 2793 retval = -ENOMEM; 2794 goto error; 2795 } 2796 2797 device_initialize(dev); 2798 dev->devt = devt; 2799 dev->class = class; 2800 dev->parent = parent; 2801 dev->groups = groups; 2802 dev->release = device_create_release; 2803 dev_set_drvdata(dev, drvdata); 2804 2805 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 2806 if (retval) 2807 goto error; 2808 2809 retval = device_add(dev); 2810 if (retval) 2811 goto error; 2812 2813 return dev; 2814 2815 error: 2816 put_device(dev); 2817 return ERR_PTR(retval); 2818 } 2819 2820 /** 2821 * device_create_vargs - creates a device and registers it with sysfs 2822 * @class: pointer to the struct class that this device should be registered to 2823 * @parent: pointer to the parent struct device of this new device, if any 2824 * @devt: the dev_t for the char device to be added 2825 * @drvdata: the data to be added to the device for callbacks 2826 * @fmt: string for the device's name 2827 * @args: va_list for the device's name 2828 * 2829 * This function can be used by char device classes. A struct device 2830 * will be created in sysfs, registered to the specified class. 2831 * 2832 * A "dev" file will be created, showing the dev_t for the device, if 2833 * the dev_t is not 0,0. 2834 * If a pointer to a parent struct device is passed in, the newly created 2835 * struct device will be a child of that device in sysfs. 2836 * The pointer to the struct device will be returned from the call. 2837 * Any further sysfs files that might be required can be created using this 2838 * pointer. 2839 * 2840 * Returns &struct device pointer on success, or ERR_PTR() on error. 2841 * 2842 * Note: the struct class passed to this function must have previously 2843 * been created with a call to class_create(). 2844 */ 2845 struct device *device_create_vargs(struct class *class, struct device *parent, 2846 dev_t devt, void *drvdata, const char *fmt, 2847 va_list args) 2848 { 2849 return device_create_groups_vargs(class, parent, devt, drvdata, NULL, 2850 fmt, args); 2851 } 2852 EXPORT_SYMBOL_GPL(device_create_vargs); 2853 2854 /** 2855 * device_create - creates a device and registers it with sysfs 2856 * @class: pointer to the struct class that this device should be registered to 2857 * @parent: pointer to the parent struct device of this new device, if any 2858 * @devt: the dev_t for the char device to be added 2859 * @drvdata: the data to be added to the device for callbacks 2860 * @fmt: string for the device's name 2861 * 2862 * This function can be used by char device classes. A struct device 2863 * will be created in sysfs, registered to the specified class. 
2864 * 2865 * A "dev" file will be created, showing the dev_t for the device, if 2866 * the dev_t is not 0,0. 2867 * If a pointer to a parent struct device is passed in, the newly created 2868 * struct device will be a child of that device in sysfs. 2869 * The pointer to the struct device will be returned from the call. 2870 * Any further sysfs files that might be required can be created using this 2871 * pointer. 2872 * 2873 * Returns &struct device pointer on success, or ERR_PTR() on error. 2874 * 2875 * Note: the struct class passed to this function must have previously 2876 * been created with a call to class_create(). 2877 */ 2878 struct device *device_create(struct class *class, struct device *parent, 2879 dev_t devt, void *drvdata, const char *fmt, ...) 2880 { 2881 va_list vargs; 2882 struct device *dev; 2883 2884 va_start(vargs, fmt); 2885 dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs); 2886 va_end(vargs); 2887 return dev; 2888 } 2889 EXPORT_SYMBOL_GPL(device_create); 2890 2891 /** 2892 * device_create_with_groups - creates a device and registers it with sysfs 2893 * @class: pointer to the struct class that this device should be registered to 2894 * @parent: pointer to the parent struct device of this new device, if any 2895 * @devt: the dev_t for the char device to be added 2896 * @drvdata: the data to be added to the device for callbacks 2897 * @groups: NULL-terminated list of attribute groups to be created 2898 * @fmt: string for the device's name 2899 * 2900 * This function can be used by char device classes. A struct device 2901 * will be created in sysfs, registered to the specified class. 2902 * Additional attributes specified in the groups parameter will also 2903 * be created automatically. 2904 * 2905 * A "dev" file will be created, showing the dev_t for the device, if 2906 * the dev_t is not 0,0. 2907 * If a pointer to a parent struct device is passed in, the newly created 2908 * struct device will be a child of that device in sysfs. 2909 * The pointer to the struct device will be returned from the call. 2910 * Any further sysfs files that might be required can be created using this 2911 * pointer. 2912 * 2913 * Returns &struct device pointer on success, or ERR_PTR() on error. 2914 * 2915 * Note: the struct class passed to this function must have previously 2916 * been created with a call to class_create(). 2917 */ 2918 struct device *device_create_with_groups(struct class *class, 2919 struct device *parent, dev_t devt, 2920 void *drvdata, 2921 const struct attribute_group **groups, 2922 const char *fmt, ...) 2923 { 2924 va_list vargs; 2925 struct device *dev; 2926 2927 va_start(vargs, fmt); 2928 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, 2929 fmt, vargs); 2930 va_end(vargs); 2931 return dev; 2932 } 2933 EXPORT_SYMBOL_GPL(device_create_with_groups); 2934 2935 static int __match_devt(struct device *dev, const void *data) 2936 { 2937 const dev_t *devt = data; 2938 2939 return dev->devt == *devt; 2940 } 2941 2942 /** 2943 * device_destroy - removes a device that was created with device_create() 2944 * @class: pointer to the struct class that this device was registered with 2945 * @devt: the dev_t of the device that was previously registered 2946 * 2947 * This call unregisters and cleans up a device that was created with a 2948 * call to device_create(). 
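 *
 * A hypothetical pairing with device_create() (foo_class, foo_major and foo
 * are illustrative):
 *
 *	dev = device_create(foo_class, NULL, MKDEV(foo_major, 0), foo,
 *			    "foo%d", 0);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	// ... use the device ...
 *	device_destroy(foo_class, MKDEV(foo_major, 0));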
2949 */ 2950 void device_destroy(struct class *class, dev_t devt) 2951 { 2952 struct device *dev; 2953 2954 dev = class_find_device(class, NULL, &devt, __match_devt); 2955 if (dev) { 2956 put_device(dev); 2957 device_unregister(dev); 2958 } 2959 } 2960 EXPORT_SYMBOL_GPL(device_destroy); 2961 2962 /** 2963 * device_rename - renames a device 2964 * @dev: the pointer to the struct device to be renamed 2965 * @new_name: the new name of the device 2966 * 2967 * It is the responsibility of the caller to provide mutual 2968 * exclusion between two different calls of device_rename 2969 * on the same device to ensure that new_name is valid and 2970 * won't conflict with other devices. 2971 * 2972 * Note: Don't call this function. Currently, the networking layer calls this 2973 * function, but that will change. The following text from Kay Sievers offers 2974 * some insight: 2975 * 2976 * Renaming devices is racy at many levels, symlinks and other stuff are not 2977 * replaced atomically, and you get a "move" uevent, but it's not easy to 2978 * connect the event to the old and new device. Device nodes are not renamed at 2979 * all, there isn't even support for that in the kernel now. 2980 * 2981 * In the meantime, during renaming, your target name might be taken by another 2982 * driver, creating conflicts. Or the old name is taken directly after you 2983 * renamed it -- then you get events for the same DEVPATH, before you even see 2984 * the "move" event. It's just a mess, and nothing new should ever rely on 2985 * kernel device renaming. Besides that, it's not even implemented now for 2986 * other things than (driver-core wise very simple) network devices. 2987 * 2988 * We are currently about to change network renaming in udev to completely 2989 * disallow renaming of devices in the same namespace as the kernel uses, 2990 * because we can't solve the problems properly, that arise with swapping names 2991 * of multiple interfaces without races. Means, renaming of eth[0-9]* will only 2992 * be allowed to some other name than eth[0-9]*, for the aforementioned 2993 * reasons. 2994 * 2995 * Make up a "real" name in the driver before you register anything, or add 2996 * some other attributes for userspace to find the device, or use udev to add 2997 * symlinks -- but never rename kernel devices later, it's a complete mess. We 2998 * don't even want to get into that and try to implement the missing pieces in 2999 * the core. We really have other pieces to fix in the driver core mess. 
:) 3000 */ 3001 int device_rename(struct device *dev, const char *new_name) 3002 { 3003 struct kobject *kobj = &dev->kobj; 3004 char *old_device_name = NULL; 3005 int error; 3006 3007 dev = get_device(dev); 3008 if (!dev) 3009 return -EINVAL; 3010 3011 dev_dbg(dev, "renaming to %s\n", new_name); 3012 3013 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); 3014 if (!old_device_name) { 3015 error = -ENOMEM; 3016 goto out; 3017 } 3018 3019 if (dev->class) { 3020 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, 3021 kobj, old_device_name, 3022 new_name, kobject_namespace(kobj)); 3023 if (error) 3024 goto out; 3025 } 3026 3027 error = kobject_rename(kobj, new_name); 3028 if (error) 3029 goto out; 3030 3031 out: 3032 put_device(dev); 3033 3034 kfree(old_device_name); 3035 3036 return error; 3037 } 3038 EXPORT_SYMBOL_GPL(device_rename); 3039 3040 static int device_move_class_links(struct device *dev, 3041 struct device *old_parent, 3042 struct device *new_parent) 3043 { 3044 int error = 0; 3045 3046 if (old_parent) 3047 sysfs_remove_link(&dev->kobj, "device"); 3048 if (new_parent) 3049 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, 3050 "device"); 3051 return error; 3052 } 3053 3054 /** 3055 * device_move - moves a device to a new parent 3056 * @dev: the pointer to the struct device to be moved 3057 * @new_parent: the new parent of the device (can be NULL) 3058 * @dpm_order: how to reorder the dpm_list 3059 */ 3060 int device_move(struct device *dev, struct device *new_parent, 3061 enum dpm_order dpm_order) 3062 { 3063 int error; 3064 struct device *old_parent; 3065 struct kobject *new_parent_kobj; 3066 3067 dev = get_device(dev); 3068 if (!dev) 3069 return -EINVAL; 3070 3071 device_pm_lock(); 3072 new_parent = get_device(new_parent); 3073 new_parent_kobj = get_device_parent(dev, new_parent); 3074 if (IS_ERR(new_parent_kobj)) { 3075 error = PTR_ERR(new_parent_kobj); 3076 put_device(new_parent); 3077 goto out; 3078 } 3079 3080 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), 3081 __func__, new_parent ? dev_name(new_parent) : "<NULL>"); 3082 error = kobject_move(&dev->kobj, new_parent_kobj); 3083 if (error) { 3084 cleanup_glue_dir(dev, new_parent_kobj); 3085 put_device(new_parent); 3086 goto out; 3087 } 3088 old_parent = dev->parent; 3089 dev->parent = new_parent; 3090 if (old_parent) 3091 klist_remove(&dev->p->knode_parent); 3092 if (new_parent) { 3093 klist_add_tail(&dev->p->knode_parent, 3094 &new_parent->p->klist_children); 3095 set_dev_node(dev, dev_to_node(new_parent)); 3096 } 3097 3098 if (dev->class) { 3099 error = device_move_class_links(dev, old_parent, new_parent); 3100 if (error) { 3101 /* We ignore errors on cleanup since we're hosed anyway... 
*/ 3102 device_move_class_links(dev, new_parent, old_parent); 3103 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 3104 if (new_parent) 3105 klist_remove(&dev->p->knode_parent); 3106 dev->parent = old_parent; 3107 if (old_parent) { 3108 klist_add_tail(&dev->p->knode_parent, 3109 &old_parent->p->klist_children); 3110 set_dev_node(dev, dev_to_node(old_parent)); 3111 } 3112 } 3113 cleanup_glue_dir(dev, new_parent_kobj); 3114 put_device(new_parent); 3115 goto out; 3116 } 3117 } 3118 switch (dpm_order) { 3119 case DPM_ORDER_NONE: 3120 break; 3121 case DPM_ORDER_DEV_AFTER_PARENT: 3122 device_pm_move_after(dev, new_parent); 3123 devices_kset_move_after(dev, new_parent); 3124 break; 3125 case DPM_ORDER_PARENT_BEFORE_DEV: 3126 device_pm_move_before(new_parent, dev); 3127 devices_kset_move_before(new_parent, dev); 3128 break; 3129 case DPM_ORDER_DEV_LAST: 3130 device_pm_move_last(dev); 3131 devices_kset_move_last(dev); 3132 break; 3133 } 3134 3135 put_device(old_parent); 3136 out: 3137 device_pm_unlock(); 3138 put_device(dev); 3139 return error; 3140 } 3141 EXPORT_SYMBOL_GPL(device_move); 3142 3143 /** 3144 * device_shutdown - call ->shutdown() on each device to shutdown. 3145 */ 3146 void device_shutdown(void) 3147 { 3148 struct device *dev, *parent; 3149 3150 wait_for_device_probe(); 3151 device_block_probing(); 3152 3153 spin_lock(&devices_kset->list_lock); 3154 /* 3155 * Walk the devices list backward, shutting down each in turn. 3156 * Beware that device unplug events may also start pulling 3157 * devices offline, even as the system is shutting down. 3158 */ 3159 while (!list_empty(&devices_kset->list)) { 3160 dev = list_entry(devices_kset->list.prev, struct device, 3161 kobj.entry); 3162 3163 /* 3164 * hold reference count of device's parent to 3165 * prevent it from being freed because parent's 3166 * lock is to be held 3167 */ 3168 parent = get_device(dev->parent); 3169 get_device(dev); 3170 /* 3171 * Make sure the device is off the kset list, in the 3172 * event that dev->*->shutdown() doesn't remove it. 
3173 */ 3174 list_del_init(&dev->kobj.entry); 3175 spin_unlock(&devices_kset->list_lock); 3176 3177 /* hold lock to avoid race with probe/release */ 3178 if (parent) 3179 device_lock(parent); 3180 device_lock(dev); 3181 3182 /* Don't allow any more runtime suspends */ 3183 pm_runtime_get_noresume(dev); 3184 pm_runtime_barrier(dev); 3185 3186 if (dev->class && dev->class->shutdown_pre) { 3187 if (initcall_debug) 3188 dev_info(dev, "shutdown_pre\n"); 3189 dev->class->shutdown_pre(dev); 3190 } 3191 if (dev->bus && dev->bus->shutdown) { 3192 if (initcall_debug) 3193 dev_info(dev, "shutdown\n"); 3194 dev->bus->shutdown(dev); 3195 } else if (dev->driver && dev->driver->shutdown) { 3196 if (initcall_debug) 3197 dev_info(dev, "shutdown\n"); 3198 dev->driver->shutdown(dev); 3199 } 3200 3201 device_unlock(dev); 3202 if (parent) 3203 device_unlock(parent); 3204 3205 put_device(dev); 3206 put_device(parent); 3207 3208 spin_lock(&devices_kset->list_lock); 3209 } 3210 spin_unlock(&devices_kset->list_lock); 3211 } 3212 3213 /* 3214 * Device logging functions 3215 */ 3216 3217 #ifdef CONFIG_PRINTK 3218 static int 3219 create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen) 3220 { 3221 const char *subsys; 3222 size_t pos = 0; 3223 3224 if (dev->class) 3225 subsys = dev->class->name; 3226 else if (dev->bus) 3227 subsys = dev->bus->name; 3228 else 3229 return 0; 3230 3231 pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys); 3232 if (pos >= hdrlen) 3233 goto overflow; 3234 3235 /* 3236 * Add device identifier DEVICE=: 3237 * b12:8 block dev_t 3238 * c127:3 char dev_t 3239 * n8 netdev ifindex 3240 * +sound:card0 subsystem:devname 3241 */ 3242 if (MAJOR(dev->devt)) { 3243 char c; 3244 3245 if (strcmp(subsys, "block") == 0) 3246 c = 'b'; 3247 else 3248 c = 'c'; 3249 pos++; 3250 pos += snprintf(hdr + pos, hdrlen - pos, 3251 "DEVICE=%c%u:%u", 3252 c, MAJOR(dev->devt), MINOR(dev->devt)); 3253 } else if (strcmp(subsys, "net") == 0) { 3254 struct net_device *net = to_net_dev(dev); 3255 3256 pos++; 3257 pos += snprintf(hdr + pos, hdrlen - pos, 3258 "DEVICE=n%u", net->ifindex); 3259 } else { 3260 pos++; 3261 pos += snprintf(hdr + pos, hdrlen - pos, 3262 "DEVICE=+%s:%s", subsys, dev_name(dev)); 3263 } 3264 3265 if (pos >= hdrlen) 3266 goto overflow; 3267 3268 return pos; 3269 3270 overflow: 3271 dev_WARN(dev, "device/subsystem name too long"); 3272 return 0; 3273 } 3274 3275 int dev_vprintk_emit(int level, const struct device *dev, 3276 const char *fmt, va_list args) 3277 { 3278 char hdr[128]; 3279 size_t hdrlen; 3280 3281 hdrlen = create_syslog_header(dev, hdr, sizeof(hdr)); 3282 3283 return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args); 3284 } 3285 EXPORT_SYMBOL(dev_vprintk_emit); 3286 3287 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 3288 { 3289 va_list args; 3290 int r; 3291 3292 va_start(args, fmt); 3293 3294 r = dev_vprintk_emit(level, dev, fmt, args); 3295 3296 va_end(args); 3297 3298 return r; 3299 } 3300 EXPORT_SYMBOL(dev_printk_emit); 3301 3302 static void __dev_printk(const char *level, const struct device *dev, 3303 struct va_format *vaf) 3304 { 3305 if (dev) 3306 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", 3307 dev_driver_string(dev), dev_name(dev), vaf); 3308 else 3309 printk("%s(NULL device *): %pV", level, vaf); 3310 } 3311 3312 void dev_printk(const char *level, const struct device *dev, 3313 const char *fmt, ...) 
3314 { 3315 struct va_format vaf; 3316 va_list args; 3317 3318 va_start(args, fmt); 3319 3320 vaf.fmt = fmt; 3321 vaf.va = &args; 3322 3323 __dev_printk(level, dev, &vaf); 3324 3325 va_end(args); 3326 } 3327 EXPORT_SYMBOL(dev_printk); 3328 3329 #define define_dev_printk_level(func, kern_level) \ 3330 void func(const struct device *dev, const char *fmt, ...) \ 3331 { \ 3332 struct va_format vaf; \ 3333 va_list args; \ 3334 \ 3335 va_start(args, fmt); \ 3336 \ 3337 vaf.fmt = fmt; \ 3338 vaf.va = &args; \ 3339 \ 3340 __dev_printk(kern_level, dev, &vaf); \ 3341 \ 3342 va_end(args); \ 3343 } \ 3344 EXPORT_SYMBOL(func); 3345 3346 define_dev_printk_level(_dev_emerg, KERN_EMERG); 3347 define_dev_printk_level(_dev_alert, KERN_ALERT); 3348 define_dev_printk_level(_dev_crit, KERN_CRIT); 3349 define_dev_printk_level(_dev_err, KERN_ERR); 3350 define_dev_printk_level(_dev_warn, KERN_WARNING); 3351 define_dev_printk_level(_dev_notice, KERN_NOTICE); 3352 define_dev_printk_level(_dev_info, KERN_INFO); 3353 3354 #endif 3355 3356 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) 3357 { 3358 return fwnode && !IS_ERR(fwnode->secondary); 3359 } 3360 3361 /** 3362 * set_primary_fwnode - Change the primary firmware node of a given device. 3363 * @dev: Device to handle. 3364 * @fwnode: New primary firmware node of the device. 3365 * 3366 * Set the device's firmware node pointer to @fwnode, but if a secondary 3367 * firmware node of the device is present, preserve it. 3368 */ 3369 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3370 { 3371 if (fwnode) { 3372 struct fwnode_handle *fn = dev->fwnode; 3373 3374 if (fwnode_is_primary(fn)) 3375 fn = fn->secondary; 3376 3377 if (fn) { 3378 WARN_ON(fwnode->secondary); 3379 fwnode->secondary = fn; 3380 } 3381 dev->fwnode = fwnode; 3382 } else { 3383 dev->fwnode = fwnode_is_primary(dev->fwnode) ? 3384 dev->fwnode->secondary : NULL; 3385 } 3386 } 3387 EXPORT_SYMBOL_GPL(set_primary_fwnode); 3388 3389 /** 3390 * set_secondary_fwnode - Change the secondary firmware node of a given device. 3391 * @dev: Device to handle. 3392 * @fwnode: New secondary firmware node of the device. 3393 * 3394 * If a primary firmware node of the device is present, set its secondary 3395 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to 3396 * @fwnode. 3397 */ 3398 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3399 { 3400 if (fwnode) 3401 fwnode->secondary = ERR_PTR(-ENODEV); 3402 3403 if (fwnode_is_primary(dev->fwnode)) 3404 dev->fwnode->secondary = fwnode; 3405 else 3406 dev->fwnode = fwnode; 3407 } 3408 3409 /** 3410 * device_set_of_node_from_dev - reuse device-tree node of another device 3411 * @dev: device whose device-tree node is being set 3412 * @dev2: device whose device-tree node is being reused 3413 * 3414 * Takes another reference to the new device-tree node after first dropping 3415 * any reference held to the old node. 3416 */ 3417 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) 3418 { 3419 of_node_put(dev->of_node); 3420 dev->of_node = of_node_get(dev2->of_node); 3421 dev->of_node_reused = true; 3422 } 3423 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); 3424 3425 int device_match_of_node(struct device *dev, const void *np) 3426 { 3427 return dev->of_node == np; 3428 } 3429 EXPORT_SYMBOL_GPL(device_match_of_node); 3430
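
/*
 * device_match_of_node() is meant to be used as the match callback of the
 * *_find_device() helpers. A hypothetical sketch, assuming bus_find_device()
 * takes the same const void * match signature as class_find_device() above:
 *
 *	dev = bus_find_device(&platform_bus_type, NULL, np,
 *			      device_match_of_node);
 *	if (dev) {
 *		// ... use dev ...
 *		put_device(dev);
 *	}
 */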