// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 */
#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
#endif /* CONFIG_HAS_IOMEM */
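
/*
 * A minimal usage sketch (hypothetical "foo" names, not a driver in this
 * file): a typical probe() maps its first MMIO region with the helper above
 * and checks the result with IS_ERR()/PTR_ERR():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *
 *		return 0;
 *	}
 */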

static int __platform_get_irq(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	return dev->archdata.irqs[num];
#else
	struct resource *r;
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			int ret;

			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				return ret;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			return -ENXIO;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r)
		return r->start;

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);

		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	return -ENXIO;
#endif
}

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding the
 * IRQ fails. Device drivers should check the return value for errors so as to
 * not pass a negative integer value to the request_irq() APIs.
 *
 * Example:
 *		int irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = __platform_get_irq(dev, num);
	if (ret < 0 && ret != -EPROBE_DEFER)
		dev_err(&dev->dev, "IRQ index %u not found\n", num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);

/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Example:
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	return __platform_get_irq(dev, num);
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	while ((ret = __platform_get_irq(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	struct resource *r;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r)
		return r->start;

	dev_err(&dev->dev, "IRQ %s not found\n", name);
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
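
/*
 * A minimal usage sketch (hypothetical "foo" names and a hypothetical
 * "wakeup" interrupt name): drivers whose interrupts are described by name
 * rather than by index would typically pair platform_get_irq_byname() with
 * devm_request_irq():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq;
 *
 *		irq = platform_get_irq_byname(pdev, "wakeup");
 *		if (irq < 0)
 *			return irq;
 *
 *		return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
 *					dev_name(&pdev->dev), NULL);
 *	}
 */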

/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);

struct platform_object {
	struct platform_device pdev;
	char name[];
};

/*
 * Set up default DMA mask for platform devices if they weren't
 * previously set by the architecture / DT.
 */
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dma_mask)
		pdev->dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dma_mask;
}

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		setup_pdev_dma_masks(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add data to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take a deep copy of @properties and attach the copy to the
 * platform device. The memory associated with the properties will be freed
 * when the platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
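
/*
 * A minimal usage sketch (hypothetical property names): a board file could
 * build a null-terminated property_entry array with the PROPERTY_ENTRY_*
 * helpers from <linux/property.h> and attach it before registration:
 *
 *	static const struct property_entry foo_properties[] = {
 *		PROPERTY_ENTRY_U32("linux,frame-size", 4096),
 *		PROPERTY_ENTRY_BOOL("wakeup-source"),
 *		{ }
 *	};
 *
 *	ret = platform_device_add_properties(pdev, foo_properties);
 *	if (ret)
 *		goto err_put;
 */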

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	int i, ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (--i >= 0) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
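
/*
 * A minimal usage sketch of the dynamic allocation path (hypothetical
 * "foo" names): platform_device_put() unwinds the allocation on any error
 * before platform_device_add() has succeeded:
 *
 *	struct platform_device *pdev;
 *	int ret;
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_resources(pdev, foo_resources,
 *					    ARRAY_SIZE(foo_resources));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add(pdev);
 *	if (ret)
 *		goto err_put;
 *
 *	return 0;
 *
 * err_put:
 *	platform_device_put(pdev);
 *	return ret;
 */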

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	int i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_simple_remove(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove the device from the subsystem, then we drop the reference
 * count by calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/*
		 * This memory isn't freed when the device is put,
		 * I don't have a nice idea for that though. Conceptually
		 * dma_mask in struct device should not be a pointer.
		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			goto err;

		kmemleak_ignore(pdev->dev.dma_mask);

		*pdev->dev.dma_mask = pdevinfo->dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		kfree(pdev->dev.dma_mask);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
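
/*
 * A minimal usage sketch (hypothetical "foo" names): the same registration
 * can be collapsed into one call by filling in a struct platform_device_info
 * on the stack:
 *
 *	static struct platform_device *foo_register(struct device *parent)
 *	{
 *		struct platform_device_info pdevinfo = {
 *			.parent		= parent,
 *			.name		= "foo",
 *			.id		= PLATFORM_DEVID_AUTO,
 *			.res		= foo_resources,
 *			.num_res	= ARRAY_SIZE(foo_resources),
 *			.data		= &foo_pdata,
 *			.size_data	= sizeof(foo_pdata),
 *			.dma_mask	= DMA_BIT_MASK(32),
 *		};
 *
 *		return platform_device_register_full(&pdevinfo);
 *	}
 */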

static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}

static int platform_drv_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret = 0;

	if (drv->remove)
		ret = drv->remove(dev);
	dev_pm_domain_detach(_dev, true);

	return ret;
}

static void platform_drv_shutdown(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
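
/*
 * A minimal usage sketch (hypothetical "foo" driver): most drivers never
 * call __platform_driver_register() directly; they fill in a struct
 * platform_driver and let module_platform_driver() from
 * <linux/platform_device.h> generate the module init/exit boilerplate:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */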

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, otherwise
 * returns a negative error code and the driver is not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
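
/*
 * A minimal usage sketch (hypothetical "foo" drivers): callers normally use
 * the platform_register_drivers() wrapper from <linux/platform_device.h>,
 * which supplies THIS_MODULE for them:
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_clk_driver,
 *		&foo_pinctrl_driver,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_register_drivers(foo_drivers,
 *						 ARRAY_SIZE(foo_drivers));
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		platform_unregister_drivers(foo_drivers,
 *					    ARRAY_SIZE(foo_drivers));
 *	}
 */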

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running: "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running: "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sprintf(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(platform_dev);

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}

/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'. Driver IDs are simply
 * "<name>". So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       (void *)platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);

int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}

static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);

/**
 * early_platform_driver_register - register early platform driver
 * @epdrv: early_platform driver structure
 * @buf: string passed from early_param()
 *
 * Helper function for early_platform_init() / early_platform_init_buffer()
 */
int __init early_platform_driver_register(struct early_platform_driver *epdrv,
					  char *buf)
{
	char *tmp;
	int n;

	/* Simply add the driver to the end of the global list.
	 * Drivers will by default be put on the list in compiled-in order.
	 */
	if (!epdrv->list.next) {
		INIT_LIST_HEAD(&epdrv->list);
		list_add_tail(&epdrv->list, &early_platform_driver_list);
	}

	/* If the user has specified a device then make sure the driver
	 * gets prioritized. The driver of the last device specified on
	 * the command line will be put first on the list.
	 */
	n = strlen(epdrv->pdrv->driver.name);
	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
		list_move(&epdrv->list, &early_platform_driver_list);

		/* Allow passing parameters after device name */
		if (buf[n] == '\0' || buf[n] == ',')
			epdrv->requested_id = -1;
		else {
			epdrv->requested_id = simple_strtoul(&buf[n + 1],
							     &tmp, 10);

			if (buf[n] != '.' || (tmp == &buf[n + 1])) {
				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
				n = 0;
			} else
				n += strcspn(&buf[n + 1], ",") + 1;
		}

		if (buf[n] == ',')
			n++;

		if (epdrv->bufsize) {
			memcpy(epdrv->buffer, &buf[n],
			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
			epdrv->buffer[epdrv->bufsize - 1] = '\0';
		}
	}

	return 0;
}

/**
 * early_platform_add_devices - adds a number of early platform devices
 * @devs: array of early platform devices to add
 * @num: number of early platform devices in array
 *
 * Used by early architecture code to register early platform devices and
 * their platform data.
 */
void __init early_platform_add_devices(struct platform_device **devs, int num)
{
	struct device *dev;
	int i;

	/* simply add the devices to list */
	for (i = 0; i < num; i++) {
		dev = &devs[i]->dev;

		if (!dev->devres_head.next) {
			pm_runtime_early_init(dev);
			INIT_LIST_HEAD(&dev->devres_head);
			list_add_tail(&dev->devres_head,
				      &early_platform_device_list);
		}
	}
}

/**
 * early_platform_driver_register_all - register early platform drivers
 * @class_str: string to identify early platform driver class
 *
 * Used by architecture code to register all early platform drivers
 * for a certain class. If omitted then only early platform drivers
 * with matching kernel command line class parameters will be registered.
 */
void __init early_platform_driver_register_all(char *class_str)
{
	/* The "class_str" parameter may or may not be present on the kernel
	 * command line. If it is present then there may be more than one
	 * matching parameter.
	 *
	 * Since we register our early platform drivers using early_param()
	 * we need to make sure that they also get registered in the case
	 * when the parameter is missing from the kernel command line.
	 *
	 * We use parse_early_options() to make sure the early_param() gets
	 * called at least once. The early_param() may be called more than
	 * once since the name of the preferred device may be specified on
	 * the kernel command line. early_platform_driver_register() handles
	 * this case for us.
	 */
	parse_early_options(class_str);
}

/**
 * early_platform_match - find early platform device matching driver
 * @epdrv: early platform driver structure
 * @id: id to match against
 */
static struct platform_device * __init
early_platform_match(struct early_platform_driver *epdrv, int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id == id)
				return pd;

	return NULL;
}

/**
 * early_platform_left - check if early platform driver has matching devices
 * @epdrv: early platform driver structure
 * @id: return true if id or above exists
 */
static int __init early_platform_left(struct early_platform_driver *epdrv,
				      int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id >= id)
				return 1;

	return 0;
}

/**
 * early_platform_driver_probe_id - probe drivers matching class_str and id
 * @class_str: string to identify early platform driver class
 * @id: id to match against
 * @nr_probe: number of platform devices to successfully probe before exiting
 */
static int __init early_platform_driver_probe_id(char *class_str,
						 int id,
						 int nr_probe)
{
	struct early_platform_driver *epdrv;
	struct platform_device *match;
	int match_id;
	int n = 0;
	int left = 0;

	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
		/* only use drivers matching our class_str */
		if (strcmp(class_str, epdrv->class_str))
			continue;

		if (id == -2) {
			match_id = epdrv->requested_id;
			left = 1;

		} else {
			match_id = id;
			left += early_platform_left(epdrv, id);

			/* skip requested id */
			switch (epdrv->requested_id) {
			case EARLY_PLATFORM_ID_ERROR:
			case EARLY_PLATFORM_ID_UNSET:
				break;
			default:
				if (epdrv->requested_id == id)
					match_id = EARLY_PLATFORM_ID_UNSET;
			}
		}

		switch (match_id) {
		case EARLY_PLATFORM_ID_ERROR:
			pr_warn("%s: unable to parse %s parameter\n",
				class_str, epdrv->pdrv->driver.name);
			/* fall-through */
		case EARLY_PLATFORM_ID_UNSET:
			match = NULL;
			break;
		default:
			match = early_platform_match(epdrv, match_id);
		}

		if (match) {
			/*
			 * Set up a sensible init_name to enable
			 * dev_name() and others to be used before the
			 * rest of the driver core is initialized.
			 */
			if (!match->dev.init_name && slab_is_available()) {
				if (match->id != -1)
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s.%d",
							  match->name,
							  match->id);
				else
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s",
							  match->name);

				if (!match->dev.init_name)
					return -ENOMEM;
			}

			if (epdrv->pdrv->probe(match))
				pr_warn("%s: unable to probe %s early.\n",
					class_str, match->name);
			else
				n++;
		}

		if (n >= nr_probe)
			break;
	}

	if (left)
		return n;
	else
		return -ENODEV;
}

/**
 * early_platform_driver_probe - probe a class of registered drivers
 * @class_str: string to identify early platform driver class
 * @nr_probe: number of platform devices to successfully probe before exiting
 * @user_only: only probe user specified early platform devices
 *
 * Used by architecture code to probe registered early platform drivers
 * within a certain class. For probe to happen a registered early platform
 * device matching a registered early platform driver is needed.
 */
int __init early_platform_driver_probe(char *class_str,
				       int nr_probe,
				       int user_only)
{
	int k, n, i;

	n = 0;
	for (i = -2; n < nr_probe; i++) {
		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);

		if (k < 0)
			break;

		n += k;

		if (user_only)
			break;
	}

	return n;
}

/**
 * early_platform_cleanup - clean up early platform code
 */
void __init early_platform_cleanup(void)
{
	struct platform_device *pd, *pd2;

	/* clean up the devres list used to chain devices */
	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
				 dev.devres_head) {
		list_del(&pd->dev.devres_head);
		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
	}
}