// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * arch_setup_pdev_archdata - Allow manipulation of archdata before it's used
 * @pdev: platform device
 *
 * This is called before platform_device_add() such that any pdev_archdata may
 * be set up before the platform_notifier is called. So if a user needs to
 * manipulate any relevant information in the pdev_archdata they can do:
 *
 *	platform_device_alloc()
 *	... manipulate ...
 *	platform_device_add()
 *
 * And if they don't care they can just call platform_device_register() and
 * everything will just work out.
 */
void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
{
}

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 */
#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
#endif /* CONFIG_HAS_IOMEM */
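
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * probe() can map its first MEM resource through the helper above instead
 * of open-coding platform_get_resource() plus devm_ioremap_resource().
 * The foo_probe() name and register offset below are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *
 *		writel(0x1, base + 0x04);
 *		return 0;
 *	}
 */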

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	return dev->archdata.irqs[num];
#else
	struct resource *r;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			int ret;

			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				return ret;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			return -ENXIO;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r)
		return r->start;

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev))
		return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);

	return -ENXIO;
#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	while ((ret = platform_get_irq(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
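
/*
 * Usage sketch (illustrative only): drivers normally pair platform_get_irq()
 * with devm_request_irq() in probe(); passing a negative return value,
 * including -EPROBE_DEFER, straight back lets the IRQ appear later.
 * The foo_* identifiers below are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq, ret;
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
 *				       dev_name(&pdev->dev), pdev);
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 */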

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	struct resource *r;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	return r ? r->start : -ENXIO;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);

/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);

struct platform_object {
	struct platform_device pdev;
	char name[];
};

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		arch_setup_pdev_archdata(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);
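
/*
 * Usage sketch (illustrative only): board or MFD code typically pairs
 * platform_device_alloc() with platform_device_add() and falls back to
 * platform_device_put() on error, the alloc/add split described in the
 * arch_setup_pdev_archdata() comment above. The "foo" name and resource
 * values are hypothetical.
 *
 *	static struct resource foo_res[] = {
 *		DEFINE_RES_MEM(0x10000000, SZ_4K),
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static int __init foo_board_init(void)
 *	{
 *		struct platform_device *pdev;
 *		int ret;
 *
 *		pdev = platform_device_alloc("foo", 0);
 *		if (!pdev)
 *			return -ENOMEM;
 *
 *		ret = platform_device_add_resources(pdev, foo_res,
 *						    ARRAY_SIZE(foo_res));
 *		if (ret)
 *			goto err_put;
 *
 *		ret = platform_device_add(pdev);
 *		if (ret)
 *			goto err_put;
 *		return 0;
 *
 *	err_put:
 *		platform_device_put(pdev);
 *		return ret;
 *	}
 */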

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take a deep copy of @properties and attach the copy to
 * the platform device. The memory associated with properties will be freed
 * when the platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
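
/*
 * Usage sketch (illustrative only): built-in properties let a board file or
 * MFD driver describe a child device the same way firmware would. The
 * property names and the "foo" device are hypothetical.
 *
 *	static const struct property_entry foo_props[] = {
 *		PROPERTY_ENTRY_U32("bus-width", 4),
 *		PROPERTY_ENTRY_STRING("label", "front-panel"),
 *		{ }
 *	};
 *
 *	ret = platform_device_add_properties(pdev, foo_props);
 *	if (ret)
 *		platform_device_put(pdev);
 */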

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	int i, ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (--i >= 0) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	int i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_simple_remove(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	arch_setup_pdev_archdata(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove the device from the subsystem, then we drop the reference
 * count by calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
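
/*
 * Usage sketch (illustrative only): board code can also describe a device
 * statically and register it in one call. Statically defined devices are
 * typically never removed; platform_device_unregister() (the del + put
 * sequence above) is meant for devices that came from
 * platform_device_alloc(). Names and addresses below are hypothetical.
 *
 *	static struct resource foo_uart_res[] = {
 *		DEFINE_RES_MEM(0x20000000, SZ_256),
 *		DEFINE_RES_IRQ(17),
 *	};
 *
 *	static struct platform_device foo_uart_device = {
 *		.name		= "foo-uart",
 *		.id		= PLATFORM_DEVID_NONE,
 *		.resource	= foo_uart_res,
 *		.num_resources	= ARRAY_SIZE(foo_uart_res),
 *	};
 *
 *	platform_device_register(&foo_uart_device);
 */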

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/*
		 * This memory isn't freed when the device is put;
		 * I don't have a nice idea for that though. Conceptually
		 * dma_mask in struct device should not be a pointer.
		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			goto err;

		kmemleak_ignore(pdev->dev.dma_mask);

		*pdev->dev.dma_mask = pdevinfo->dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		kfree(pdev->dev.dma_mask);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
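
/*
 * Usage sketch (illustrative only): platform_device_register_full() wraps
 * the alloc/add_resources/add_data/add sequence in a single call driven by
 * a struct platform_device_info. All values below are hypothetical.
 *
 *	static const struct platform_device_info foo_devinfo = {
 *		.name		= "foo-codec",
 *		.id		= PLATFORM_DEVID_AUTO,
 *		.res		= foo_res,
 *		.num_res	= ARRAY_SIZE(foo_res),
 *		.data		= &foo_pdata,
 *		.size_data	= sizeof(foo_pdata),
 *		.dma_mask	= DMA_BIT_MASK(32),
 *	};
 *
 *	pdev = platform_device_register_full(&foo_devinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */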

static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}

static int platform_drv_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret = 0;

	if (drv->remove)
		ret = drv->remove(dev);
	dev_pm_domain_detach(_dev, true);

	return ret;
}

static void platform_drv_shutdown(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
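
/*
 * Usage sketch (illustrative only): most drivers never call
 * __platform_driver_register() directly; they fill in a struct
 * platform_driver and let the platform_driver_register() /
 * module_platform_driver() helpers from <linux/platform_device.h> supply
 * THIS_MODULE. Every foo_* identifier below is hypothetical.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */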

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and the driver is not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
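
/*
 * Usage sketch (illustrative only): for a device known to exist at boot,
 * the platform_driver_probe() wrapper (or module_platform_driver_probe())
 * passes an __init probe routine to __platform_driver_probe() so its text
 * can be discarded after binding. foo_* names are hypothetical.
 *
 *	static int __init foo_probe(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *	};
 *	module_platform_driver_probe(foo_driver, foo_probe);
 */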

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
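
/*
 * Usage sketch (illustrative only): subsystems with several related drivers
 * register them as an array through the platform_register_drivers() wrapper,
 * which supplies THIS_MODULE for the function above. Driver names are
 * hypothetical.
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_dma_driver,
 *		&foo_ui_driver,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_register_drivers(foo_drivers,
 *						 ARRAY_SIZE(foo_drivers));
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		platform_unregister_drivers(foo_drivers,
 *					    ARRAY_SIZE(foo_drivers));
 *	}
 */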

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running: "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running: "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sprintf(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(platform_dev);

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}
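
/*
 * Usage sketch (illustrative only): an id_table consumed by
 * platform_match_id() above is a zero-terminated array; driver_data can
 * carry per-variant information. Entries are hypothetical.
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ "foo-v1", (kernel_ulong_t)&foo_v1_config },
 *		{ "foo-v2", (kernel_ulong_t)&foo_v2_config },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 * In probe(), the matched entry is then available as
 * platform_get_device_id(pdev)->driver_data.
 */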

/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'. Driver IDs are simply
 * "<name>". So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */
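
/*
 * Usage sketch (illustrative only): a platform driver normally provides
 * dev_pm_ops rather than the legacy suspend/resume methods handled by
 * platform_legacy_suspend()/resume() above; the bus-level callbacks in this
 * file then dispatch to it. foo_* names are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 */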

int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}

static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);

/**
 * early_platform_driver_register - register early platform driver
 * @epdrv: early_platform driver structure
 * @buf: string passed from early_param()
 *
 * Helper function for early_platform_init() / early_platform_init_buffer()
 */
int __init early_platform_driver_register(struct early_platform_driver *epdrv,
					  char *buf)
{
	char *tmp;
	int n;

	/* Simply add the driver to the end of the global list.
	 * Drivers will by default be put on the list in compiled-in order.
	 */
	if (!epdrv->list.next) {
		INIT_LIST_HEAD(&epdrv->list);
		list_add_tail(&epdrv->list, &early_platform_driver_list);
	}

	/* If the user has specified a device then make sure the driver
	 * gets prioritized. The driver of the last device specified on
	 * the command line will be put first on the list.
	 */
	n = strlen(epdrv->pdrv->driver.name);
	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
		list_move(&epdrv->list, &early_platform_driver_list);

		/* Allow passing parameters after device name */
		if (buf[n] == '\0' || buf[n] == ',')
			epdrv->requested_id = -1;
		else {
			epdrv->requested_id = simple_strtoul(&buf[n + 1],
							     &tmp, 10);

			if (buf[n] != '.' || (tmp == &buf[n + 1])) {
				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
				n = 0;
			} else
				n += strcspn(&buf[n + 1], ",") + 1;
		}

		if (buf[n] == ',')
			n++;

		if (epdrv->bufsize) {
			memcpy(epdrv->buffer, &buf[n],
			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
			epdrv->buffer[epdrv->bufsize - 1] = '\0';
		}
	}

	return 0;
}

/**
 * early_platform_add_devices - adds a number of early platform devices
 * @devs: array of early platform devices to add
 * @num: number of early platform devices in array
 *
 * Used by early architecture code to register early platform devices and
 * their platform data.
 */
void __init early_platform_add_devices(struct platform_device **devs, int num)
{
	struct device *dev;
	int i;

	/* simply add the devices to list */
	for (i = 0; i < num; i++) {
		dev = &devs[i]->dev;

		if (!dev->devres_head.next) {
			pm_runtime_early_init(dev);
			INIT_LIST_HEAD(&dev->devres_head);
			list_add_tail(&dev->devres_head,
				      &early_platform_device_list);
		}
	}
}
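
/*
 * Usage sketch (illustrative only): the early platform machinery is used by
 * architecture code (historically SuperH) roughly like this, with the driver
 * side wired up through the early_platform_init() macro from
 * <linux/platform_device.h>. All names below are hypothetical.
 *
 * In the timer driver:
 *
 *	static struct platform_driver foo_timer_driver = {
 *		.probe	= foo_timer_probe,
 *		.driver	= { .name = "foo-timer" },
 *	};
 *	early_platform_init("earlytimer", &foo_timer_driver);
 *
 * In the board/arch setup code, before the driver core is up:
 *
 *	early_platform_add_devices(foo_early_devices,
 *				   ARRAY_SIZE(foo_early_devices));
 *	early_platform_driver_register_all("earlytimer");
 *	early_platform_driver_probe("earlytimer", 1, 0);
 */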

/**
 * early_platform_driver_register_all - register early platform drivers
 * @class_str: string to identify early platform driver class
 *
 * Used by architecture code to register all early platform drivers
 * for a certain class. If omitted then only early platform drivers
 * with matching kernel command line class parameters will be registered.
 */
void __init early_platform_driver_register_all(char *class_str)
{
	/* The "class_str" parameter may or may not be present on the kernel
	 * command line. If it is present then there may be more than one
	 * matching parameter.
	 *
	 * Since we register our early platform drivers using early_param()
	 * we need to make sure that they also get registered in the case
	 * when the parameter is missing from the kernel command line.
	 *
	 * We use parse_early_options() to make sure the early_param() gets
	 * called at least once. The early_param() may be called more than
	 * once since the name of the preferred device may be specified on
	 * the kernel command line. early_platform_driver_register() handles
	 * this case for us.
	 */
	parse_early_options(class_str);
}

/**
 * early_platform_match - find early platform device matching driver
 * @epdrv: early platform driver structure
 * @id: id to match against
 */
static struct platform_device * __init
early_platform_match(struct early_platform_driver *epdrv, int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id == id)
				return pd;

	return NULL;
}

/**
 * early_platform_left - check if early platform driver has matching devices
 * @epdrv: early platform driver structure
 * @id: return true if id or above exists
 */
static int __init early_platform_left(struct early_platform_driver *epdrv,
				      int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id >= id)
				return 1;

	return 0;
}

/**
 * early_platform_driver_probe_id - probe drivers matching class_str and id
 * @class_str: string to identify early platform driver class
 * @id: id to match against
 * @nr_probe: number of platform devices to successfully probe before exiting
 */
static int __init early_platform_driver_probe_id(char *class_str,
						 int id,
						 int nr_probe)
{
	struct early_platform_driver *epdrv;
	struct platform_device *match;
	int match_id;
	int n = 0;
	int left = 0;

	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
		/* only use drivers matching our class_str */
		if (strcmp(class_str, epdrv->class_str))
			continue;

		if (id == -2) {
			match_id = epdrv->requested_id;
			left = 1;

		} else {
			match_id = id;
			left += early_platform_left(epdrv, id);

			/* skip requested id */
			switch (epdrv->requested_id) {
			case EARLY_PLATFORM_ID_ERROR:
			case EARLY_PLATFORM_ID_UNSET:
				break;
			default:
				if (epdrv->requested_id == id)
					match_id = EARLY_PLATFORM_ID_UNSET;
			}
		}

		switch (match_id) {
		case EARLY_PLATFORM_ID_ERROR:
			pr_warn("%s: unable to parse %s parameter\n",
				class_str, epdrv->pdrv->driver.name);
			/* fall-through */
		case EARLY_PLATFORM_ID_UNSET:
			match = NULL;
			break;
		default:
			match = early_platform_match(epdrv, match_id);
		}

		if (match) {
			/*
			 * Set up a sensible init_name to enable
			 * dev_name() and others to be used before the
			 * rest of the driver core is initialized.
			 */
			if (!match->dev.init_name && slab_is_available()) {
				if (match->id != -1)
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s.%d",
							  match->name,
							  match->id);
				else
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s",
							  match->name);

				if (!match->dev.init_name)
					return -ENOMEM;
			}

			if (epdrv->pdrv->probe(match))
				pr_warn("%s: unable to probe %s early.\n",
					class_str, match->name);
			else
				n++;
		}

		if (n >= nr_probe)
			break;
	}

	if (left)
		return n;
	else
		return -ENODEV;
}

/**
 * early_platform_driver_probe - probe a class of registered drivers
 * @class_str: string to identify early platform driver class
 * @nr_probe: number of platform devices to successfully probe before exiting
 * @user_only: only probe user specified early platform devices
 *
 * Used by architecture code to probe registered early platform drivers
 * within a certain class. For probe to happen a registered early platform
 * device matching a registered early platform driver is needed.
 */
int __init early_platform_driver_probe(char *class_str,
				       int nr_probe,
				       int user_only)
{
	int k, n, i;

	n = 0;
	for (i = -2; n < nr_probe; i++) {
		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);

		if (k < 0)
			break;

		n += k;

		if (user_only)
			break;
	}

	return n;
}

/**
 * early_platform_cleanup - clean up early platform code
 */
void __init early_platform_cleanup(void)
{
	struct platform_device *pd, *pd2;

	/* clean up the devres list used to chain devices */
	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
				 dev.devres_head) {
		list_del(&pd->dev.devres_head);
		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
	}
}