// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * arch_setup_pdev_archdata - Allow manipulation of archdata before it's used
 * @pdev: platform device
 *
 * This is called before platform_device_add() such that any pdev_archdata may
 * be set up before the platform_notifier is called. So if a user needs to
 * manipulate any relevant information in the pdev_archdata they can do:
 *
 *	platform_device_alloc()
 *	... manipulate ...
 *	platform_device_add()
 *
 * And if they don't care they can just call platform_device_register() and
 * everything will just work out.
 */
void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
{
}

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 */
#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
#endif /* CONFIG_HAS_IOMEM */

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	return dev->archdata.irqs[num];
#else
	struct resource *r;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			int ret;

			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				return ret;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			return -ENXIO;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r)
		return r->start;

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);

		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	return -ENXIO;
#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	while ((ret = platform_get_irq(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	struct resource *r;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	return r ? r->start : -ENXIO;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
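/*
 * Example (illustrative sketch only; the foo_* names are hypothetical and not
 * part of this file): a typical driver probe() pairs the helpers above,
 * mapping the first MEM resource and requesting the first IRQ while
 * propagating -EPROBE_DEFER:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *		int irq;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *					dev_name(&pdev->dev), NULL);
 *	}
 */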

/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);

struct platform_object {
	struct platform_device pdev;
	char name[];
};

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		arch_setup_pdev_archdata(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take a deep copy of @properties and attach the copy to
 * the platform device. The memory associated with the properties will be
 * freed when the platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	int i, ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (--i >= 0) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	int i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_simple_remove(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	arch_setup_pdev_archdata(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop the reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
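/*
 * Example (illustrative sketch only; "foo" and the resource values are
 * hypothetical): board code that cannot use a static platform_device
 * typically allocates one, attaches resources and data, and only then adds
 * it, dropping the reference on any failure:
 *
 *	struct resource res = DEFINE_RES_MEM(0x10000000, SZ_4K);
 *	struct platform_device *pdev;
 *	int ret;
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_NONE);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_resources(pdev, &res, 1);
 *	if (!ret)
 *		ret = platform_device_add(pdev);
 *	if (ret)
 *		platform_device_put(pdev);
 */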

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/*
		 * This memory isn't freed when the device is put,
		 * I don't have a nice idea for that though.  Conceptually
		 * dma_mask in struct device should not be a pointer.
		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			goto err;

		kmemleak_ignore(pdev->dev.dma_mask);

		*pdev->dev.dma_mask = pdevinfo->dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		kfree(pdev->dev.dma_mask);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);

static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}

static int platform_drv_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret = 0;

	if (drv->remove)
		ret = drv->remove(dev);
	dev_pm_domain_detach(_dev, true);

	return ret;
}

static void platform_drv_shutdown(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
				struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
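/*
 * Example (illustrative sketch only; the foo_* identifiers are hypothetical):
 * a module normally never calls __platform_driver_register() directly but
 * goes through the platform_driver_register() / module_platform_driver()
 * wrappers, which fill in THIS_MODULE as the owner:
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name = "foo",
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */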

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices.  Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
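/*
 * Example (illustrative sketch only; the foo_* drivers are hypothetical): a
 * multi-function module can register its drivers as a group through the
 * platform_register_drivers() wrapper and rely on the automatic unwind on
 * failure:
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_aux_driver,
 *	};
 *
 *	return platform_register_drivers(foo_drivers,
 *					 ARRAY_SIZE(foo_drivers));
 */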

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sprintf(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);
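/*
 * Example (illustrative only; the device name below is hypothetical):
 * userspace can force a specific driver for a device through this attribute,
 * e.g. before handing it to vfio-platform:
 *
 *	echo vfio-platform > /sys/bus/platform/devices/fff51000.ethernet/driver_override
 *	echo fff51000.ethernet > /sys/bus/platform/drivers_probe
 */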

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(platform_dev);

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
			pdev->name);
	return 0;
}

static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}

/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'.  Driver IDs are simply
 * "<name>".  So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}

static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);

/**
 * early_platform_driver_register - register early platform driver
 * @epdrv: early_platform driver structure
 * @buf: string passed from early_param()
 *
 * Helper function for early_platform_init() / early_platform_init_buffer()
 */
int __init early_platform_driver_register(struct early_platform_driver *epdrv,
					  char *buf)
{
	char *tmp;
	int n;

	/* Simply add the driver to the end of the global list.
	 * Drivers will by default be put on the list in compiled-in order.
	 */
	if (!epdrv->list.next) {
		INIT_LIST_HEAD(&epdrv->list);
		list_add_tail(&epdrv->list, &early_platform_driver_list);
	}

	/* If the user has specified a device then make sure the driver
	 * gets prioritized. The driver of the last device specified on
	 * the command line will be put first on the list.
	 */
	n = strlen(epdrv->pdrv->driver.name);
	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
		list_move(&epdrv->list, &early_platform_driver_list);

		/* Allow passing parameters after device name */
		if (buf[n] == '\0' || buf[n] == ',')
			epdrv->requested_id = -1;
		else {
			epdrv->requested_id = simple_strtoul(&buf[n + 1],
							     &tmp, 10);

			if (buf[n] != '.'
			    || (tmp == &buf[n + 1])) {
				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
				n = 0;
			} else
				n += strcspn(&buf[n + 1], ",") + 1;
		}

		if (buf[n] == ',')
			n++;

		if (epdrv->bufsize) {
			memcpy(epdrv->buffer, &buf[n],
			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
			epdrv->buffer[epdrv->bufsize - 1] = '\0';
		}
	}

	return 0;
}

/**
 * early_platform_add_devices - adds a number of early platform devices
 * @devs: array of early platform devices to add
 * @num: number of early platform devices in array
 *
 * Used by early architecture code to register early platform devices and
 * their platform data.
 */
void __init early_platform_add_devices(struct platform_device **devs, int num)
{
	struct device *dev;
	int i;

	/* simply add the devices to list */
	for (i = 0; i < num; i++) {
		dev = &devs[i]->dev;

		if (!dev->devres_head.next) {
			pm_runtime_early_init(dev);
			INIT_LIST_HEAD(&dev->devres_head);
			list_add_tail(&dev->devres_head,
				      &early_platform_device_list);
		}
	}
}

/**
 * early_platform_driver_register_all - register early platform drivers
 * @class_str: string to identify early platform driver class
 *
 * Used by architecture code to register all early platform drivers
 * for a certain class. If omitted then only early platform drivers
 * with matching kernel command line class parameters will be registered.
 */
void __init early_platform_driver_register_all(char *class_str)
{
	/* The "class_str" parameter may or may not be present on the kernel
	 * command line. If it is present then there may be more than one
	 * matching parameter.
	 *
	 * Since we register our early platform drivers using early_param()
	 * we need to make sure that they also get registered in the case
	 * when the parameter is missing from the kernel command line.
	 *
	 * We use parse_early_options() to make sure the early_param() gets
	 * called at least once. The early_param() may be called more than
	 * once since the name of the preferred device may be specified on
	 * the kernel command line. early_platform_driver_register() handles
	 * this case for us.
	 */
	parse_early_options(class_str);
}

/**
 * early_platform_match - find early platform device matching driver
 * @epdrv: early platform driver structure
 * @id: id to match against
 */
static struct platform_device * __init
early_platform_match(struct early_platform_driver *epdrv, int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id == id)
				return pd;

	return NULL;
}

/**
 * early_platform_left - check if early platform driver has matching devices
 * @epdrv: early platform driver structure
 * @id: return true if id or above exists
 */
static int __init early_platform_left(struct early_platform_driver *epdrv,
				      int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id >= id)
				return 1;

	return 0;
}

/**
 * early_platform_driver_probe_id - probe drivers matching class_str and id
 * @class_str: string to identify early platform driver class
 * @id: id to match against
 * @nr_probe: number of platform devices to successfully probe before exiting
 */
static int __init early_platform_driver_probe_id(char *class_str,
						 int id,
						 int nr_probe)
{
	struct early_platform_driver *epdrv;
	struct platform_device *match;
	int match_id;
	int n = 0;
	int left = 0;

	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
		/* only use drivers matching our class_str */
		if (strcmp(class_str, epdrv->class_str))
			continue;

		if (id == -2) {
			match_id = epdrv->requested_id;
			left = 1;

		} else {
			match_id = id;
			left += early_platform_left(epdrv, id);

			/* skip requested id */
			switch (epdrv->requested_id) {
			case EARLY_PLATFORM_ID_ERROR:
			case EARLY_PLATFORM_ID_UNSET:
				break;
			default:
				if (epdrv->requested_id == id)
					match_id = EARLY_PLATFORM_ID_UNSET;
			}
		}

		switch (match_id) {
		case EARLY_PLATFORM_ID_ERROR:
			pr_warn("%s: unable to parse %s parameter\n",
				class_str, epdrv->pdrv->driver.name);
			/* fall-through */
		case EARLY_PLATFORM_ID_UNSET:
			match = NULL;
			break;
		default:
			match = early_platform_match(epdrv, match_id);
		}

		if (match) {
			/*
			 * Set up a sensible init_name to enable
			 * dev_name() and others to be used before the
			 * rest of the driver core is initialized.
			 */
			if (!match->dev.init_name && slab_is_available()) {
				if (match->id != -1)
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s.%d",
							  match->name,
							  match->id);
				else
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s",
							  match->name);

				if (!match->dev.init_name)
					return -ENOMEM;
			}

			if (epdrv->pdrv->probe(match))
				pr_warn("%s: unable to probe %s early.\n",
					class_str, match->name);
			else
				n++;
		}

		if (n >= nr_probe)
			break;
	}

	if (left)
		return n;
	else
		return -ENODEV;
}

/**
 * early_platform_driver_probe - probe a class of registered drivers
 * @class_str: string to identify early platform driver class
 * @nr_probe: number of platform devices to successfully probe before exiting
 * @user_only: only probe user specified early platform devices
 *
 * Used by architecture code to probe registered early platform drivers
 * within a certain class. For probe to happen a registered early platform
 * device matching a registered early platform driver is needed.
 */
int __init early_platform_driver_probe(char *class_str,
				       int nr_probe,
				       int user_only)
{
	int k, n, i;

	n = 0;
	for (i = -2; n < nr_probe; i++) {
		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);

		if (k < 0)
			break;

		n += k;

		if (user_only)
			break;
	}

	return n;
}

/**
 * early_platform_cleanup - clean up early platform code
 */
void __init early_platform_cleanup(void)
{
	struct platform_device *pd, *pd2;

	/* clean up the devres list used to chain devices */
	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
				 dev.devres_head) {
		list_del(&pd->dev.devres_head);
		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
	}
}