// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-model/platform.txt for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
        .init_name = "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * arch_setup_pdev_archdata - Allow manipulation of archdata before it's used
 * @pdev: platform device
 *
 * This is called before platform_device_add() such that any pdev_archdata may
 * be set up before the platform_notifier is called. So if a user needs to
 * manipulate any relevant information in the pdev_archdata they can do:
 *
 *        platform_device_alloc()
 *        ... manipulate ...
 *        platform_device_add()
 *
 * And if they don't care they can just call platform_device_register() and
 * everything will just work out.
 */
void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
{
}

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 */
struct resource *platform_get_resource(struct platform_device *dev,
                                       unsigned int type, unsigned int num)
{
        int i;

        for (i = 0; i < dev->num_resources; i++) {
                struct resource *r = &dev->resource[i];

                if (type == resource_type(r) && num-- == 0)
                        return r;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *                                  device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *        resource management
 * @index: resource index
 */
#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                             unsigned int index)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
#endif /* CONFIG_HAS_IOMEM */

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
        /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
        if (!dev || num >= dev->archdata.num_irqs)
                return -ENXIO;
        return dev->archdata.irqs[num];
#else
        struct resource *r;
        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
                int ret;

                ret = of_irq_get(dev->dev.of_node, num);
                if (ret > 0 || ret == -EPROBE_DEFER)
                        return ret;
        }

        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
        if (has_acpi_companion(&dev->dev)) {
                if (r && r->flags & IORESOURCE_DISABLED) {
                        int ret;

                        ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
                        if (ret)
                                return ret;
                }
        }

        /*
         * The resources may pass trigger flags to the irqs that need
         * to be set up. It so happens that the trigger flags for
         * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
         * settings.
         */
        if (r && r->flags & IORESOURCE_BITS) {
                struct irq_data *irqd;

                irqd = irq_get_irq_data(r->start);
                if (!irqd)
                        return -ENXIO;
                irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
        }

        if (r)
                return r->start;

        /*
         * For the index 0 interrupt, allow falling back to GpioInt
         * resources. While a device could have both Interrupt and GpioInt
         * resources, making this fallback ambiguous, in many common cases
         * the device will only expose one IRQ, and this fallback
         * allows a common code path across either kind of resource.
         */
        if (num == 0 && has_acpi_companion(&dev->dev))
                return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);

        return -ENXIO;
#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);
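
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * driver's probe() typically consumes the lookup helpers above. The foo_*
 * names and the foo_isr handler are hypothetical. Note that
 * platform_get_irq() may return -EPROBE_DEFER, which probe() should
 * propagate unchanged.
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                void __iomem *base;
 *                int irq;
 *
 *                base = devm_platform_ioremap_resource(pdev, 0);
 *                if (IS_ERR(base))
 *                        return PTR_ERR(base);
 *
 *                irq = platform_get_irq(pdev, 0);
 *                if (irq < 0)
 *                        return irq;
 *
 *                return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *                                        dev_name(&pdev->dev), NULL);
 *        }
 */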

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
        int ret, nr = 0;

        while ((ret = platform_get_irq(dev, nr)) >= 0)
                nr++;

        if (ret == -EPROBE_DEFER)
                return ret;

        return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
                                              unsigned int type,
                                              const char *name)
{
        int i;

        for (i = 0; i < dev->num_resources; i++) {
                struct resource *r = &dev->resource[i];

                if (unlikely(!r->name))
                        continue;

                if (type == resource_type(r) && !strcmp(r->name, name))
                        return r;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
        struct resource *r;

        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
                int ret;

                ret = of_irq_get_byname(dev->dev.of_node, name);
                if (ret > 0 || ret == -EPROBE_DEFER)
                        return ret;
        }

        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
        return r ? r->start : -ENXIO;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);

/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 */
int platform_add_devices(struct platform_device **devs, int num)
{
        int i, ret = 0;

        for (i = 0; i < num; i++) {
                ret = platform_device_register(devs[i]);
                if (ret) {
                        while (--i >= 0)
                                platform_device_unregister(devs[i]);
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
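
/*
 * Editor's illustrative sketch (not part of the original file): board or SoC
 * setup code typically registers a static table of devices in one call; on
 * failure, platform_add_devices() unregisters the ones already added. The
 * foo_* devices are hypothetical.
 *
 *        static struct platform_device *foo_board_devices[] __initdata = {
 *                &foo_uart_device,
 *                &foo_eth_device,
 *        };
 *
 *        static int __init foo_board_init(void)
 *        {
 *                return platform_add_devices(foo_board_devices,
 *                                            ARRAY_SIZE(foo_board_devices));
 *        }
 */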

struct platform_object {
        struct platform_device pdev;
        char name[];
};

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
        if (!IS_ERR_OR_NULL(pdev))
                put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

static void platform_device_release(struct device *dev)
{
        struct platform_object *pa = container_of(dev, struct platform_object,
                                                  pdev.dev);

        of_device_node_put(&pa->pdev.dev);
        kfree(pa->pdev.dev.platform_data);
        kfree(pa->pdev.mfd_cell);
        kfree(pa->pdev.resource);
        kfree(pa->pdev.driver_override);
        kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
        struct platform_object *pa;

        pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
        if (pa) {
                strcpy(pa->name, name);
                pa->pdev.name = pa->name;
                pa->pdev.id = id;
                device_initialize(&pa->pdev.dev);
                pa->pdev.dev.release = platform_device_release;
                arch_setup_pdev_archdata(&pa->pdev);
        }

        return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
                                  const struct resource *res, unsigned int num)
{
        struct resource *r = NULL;

        if (res) {
                r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
                if (!r)
                        return -ENOMEM;
        }

        kfree(pdev->resource);
        pdev->resource = r;
        pdev->num_resources = num;
        return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add data to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
                             size_t size)
{
        void *d = NULL;

        if (data) {
                d = kmemdup(data, size, GFP_KERNEL);
                if (!d)
                        return -ENOMEM;
        }

        kfree(pdev->dev.platform_data);
        pdev->dev.platform_data = d;
        return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take a deep copy of @properties and attach the copy to
 * the platform device. The memory associated with properties will be freed
 * when the platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
                                   const struct property_entry *properties)
{
        return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
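
/*
 * Editor's illustrative sketch (not part of the original file): the manual
 * allocate/populate/add sequence built from the helpers above. On any
 * failure after platform_device_alloc(), platform_device_put() releases
 * everything attached so far. The foo_* names are hypothetical.
 *
 *        static int __init foo_register(void)
 *        {
 *                struct platform_device *pdev;
 *                int ret;
 *
 *                pdev = platform_device_alloc("foo", 0);
 *                if (!pdev)
 *                        return -ENOMEM;
 *
 *                ret = platform_device_add_resources(pdev, foo_resources,
 *                                                    ARRAY_SIZE(foo_resources));
 *                if (ret)
 *                        goto err_put;
 *
 *                ret = platform_device_add_data(pdev, &foo_pdata,
 *                                               sizeof(foo_pdata));
 *                if (ret)
 *                        goto err_put;
 *
 *                ret = platform_device_add(pdev);
 *                if (ret)
 *                        goto err_put;
 *
 *                return 0;
 *
 *        err_put:
 *                platform_device_put(pdev);
 *                return ret;
 *        }
 */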

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
        int i, ret;

        if (!pdev)
                return -EINVAL;

        if (!pdev->dev.parent)
                pdev->dev.parent = &platform_bus;

        pdev->dev.bus = &platform_bus_type;

        switch (pdev->id) {
        default:
                dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
                break;
        case PLATFORM_DEVID_NONE:
                dev_set_name(&pdev->dev, "%s", pdev->name);
                break;
        case PLATFORM_DEVID_AUTO:
                /*
                 * Automatically allocated device ID. We mark it as such so
                 * that we remember it must be freed, and we append a suffix
                 * to avoid namespace collision with explicit IDs.
                 */
                ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err_out;
                pdev->id = ret;
                pdev->id_auto = true;
                dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
                break;
        }

        for (i = 0; i < pdev->num_resources; i++) {
                struct resource *p, *r = &pdev->resource[i];

                if (r->name == NULL)
                        r->name = dev_name(&pdev->dev);

                p = r->parent;
                if (!p) {
                        if (resource_type(r) == IORESOURCE_MEM)
                                p = &iomem_resource;
                        else if (resource_type(r) == IORESOURCE_IO)
                                p = &ioport_resource;
                }

                if (p && insert_resource(p, r)) {
                        dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
                        ret = -EBUSY;
                        goto failed;
                }
        }

        pr_debug("Registering platform device '%s'. Parent at %s\n",
                 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

        ret = device_add(&pdev->dev);
        if (ret == 0)
                return ret;

 failed:
        if (pdev->id_auto) {
                ida_simple_remove(&platform_devid_ida, pdev->id);
                pdev->id = PLATFORM_DEVID_AUTO;
        }

        while (--i >= 0) {
                struct resource *r = &pdev->resource[i];
                if (r->parent)
                        release_resource(r);
        }

 err_out:
        return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
        int i;

        if (!IS_ERR_OR_NULL(pdev)) {
                device_del(&pdev->dev);

                if (pdev->id_auto) {
                        ida_simple_remove(&platform_devid_ida, pdev->id);
                        pdev->id = PLATFORM_DEVID_AUTO;
                }

                for (i = 0; i < pdev->num_resources; i++) {
                        struct resource *r = &pdev->resource[i];
                        if (r->parent)
                                release_resource(r);
                }
        }
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
        device_initialize(&pdev->dev);
        arch_setup_pdev_archdata(pdev);
        return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
        platform_device_del(pdev);
        platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
                const struct platform_device_info *pdevinfo)
{
        int ret = -ENOMEM;
        struct platform_device *pdev;

        pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
        if (!pdev)
                return ERR_PTR(-ENOMEM);

        pdev->dev.parent = pdevinfo->parent;
        pdev->dev.fwnode = pdevinfo->fwnode;
        pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
        pdev->dev.of_node_reused = pdevinfo->of_node_reused;

        if (pdevinfo->dma_mask) {
                /*
                 * This memory isn't freed when the device is put,
                 * I don't have a nice idea for that though. Conceptually
                 * dma_mask in struct device should not be a pointer.
                 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
                 */
                pdev->dev.dma_mask =
                        kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
                if (!pdev->dev.dma_mask)
                        goto err;

                kmemleak_ignore(pdev->dev.dma_mask);

                *pdev->dev.dma_mask = pdevinfo->dma_mask;
                pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
        }

        ret = platform_device_add_resources(pdev,
                        pdevinfo->res, pdevinfo->num_res);
        if (ret)
                goto err;

        ret = platform_device_add_data(pdev,
                        pdevinfo->data, pdevinfo->size_data);
        if (ret)
                goto err;

        if (pdevinfo->properties) {
                ret = platform_device_add_properties(pdev,
                                                     pdevinfo->properties);
                if (ret)
                        goto err;
        }

        ret = platform_device_add(pdev);
        if (ret) {
err:
                ACPI_COMPANION_SET(&pdev->dev, NULL);
                kfree(pdev->dev.dma_mask);
                platform_device_put(pdev);
                return ERR_PTR(ret);
        }

        return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
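
/*
 * Editor's illustrative sketch (not part of the original file): registering
 * a device together with its resources, platform data and DMA mask in a
 * single call. The foo_* names and foo_pdata are hypothetical.
 *
 *        static const struct resource foo_resources[] = {
 *                DEFINE_RES_MEM(0x10000000, 0x1000),
 *                DEFINE_RES_IRQ(42),
 *        };
 *
 *        static const struct platform_device_info foo_devinfo = {
 *                .name      = "foo",
 *                .id        = PLATFORM_DEVID_NONE,
 *                .res       = foo_resources,
 *                .num_res   = ARRAY_SIZE(foo_resources),
 *                .data      = &foo_pdata,
 *                .size_data = sizeof(foo_pdata),
 *                .dma_mask  = DMA_BIT_MASK(32),
 *        };
 *
 *        pdev = platform_device_register_full(&foo_devinfo);
 *        if (IS_ERR(pdev))
 *                return PTR_ERR(pdev);
 */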

static int platform_drv_probe(struct device *_dev)
{
        struct platform_driver *drv = to_platform_driver(_dev->driver);
        struct platform_device *dev = to_platform_device(_dev);
        int ret;

        ret = of_clk_set_defaults(_dev->of_node, false);
        if (ret < 0)
                return ret;

        ret = dev_pm_domain_attach(_dev, true);
        if (ret)
                goto out;

        if (drv->probe) {
                ret = drv->probe(dev);
                if (ret)
                        dev_pm_domain_detach(_dev, true);
        }

out:
        if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
                dev_warn(_dev, "probe deferral not supported\n");
                ret = -ENXIO;
        }

        return ret;
}

static int platform_drv_probe_fail(struct device *_dev)
{
        return -ENXIO;
}

static int platform_drv_remove(struct device *_dev)
{
        struct platform_driver *drv = to_platform_driver(_dev->driver);
        struct platform_device *dev = to_platform_device(_dev);
        int ret = 0;

        if (drv->remove)
                ret = drv->remove(dev);
        dev_pm_domain_detach(_dev, true);

        return ret;
}

static void platform_drv_shutdown(struct device *_dev)
{
        struct platform_driver *drv = to_platform_driver(_dev->driver);
        struct platform_device *dev = to_platform_device(_dev);

        if (drv->shutdown)
                drv->shutdown(dev);
}

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
                               struct module *owner)
{
        drv->driver.owner = owner;
        drv->driver.bus = &platform_bus_type;
        drv->driver.probe = platform_drv_probe;
        drv->driver.remove = platform_drv_remove;
        drv->driver.shutdown = platform_drv_shutdown;

        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
                int (*probe)(struct platform_device *), struct module *module)
{
        int retval, code;

        if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
                pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
                       drv->driver.name, __func__);
                return -EINVAL;
        }

        /*
         * We have to run our probes synchronously because we check if
         * we find any devices to bind to and exit with error if there
         * are any.
         */
        drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

        /*
         * Prevent driver from requesting probe deferral to avoid further
         * futile probe attempts.
         */
        drv->prevent_deferred_probe = true;

        /* make sure driver won't have bind/unbind attributes */
        drv->driver.suppress_bind_attrs = true;

        /* temporary section violation during probe() */
        drv->probe = probe;
        retval = code = __platform_driver_register(drv, module);

        /*
         * Fixup that section violation, being paranoid about code scanning
         * the list of drivers in order to probe new devices. Check to see
         * if the probe was successful, and make sure any forced probes of
         * new devices fail.
         */
        spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
        drv->probe = NULL;
        if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
                retval = -ENODEV;
        drv->driver.probe = platform_drv_probe_fail;
        spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

        if (code != retval)
                platform_driver_unregister(drv);
        return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
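
/*
 * Editor's illustrative sketch (not part of the original file): a legacy,
 * always-present device whose probe() lives in __init memory.
 * platform_driver_probe() is the header wrapper around
 * __platform_driver_probe(); the foo_* names are hypothetical.
 *
 *        static int __init foo_probe(struct platform_device *pdev)
 *        {
 *                return 0;
 *        }
 *
 *        static struct platform_driver foo_driver = {
 *                .driver = {
 *                        .name = "foo",
 *                },
 *        };
 *
 *        static int __init foo_init(void)
 *        {
 *                return platform_driver_probe(&foo_driver, foo_probe);
 *        }
 *        module_init(foo_init);
 */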

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
                        struct platform_driver *driver,
                        int (*probe)(struct platform_device *),
                        struct resource *res, unsigned int n_res,
                        const void *data, size_t size, struct module *module)
{
        struct platform_device *pdev;
        int error;

        pdev = platform_device_alloc(driver->driver.name, -1);
        if (!pdev) {
                error = -ENOMEM;
                goto err_out;
        }

        error = platform_device_add_resources(pdev, res, n_res);
        if (error)
                goto err_pdev_put;

        error = platform_device_add_data(pdev, data, size);
        if (error)
                goto err_pdev_put;

        error = platform_device_add(pdev);
        if (error)
                goto err_pdev_put;

        error = __platform_driver_probe(driver, probe, module);
        if (error)
                goto err_pdev_del;

        return pdev;

err_pdev_del:
        platform_device_del(pdev);
err_pdev_put:
        platform_device_put(pdev);
err_out:
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
                                unsigned int count, struct module *owner)
{
        unsigned int i;
        int err;

        for (i = 0; i < count; i++) {
                pr_debug("registering platform driver %ps\n", drivers[i]);

                err = __platform_driver_register(drivers[i], owner);
                if (err < 0) {
                        pr_err("failed to register platform driver %ps: %d\n",
                               drivers[i], err);
                        goto error;
                }
        }

        return 0;

error:
        while (i--) {
                pr_debug("unregistering platform driver %ps\n", drivers[i]);
                platform_driver_unregister(drivers[i]);
        }

        return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
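
/*
 * Editor's illustrative sketch (not part of the original file): registering
 * several related drivers as a group so a failure unwinds the earlier ones.
 * platform_register_drivers() is the header wrapper around
 * __platform_register_drivers(); the foo_* drivers are hypothetical.
 *
 *        static struct platform_driver * const foo_drivers[] = {
 *                &foo_core_driver,
 *                &foo_gpio_driver,
 *                &foo_pwm_driver,
 *        };
 *
 *        static int __init foo_init(void)
 *        {
 *                return platform_register_drivers(foo_drivers,
 *                                                 ARRAY_SIZE(foo_drivers));
 *        }
 *        module_init(foo_init);
 *
 *        static void __exit foo_exit(void)
 *        {
 *                platform_unregister_drivers(foo_drivers,
 *                                            ARRAY_SIZE(foo_drivers));
 *        }
 *        module_exit(foo_exit);
 */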

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
                                 unsigned int count)
{
        while (count--) {
                pr_debug("unregistering platform driver %ps\n", drivers[count]);
                platform_driver_unregister(drivers[count]);
        }
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running: "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running: "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
                             char *buf)
{
        struct platform_device *pdev = to_platform_device(dev);
        int len;

        len = of_device_modalias(dev, buf, PAGE_SIZE);
        if (len != -ENODEV)
                return len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

        return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct platform_device *pdev = to_platform_device(dev);
        char *driver_override, *old, *cp;

        /* We need to keep extra room for a newline */
        if (count >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, count, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        cp = strchr(driver_override, '\n');
        if (cp)
                *cp = '\0';

        device_lock(dev);
        old = pdev->driver_override;
        if (strlen(driver_override)) {
                pdev->driver_override = driver_override;
        } else {
                kfree(driver_override);
                pdev->driver_override = NULL;
        }
        device_unlock(dev);

        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct platform_device *pdev = to_platform_device(dev);
        ssize_t len;

        device_lock(dev);
        len = sprintf(buf, "%s\n", pdev->driver_override);
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};
ATTRIBUTE_GROUPS(platform_dev);

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct platform_device *pdev = to_platform_device(dev);
        int rc;

        /* Some devices have extra OF data and an OF-style MODALIAS */
        rc = of_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
                       pdev->name);
        return 0;
}

static const struct platform_device_id *platform_match_id(
                        const struct platform_device_id *id,
                        struct platform_device *pdev)
{
        while (id->name[0]) {
                if (strcmp(pdev->name, id->name) == 0) {
                        pdev->id_entry = id;
                        return id;
                }
                id++;
        }
        return NULL;
}

/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'. Driver IDs are simply
 * "<name>". So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct platform_driver *pdrv = to_platform_driver(drv);

        /* When driver_override is set, only bind to the matching driver */
        if (pdev->driver_override)
                return !strcmp(pdev->driver_override, drv->name);

        /* Attempt an OF style match first */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI style match */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        /* Then try to match against the id table */
        if (pdrv->id_table)
                return platform_match_id(pdrv->id_table, pdev) != NULL;

        /* fall-back to driver name match */
        return (strcmp(pdev->name, drv->name) == 0);
}
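
/*
 * Editor's illustrative sketch (not part of the original file): the id_table
 * form of matching that platform_match() falls back to when there is no OF
 * or ACPI match. The foo names and per-variant data are hypothetical; inside
 * probe(), platform_get_device_id(pdev)->driver_data yields the data of the
 * entry that matched.
 *
 *        static const struct platform_device_id foo_id_table[] = {
 *                { "foo",      (kernel_ulong_t)&foo_chip_data },
 *                { "foo-lite", (kernel_ulong_t)&foo_lite_chip_data },
 *                { }
 *        };
 *        MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 *        static struct platform_driver foo_driver = {
 *                .probe    = foo_probe,
 *                .id_table = foo_id_table,
 *                .driver   = {
 *                        .name = "foo",
 *                },
 *        };
 */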

#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
        struct platform_driver *pdrv = to_platform_driver(dev->driver);
        struct platform_device *pdev = to_platform_device(dev);
        int ret = 0;

        if (dev->driver && pdrv->suspend)
                ret = pdrv->suspend(pdev, mesg);

        return ret;
}

static int platform_legacy_resume(struct device *dev)
{
        struct platform_driver *pdrv = to_platform_driver(dev->driver);
        struct platform_device *pdev = to_platform_device(dev);
        int ret = 0;

        if (dev->driver && pdrv->resume)
                ret = pdrv->resume(pdev);

        return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->suspend)
                        ret = drv->pm->suspend(dev);
        } else {
                ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
        }

        return ret;
}

int platform_pm_resume(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->resume)
                        ret = drv->pm->resume(dev);
        } else {
                ret = platform_legacy_resume(dev);
        }

        return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->freeze)
                        ret = drv->pm->freeze(dev);
        } else {
                ret = platform_legacy_suspend(dev, PMSG_FREEZE);
        }

        return ret;
}

int platform_pm_thaw(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->thaw)
                        ret = drv->pm->thaw(dev);
        } else {
                ret = platform_legacy_resume(dev);
        }

        return ret;
}

int platform_pm_poweroff(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->poweroff)
                        ret = drv->pm->poweroff(dev);
        } else {
                ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
        }

        return ret;
}

int platform_pm_restore(struct device *dev)
{
        struct device_driver *drv = dev->driver;
        int ret = 0;

        if (!drv)
                return 0;

        if (drv->pm) {
                if (drv->pm->restore)
                        ret = drv->pm->restore(dev);
        } else {
                ret = platform_legacy_resume(dev);
        }

        return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

int platform_dma_configure(struct device *dev)
{
        enum dev_dma_attr attr;
        int ret = 0;

        if (dev->of_node) {
                ret = of_dma_configure(dev, dev->of_node, true);
        } else if (has_acpi_companion(dev)) {
                attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
                ret = acpi_dma_configure(dev, attr);
        }

        return ret;
}

static const struct dev_pm_ops platform_dev_pm_ops = {
        .runtime_suspend = pm_generic_runtime_suspend,
        .runtime_resume = pm_generic_runtime_resume,
        USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
        .name          = "platform",
        .dev_groups    = platform_dev_groups,
        .match         = platform_match,
        .uevent        = platform_uevent,
        .dma_configure = platform_dma_configure,
        .pm            = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

int __init platform_bus_init(void)
{
        int error;

        early_platform_cleanup();

        error = device_register(&platform_bus);
        if (error) {
                put_device(&platform_bus);
                return error;
        }
        error = bus_register(&platform_bus_type);
        if (error)
                device_unregister(&platform_bus);
        of_platform_register_reconfig_notifier();
        return error;
}

static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);

/**
 * early_platform_driver_register - register early platform driver
 * @epdrv: early_platform driver structure
 * @buf: string passed from early_param()
 *
 * Helper function for early_platform_init() / early_platform_init_buffer()
 */
int __init early_platform_driver_register(struct early_platform_driver *epdrv,
                                          char *buf)
{
        char *tmp;
        int n;

        /* Simply add the driver to the end of the global list.
         * Drivers will by default be put on the list in compiled-in order.
         */
        if (!epdrv->list.next) {
                INIT_LIST_HEAD(&epdrv->list);
                list_add_tail(&epdrv->list, &early_platform_driver_list);
        }

        /* If the user has specified device then make sure the driver
         * gets prioritized. The driver of the last device specified on
         * command line will be put first on the list.
         */
        n = strlen(epdrv->pdrv->driver.name);
        if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
                list_move(&epdrv->list, &early_platform_driver_list);

                /* Allow passing parameters after device name */
                if (buf[n] == '\0' || buf[n] == ',')
                        epdrv->requested_id = -1;
                else {
                        epdrv->requested_id = simple_strtoul(&buf[n + 1],
                                                             &tmp, 10);

                        if (buf[n] != '.' || (tmp == &buf[n + 1])) {
                                epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
                                n = 0;
                        } else
                                n += strcspn(&buf[n + 1], ",") + 1;
                }

                if (buf[n] == ',')
                        n++;

                if (epdrv->bufsize) {
                        memcpy(epdrv->buffer, &buf[n],
                               min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
                        epdrv->buffer[epdrv->bufsize - 1] = '\0';
                }
        }

        return 0;
}

/**
 * early_platform_add_devices - adds a number of early platform devices
 * @devs: array of early platform devices to add
 * @num: number of early platform devices in array
 *
 * Used by early architecture code to register early platform devices and
 * their platform data.
 */
void __init early_platform_add_devices(struct platform_device **devs, int num)
{
        struct device *dev;
        int i;

        /* simply add the devices to list */
        for (i = 0; i < num; i++) {
                dev = &devs[i]->dev;

                if (!dev->devres_head.next) {
                        pm_runtime_early_init(dev);
                        INIT_LIST_HEAD(&dev->devres_head);
                        list_add_tail(&dev->devres_head,
                                      &early_platform_device_list);
                }
        }
}

/**
 * early_platform_driver_register_all - register early platform drivers
 * @class_str: string to identify early platform driver class
 *
 * Used by architecture code to register all early platform drivers
 * for a certain class. If omitted then only early platform drivers
 * with matching kernel command line class parameters will be registered.
 */
void __init early_platform_driver_register_all(char *class_str)
{
        /* The "class_str" parameter may or may not be present on the kernel
         * command line. If it is present then there may be more than one
         * matching parameter.
         *
         * Since we register our early platform drivers using early_param()
         * we need to make sure that they also get registered in the case
         * when the parameter is missing from the kernel command line.
         *
         * We use parse_early_options() to make sure the early_param() gets
         * called at least once. The early_param() may be called more than
         * once since the name of the preferred device may be specified on
         * the kernel command line. early_platform_driver_register() handles
         * this case for us.
         */
        parse_early_options(class_str);
}
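
/*
 * Editor's illustrative sketch (not part of the original file): the way
 * architecture and driver code has historically wired these early platform
 * hooks together (e.g. for early timers). The "earlytimer" class string and
 * the foo_* names are examples only.
 *
 *        In the driver:
 *                early_platform_init("earlytimer", &foo_timer_driver);
 *
 *        In early architecture setup code:
 *                early_platform_add_devices(foo_early_devices,
 *                                           ARRAY_SIZE(foo_early_devices));
 *
 *        In the architecture's time_init(), before the driver core is up:
 *                early_platform_driver_register_all("earlytimer");
 *                early_platform_driver_probe("earlytimer", 1, 0);
 */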

/**
 * early_platform_match - find early platform device matching driver
 * @epdrv: early platform driver structure
 * @id: id to match against
 */
static struct platform_device * __init
early_platform_match(struct early_platform_driver *epdrv, int id)
{
        struct platform_device *pd;

        list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
                if (platform_match(&pd->dev, &epdrv->pdrv->driver))
                        if (pd->id == id)
                                return pd;

        return NULL;
}

/**
 * early_platform_left - check if early platform driver has matching devices
 * @epdrv: early platform driver structure
 * @id: return true if id or above exists
 */
static int __init early_platform_left(struct early_platform_driver *epdrv,
                                      int id)
{
        struct platform_device *pd;

        list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
                if (platform_match(&pd->dev, &epdrv->pdrv->driver))
                        if (pd->id >= id)
                                return 1;

        return 0;
}

/**
 * early_platform_driver_probe_id - probe drivers matching class_str and id
 * @class_str: string to identify early platform driver class
 * @id: id to match against
 * @nr_probe: number of platform devices to successfully probe before exiting
 */
static int __init early_platform_driver_probe_id(char *class_str,
                                                 int id,
                                                 int nr_probe)
{
        struct early_platform_driver *epdrv;
        struct platform_device *match;
        int match_id;
        int n = 0;
        int left = 0;

        list_for_each_entry(epdrv, &early_platform_driver_list, list) {
                /* only use drivers matching our class_str */
                if (strcmp(class_str, epdrv->class_str))
                        continue;

                if (id == -2) {
                        match_id = epdrv->requested_id;
                        left = 1;

                } else {
                        match_id = id;
                        left += early_platform_left(epdrv, id);

                        /* skip requested id */
                        switch (epdrv->requested_id) {
                        case EARLY_PLATFORM_ID_ERROR:
                        case EARLY_PLATFORM_ID_UNSET:
                                break;
                        default:
                                if (epdrv->requested_id == id)
                                        match_id = EARLY_PLATFORM_ID_UNSET;
                        }
                }

                switch (match_id) {
                case EARLY_PLATFORM_ID_ERROR:
                        pr_warn("%s: unable to parse %s parameter\n",
                                class_str, epdrv->pdrv->driver.name);
                        /* fall-through */
                case EARLY_PLATFORM_ID_UNSET:
                        match = NULL;
                        break;
                default:
                        match = early_platform_match(epdrv, match_id);
                }

                if (match) {
                        /*
                         * Set up a sensible init_name to enable
                         * dev_name() and others to be used before the
                         * rest of the driver core is initialized.
                         */
                        if (!match->dev.init_name && slab_is_available()) {
                                if (match->id != -1)
                                        match->dev.init_name =
                                                kasprintf(GFP_KERNEL, "%s.%d",
                                                          match->name,
                                                          match->id);
                                else
                                        match->dev.init_name =
                                                kasprintf(GFP_KERNEL, "%s",
                                                          match->name);

                                if (!match->dev.init_name)
                                        return -ENOMEM;
                        }

                        if (epdrv->pdrv->probe(match))
                                pr_warn("%s: unable to probe %s early.\n",
                                        class_str, match->name);
                        else
                                n++;
                }

                if (n >= nr_probe)
                        break;
        }

        if (left)
                return n;
        else
                return -ENODEV;
}

/**
 * early_platform_driver_probe - probe a class of registered drivers
 * @class_str: string to identify early platform driver class
 * @nr_probe: number of platform devices to successfully probe before exiting
 * @user_only: only probe user specified early platform devices
 *
 * Used by architecture code to probe registered early platform drivers
 * within a certain class. For probe to happen a registered early platform
 * device matching a registered early platform driver is needed.
 */
int __init early_platform_driver_probe(char *class_str,
                                       int nr_probe,
                                       int user_only)
{
        int k, n, i;

        n = 0;
        for (i = -2; n < nr_probe; i++) {
                k = early_platform_driver_probe_id(class_str, i, nr_probe - n);

                if (k < 0)
                        break;

                n += k;

                if (user_only)
                        break;
        }

        return n;
}

/**
 * early_platform_cleanup - clean up early platform code
 */
void __init early_platform_cleanup(void)
{
        struct platform_device *pd, *pd2;

        /* clean up the devres list used to chain devices */
        list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
                                 dev.devres_head) {
                list_del(&pd->dev.devres_head);
                memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
        }
}