// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

#ifdef CONFIG_HAS_IOMEM
/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);

/**
 * devm_platform_ioremap_resource_wc - write-combined variant of
 *				       devm_platform_ioremap_resource()
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 */
void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
						unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource_wc(&pdev->dev, res);
}

/**
 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
 *					    a platform device, retrieve the
 *					    resource by name
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @name: name of the resource
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
				      const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
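
/*
 * Example (illustrative sketch): mapping the first MEM resource from a
 * hypothetical driver's probe routine. The foo_* names and FOO_CTRL_REG
 * are made-up placeholders.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *
 *		writel(0, base + FOO_CTRL_REG);
 *		return 0;
 *	}
 */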

/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Example:
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	return dev->archdata.irqs[num];
#else
	struct resource *r;
	int ret;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				return ret;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			return -ENXIO;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r)
		return r->start;

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	return -ENXIO;
#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding the
 * IRQ fails. Device drivers should check the return value for errors so as to
 * not pass a negative integer value to the request_irq() APIs.
 *
 * Example:
 *		int irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = platform_get_irq_optional(dev, num);
	if (ret < 0 && ret != -EPROBE_DEFER)
		dev_err(&dev->dev, "IRQ index %u not found\n", num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or -EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

static int __platform_get_irq_byname(struct platform_device *dev,
				     const char *name)
{
	struct resource *r;
	int ret;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r)
		return r->start;

	return -ENXIO;
}

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an IRQ like platform_get_irq(), but then by name rather than by index.
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	int ret;

	ret = __platform_get_irq_byname(dev, name);
	if (ret < 0 && ret != -EPROBE_DEFER)
		dev_err(&dev->dev, "IRQ %s not found\n", name);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
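
/*
 * Example (illustrative sketch): requesting a named IRQ from a hypothetical
 * driver's probe routine. The "wakeup" IRQ name and foo_* identifiers are
 * made-up placeholders.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq;
 *
 *		irq = platform_get_irq_byname(pdev, "wakeup");
 *		if (irq < 0)
 *			return irq;
 *
 *		return devm_request_irq(&pdev->dev, irq, foo_irq_handler,
 *					0, dev_name(&pdev->dev), pdev);
 *	}
 */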

/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);

/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);

struct platform_object {
	struct platform_device pdev;
	char name[];
};

/*
 * Set up default DMA mask for platform devices if they weren't
 * previously set by the architecture / DT.
 */
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dma_mask)
		pdev->dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dma_mask;
}

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		setup_pdev_dma_masks(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);
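
/*
 * Example (illustrative sketch): dynamically creating a platform device with
 * platform_device_alloc() and the platform_device_add_*() helpers below. The
 * foo_* names and data are made-up placeholders.
 *
 *	struct platform_device *pdev;
 *	int ret;
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add(pdev);
 *	if (ret)
 *		goto err_put;
 *
 *	return 0;
 *
 * err_put:
 *	platform_device_put(pdev);
 *	return ret;
 */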

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add data to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take deep copy of @properties and attach the copy to the
 * platform device. The memory associated with properties will be freed when the
 * platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	u32 i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_simple_remove(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
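
/*
 * Example (illustrative sketch): registering a statically defined platform
 * device, e.g. from board setup code. The foo_* names and the resource
 * values are made-up placeholders.
 *
 *	static struct resource foo_resources[] = {
 *		DEFINE_RES_MEM(0x10000000, SZ_4K),
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static struct platform_device foo_device = {
 *		.name		= "foo",
 *		.id		= PLATFORM_DEVID_NONE,
 *		.resource	= foo_resources,
 *		.num_resources	= ARRAY_SIZE(foo_resources),
 *	};
 *
 *	platform_device_register(&foo_device);
 */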

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/*
		 * This memory isn't freed when the device is put,
		 * I don't have a nice idea for that though. Conceptually
		 * dma_mask in struct device should not be a pointer.
		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			goto err;

		kmemleak_ignore(pdev->dev.dma_mask);

		*pdev->dev.dma_mask = pdevinfo->dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		kfree(pdev->dev.dma_mask);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
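
/*
 * Example (illustrative sketch): creating a device from a platform_device_info
 * descriptor. The foo names, parent_dev and the platform data are made-up
 * placeholders.
 *
 *	struct platform_device_info pdevinfo = {
 *		.parent		= parent_dev,
 *		.name		= "foo",
 *		.id		= PLATFORM_DEVID_AUTO,
 *		.data		= &foo_pdata,
 *		.size_data	= sizeof(foo_pdata),
 *	};
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_full(&pdevinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */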

static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}

static int platform_drv_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret = 0;

	if (drv->remove)
		ret = drv->remove(dev);
	dev_pm_domain_detach(_dev, true);

	return ret;
}

static void platform_drv_shutdown(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
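
/*
 * Example (illustrative sketch): a minimal modular platform driver built on
 * the registration helpers above. The foo_* names and the "vendor,foo"
 * compatible string are made-up placeholders.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */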

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
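
/*
 * Example (illustrative sketch): registering several related drivers at once
 * with platform_register_drivers(), the wrapper around the function below.
 * The foo_* driver names are made-up placeholders.
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_gpio_driver,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_register_drivers(foo_drivers,
 *						 ARRAY_SIZE(foo_drivers));
 *	}
 */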

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running: "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running: "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	return (len >= PAGE_SIZE) ?
			(PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sprintf(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(platform_dev);

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}
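
/*
 * Example (illustrative sketch): an id_table consumed by platform_match_id()
 * during matching in platform_match() below. The foo/bar names are made-up
 * placeholders.
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ "foo",	(kernel_ulong_t)&foo_chip_info },
 *		{ "bar",	(kernel_ulong_t)&bar_chip_info },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe		= foo_probe,
 *		.id_table	= foo_id_table,
 *		.driver		= { .name = "foo" },
 *	};
 */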

/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'. Driver IDs are simply
 * "<name>". So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);

void __weak __init early_platform_cleanup(void) { }

int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}