// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

/*
 * Root of the platform pseudo-bus: platform devices registered without an
 * explicit parent are parented here (see platform_device_add()).
 */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 *
 * Return: a pointer to the resource or NULL on failure.
52 */ 53 struct resource *platform_get_resource(struct platform_device *dev, 54 unsigned int type, unsigned int num) 55 { 56 u32 i; 57 58 for (i = 0; i < dev->num_resources; i++) { 59 struct resource *r = &dev->resource[i]; 60 61 if (type == resource_type(r) && num-- == 0) 62 return r; 63 } 64 return NULL; 65 } 66 EXPORT_SYMBOL_GPL(platform_get_resource); 67 68 struct resource *platform_get_mem_or_io(struct platform_device *dev, 69 unsigned int num) 70 { 71 u32 i; 72 73 for (i = 0; i < dev->num_resources; i++) { 74 struct resource *r = &dev->resource[i]; 75 76 if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) 77 return r; 78 } 79 return NULL; 80 } 81 EXPORT_SYMBOL_GPL(platform_get_mem_or_io); 82 83 #ifdef CONFIG_HAS_IOMEM 84 /** 85 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a 86 * platform device and get resource 87 * 88 * @pdev: platform device to use both for memory resource lookup as well as 89 * resource management 90 * @index: resource index 91 * @res: optional output parameter to store a pointer to the obtained resource. 92 * 93 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 94 * on failure. 95 */ 96 void __iomem * 97 devm_platform_get_and_ioremap_resource(struct platform_device *pdev, 98 unsigned int index, struct resource **res) 99 { 100 struct resource *r; 101 102 r = platform_get_resource(pdev, IORESOURCE_MEM, index); 103 if (res) 104 *res = r; 105 return devm_ioremap_resource(&pdev->dev, r); 106 } 107 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); 108 109 /** 110 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform 111 * device 112 * 113 * @pdev: platform device to use both for memory resource lookup as well as 114 * resource management 115 * @index: resource index 116 * 117 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 118 * on failure. 
119 */ 120 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, 121 unsigned int index) 122 { 123 return devm_platform_get_and_ioremap_resource(pdev, index, NULL); 124 } 125 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); 126 127 /** 128 * devm_platform_ioremap_resource_wc - write-combined variant of 129 * devm_platform_ioremap_resource() 130 * 131 * @pdev: platform device to use both for memory resource lookup as well as 132 * resource management 133 * @index: resource index 134 * 135 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 136 * on failure. 137 */ 138 void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev, 139 unsigned int index) 140 { 141 struct resource *res; 142 143 res = platform_get_resource(pdev, IORESOURCE_MEM, index); 144 return devm_ioremap_resource_wc(&pdev->dev, res); 145 } 146 147 /** 148 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for 149 * a platform device, retrieve the 150 * resource by name 151 * 152 * @pdev: platform device to use both for memory resource lookup as well as 153 * resource management 154 * @name: name of the resource 155 * 156 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 157 * on failure. 
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
				      const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */

/*
 * Core IRQ lookup shared by platform_get_irq() and platform_get_irq_optional().
 *
 * Tries, in order: the sparc archdata table (sparc has no IORESOURCE_IRQ
 * resources), the OF interrupt tree, the device's IORESOURCE_IRQ resources
 * (optionally re-enabled via ACPI, with trigger flags programmed), and
 * finally an ACPI GpioInt fallback for index 0.
 * Returns -ENXIO when no IRQ is found.
 */
static int platform_do_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct resource *r;

	/* Prefer the device-tree description when the device has one. */
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		/* Ask ACPI to (re)populate a disabled IRQ resource. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

#endif
out_not_found:
	ret = -ENXIO;
out:
	WARN(ret == 0, "0 is an invalid IRQ number\n");
	return ret;
}

/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained and returns
 * 0 when IRQ resource has not been found.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *		if (irq > 0)
 *			...we have IRQ line defined...
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = platform_do_get_irq(dev, num);
	if (ret == -ENXIO)
		return 0;	/* "not found" is not an error for the _optional variant */
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding the
 * IRQ fails. Device drivers should check the return value for errors so as to
 * not pass a negative integer value to the request_irq() APIs.
 *
 * For example::
 *
 *		int irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
288 */ 289 int platform_get_irq(struct platform_device *dev, unsigned int num) 290 { 291 int ret; 292 293 ret = platform_do_get_irq(dev, num); 294 if (ret < 0 && ret != -EPROBE_DEFER) 295 dev_err(&dev->dev, "IRQ index %u not found\n", num); 296 297 return ret; 298 } 299 EXPORT_SYMBOL_GPL(platform_get_irq); 300 301 /** 302 * platform_irq_count - Count the number of IRQs a platform device uses 303 * @dev: platform device 304 * 305 * Return: Number of IRQs a platform device uses or EPROBE_DEFER 306 */ 307 int platform_irq_count(struct platform_device *dev) 308 { 309 int ret, nr = 0; 310 311 while ((ret = platform_do_get_irq(dev, nr)) >= 0) 312 nr++; 313 314 if (ret == -EPROBE_DEFER) 315 return ret; 316 317 return nr; 318 } 319 EXPORT_SYMBOL_GPL(platform_irq_count); 320 321 struct irq_affinity_devres { 322 unsigned int count; 323 unsigned int irq[]; 324 }; 325 326 static void platform_disable_acpi_irq(struct platform_device *pdev, int index) 327 { 328 struct resource *r; 329 330 r = platform_get_resource(pdev, IORESOURCE_IRQ, index); 331 if (r) 332 irqresource_disabled(r, 0); 333 } 334 335 static void devm_platform_get_irqs_affinity_release(struct device *dev, 336 void *res) 337 { 338 struct irq_affinity_devres *ptr = res; 339 int i; 340 341 for (i = 0; i < ptr->count; i++) { 342 irq_dispose_mapping(ptr->irq[i]); 343 344 if (has_acpi_companion(dev)) 345 platform_disable_acpi_irq(to_platform_device(dev), i); 346 } 347 } 348 349 /** 350 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a 351 * device using an interrupt affinity descriptor 352 * @dev: platform device pointer 353 * @affd: affinity descriptor 354 * @minvec: minimum count of interrupt vectors 355 * @maxvec: maximum count of interrupt vectors 356 * @irqs: pointer holder for IRQ numbers 357 * 358 * Gets a set of IRQs for a platform device, and updates IRQ afffinty according 359 * to the passed affinity descriptor 360 * 361 * Return: Number of vectors on success, negative error number on 
failure. 362 */ 363 int devm_platform_get_irqs_affinity(struct platform_device *dev, 364 struct irq_affinity *affd, 365 unsigned int minvec, 366 unsigned int maxvec, 367 int **irqs) 368 { 369 struct irq_affinity_devres *ptr; 370 struct irq_affinity_desc *desc; 371 size_t size; 372 int i, ret, nvec; 373 374 if (!affd) 375 return -EPERM; 376 377 if (maxvec < minvec) 378 return -ERANGE; 379 380 nvec = platform_irq_count(dev); 381 if (nvec < 0) 382 return nvec; 383 384 if (nvec < minvec) 385 return -ENOSPC; 386 387 nvec = irq_calc_affinity_vectors(minvec, nvec, affd); 388 if (nvec < minvec) 389 return -ENOSPC; 390 391 if (nvec > maxvec) 392 nvec = maxvec; 393 394 size = sizeof(*ptr) + sizeof(unsigned int) * nvec; 395 ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size, 396 GFP_KERNEL); 397 if (!ptr) 398 return -ENOMEM; 399 400 ptr->count = nvec; 401 402 for (i = 0; i < nvec; i++) { 403 int irq = platform_get_irq(dev, i); 404 if (irq < 0) { 405 ret = irq; 406 goto err_free_devres; 407 } 408 ptr->irq[i] = irq; 409 } 410 411 desc = irq_create_affinity_masks(nvec, affd); 412 if (!desc) { 413 ret = -ENOMEM; 414 goto err_free_devres; 415 } 416 417 for (i = 0; i < nvec; i++) { 418 ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]); 419 if (ret) { 420 dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n", 421 ptr->irq[i], ret); 422 goto err_free_desc; 423 } 424 } 425 426 devres_add(&dev->dev, ptr); 427 428 kfree(desc); 429 430 *irqs = ptr->irq; 431 432 return nvec; 433 434 err_free_desc: 435 kfree(desc); 436 err_free_devres: 437 devres_free(ptr); 438 return ret; 439 } 440 EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity); 441 442 /** 443 * platform_get_resource_byname - get a resource for a device by name 444 * @dev: platform device 445 * @type: resource type 446 * @name: resource name 447 */ 448 struct resource *platform_get_resource_byname(struct platform_device *dev, 449 unsigned int type, 450 const char *name) 451 { 452 u32 i; 453 454 for 
(i = 0; i < dev->num_resources; i++) { 455 struct resource *r = &dev->resource[i]; 456 457 if (unlikely(!r->name)) 458 continue; 459 460 if (type == resource_type(r) && !strcmp(r->name, name)) 461 return r; 462 } 463 return NULL; 464 } 465 EXPORT_SYMBOL_GPL(platform_get_resource_byname); 466 467 static int __platform_get_irq_byname(struct platform_device *dev, 468 const char *name) 469 { 470 struct resource *r; 471 int ret; 472 473 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { 474 ret = of_irq_get_byname(dev->dev.of_node, name); 475 if (ret > 0 || ret == -EPROBE_DEFER) 476 return ret; 477 } 478 479 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); 480 if (r) { 481 WARN(r->start == 0, "0 is an invalid IRQ number\n"); 482 return r->start; 483 } 484 485 return -ENXIO; 486 } 487 488 /** 489 * platform_get_irq_byname - get an IRQ for a device by name 490 * @dev: platform device 491 * @name: IRQ name 492 * 493 * Get an IRQ like platform_get_irq(), but then by name rather then by index. 494 * 495 * Return: non-zero IRQ number on success, negative error number on failure. 496 */ 497 int platform_get_irq_byname(struct platform_device *dev, const char *name) 498 { 499 int ret; 500 501 ret = __platform_get_irq_byname(dev, name); 502 if (ret < 0 && ret != -EPROBE_DEFER) 503 dev_err(&dev->dev, "IRQ %s not found\n", name); 504 505 return ret; 506 } 507 EXPORT_SYMBOL_GPL(platform_get_irq_byname); 508 509 /** 510 * platform_get_irq_byname_optional - get an optional IRQ for a device by name 511 * @dev: platform device 512 * @name: IRQ name 513 * 514 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it 515 * does not print an error message if an IRQ can not be obtained. 516 * 517 * Return: non-zero IRQ number on success, negative error number on failure. 
518 */ 519 int platform_get_irq_byname_optional(struct platform_device *dev, 520 const char *name) 521 { 522 return __platform_get_irq_byname(dev, name); 523 } 524 EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); 525 526 /** 527 * platform_add_devices - add a numbers of platform devices 528 * @devs: array of platform devices to add 529 * @num: number of platform devices in array 530 */ 531 int platform_add_devices(struct platform_device **devs, int num) 532 { 533 int i, ret = 0; 534 535 for (i = 0; i < num; i++) { 536 ret = platform_device_register(devs[i]); 537 if (ret) { 538 while (--i >= 0) 539 platform_device_unregister(devs[i]); 540 break; 541 } 542 } 543 544 return ret; 545 } 546 EXPORT_SYMBOL_GPL(platform_add_devices); 547 548 struct platform_object { 549 struct platform_device pdev; 550 char name[]; 551 }; 552 553 /* 554 * Set up default DMA mask for platform devices if the they weren't 555 * previously set by the architecture / DT. 556 */ 557 static void setup_pdev_dma_masks(struct platform_device *pdev) 558 { 559 pdev->dev.dma_parms = &pdev->dma_parms; 560 561 if (!pdev->dev.coherent_dma_mask) 562 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 563 if (!pdev->dev.dma_mask) { 564 pdev->platform_dma_mask = DMA_BIT_MASK(32); 565 pdev->dev.dma_mask = &pdev->platform_dma_mask; 566 } 567 }; 568 569 /** 570 * platform_device_put - destroy a platform device 571 * @pdev: platform device to free 572 * 573 * Free all memory associated with a platform device. This function must 574 * _only_ be externally called in error cases. All other usage is a bug. 
575 */ 576 void platform_device_put(struct platform_device *pdev) 577 { 578 if (!IS_ERR_OR_NULL(pdev)) 579 put_device(&pdev->dev); 580 } 581 EXPORT_SYMBOL_GPL(platform_device_put); 582 583 static void platform_device_release(struct device *dev) 584 { 585 struct platform_object *pa = container_of(dev, struct platform_object, 586 pdev.dev); 587 588 of_node_put(pa->pdev.dev.of_node); 589 kfree(pa->pdev.dev.platform_data); 590 kfree(pa->pdev.mfd_cell); 591 kfree(pa->pdev.resource); 592 kfree(pa->pdev.driver_override); 593 kfree(pa); 594 } 595 596 /** 597 * platform_device_alloc - create a platform device 598 * @name: base name of the device we're adding 599 * @id: instance id 600 * 601 * Create a platform device object which can have other objects attached 602 * to it, and which will have attached objects freed when it is released. 603 */ 604 struct platform_device *platform_device_alloc(const char *name, int id) 605 { 606 struct platform_object *pa; 607 608 pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); 609 if (pa) { 610 strcpy(pa->name, name); 611 pa->pdev.name = pa->name; 612 pa->pdev.id = id; 613 device_initialize(&pa->pdev.dev); 614 pa->pdev.dev.release = platform_device_release; 615 setup_pdev_dma_masks(&pa->pdev); 616 } 617 618 return pa ? &pa->pdev : NULL; 619 } 620 EXPORT_SYMBOL_GPL(platform_device_alloc); 621 622 /** 623 * platform_device_add_resources - add resources to a platform device 624 * @pdev: platform device allocated by platform_device_alloc to add resources to 625 * @res: set of resources that needs to be allocated for the device 626 * @num: number of resources 627 * 628 * Add a copy of the resources to the platform device. The memory 629 * associated with the resources will be freed when the platform device is 630 * released. 
631 */ 632 int platform_device_add_resources(struct platform_device *pdev, 633 const struct resource *res, unsigned int num) 634 { 635 struct resource *r = NULL; 636 637 if (res) { 638 r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); 639 if (!r) 640 return -ENOMEM; 641 } 642 643 kfree(pdev->resource); 644 pdev->resource = r; 645 pdev->num_resources = num; 646 return 0; 647 } 648 EXPORT_SYMBOL_GPL(platform_device_add_resources); 649 650 /** 651 * platform_device_add_data - add platform-specific data to a platform device 652 * @pdev: platform device allocated by platform_device_alloc to add resources to 653 * @data: platform specific data for this platform device 654 * @size: size of platform specific data 655 * 656 * Add a copy of platform specific data to the platform device's 657 * platform_data pointer. The memory associated with the platform data 658 * will be freed when the platform device is released. 659 */ 660 int platform_device_add_data(struct platform_device *pdev, const void *data, 661 size_t size) 662 { 663 void *d = NULL; 664 665 if (data) { 666 d = kmemdup(data, size, GFP_KERNEL); 667 if (!d) 668 return -ENOMEM; 669 } 670 671 kfree(pdev->dev.platform_data); 672 pdev->dev.platform_data = d; 673 return 0; 674 } 675 EXPORT_SYMBOL_GPL(platform_device_add_data); 676 677 /** 678 * platform_device_add_properties - add built-in properties to a platform device 679 * @pdev: platform device to add properties to 680 * @properties: null terminated array of properties to add 681 * 682 * The function will take deep copy of @properties and attach the copy to the 683 * platform device. The memory associated with properties will be freed when the 684 * platform device is released. 
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	/* devices without an explicit parent hang off the platform root */
	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* claim each resource under its (or a type-default) parent resource */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* undo the auto ID allocation, then release resources claimed so far */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/* i indexes the first resource that was NOT inserted */
	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	u32 i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		/* mirror of the cleanup at the end of platform_device_add() */
		if (pdev->id_auto) {
			ida_free(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* take a reference on the OF node (if any) behind the fwnode */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		/* detach the ACPI companion before the final put frees pdev */
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/* Stub installed by __platform_driver_probe() so later bind attempts fail. */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	/* async probing could not tell us synchronously whether we bound */
	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	/* from here on, any late bind hits platform_probe_fail() */
	drv->probe = platform_probe_fail;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	/* registration succeeded but nothing bound: back out entirely */
	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	/* Roll back: unregister everything registered so far, newest first. */
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

/*
 * Walk a driver's id_table looking for an entry whose name matches the
 * device's name.  On a match, the matching entry is cached in
 * pdev->id_entry and returned; NULL is returned when no entry matches.
 */
static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}

#ifdef CONFIG_PM_SLEEP

/*
 * Shim for drivers using the legacy (non dev_pm_ops) suspend callback in
 * struct platform_driver.  Returns 0 when the driver provides no callback.
 */
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

/*
 * Shim for drivers using the legacy (non dev_pm_ops) resume callback in
 * struct platform_driver.  Returns 0 when the driver provides no callback.
 */
static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

/*
 * System sleep callbacks for the platform bus.  Each one prefers the
 * driver's dev_pm_ops callback when a dev_pm_ops table is present and
 * falls back to the legacy platform_driver callbacks otherwise.
 */

int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device	*pdev = to_platform_device(dev);
	int len;

	/* Try OF, then ACPI; -ENODEV from either means "not that kind of
	 * device", so fall through to the plain platform: alias.
	 */
	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);

/* Expose the device's NUMA node; hidden by is_visible when there is none. */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	/* device_lock serializes against driver_override_store. */
	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);

	return len;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* Strip a trailing newline so "echo foo > driver_override" works. */
	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	/* Swap in the new value under device_lock; an empty string clears
	 * the override.  The old value is freed after the lock is dropped.
	 */
	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/* Hide the numa_node attribute on devices without a NUMA node. */
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);

	if (a == &dev_attr_numa_node.attr &&
			dev_to_node(dev) == NUMA_NO_NODE)
		return 0;

	return a->mode;
}

static struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);


/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'.  Driver IDs are simply
 * "<name>".  So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device	*pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	/* Fall back to the plain platform-prefixed alias. */
	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
			pdev->name);
	return 0;
}

static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered using platform_driver_probe() cannot be bound
	 * again later because the probe function usually lives in __init code
	 * and so is gone. For these drivers .probe is set to
	 * platform_probe_fail in __platform_driver_probe(). Don't even
	 * prepare clocks and PM domains for these to match the traditional
	 * behaviour.
	 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			/* Undo the attach done above on probe failure. */
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static int platform_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	/* A non-zero return from the driver's remove is only warned about;
	 * the device is detached regardless.
	 */
	if (drv->remove) {
		int ret = drv->remove(dev);

		if (ret)
			dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
	}
	dev_pm_domain_detach(_dev, true);

	return 0;
}

static void platform_shutdown(struct device *_dev)
{
	struct platform_device *dev = to_platform_device(_dev);
	struct platform_driver *drv;

	/* Nothing to do for devices that never bound to a driver. */
	if (!_dev->driver)
		return;

	drv = to_platform_driver(_dev->driver);
	if (drv->shutdown)
		drv->shutdown(dev);
}


/*
 * Configure DMA for the device from firmware data: OF when the device has
 * a device-tree node, ACPI when it has an ACPI companion.  Devices with
 * neither are left untouched and 0 is returned.
 */
int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

/* Adapter so platform_match() can be used as a bus_find_device() match. */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);

void __weak __init early_platform_cleanup(void) { }

/*
 * Register the platform root device and the platform bus type, in that
 * order; if bus registration fails the root device is unregistered again.
 */
int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error =  bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}