// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

/*
 * Root of the platform 'pseudo' bus; platform devices registered without an
 * explicit parent are parented to this device (see platform_device_add()).
 */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 *
 * Return: a pointer to the resource or NULL on failure.
52 */ 53 struct resource *platform_get_resource(struct platform_device *dev, 54 unsigned int type, unsigned int num) 55 { 56 u32 i; 57 58 for (i = 0; i < dev->num_resources; i++) { 59 struct resource *r = &dev->resource[i]; 60 61 if (type == resource_type(r) && num-- == 0) 62 return r; 63 } 64 return NULL; 65 } 66 EXPORT_SYMBOL_GPL(platform_get_resource); 67 68 struct resource *platform_get_mem_or_io(struct platform_device *dev, 69 unsigned int num) 70 { 71 u32 i; 72 73 for (i = 0; i < dev->num_resources; i++) { 74 struct resource *r = &dev->resource[i]; 75 76 if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) 77 return r; 78 } 79 return NULL; 80 } 81 EXPORT_SYMBOL_GPL(platform_get_mem_or_io); 82 83 #ifdef CONFIG_HAS_IOMEM 84 /** 85 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a 86 * platform device and get resource 87 * 88 * @pdev: platform device to use both for memory resource lookup as well as 89 * resource management 90 * @index: resource index 91 * @res: optional output parameter to store a pointer to the obtained resource. 92 * 93 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 94 * on failure. 95 */ 96 void __iomem * 97 devm_platform_get_and_ioremap_resource(struct platform_device *pdev, 98 unsigned int index, struct resource **res) 99 { 100 struct resource *r; 101 102 r = platform_get_resource(pdev, IORESOURCE_MEM, index); 103 if (res) 104 *res = r; 105 return devm_ioremap_resource(&pdev->dev, r); 106 } 107 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); 108 109 /** 110 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform 111 * device 112 * 113 * @pdev: platform device to use both for memory resource lookup as well as 114 * resource management 115 * @index: resource index 116 * 117 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 118 * on failure. 
119 */ 120 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, 121 unsigned int index) 122 { 123 return devm_platform_get_and_ioremap_resource(pdev, index, NULL); 124 } 125 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); 126 127 /** 128 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for 129 * a platform device, retrieve the 130 * resource by name 131 * 132 * @pdev: platform device to use both for memory resource lookup as well as 133 * resource management 134 * @name: name of the resource 135 * 136 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 137 * on failure. 138 */ 139 void __iomem * 140 devm_platform_ioremap_resource_byname(struct platform_device *pdev, 141 const char *name) 142 { 143 struct resource *res; 144 145 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); 146 return devm_ioremap_resource(&pdev->dev, res); 147 } 148 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname); 149 #endif /* CONFIG_HAS_IOMEM */ 150 151 /** 152 * platform_get_irq_optional - get an optional IRQ for a device 153 * @dev: platform device 154 * @num: IRQ number index 155 * 156 * Gets an IRQ for a platform device. Device drivers should check the return 157 * value for errors so as to not pass a negative integer value to the 158 * request_irq() APIs. This is the same as platform_get_irq(), except that it 159 * does not print an error message if an IRQ can not be obtained. 160 * 161 * For example:: 162 * 163 * int irq = platform_get_irq_optional(pdev, 0); 164 * if (irq < 0) 165 * return irq; 166 * 167 * Return: non-zero IRQ number on success, negative error number on failure. 
168 */ 169 int platform_get_irq_optional(struct platform_device *dev, unsigned int num) 170 { 171 int ret; 172 #ifdef CONFIG_SPARC 173 /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ 174 if (!dev || num >= dev->archdata.num_irqs) 175 goto out_not_found; 176 ret = dev->archdata.irqs[num]; 177 goto out; 178 #else 179 struct resource *r; 180 181 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { 182 ret = of_irq_get(dev->dev.of_node, num); 183 if (ret > 0 || ret == -EPROBE_DEFER) 184 goto out; 185 } 186 187 r = platform_get_resource(dev, IORESOURCE_IRQ, num); 188 if (has_acpi_companion(&dev->dev)) { 189 if (r && r->flags & IORESOURCE_DISABLED) { 190 ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r); 191 if (ret) 192 goto out; 193 } 194 } 195 196 /* 197 * The resources may pass trigger flags to the irqs that need 198 * to be set up. It so happens that the trigger flags for 199 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* 200 * settings. 201 */ 202 if (r && r->flags & IORESOURCE_BITS) { 203 struct irq_data *irqd; 204 205 irqd = irq_get_irq_data(r->start); 206 if (!irqd) 207 goto out_not_found; 208 irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); 209 } 210 211 if (r) { 212 ret = r->start; 213 goto out; 214 } 215 216 /* 217 * For the index 0 interrupt, allow falling back to GpioInt 218 * resources. While a device could have both Interrupt and GpioInt 219 * resources, making this fallback ambiguous, in many common cases 220 * the device will only expose one IRQ, and this fallback 221 * allows a common code path across either kind of resource. 222 */ 223 if (num == 0 && has_acpi_companion(&dev->dev)) { 224 ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); 225 /* Our callers expect -ENXIO for missing IRQs. 
*/ 226 if (ret >= 0 || ret == -EPROBE_DEFER) 227 goto out; 228 } 229 230 #endif 231 out_not_found: 232 ret = -ENXIO; 233 out: 234 WARN(ret == 0, "0 is an invalid IRQ number\n"); 235 return ret; 236 } 237 EXPORT_SYMBOL_GPL(platform_get_irq_optional); 238 239 /** 240 * platform_get_irq - get an IRQ for a device 241 * @dev: platform device 242 * @num: IRQ number index 243 * 244 * Gets an IRQ for a platform device and prints an error message if finding the 245 * IRQ fails. Device drivers should check the return value for errors so as to 246 * not pass a negative integer value to the request_irq() APIs. 247 * 248 * For example:: 249 * 250 * int irq = platform_get_irq(pdev, 0); 251 * if (irq < 0) 252 * return irq; 253 * 254 * Return: non-zero IRQ number on success, negative error number on failure. 255 */ 256 int platform_get_irq(struct platform_device *dev, unsigned int num) 257 { 258 int ret; 259 260 ret = platform_get_irq_optional(dev, num); 261 if (ret < 0 && ret != -EPROBE_DEFER) 262 dev_err(&dev->dev, "IRQ index %u not found\n", num); 263 264 return ret; 265 } 266 EXPORT_SYMBOL_GPL(platform_get_irq); 267 268 /** 269 * platform_irq_count - Count the number of IRQs a platform device uses 270 * @dev: platform device 271 * 272 * Return: Number of IRQs a platform device uses or EPROBE_DEFER 273 */ 274 int platform_irq_count(struct platform_device *dev) 275 { 276 int ret, nr = 0; 277 278 while ((ret = platform_get_irq_optional(dev, nr)) >= 0) 279 nr++; 280 281 if (ret == -EPROBE_DEFER) 282 return ret; 283 284 return nr; 285 } 286 EXPORT_SYMBOL_GPL(platform_irq_count); 287 288 struct irq_affinity_devres { 289 unsigned int count; 290 unsigned int irq[]; 291 }; 292 293 static void platform_disable_acpi_irq(struct platform_device *pdev, int index) 294 { 295 struct resource *r; 296 297 r = platform_get_resource(pdev, IORESOURCE_IRQ, index); 298 if (r) 299 irqresource_disabled(r, 0); 300 } 301 302 static void devm_platform_get_irqs_affinity_release(struct device *dev, 303 void 
*res) 304 { 305 struct irq_affinity_devres *ptr = res; 306 int i; 307 308 for (i = 0; i < ptr->count; i++) { 309 irq_dispose_mapping(ptr->irq[i]); 310 311 if (has_acpi_companion(dev)) 312 platform_disable_acpi_irq(to_platform_device(dev), i); 313 } 314 } 315 316 /** 317 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a 318 * device using an interrupt affinity descriptor 319 * @dev: platform device pointer 320 * @affd: affinity descriptor 321 * @minvec: minimum count of interrupt vectors 322 * @maxvec: maximum count of interrupt vectors 323 * @irqs: pointer holder for IRQ numbers 324 * 325 * Gets a set of IRQs for a platform device, and updates IRQ afffinty according 326 * to the passed affinity descriptor 327 * 328 * Return: Number of vectors on success, negative error number on failure. 329 */ 330 int devm_platform_get_irqs_affinity(struct platform_device *dev, 331 struct irq_affinity *affd, 332 unsigned int minvec, 333 unsigned int maxvec, 334 int **irqs) 335 { 336 struct irq_affinity_devres *ptr; 337 struct irq_affinity_desc *desc; 338 size_t size; 339 int i, ret, nvec; 340 341 if (!affd) 342 return -EPERM; 343 344 if (maxvec < minvec) 345 return -ERANGE; 346 347 nvec = platform_irq_count(dev); 348 if (nvec < 0) 349 return nvec; 350 351 if (nvec < minvec) 352 return -ENOSPC; 353 354 nvec = irq_calc_affinity_vectors(minvec, nvec, affd); 355 if (nvec < minvec) 356 return -ENOSPC; 357 358 if (nvec > maxvec) 359 nvec = maxvec; 360 361 size = sizeof(*ptr) + sizeof(unsigned int) * nvec; 362 ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size, 363 GFP_KERNEL); 364 if (!ptr) 365 return -ENOMEM; 366 367 ptr->count = nvec; 368 369 for (i = 0; i < nvec; i++) { 370 int irq = platform_get_irq(dev, i); 371 if (irq < 0) { 372 ret = irq; 373 goto err_free_devres; 374 } 375 ptr->irq[i] = irq; 376 } 377 378 desc = irq_create_affinity_masks(nvec, affd); 379 if (!desc) { 380 ret = -ENOMEM; 381 goto err_free_devres; 382 } 383 384 for (i = 0; i < 
nvec; i++) { 385 ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]); 386 if (ret) { 387 dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n", 388 ptr->irq[i], ret); 389 goto err_free_desc; 390 } 391 } 392 393 devres_add(&dev->dev, ptr); 394 395 kfree(desc); 396 397 *irqs = ptr->irq; 398 399 return nvec; 400 401 err_free_desc: 402 kfree(desc); 403 err_free_devres: 404 devres_free(ptr); 405 return ret; 406 } 407 EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity); 408 409 /** 410 * platform_get_resource_byname - get a resource for a device by name 411 * @dev: platform device 412 * @type: resource type 413 * @name: resource name 414 */ 415 struct resource *platform_get_resource_byname(struct platform_device *dev, 416 unsigned int type, 417 const char *name) 418 { 419 u32 i; 420 421 for (i = 0; i < dev->num_resources; i++) { 422 struct resource *r = &dev->resource[i]; 423 424 if (unlikely(!r->name)) 425 continue; 426 427 if (type == resource_type(r) && !strcmp(r->name, name)) 428 return r; 429 } 430 return NULL; 431 } 432 EXPORT_SYMBOL_GPL(platform_get_resource_byname); 433 434 static int __platform_get_irq_byname(struct platform_device *dev, 435 const char *name) 436 { 437 struct resource *r; 438 int ret; 439 440 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { 441 ret = of_irq_get_byname(dev->dev.of_node, name); 442 if (ret > 0 || ret == -EPROBE_DEFER) 443 return ret; 444 } 445 446 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); 447 if (r) { 448 WARN(r->start == 0, "0 is an invalid IRQ number\n"); 449 return r->start; 450 } 451 452 return -ENXIO; 453 } 454 455 /** 456 * platform_get_irq_byname - get an IRQ for a device by name 457 * @dev: platform device 458 * @name: IRQ name 459 * 460 * Get an IRQ like platform_get_irq(), but then by name rather then by index. 461 * 462 * Return: non-zero IRQ number on success, negative error number on failure. 
463 */ 464 int platform_get_irq_byname(struct platform_device *dev, const char *name) 465 { 466 int ret; 467 468 ret = __platform_get_irq_byname(dev, name); 469 if (ret < 0 && ret != -EPROBE_DEFER) 470 dev_err(&dev->dev, "IRQ %s not found\n", name); 471 472 return ret; 473 } 474 EXPORT_SYMBOL_GPL(platform_get_irq_byname); 475 476 /** 477 * platform_get_irq_byname_optional - get an optional IRQ for a device by name 478 * @dev: platform device 479 * @name: IRQ name 480 * 481 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it 482 * does not print an error message if an IRQ can not be obtained. 483 * 484 * Return: non-zero IRQ number on success, negative error number on failure. 485 */ 486 int platform_get_irq_byname_optional(struct platform_device *dev, 487 const char *name) 488 { 489 return __platform_get_irq_byname(dev, name); 490 } 491 EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); 492 493 /** 494 * platform_add_devices - add a numbers of platform devices 495 * @devs: array of platform devices to add 496 * @num: number of platform devices in array 497 */ 498 int platform_add_devices(struct platform_device **devs, int num) 499 { 500 int i, ret = 0; 501 502 for (i = 0; i < num; i++) { 503 ret = platform_device_register(devs[i]); 504 if (ret) { 505 while (--i >= 0) 506 platform_device_unregister(devs[i]); 507 break; 508 } 509 } 510 511 return ret; 512 } 513 EXPORT_SYMBOL_GPL(platform_add_devices); 514 515 struct platform_object { 516 struct platform_device pdev; 517 char name[]; 518 }; 519 520 /* 521 * Set up default DMA mask for platform devices if the they weren't 522 * previously set by the architecture / DT. 
523 */ 524 static void setup_pdev_dma_masks(struct platform_device *pdev) 525 { 526 pdev->dev.dma_parms = &pdev->dma_parms; 527 528 if (!pdev->dev.coherent_dma_mask) 529 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 530 if (!pdev->dev.dma_mask) { 531 pdev->platform_dma_mask = DMA_BIT_MASK(32); 532 pdev->dev.dma_mask = &pdev->platform_dma_mask; 533 } 534 }; 535 536 /** 537 * platform_device_put - destroy a platform device 538 * @pdev: platform device to free 539 * 540 * Free all memory associated with a platform device. This function must 541 * _only_ be externally called in error cases. All other usage is a bug. 542 */ 543 void platform_device_put(struct platform_device *pdev) 544 { 545 if (!IS_ERR_OR_NULL(pdev)) 546 put_device(&pdev->dev); 547 } 548 EXPORT_SYMBOL_GPL(platform_device_put); 549 550 static void platform_device_release(struct device *dev) 551 { 552 struct platform_object *pa = container_of(dev, struct platform_object, 553 pdev.dev); 554 555 of_node_put(pa->pdev.dev.of_node); 556 kfree(pa->pdev.dev.platform_data); 557 kfree(pa->pdev.mfd_cell); 558 kfree(pa->pdev.resource); 559 kfree(pa->pdev.driver_override); 560 kfree(pa); 561 } 562 563 /** 564 * platform_device_alloc - create a platform device 565 * @name: base name of the device we're adding 566 * @id: instance id 567 * 568 * Create a platform device object which can have other objects attached 569 * to it, and which will have attached objects freed when it is released. 570 */ 571 struct platform_device *platform_device_alloc(const char *name, int id) 572 { 573 struct platform_object *pa; 574 575 pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); 576 if (pa) { 577 strcpy(pa->name, name); 578 pa->pdev.name = pa->name; 579 pa->pdev.id = id; 580 device_initialize(&pa->pdev.dev); 581 pa->pdev.dev.release = platform_device_release; 582 setup_pdev_dma_masks(&pa->pdev); 583 } 584 585 return pa ? 
&pa->pdev : NULL; 586 } 587 EXPORT_SYMBOL_GPL(platform_device_alloc); 588 589 /** 590 * platform_device_add_resources - add resources to a platform device 591 * @pdev: platform device allocated by platform_device_alloc to add resources to 592 * @res: set of resources that needs to be allocated for the device 593 * @num: number of resources 594 * 595 * Add a copy of the resources to the platform device. The memory 596 * associated with the resources will be freed when the platform device is 597 * released. 598 */ 599 int platform_device_add_resources(struct platform_device *pdev, 600 const struct resource *res, unsigned int num) 601 { 602 struct resource *r = NULL; 603 604 if (res) { 605 r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); 606 if (!r) 607 return -ENOMEM; 608 } 609 610 kfree(pdev->resource); 611 pdev->resource = r; 612 pdev->num_resources = num; 613 return 0; 614 } 615 EXPORT_SYMBOL_GPL(platform_device_add_resources); 616 617 /** 618 * platform_device_add_data - add platform-specific data to a platform device 619 * @pdev: platform device allocated by platform_device_alloc to add resources to 620 * @data: platform specific data for this platform device 621 * @size: size of platform specific data 622 * 623 * Add a copy of platform specific data to the platform device's 624 * platform_data pointer. The memory associated with the platform data 625 * will be freed when the platform device is released. 
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	/* Replace (and free) any previously attached copy. */
	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take deep copy of @properties and attach the copy to the
 * platform device. The memory associated with properties will be freed when the
 * platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	/* Devices without an explicit parent hang off the platform root. */
	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim each resource in the global iomem/ioport trees (or its parent). */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Undo the auto ID and release every resource claimed so far (i is
	 * the index of the first resource that was NOT inserted). */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	u32 i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_free(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* Take a reference on the OF node (if any) behind the fwnode. */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		/* Drop any ACPI association before the final put. */
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/* Substitute probe used after __platform_driver_probe() to make late binds fail. */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = platform_probe_fail;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
978 */ 979 struct platform_device * __init_or_module __platform_create_bundle( 980 struct platform_driver *driver, 981 int (*probe)(struct platform_device *), 982 struct resource *res, unsigned int n_res, 983 const void *data, size_t size, struct module *module) 984 { 985 struct platform_device *pdev; 986 int error; 987 988 pdev = platform_device_alloc(driver->driver.name, -1); 989 if (!pdev) { 990 error = -ENOMEM; 991 goto err_out; 992 } 993 994 error = platform_device_add_resources(pdev, res, n_res); 995 if (error) 996 goto err_pdev_put; 997 998 error = platform_device_add_data(pdev, data, size); 999 if (error) 1000 goto err_pdev_put; 1001 1002 error = platform_device_add(pdev); 1003 if (error) 1004 goto err_pdev_put; 1005 1006 error = __platform_driver_probe(driver, probe, module); 1007 if (error) 1008 goto err_pdev_del; 1009 1010 return pdev; 1011 1012 err_pdev_del: 1013 platform_device_del(pdev); 1014 err_pdev_put: 1015 platform_device_put(pdev); 1016 err_out: 1017 return ERR_PTR(error); 1018 } 1019 EXPORT_SYMBOL_GPL(__platform_create_bundle); 1020 1021 /** 1022 * __platform_register_drivers - register an array of platform drivers 1023 * @drivers: an array of drivers to register 1024 * @count: the number of drivers to register 1025 * @owner: module owning the drivers 1026 * 1027 * Registers platform drivers specified by an array. On failure to register a 1028 * driver, all previously registered drivers will be unregistered. Callers of 1029 * this API should use platform_unregister_drivers() to unregister drivers in 1030 * the reverse order. 1031 * 1032 * Returns: 0 on success or a negative error code on failure. 
1033 */ 1034 int __platform_register_drivers(struct platform_driver * const *drivers, 1035 unsigned int count, struct module *owner) 1036 { 1037 unsigned int i; 1038 int err; 1039 1040 for (i = 0; i < count; i++) { 1041 pr_debug("registering platform driver %ps\n", drivers[i]); 1042 1043 err = __platform_driver_register(drivers[i], owner); 1044 if (err < 0) { 1045 pr_err("failed to register platform driver %ps: %d\n", 1046 drivers[i], err); 1047 goto error; 1048 } 1049 } 1050 1051 return 0; 1052 1053 error: 1054 while (i--) { 1055 pr_debug("unregistering platform driver %ps\n", drivers[i]); 1056 platform_driver_unregister(drivers[i]); 1057 } 1058 1059 return err; 1060 } 1061 EXPORT_SYMBOL_GPL(__platform_register_drivers); 1062 1063 /** 1064 * platform_unregister_drivers - unregister an array of platform drivers 1065 * @drivers: an array of drivers to unregister 1066 * @count: the number of drivers to unregister 1067 * 1068 * Unregisters platform drivers specified by an array. This is typically used 1069 * to complement an earlier call to platform_register_drivers(). Drivers are 1070 * unregistered in the reverse order in which they were registered. 
1071 */ 1072 void platform_unregister_drivers(struct platform_driver * const *drivers, 1073 unsigned int count) 1074 { 1075 while (count--) { 1076 pr_debug("unregistering platform driver %ps\n", drivers[count]); 1077 platform_driver_unregister(drivers[count]); 1078 } 1079 } 1080 EXPORT_SYMBOL_GPL(platform_unregister_drivers); 1081 1082 static const struct platform_device_id *platform_match_id( 1083 const struct platform_device_id *id, 1084 struct platform_device *pdev) 1085 { 1086 while (id->name[0]) { 1087 if (strcmp(pdev->name, id->name) == 0) { 1088 pdev->id_entry = id; 1089 return id; 1090 } 1091 id++; 1092 } 1093 return NULL; 1094 } 1095 1096 #ifdef CONFIG_PM_SLEEP 1097 1098 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) 1099 { 1100 struct platform_driver *pdrv = to_platform_driver(dev->driver); 1101 struct platform_device *pdev = to_platform_device(dev); 1102 int ret = 0; 1103 1104 if (dev->driver && pdrv->suspend) 1105 ret = pdrv->suspend(pdev, mesg); 1106 1107 return ret; 1108 } 1109 1110 static int platform_legacy_resume(struct device *dev) 1111 { 1112 struct platform_driver *pdrv = to_platform_driver(dev->driver); 1113 struct platform_device *pdev = to_platform_device(dev); 1114 int ret = 0; 1115 1116 if (dev->driver && pdrv->resume) 1117 ret = pdrv->resume(pdev); 1118 1119 return ret; 1120 } 1121 1122 #endif /* CONFIG_PM_SLEEP */ 1123 1124 #ifdef CONFIG_SUSPEND 1125 1126 int platform_pm_suspend(struct device *dev) 1127 { 1128 struct device_driver *drv = dev->driver; 1129 int ret = 0; 1130 1131 if (!drv) 1132 return 0; 1133 1134 if (drv->pm) { 1135 if (drv->pm->suspend) 1136 ret = drv->pm->suspend(dev); 1137 } else { 1138 ret = platform_legacy_suspend(dev, PMSG_SUSPEND); 1139 } 1140 1141 return ret; 1142 } 1143 1144 int platform_pm_resume(struct device *dev) 1145 { 1146 struct device_driver *drv = dev->driver; 1147 int ret = 0; 1148 1149 if (!drv) 1150 return 0; 1151 1152 if (drv->pm) { 1153 if (drv->pm->resume) 1154 ret = 
drv->pm->resume(dev); 1155 } else { 1156 ret = platform_legacy_resume(dev); 1157 } 1158 1159 return ret; 1160 } 1161 1162 #endif /* CONFIG_SUSPEND */ 1163 1164 #ifdef CONFIG_HIBERNATE_CALLBACKS 1165 1166 int platform_pm_freeze(struct device *dev) 1167 { 1168 struct device_driver *drv = dev->driver; 1169 int ret = 0; 1170 1171 if (!drv) 1172 return 0; 1173 1174 if (drv->pm) { 1175 if (drv->pm->freeze) 1176 ret = drv->pm->freeze(dev); 1177 } else { 1178 ret = platform_legacy_suspend(dev, PMSG_FREEZE); 1179 } 1180 1181 return ret; 1182 } 1183 1184 int platform_pm_thaw(struct device *dev) 1185 { 1186 struct device_driver *drv = dev->driver; 1187 int ret = 0; 1188 1189 if (!drv) 1190 return 0; 1191 1192 if (drv->pm) { 1193 if (drv->pm->thaw) 1194 ret = drv->pm->thaw(dev); 1195 } else { 1196 ret = platform_legacy_resume(dev); 1197 } 1198 1199 return ret; 1200 } 1201 1202 int platform_pm_poweroff(struct device *dev) 1203 { 1204 struct device_driver *drv = dev->driver; 1205 int ret = 0; 1206 1207 if (!drv) 1208 return 0; 1209 1210 if (drv->pm) { 1211 if (drv->pm->poweroff) 1212 ret = drv->pm->poweroff(dev); 1213 } else { 1214 ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); 1215 } 1216 1217 return ret; 1218 } 1219 1220 int platform_pm_restore(struct device *dev) 1221 { 1222 struct device_driver *drv = dev->driver; 1223 int ret = 0; 1224 1225 if (!drv) 1226 return 0; 1227 1228 if (drv->pm) { 1229 if (drv->pm->restore) 1230 ret = drv->pm->restore(dev); 1231 } else { 1232 ret = platform_legacy_resume(dev); 1233 } 1234 1235 return ret; 1236 } 1237 1238 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 1239 1240 /* modalias support enables more hands-off userspace setup: 1241 * (a) environment variable lets new-style hotplug events work once system is 1242 * fully running: "modprobe $MODALIAS" 1243 * (b) sysfs attribute lets new-style coldplug recover from hotplug events 1244 * mishandled before system is fully running: "modprobe $(cat modalias)" 1245 */ 1246 static ssize_t 
modalias_show(struct device *dev, 1247 struct device_attribute *attr, char *buf) 1248 { 1249 struct platform_device *pdev = to_platform_device(dev); 1250 int len; 1251 1252 len = of_device_modalias(dev, buf, PAGE_SIZE); 1253 if (len != -ENODEV) 1254 return len; 1255 1256 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 1257 if (len != -ENODEV) 1258 return len; 1259 1260 return sysfs_emit(buf, "platform:%s\n", pdev->name); 1261 } 1262 static DEVICE_ATTR_RO(modalias); 1263 1264 static ssize_t numa_node_show(struct device *dev, 1265 struct device_attribute *attr, char *buf) 1266 { 1267 return sysfs_emit(buf, "%d\n", dev_to_node(dev)); 1268 } 1269 static DEVICE_ATTR_RO(numa_node); 1270 1271 static ssize_t driver_override_show(struct device *dev, 1272 struct device_attribute *attr, char *buf) 1273 { 1274 struct platform_device *pdev = to_platform_device(dev); 1275 ssize_t len; 1276 1277 device_lock(dev); 1278 len = sysfs_emit(buf, "%s\n", pdev->driver_override); 1279 device_unlock(dev); 1280 1281 return len; 1282 } 1283 1284 static ssize_t driver_override_store(struct device *dev, 1285 struct device_attribute *attr, 1286 const char *buf, size_t count) 1287 { 1288 struct platform_device *pdev = to_platform_device(dev); 1289 char *driver_override, *old, *cp; 1290 1291 /* We need to keep extra room for a newline */ 1292 if (count >= (PAGE_SIZE - 1)) 1293 return -EINVAL; 1294 1295 driver_override = kstrndup(buf, count, GFP_KERNEL); 1296 if (!driver_override) 1297 return -ENOMEM; 1298 1299 cp = strchr(driver_override, '\n'); 1300 if (cp) 1301 *cp = '\0'; 1302 1303 device_lock(dev); 1304 old = pdev->driver_override; 1305 if (strlen(driver_override)) { 1306 pdev->driver_override = driver_override; 1307 } else { 1308 kfree(driver_override); 1309 pdev->driver_override = NULL; 1310 } 1311 device_unlock(dev); 1312 1313 kfree(old); 1314 1315 return count; 1316 } 1317 static DEVICE_ATTR_RW(driver_override); 1318 1319 static struct attribute *platform_dev_attrs[] = { 1320 
&dev_attr_modalias.attr, 1321 &dev_attr_numa_node.attr, 1322 &dev_attr_driver_override.attr, 1323 NULL, 1324 }; 1325 1326 static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, 1327 int n) 1328 { 1329 struct device *dev = container_of(kobj, typeof(*dev), kobj); 1330 1331 if (a == &dev_attr_numa_node.attr && 1332 dev_to_node(dev) == NUMA_NO_NODE) 1333 return 0; 1334 1335 return a->mode; 1336 } 1337 1338 static const struct attribute_group platform_dev_group = { 1339 .attrs = platform_dev_attrs, 1340 .is_visible = platform_dev_attrs_visible, 1341 }; 1342 __ATTRIBUTE_GROUPS(platform_dev); 1343 1344 1345 /** 1346 * platform_match - bind platform device to platform driver. 1347 * @dev: device. 1348 * @drv: driver. 1349 * 1350 * Platform device IDs are assumed to be encoded like this: 1351 * "<name><instance>", where <name> is a short description of the type of 1352 * device, like "pci" or "floppy", and <instance> is the enumerated 1353 * instance of the device, like '0' or '42'. Driver IDs are simply 1354 * "<name>". So, extract the <name> from the platform_device structure, 1355 * and compare it against the name of the driver. Return whether they match 1356 * or not. 
1357 */ 1358 static int platform_match(struct device *dev, struct device_driver *drv) 1359 { 1360 struct platform_device *pdev = to_platform_device(dev); 1361 struct platform_driver *pdrv = to_platform_driver(drv); 1362 1363 /* When driver_override is set, only bind to the matching driver */ 1364 if (pdev->driver_override) 1365 return !strcmp(pdev->driver_override, drv->name); 1366 1367 /* Attempt an OF style match first */ 1368 if (of_driver_match_device(dev, drv)) 1369 return 1; 1370 1371 /* Then try ACPI style match */ 1372 if (acpi_driver_match_device(dev, drv)) 1373 return 1; 1374 1375 /* Then try to match against the id table */ 1376 if (pdrv->id_table) 1377 return platform_match_id(pdrv->id_table, pdev) != NULL; 1378 1379 /* fall-back to driver name match */ 1380 return (strcmp(pdev->name, drv->name) == 0); 1381 } 1382 1383 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) 1384 { 1385 struct platform_device *pdev = to_platform_device(dev); 1386 int rc; 1387 1388 /* Some devices have extra OF data and an OF-style MODALIAS */ 1389 rc = of_device_uevent_modalias(dev, env); 1390 if (rc != -ENODEV) 1391 return rc; 1392 1393 rc = acpi_device_uevent_modalias(dev, env); 1394 if (rc != -ENODEV) 1395 return rc; 1396 1397 add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, 1398 pdev->name); 1399 return 0; 1400 } 1401 1402 static int platform_probe(struct device *_dev) 1403 { 1404 struct platform_driver *drv = to_platform_driver(_dev->driver); 1405 struct platform_device *dev = to_platform_device(_dev); 1406 int ret; 1407 1408 /* 1409 * A driver registered using platform_driver_probe() cannot be bound 1410 * again later because the probe function usually lives in __init code 1411 * and so is gone. For these drivers .probe is set to 1412 * platform_probe_fail in __platform_driver_probe(). Don't even prepare 1413 * clocks and PM domains for these to match the traditional behaviour. 
1414 */ 1415 if (unlikely(drv->probe == platform_probe_fail)) 1416 return -ENXIO; 1417 1418 ret = of_clk_set_defaults(_dev->of_node, false); 1419 if (ret < 0) 1420 return ret; 1421 1422 ret = dev_pm_domain_attach(_dev, true); 1423 if (ret) 1424 goto out; 1425 1426 if (drv->probe) { 1427 ret = drv->probe(dev); 1428 if (ret) 1429 dev_pm_domain_detach(_dev, true); 1430 } 1431 1432 out: 1433 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { 1434 dev_warn(_dev, "probe deferral not supported\n"); 1435 ret = -ENXIO; 1436 } 1437 1438 return ret; 1439 } 1440 1441 static int platform_remove(struct device *_dev) 1442 { 1443 struct platform_driver *drv = to_platform_driver(_dev->driver); 1444 struct platform_device *dev = to_platform_device(_dev); 1445 1446 if (drv->remove) { 1447 int ret = drv->remove(dev); 1448 1449 if (ret) 1450 dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n"); 1451 } 1452 dev_pm_domain_detach(_dev, true); 1453 1454 return 0; 1455 } 1456 1457 static void platform_shutdown(struct device *_dev) 1458 { 1459 struct platform_device *dev = to_platform_device(_dev); 1460 struct platform_driver *drv; 1461 1462 if (!_dev->driver) 1463 return; 1464 1465 drv = to_platform_driver(_dev->driver); 1466 if (drv->shutdown) 1467 drv->shutdown(dev); 1468 } 1469 1470 1471 int platform_dma_configure(struct device *dev) 1472 { 1473 enum dev_dma_attr attr; 1474 int ret = 0; 1475 1476 if (dev->of_node) { 1477 ret = of_dma_configure(dev, dev->of_node, true); 1478 } else if (has_acpi_companion(dev)) { 1479 attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode)); 1480 ret = acpi_dma_configure(dev, attr); 1481 } 1482 1483 return ret; 1484 } 1485 1486 static const struct dev_pm_ops platform_dev_pm_ops = { 1487 .runtime_suspend = pm_generic_runtime_suspend, 1488 .runtime_resume = pm_generic_runtime_resume, 1489 USE_PLATFORM_PM_SLEEP_OPS 1490 }; 1491 1492 struct bus_type platform_bus_type = { 1493 .name = "platform", 1494 .dev_groups = 
platform_dev_groups, 1495 .match = platform_match, 1496 .uevent = platform_uevent, 1497 .probe = platform_probe, 1498 .remove = platform_remove, 1499 .shutdown = platform_shutdown, 1500 .dma_configure = platform_dma_configure, 1501 .pm = &platform_dev_pm_ops, 1502 }; 1503 EXPORT_SYMBOL_GPL(platform_bus_type); 1504 1505 static inline int __platform_match(struct device *dev, const void *drv) 1506 { 1507 return platform_match(dev, (struct device_driver *)drv); 1508 } 1509 1510 /** 1511 * platform_find_device_by_driver - Find a platform device with a given 1512 * driver. 1513 * @start: The device to start the search from. 1514 * @drv: The device driver to look for. 1515 */ 1516 struct device *platform_find_device_by_driver(struct device *start, 1517 const struct device_driver *drv) 1518 { 1519 return bus_find_device(&platform_bus_type, start, drv, 1520 __platform_match); 1521 } 1522 EXPORT_SYMBOL_GPL(platform_find_device_by_driver); 1523 1524 void __weak __init early_platform_cleanup(void) { } 1525 1526 int __init platform_bus_init(void) 1527 { 1528 int error; 1529 1530 early_platform_cleanup(); 1531 1532 error = device_register(&platform_bus); 1533 if (error) { 1534 put_device(&platform_bus); 1535 return error; 1536 } 1537 error = bus_register(&platform_bus_type); 1538 if (error) 1539 device_unregister(&platform_bus); 1540 of_platform_register_reconfig_notifier(); 1541 return error; 1542 } 1543