xref: /openbmc/linux/drivers/base/platform.c (revision 249592bf)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * platform.c - platform 'pseudo' bus for legacy devices
4  *
5  * Copyright (c) 2002-3 Patrick Mochel
6  * Copyright (c) 2002-3 Open Source Development Labs
7  *
8  * Please see Documentation/driver-api/driver-model/platform.rst for more
9  * information.
10  */
11 
12 #include <linux/string.h>
13 #include <linux/platform_device.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/ioport.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/memblock.h>
22 #include <linux/err.h>
23 #include <linux/slab.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm_domain.h>
26 #include <linux/idr.h>
27 #include <linux/acpi.h>
28 #include <linux/clk/clk-conf.h>
29 #include <linux/limits.h>
30 #include <linux/property.h>
31 #include <linux/kmemleak.h>
32 #include <linux/types.h>
33 
34 #include "base.h"
35 #include "power/power.h"
36 
/* For automatically allocated device IDs (PLATFORM_DEVID_AUTO) */
static DEFINE_IDA(platform_devid_ida);

/*
 * Root device of the platform bus; used as the default parent for
 * platform devices registered without an explicit parent.
 */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
44 
45 /**
46  * platform_get_resource - get a resource for a device
47  * @dev: platform device
48  * @type: resource type
49  * @num: resource index
50  *
51  * Return: a pointer to the resource or NULL on failure.
52  */
53 struct resource *platform_get_resource(struct platform_device *dev,
54 				       unsigned int type, unsigned int num)
55 {
56 	u32 i;
57 
58 	for (i = 0; i < dev->num_resources; i++) {
59 		struct resource *r = &dev->resource[i];
60 
61 		if (type == resource_type(r) && num-- == 0)
62 			return r;
63 	}
64 	return NULL;
65 }
66 EXPORT_SYMBOL_GPL(platform_get_resource);
67 
68 struct resource *platform_get_mem_or_io(struct platform_device *dev,
69 					unsigned int num)
70 {
71 	u32 i;
72 
73 	for (i = 0; i < dev->num_resources; i++) {
74 		struct resource *r = &dev->resource[i];
75 
76 		if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
77 			return r;
78 	}
79 	return NULL;
80 }
81 EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
82 
83 #ifdef CONFIG_HAS_IOMEM
84 /**
85  * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
86  *					    platform device and get resource
87  *
88  * @pdev: platform device to use both for memory resource lookup as well as
89  *        resource management
90  * @index: resource index
91  * @res: optional output parameter to store a pointer to the obtained resource.
92  *
93  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
94  * on failure.
95  */
96 void __iomem *
97 devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
98 				unsigned int index, struct resource **res)
99 {
100 	struct resource *r;
101 
102 	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
103 	if (res)
104 		*res = r;
105 	return devm_ioremap_resource(&pdev->dev, r);
106 }
107 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
108 
109 /**
110  * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
111  *				    device
112  *
113  * @pdev: platform device to use both for memory resource lookup as well as
114  *        resource management
115  * @index: resource index
116  *
117  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
118  * on failure.
119  */
120 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
121 					     unsigned int index)
122 {
123 	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
124 }
125 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
126 
127 /**
128  * devm_platform_ioremap_resource_wc - write-combined variant of
129  *                                     devm_platform_ioremap_resource()
130  *
131  * @pdev: platform device to use both for memory resource lookup as well as
132  *        resource management
133  * @index: resource index
134  *
135  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
136  * on failure.
137  */
138 void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
139 						unsigned int index)
140 {
141 	struct resource *res;
142 
143 	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
144 	return devm_ioremap_resource_wc(&pdev->dev, res);
145 }
146 
147 /**
148  * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
149  *					   a platform device, retrieve the
150  *					   resource by name
151  *
152  * @pdev: platform device to use both for memory resource lookup as well as
153  *	  resource management
154  * @name: name of the resource
155  *
156  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
157  * on failure.
158  */
159 void __iomem *
160 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
161 				      const char *name)
162 {
163 	struct resource *res;
164 
165 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
166 	return devm_ioremap_resource(&pdev->dev, res);
167 }
168 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
169 #endif /* CONFIG_HAS_IOMEM */
170 
/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct resource *r;

	/* Try the device tree first; a positive number is a valid mapping. */
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		/* Let ACPI populate a resource that is still disabled. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

#endif
out_not_found:
	ret = -ENXIO;
out:
	/* IRQ 0 is never a valid return value on any architecture. */
	WARN(ret == 0, "0 is an invalid IRQ number\n");
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
258 
259 /**
260  * platform_get_irq - get an IRQ for a device
261  * @dev: platform device
262  * @num: IRQ number index
263  *
264  * Gets an IRQ for a platform device and prints an error message if finding the
265  * IRQ fails. Device drivers should check the return value for errors so as to
266  * not pass a negative integer value to the request_irq() APIs.
267  *
268  * For example::
269  *
270  *		int irq = platform_get_irq(pdev, 0);
271  *		if (irq < 0)
272  *			return irq;
273  *
274  * Return: non-zero IRQ number on success, negative error number on failure.
275  */
276 int platform_get_irq(struct platform_device *dev, unsigned int num)
277 {
278 	int ret;
279 
280 	ret = platform_get_irq_optional(dev, num);
281 	if (ret < 0 && ret != -EPROBE_DEFER)
282 		dev_err(&dev->dev, "IRQ index %u not found\n", num);
283 
284 	return ret;
285 }
286 EXPORT_SYMBOL_GPL(platform_get_irq);
287 
/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or -EPROBE_DEFER when at
 * least one IRQ provider is not ready yet
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	/* Probe successive indices until lookup fails. */
	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
		nr++;

	/* Propagate deferral so the caller can retry later. */
	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);
307 
/* devres payload tracking the IRQ mappings created for a device. */
struct irq_affinity_devres {
	unsigned int count;	/* number of valid entries in irq[] */
	unsigned int irq[];	/* flexible array of Linux IRQ numbers */
};
312 
313 static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
314 {
315 	struct resource *r;
316 
317 	r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
318 	if (r)
319 		irqresource_disabled(r, 0);
320 }
321 
322 static void devm_platform_get_irqs_affinity_release(struct device *dev,
323 						    void *res)
324 {
325 	struct irq_affinity_devres *ptr = res;
326 	int i;
327 
328 	for (i = 0; i < ptr->count; i++) {
329 		irq_dispose_mapping(ptr->irq[i]);
330 
331 		if (has_acpi_companion(dev))
332 			platform_disable_acpi_irq(to_platform_device(dev), i);
333 	}
334 }
335 
/**
 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 *				device using an interrupt affinity descriptor
 * @dev: platform device pointer
 * @affd: affinity descriptor
 * @minvec: minimum count of interrupt vectors
 * @maxvec: maximum count of interrupt vectors
 * @irqs: pointer holder for IRQ numbers
 *
 * Gets a set of IRQs for a platform device, and updates IRQ affinity according
 * to the passed affinity descriptor
 *
 * Return: Number of vectors on success, negative error number on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
				    struct irq_affinity *affd,
				    unsigned int minvec,
				    unsigned int maxvec,
				    int **irqs)
{
	struct irq_affinity_devres *ptr;
	struct irq_affinity_desc *desc;
	size_t size;
	int i, ret, nvec;

	if (!affd)
		return -EPERM;

	if (maxvec < minvec)
		return -ERANGE;

	nvec = platform_irq_count(dev);
	if (nvec < 0)
		return nvec;

	if (nvec < minvec)
		return -ENOSPC;

	/* Let the affinity code shrink the vector count (pre/post vectors). */
	nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	/* Payload: header plus one IRQ number per vector. */
	size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
	ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr->count = nvec;

	for (i = 0; i < nvec; i++) {
		int irq = platform_get_irq(dev, i);
		if (irq < 0) {
			ret = irq;
			goto err_free_devres;
		}
		ptr->irq[i] = irq;
	}

	desc = irq_create_affinity_masks(nvec, affd);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_devres;
	}

	for (i = 0; i < nvec; i++) {
		ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
		if (ret) {
			dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
				ptr->irq[i], ret);
			goto err_free_desc;
		}
	}

	/* Ownership of the IRQ mappings transfers to devres from here on. */
	devres_add(&dev->dev, ptr);

	kfree(desc);

	*irqs = ptr->irq;

	return nvec;

err_free_desc:
	kfree(desc);
err_free_devres:
	devres_free(ptr);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
428 
429 /**
430  * platform_get_resource_byname - get a resource for a device by name
431  * @dev: platform device
432  * @type: resource type
433  * @name: resource name
434  */
435 struct resource *platform_get_resource_byname(struct platform_device *dev,
436 					      unsigned int type,
437 					      const char *name)
438 {
439 	u32 i;
440 
441 	for (i = 0; i < dev->num_resources; i++) {
442 		struct resource *r = &dev->resource[i];
443 
444 		if (unlikely(!r->name))
445 			continue;
446 
447 		if (type == resource_type(r) && !strcmp(r->name, name))
448 			return r;
449 	}
450 	return NULL;
451 }
452 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
453 
454 static int __platform_get_irq_byname(struct platform_device *dev,
455 				     const char *name)
456 {
457 	struct resource *r;
458 	int ret;
459 
460 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
461 		ret = of_irq_get_byname(dev->dev.of_node, name);
462 		if (ret > 0 || ret == -EPROBE_DEFER)
463 			return ret;
464 	}
465 
466 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
467 	if (r) {
468 		WARN(r->start == 0, "0 is an invalid IRQ number\n");
469 		return r->start;
470 	}
471 
472 	return -ENXIO;
473 }
474 
475 /**
476  * platform_get_irq_byname - get an IRQ for a device by name
477  * @dev: platform device
478  * @name: IRQ name
479  *
480  * Get an IRQ like platform_get_irq(), but then by name rather then by index.
481  *
482  * Return: non-zero IRQ number on success, negative error number on failure.
483  */
484 int platform_get_irq_byname(struct platform_device *dev, const char *name)
485 {
486 	int ret;
487 
488 	ret = __platform_get_irq_byname(dev, name);
489 	if (ret < 0 && ret != -EPROBE_DEFER)
490 		dev_err(&dev->dev, "IRQ %s not found\n", name);
491 
492 	return ret;
493 }
494 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
495 
/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	/* Same lookup as platform_get_irq_byname(), minus the error print. */
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
512 
/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Registers @num devices; on the first failure every device registered so
 * far is unregistered again. Returns 0 on success or the first error code.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (!ret)
			continue;

		/* Roll back everything registered before the failure. */
		while (--i >= 0)
			platform_device_unregister(devs[i]);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
534 
/*
 * Container allocated by platform_device_alloc(): the platform device plus
 * the storage for its name (flexible array, copied at allocation time).
 */
struct platform_object {
	struct platform_device pdev;
	char name[];	/* backing store for pdev.name */
};
539 
540 /*
541  * Set up default DMA mask for platform devices if the they weren't
542  * previously set by the architecture / DT.
543  */
544 static void setup_pdev_dma_masks(struct platform_device *pdev)
545 {
546 	pdev->dev.dma_parms = &pdev->dma_parms;
547 
548 	if (!pdev->dev.coherent_dma_mask)
549 		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
550 	if (!pdev->dev.dma_mask) {
551 		pdev->platform_dma_mask = DMA_BIT_MASK(32);
552 		pdev->dev.dma_mask = &pdev->platform_dma_mask;
553 	}
554 };
555 
556 /**
557  * platform_device_put - destroy a platform device
558  * @pdev: platform device to free
559  *
560  * Free all memory associated with a platform device.  This function must
561  * _only_ be externally called in error cases.  All other usage is a bug.
562  */
563 void platform_device_put(struct platform_device *pdev)
564 {
565 	if (!IS_ERR_OR_NULL(pdev))
566 		put_device(&pdev->dev);
567 }
568 EXPORT_SYMBOL_GPL(platform_device_put);
569 
570 static void platform_device_release(struct device *dev)
571 {
572 	struct platform_object *pa = container_of(dev, struct platform_object,
573 						  pdev.dev);
574 
575 	of_node_put(pa->pdev.dev.of_node);
576 	kfree(pa->pdev.dev.platform_data);
577 	kfree(pa->pdev.mfd_cell);
578 	kfree(pa->pdev.resource);
579 	kfree(pa->pdev.driver_override);
580 	kfree(pa);
581 }
582 
583 /**
584  * platform_device_alloc - create a platform device
585  * @name: base name of the device we're adding
586  * @id: instance id
587  *
588  * Create a platform device object which can have other objects attached
589  * to it, and which will have attached objects freed when it is released.
590  */
591 struct platform_device *platform_device_alloc(const char *name, int id)
592 {
593 	struct platform_object *pa;
594 
595 	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
596 	if (pa) {
597 		strcpy(pa->name, name);
598 		pa->pdev.name = pa->name;
599 		pa->pdev.id = id;
600 		device_initialize(&pa->pdev.dev);
601 		pa->pdev.dev.release = platform_device_release;
602 		setup_pdev_dma_masks(&pa->pdev);
603 	}
604 
605 	return pa ? &pa->pdev : NULL;
606 }
607 EXPORT_SYMBOL_GPL(platform_device_alloc);
608 
609 /**
610  * platform_device_add_resources - add resources to a platform device
611  * @pdev: platform device allocated by platform_device_alloc to add resources to
612  * @res: set of resources that needs to be allocated for the device
613  * @num: number of resources
614  *
615  * Add a copy of the resources to the platform device.  The memory
616  * associated with the resources will be freed when the platform device is
617  * released.
618  */
619 int platform_device_add_resources(struct platform_device *pdev,
620 				  const struct resource *res, unsigned int num)
621 {
622 	struct resource *r = NULL;
623 
624 	if (res) {
625 		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
626 		if (!r)
627 			return -ENOMEM;
628 	}
629 
630 	kfree(pdev->resource);
631 	pdev->resource = r;
632 	pdev->num_resources = num;
633 	return 0;
634 }
635 EXPORT_SYMBOL_GPL(platform_device_add_resources);
636 
637 /**
638  * platform_device_add_data - add platform-specific data to a platform device
639  * @pdev: platform device allocated by platform_device_alloc to add resources to
640  * @data: platform specific data for this platform device
641  * @size: size of platform specific data
642  *
643  * Add a copy of platform specific data to the platform device's
644  * platform_data pointer.  The memory associated with the platform data
645  * will be freed when the platform device is released.
646  */
647 int platform_device_add_data(struct platform_device *pdev, const void *data,
648 			     size_t size)
649 {
650 	void *d = NULL;
651 
652 	if (data) {
653 		d = kmemdup(data, size, GFP_KERNEL);
654 		if (!d)
655 			return -ENOMEM;
656 	}
657 
658 	kfree(pdev->dev.platform_data);
659 	pdev->dev.platform_data = d;
660 	return 0;
661 }
662 EXPORT_SYMBOL_GPL(platform_device_add_data);
663 
/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take deep copy of @properties and attach the copy to the
 * platform device. The memory associated with properties will be freed when the
 * platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	/* Thin wrapper: the generic device-property code does the copy. */
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
679 
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	/* Devices without an explicit parent hang off the platform root. */
	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim every resource in the global iomem/ioport trees. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Unwind: return an auto-allocated ID ... */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/* ... and release only the resources claimed before the failure
	 * (i still holds the index of the first unclaimed resource). */
	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
768 
769 /**
770  * platform_device_del - remove a platform-level device
771  * @pdev: platform device we're removing
772  *
773  * Note that this function will also release all memory- and port-based
774  * resources owned by the device (@dev->resource).  This function must
775  * _only_ be externally called in error cases.  All other usage is a bug.
776  */
777 void platform_device_del(struct platform_device *pdev)
778 {
779 	u32 i;
780 
781 	if (!IS_ERR_OR_NULL(pdev)) {
782 		device_del(&pdev->dev);
783 
784 		if (pdev->id_auto) {
785 			ida_free(&platform_devid_ida, pdev->id);
786 			pdev->id = PLATFORM_DEVID_AUTO;
787 		}
788 
789 		for (i = 0; i < pdev->num_resources; i++) {
790 			struct resource *r = &pdev->resource[i];
791 			if (r->parent)
792 				release_resource(r);
793 		}
794 	}
795 }
796 EXPORT_SYMBOL_GPL(platform_device_del);
797 
/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	/* Part 1: initialize the embedded struct device and DMA masks ... */
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	/* ... part 2: name it, claim resources and add it to the hierarchy. */
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
809 
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
824 
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* Take an OF node reference; dropped again in platform_device_release(). */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
	/* Shared error path: drop the (still uninstantiated) device. */
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
882 
/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
				struct module *owner)
{
	/* Bind the generic driver to the platform bus before registering. */
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
897 
/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
907 
/*
 * Stub probe installed by __platform_driver_probe() after the one-shot
 * probe has run, so any later forced probe of a new device fails.
 */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}
912 
/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices.  Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = platform_probe_fail;
	/* Registration succeeded but nothing bound: treat as -ENODEV. */
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	/* Any divergence from the registration result means: unregister. */
	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
983 
984 /**
985  * __platform_create_bundle - register driver and create corresponding device
986  * @driver: platform driver structure
987  * @probe: the driver probe routine, probably from an __init section
988  * @res: set of resources that needs to be allocated for the device
989  * @n_res: number of resources
990  * @data: platform specific data for this platform device
991  * @size: size of platform specific data
992  * @module: module which will be the owner of the driver
993  *
994  * Use this in legacy-style modules that probe hardware directly and
995  * register a single platform device and corresponding platform driver.
996  *
997  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
998  */
999 struct platform_device * __init_or_module __platform_create_bundle(
1000 			struct platform_driver *driver,
1001 			int (*probe)(struct platform_device *),
1002 			struct resource *res, unsigned int n_res,
1003 			const void *data, size_t size, struct module *module)
1004 {
1005 	struct platform_device *pdev;
1006 	int error;
1007 
1008 	pdev = platform_device_alloc(driver->driver.name, -1);
1009 	if (!pdev) {
1010 		error = -ENOMEM;
1011 		goto err_out;
1012 	}
1013 
1014 	error = platform_device_add_resources(pdev, res, n_res);
1015 	if (error)
1016 		goto err_pdev_put;
1017 
1018 	error = platform_device_add_data(pdev, data, size);
1019 	if (error)
1020 		goto err_pdev_put;
1021 
1022 	error = platform_device_add(pdev);
1023 	if (error)
1024 		goto err_pdev_put;
1025 
1026 	error = __platform_driver_probe(driver, probe, module);
1027 	if (error)
1028 		goto err_pdev_del;
1029 
1030 	return pdev;
1031 
1032 err_pdev_del:
1033 	platform_device_del(pdev);
1034 err_pdev_put:
1035 	platform_device_put(pdev);
1036 err_out:
1037 	return ERR_PTR(error);
1038 }
1039 EXPORT_SYMBOL_GPL(__platform_create_bundle);
1040 
/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int idx;
	int ret = 0;

	for (idx = 0; idx < count; idx++) {
		pr_debug("registering platform driver %ps\n", drivers[idx]);

		ret = __platform_driver_register(drivers[idx], owner);
		if (ret >= 0)
			continue;

		pr_err("failed to register platform driver %ps: %d\n",
		       drivers[idx], ret);

		/* Unwind in reverse order everything registered so far. */
		while (idx--) {
			pr_debug("unregistering platform driver %ps\n",
				 drivers[idx]);
			platform_driver_unregister(drivers[idx]);
		}
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
1082 
1083 /**
1084  * platform_unregister_drivers - unregister an array of platform drivers
1085  * @drivers: an array of drivers to unregister
1086  * @count: the number of drivers to unregister
1087  *
1088  * Unregisters platform drivers specified by an array. This is typically used
1089  * to complement an earlier call to platform_register_drivers(). Drivers are
1090  * unregistered in the reverse order in which they were registered.
1091  */
1092 void platform_unregister_drivers(struct platform_driver * const *drivers,
1093 				 unsigned int count)
1094 {
1095 	while (count--) {
1096 		pr_debug("unregistering platform driver %ps\n", drivers[count]);
1097 		platform_driver_unregister(drivers[count]);
1098 	}
1099 }
1100 EXPORT_SYMBOL_GPL(platform_unregister_drivers);
1101 
1102 static const struct platform_device_id *platform_match_id(
1103 			const struct platform_device_id *id,
1104 			struct platform_device *pdev)
1105 {
1106 	while (id->name[0]) {
1107 		if (strcmp(pdev->name, id->name) == 0) {
1108 			pdev->id_entry = id;
1109 			return id;
1110 		}
1111 		id++;
1112 	}
1113 	return NULL;
1114 }
1115 
1116 #ifdef CONFIG_PM_SLEEP
1117 
1118 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1119 {
1120 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1121 	struct platform_device *pdev = to_platform_device(dev);
1122 	int ret = 0;
1123 
1124 	if (dev->driver && pdrv->suspend)
1125 		ret = pdrv->suspend(pdev, mesg);
1126 
1127 	return ret;
1128 }
1129 
1130 static int platform_legacy_resume(struct device *dev)
1131 {
1132 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1133 	struct platform_device *pdev = to_platform_device(dev);
1134 	int ret = 0;
1135 
1136 	if (dev->driver && pdrv->resume)
1137 		ret = pdrv->resume(pdev);
1138 
1139 	return ret;
1140 }
1141 
1142 #endif /* CONFIG_PM_SLEEP */
1143 
1144 #ifdef CONFIG_SUSPEND
1145 
1146 int platform_pm_suspend(struct device *dev)
1147 {
1148 	struct device_driver *drv = dev->driver;
1149 	int ret = 0;
1150 
1151 	if (!drv)
1152 		return 0;
1153 
1154 	if (drv->pm) {
1155 		if (drv->pm->suspend)
1156 			ret = drv->pm->suspend(dev);
1157 	} else {
1158 		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1159 	}
1160 
1161 	return ret;
1162 }
1163 
1164 int platform_pm_resume(struct device *dev)
1165 {
1166 	struct device_driver *drv = dev->driver;
1167 	int ret = 0;
1168 
1169 	if (!drv)
1170 		return 0;
1171 
1172 	if (drv->pm) {
1173 		if (drv->pm->resume)
1174 			ret = drv->pm->resume(dev);
1175 	} else {
1176 		ret = platform_legacy_resume(dev);
1177 	}
1178 
1179 	return ret;
1180 }
1181 
1182 #endif /* CONFIG_SUSPEND */
1183 
1184 #ifdef CONFIG_HIBERNATE_CALLBACKS
1185 
1186 int platform_pm_freeze(struct device *dev)
1187 {
1188 	struct device_driver *drv = dev->driver;
1189 	int ret = 0;
1190 
1191 	if (!drv)
1192 		return 0;
1193 
1194 	if (drv->pm) {
1195 		if (drv->pm->freeze)
1196 			ret = drv->pm->freeze(dev);
1197 	} else {
1198 		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1199 	}
1200 
1201 	return ret;
1202 }
1203 
1204 int platform_pm_thaw(struct device *dev)
1205 {
1206 	struct device_driver *drv = dev->driver;
1207 	int ret = 0;
1208 
1209 	if (!drv)
1210 		return 0;
1211 
1212 	if (drv->pm) {
1213 		if (drv->pm->thaw)
1214 			ret = drv->pm->thaw(dev);
1215 	} else {
1216 		ret = platform_legacy_resume(dev);
1217 	}
1218 
1219 	return ret;
1220 }
1221 
1222 int platform_pm_poweroff(struct device *dev)
1223 {
1224 	struct device_driver *drv = dev->driver;
1225 	int ret = 0;
1226 
1227 	if (!drv)
1228 		return 0;
1229 
1230 	if (drv->pm) {
1231 		if (drv->pm->poweroff)
1232 			ret = drv->pm->poweroff(dev);
1233 	} else {
1234 		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1235 	}
1236 
1237 	return ret;
1238 }
1239 
1240 int platform_pm_restore(struct device *dev)
1241 {
1242 	struct device_driver *drv = dev->driver;
1243 	int ret = 0;
1244 
1245 	if (!drv)
1246 		return 0;
1247 
1248 	if (drv->pm) {
1249 		if (drv->pm->restore)
1250 			ret = drv->pm->restore(dev);
1251 	} else {
1252 		ret = platform_legacy_resume(dev);
1253 	}
1254 
1255 	return ret;
1256 }
1257 
1258 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1259 
1260 /* modalias support enables more hands-off userspace setup:
1261  * (a) environment variable lets new-style hotplug events work once system is
1262  *     fully running:  "modprobe $MODALIAS"
1263  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
1264  *     mishandled before system is fully running:  "modprobe $(cat modalias)"
1265  */
1266 static ssize_t modalias_show(struct device *dev,
1267 			     struct device_attribute *attr, char *buf)
1268 {
1269 	struct platform_device *pdev = to_platform_device(dev);
1270 	int len;
1271 
1272 	len = of_device_modalias(dev, buf, PAGE_SIZE);
1273 	if (len != -ENODEV)
1274 		return len;
1275 
1276 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
1277 	if (len != -ENODEV)
1278 		return len;
1279 
1280 	return sysfs_emit(buf, "platform:%s\n", pdev->name);
1281 }
1282 static DEVICE_ATTR_RO(modalias);
1283 
/*
 * Report the device's NUMA node.  The attribute is hidden entirely by
 * platform_dev_attrs_visible() when the node is NUMA_NO_NODE.
 */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
1290 
/* Show the current driver_override string (blank line when unset). */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	/*
	 * Hold the device lock so a concurrent driver_override_store()
	 * cannot free the string while we are formatting it.
	 */
	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);

	return len;
}
1303 
/*
 * Set (or clear, with an empty string) the driver_override used by
 * platform_match() to force binding to one specific driver.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* Strip the trailing newline that echo(1) typically appends. */
	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	/*
	 * Swap the pointer under the device lock so lock-holding readers
	 * (e.g. driver_override_show()) never observe a freed string.
	 * An empty value clears the override.
	 */
	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	/* Free the previous override outside the lock. */
	kfree(old);

	return count;
}
static DEVICE_ATTR_RW(driver_override);
1338 
/* Default sysfs attributes present on every platform device. */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
1345 
1346 static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
1347 		int n)
1348 {
1349 	struct device *dev = container_of(kobj, typeof(*dev), kobj);
1350 
1351 	if (a == &dev_attr_numa_node.attr &&
1352 			dev_to_node(dev) == NUMA_NO_NODE)
1353 		return 0;
1354 
1355 	return a->mode;
1356 }
1357 
/* Attribute group wired into platform_bus_type via platform_dev_groups. */
static struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);
1363 
1364 
1365 /**
1366  * platform_match - bind platform device to platform driver.
1367  * @dev: device.
1368  * @drv: driver.
1369  *
1370  * Platform device IDs are assumed to be encoded like this:
1371  * "<name><instance>", where <name> is a short description of the type of
1372  * device, like "pci" or "floppy", and <instance> is the enumerated
1373  * instance of the device, like '0' or '42'.  Driver IDs are simply
1374  * "<name>".  So, extract the <name> from the platform_device structure,
1375  * and compare it against the name of the driver. Return whether they match
1376  * or not.
1377  */
1378 static int platform_match(struct device *dev, struct device_driver *drv)
1379 {
1380 	struct platform_device *pdev = to_platform_device(dev);
1381 	struct platform_driver *pdrv = to_platform_driver(drv);
1382 
1383 	/* When driver_override is set, only bind to the matching driver */
1384 	if (pdev->driver_override)
1385 		return !strcmp(pdev->driver_override, drv->name);
1386 
1387 	/* Attempt an OF style match first */
1388 	if (of_driver_match_device(dev, drv))
1389 		return 1;
1390 
1391 	/* Then try ACPI style match */
1392 	if (acpi_driver_match_device(dev, drv))
1393 		return 1;
1394 
1395 	/* Then try to match against the id table */
1396 	if (pdrv->id_table)
1397 		return platform_match_id(pdrv->id_table, pdev) != NULL;
1398 
1399 	/* fall-back to driver name match */
1400 	return (strcmp(pdev->name, drv->name) == 0);
1401 }
1402 
1403 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1404 {
1405 	struct platform_device	*pdev = to_platform_device(dev);
1406 	int rc;
1407 
1408 	/* Some devices have extra OF data and an OF-style MODALIAS */
1409 	rc = of_device_uevent_modalias(dev, env);
1410 	if (rc != -ENODEV)
1411 		return rc;
1412 
1413 	rc = acpi_device_uevent_modalias(dev, env);
1414 	if (rc != -ENODEV)
1415 		return rc;
1416 
1417 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1418 			pdev->name);
1419 	return 0;
1420 }
1421 
/*
 * Bus ->probe glue: prepare clocks and PM domains, then call the driver's
 * probe callback.  Undoes the PM-domain attach if the driver probe fails.
 */
static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered using platform_driver_probe() cannot be bound
	 * again later because the probe function usually lives in __init code
	 * and so is gone. For these drivers .probe is set to
	 * platform_probe_fail in __platform_driver_probe(). Don't even prepare
	 * clocks and PM domains for these to match the traditional behaviour.
	 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	/* May return -EPROBE_DEFER; handled via the check at "out" below. */
	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		/* On probe failure, release the PM domain attached above. */
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	/*
	 * Drivers that opted out of deferred probing turn -EPROBE_DEFER
	 * into a hard failure instead of being requeued.
	 */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}
1460 
1461 static int platform_remove(struct device *_dev)
1462 {
1463 	struct platform_driver *drv = to_platform_driver(_dev->driver);
1464 	struct platform_device *dev = to_platform_device(_dev);
1465 
1466 	if (drv->remove) {
1467 		int ret = drv->remove(dev);
1468 
1469 		if (ret)
1470 			dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
1471 	}
1472 	dev_pm_domain_detach(_dev, true);
1473 
1474 	return 0;
1475 }
1476 
1477 static void platform_shutdown(struct device *_dev)
1478 {
1479 	struct platform_device *dev = to_platform_device(_dev);
1480 	struct platform_driver *drv;
1481 
1482 	if (!_dev->driver)
1483 		return;
1484 
1485 	drv = to_platform_driver(_dev->driver);
1486 	if (drv->shutdown)
1487 		drv->shutdown(dev);
1488 }
1489 
1490 
1491 int platform_dma_configure(struct device *dev)
1492 {
1493 	enum dev_dma_attr attr;
1494 	int ret = 0;
1495 
1496 	if (dev->of_node) {
1497 		ret = of_dma_configure(dev, dev->of_node, true);
1498 	} else if (has_acpi_companion(dev)) {
1499 		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1500 		ret = acpi_dma_configure(dev, attr);
1501 	}
1502 
1503 	return ret;
1504 }
1505 
/* Generic runtime-PM callbacks plus the system-sleep ops defined above. */
static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};
1511 
/*
 * The platform "pseudo" bus: matching order is driver_override, OF, ACPI,
 * id table, then plain name (see platform_match()).
 */
struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
1524 
/* Adapter for bus_find_device(), which passes the driver as const void *. */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}
1529 
1530 /**
1531  * platform_find_device_by_driver - Find a platform device with a given
1532  * driver.
1533  * @start: The device to start the search from.
1534  * @drv: The device driver to look for.
1535  */
1536 struct device *platform_find_device_by_driver(struct device *start,
1537 					      const struct device_driver *drv)
1538 {
1539 	return bus_find_device(&platform_bus_type, start, drv,
1540 			       __platform_match);
1541 }
1542 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1543 
/* Overridable (weak) hook run from platform_bus_init(); default is a no-op. */
void __weak __init early_platform_cleanup(void) { }
1545 
1546 int __init platform_bus_init(void)
1547 {
1548 	int error;
1549 
1550 	early_platform_cleanup();
1551 
1552 	error = device_register(&platform_bus);
1553 	if (error) {
1554 		put_device(&platform_bus);
1555 		return error;
1556 	}
1557 	error =  bus_register(&platform_bus_type);
1558 	if (error)
1559 		device_unregister(&platform_bus);
1560 	of_platform_register_reconfig_notifier();
1561 	return error;
1562 }
1563