xref: /openbmc/linux/drivers/base/platform.c (revision afba8b0a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * platform.c - platform 'pseudo' bus for legacy devices
4  *
5  * Copyright (c) 2002-3 Patrick Mochel
6  * Copyright (c) 2002-3 Open Source Development Labs
7  *
8  * Please see Documentation/driver-api/driver-model/platform.rst for more
9  * information.
10  */
11 
12 #include <linux/string.h>
13 #include <linux/platform_device.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pm_domain.h>
24 #include <linux/idr.h>
25 #include <linux/acpi.h>
26 #include <linux/clk/clk-conf.h>
27 #include <linux/limits.h>
28 #include <linux/property.h>
29 #include <linux/kmemleak.h>
30 #include <linux/types.h>
31 
32 #include "base.h"
33 #include "power/power.h"
34 
/* For automatically allocated device IDs (PLATFORM_DEVID_AUTO) */
static DEFINE_IDA(platform_devid_ida);

/*
 * Root "platform" pseudo-device: used as the default parent for platform
 * devices registered without an explicit parent (see platform_device_add()).
 */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
42 
43 /**
44  * platform_get_resource - get a resource for a device
45  * @dev: platform device
46  * @type: resource type
47  * @num: resource index
48  *
49  * Return: a pointer to the resource or NULL on failure.
50  */
51 struct resource *platform_get_resource(struct platform_device *dev,
52 				       unsigned int type, unsigned int num)
53 {
54 	u32 i;
55 
56 	for (i = 0; i < dev->num_resources; i++) {
57 		struct resource *r = &dev->resource[i];
58 
59 		if (type == resource_type(r) && num-- == 0)
60 			return r;
61 	}
62 	return NULL;
63 }
64 EXPORT_SYMBOL_GPL(platform_get_resource);
65 
66 #ifdef CONFIG_HAS_IOMEM
67 /**
68  * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
69  *					    platform device and get resource
70  *
71  * @pdev: platform device to use both for memory resource lookup as well as
72  *        resource management
73  * @index: resource index
74  * @res: optional output parameter to store a pointer to the obtained resource.
75  *
76  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
77  * on failure.
78  */
79 void __iomem *
80 devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
81 				unsigned int index, struct resource **res)
82 {
83 	struct resource *r;
84 
85 	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
86 	if (res)
87 		*res = r;
88 	return devm_ioremap_resource(&pdev->dev, r);
89 }
90 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
91 
92 /**
93  * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
94  *				    device
95  *
96  * @pdev: platform device to use both for memory resource lookup as well as
97  *        resource management
98  * @index: resource index
99  *
100  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
101  * on failure.
102  */
103 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
104 					     unsigned int index)
105 {
106 	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
107 }
108 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
109 
110 /**
111  * devm_platform_ioremap_resource_wc - write-combined variant of
112  *                                     devm_platform_ioremap_resource()
113  *
114  * @pdev: platform device to use both for memory resource lookup as well as
115  *        resource management
116  * @index: resource index
117  *
118  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
119  * on failure.
120  */
121 void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
122 						unsigned int index)
123 {
124 	struct resource *res;
125 
126 	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
127 	return devm_ioremap_resource_wc(&pdev->dev, res);
128 }
129 
130 /**
131  * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
132  *					   a platform device, retrieve the
133  *					   resource by name
134  *
135  * @pdev: platform device to use both for memory resource lookup as well as
136  *	  resource management
137  * @name: name of the resource
138  *
139  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
140  * on failure.
141  */
142 void __iomem *
143 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
144 				      const char *name)
145 {
146 	struct resource *res;
147 
148 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
149 	return devm_ioremap_resource(&pdev->dev, res);
150 }
151 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
152 #endif /* CONFIG_HAS_IOMEM */
153 
/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct resource *r;

	/* Prefer a device-tree interrupt mapping when one is available. */
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		/* Let ACPI fill in a resource that was registered disabled. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd) {
			ret = -ENXIO;
			goto out;
		}
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	/* A plain IORESOURCE_IRQ resource carries the IRQ in r->start. */
	if (r) {
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	ret = -ENXIO;
#endif
out:
	/* IRQ 0 is not a valid return value for this API; flag it loudly. */
	WARN(ret == 0, "0 is an invalid IRQ number\n");
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
242 
243 /**
244  * platform_get_irq - get an IRQ for a device
245  * @dev: platform device
246  * @num: IRQ number index
247  *
248  * Gets an IRQ for a platform device and prints an error message if finding the
249  * IRQ fails. Device drivers should check the return value for errors so as to
250  * not pass a negative integer value to the request_irq() APIs.
251  *
252  * For example::
253  *
254  *		int irq = platform_get_irq(pdev, 0);
255  *		if (irq < 0)
256  *			return irq;
257  *
258  * Return: non-zero IRQ number on success, negative error number on failure.
259  */
260 int platform_get_irq(struct platform_device *dev, unsigned int num)
261 {
262 	int ret;
263 
264 	ret = platform_get_irq_optional(dev, num);
265 	if (ret < 0 && ret != -EPROBE_DEFER)
266 		dev_err(&dev->dev, "IRQ index %u not found\n", num);
267 
268 	return ret;
269 }
270 EXPORT_SYMBOL_GPL(platform_get_irq);
271 
272 /**
273  * platform_irq_count - Count the number of IRQs a platform device uses
274  * @dev: platform device
275  *
276  * Return: Number of IRQs a platform device uses or EPROBE_DEFER
277  */
278 int platform_irq_count(struct platform_device *dev)
279 {
280 	int ret, nr = 0;
281 
282 	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
283 		nr++;
284 
285 	if (ret == -EPROBE_DEFER)
286 		return ret;
287 
288 	return nr;
289 }
290 EXPORT_SYMBOL_GPL(platform_irq_count);
291 
292 /**
293  * platform_get_resource_byname - get a resource for a device by name
294  * @dev: platform device
295  * @type: resource type
296  * @name: resource name
297  */
298 struct resource *platform_get_resource_byname(struct platform_device *dev,
299 					      unsigned int type,
300 					      const char *name)
301 {
302 	u32 i;
303 
304 	for (i = 0; i < dev->num_resources; i++) {
305 		struct resource *r = &dev->resource[i];
306 
307 		if (unlikely(!r->name))
308 			continue;
309 
310 		if (type == resource_type(r) && !strcmp(r->name, name))
311 			return r;
312 	}
313 	return NULL;
314 }
315 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
316 
317 static int __platform_get_irq_byname(struct platform_device *dev,
318 				     const char *name)
319 {
320 	struct resource *r;
321 	int ret;
322 
323 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
324 		ret = of_irq_get_byname(dev->dev.of_node, name);
325 		if (ret > 0 || ret == -EPROBE_DEFER)
326 			return ret;
327 	}
328 
329 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
330 	if (r) {
331 		WARN(r->start == 0, "0 is an invalid IRQ number\n");
332 		return r->start;
333 	}
334 
335 	return -ENXIO;
336 }
337 
338 /**
339  * platform_get_irq_byname - get an IRQ for a device by name
340  * @dev: platform device
341  * @name: IRQ name
342  *
343  * Get an IRQ like platform_get_irq(), but then by name rather then by index.
344  *
345  * Return: non-zero IRQ number on success, negative error number on failure.
346  */
347 int platform_get_irq_byname(struct platform_device *dev, const char *name)
348 {
349 	int ret;
350 
351 	ret = __platform_get_irq_byname(dev, name);
352 	if (ret < 0 && ret != -EPROBE_DEFER)
353 		dev_err(&dev->dev, "IRQ %s not found\n", name);
354 
355 	return ret;
356 }
357 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
358 
/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	/* Silent variant: callers handle failure themselves. */
	int irq = __platform_get_irq_byname(dev, name);

	return irq;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
375 
/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Return: 0 on success, or the first registration error after unwinding
 * all previously registered devices.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		int err = platform_device_register(devs[i]);

		if (err) {
			/* Unregister everything added so far, in reverse. */
			while (i-- > 0)
				platform_device_unregister(devs[i]);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
397 
/*
 * Backing allocation for devices created via platform_device_alloc():
 * the platform_device and the storage for its name live in one chunk,
 * freed together in platform_device_release().
 */
struct platform_object {
	struct platform_device pdev;
	char name[];	/* flexible array holding the device's base name */
};
402 
403 /*
404  * Set up default DMA mask for platform devices if the they weren't
405  * previously set by the architecture / DT.
406  */
407 static void setup_pdev_dma_masks(struct platform_device *pdev)
408 {
409 	pdev->dev.dma_parms = &pdev->dma_parms;
410 
411 	if (!pdev->dev.coherent_dma_mask)
412 		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
413 	if (!pdev->dev.dma_mask) {
414 		pdev->platform_dma_mask = DMA_BIT_MASK(32);
415 		pdev->dev.dma_mask = &pdev->platform_dma_mask;
416 	}
417 };
418 
419 /**
420  * platform_device_put - destroy a platform device
421  * @pdev: platform device to free
422  *
423  * Free all memory associated with a platform device.  This function must
424  * _only_ be externally called in error cases.  All other usage is a bug.
425  */
426 void platform_device_put(struct platform_device *pdev)
427 {
428 	if (!IS_ERR_OR_NULL(pdev))
429 		put_device(&pdev->dev);
430 }
431 EXPORT_SYMBOL_GPL(platform_device_put);
432 
/*
 * Release callback for devices created via platform_device_alloc():
 * drops the OF node reference and frees every allocation owned by the
 * pdev before freeing the containing platform_object itself.
 */
static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	/* Must be last: pa embeds everything referenced above. */
	kfree(pa);
}
445 
446 /**
447  * platform_device_alloc - create a platform device
448  * @name: base name of the device we're adding
449  * @id: instance id
450  *
451  * Create a platform device object which can have other objects attached
452  * to it, and which will have attached objects freed when it is released.
453  */
454 struct platform_device *platform_device_alloc(const char *name, int id)
455 {
456 	struct platform_object *pa;
457 
458 	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
459 	if (pa) {
460 		strcpy(pa->name, name);
461 		pa->pdev.name = pa->name;
462 		pa->pdev.id = id;
463 		device_initialize(&pa->pdev.dev);
464 		pa->pdev.dev.release = platform_device_release;
465 		setup_pdev_dma_masks(&pa->pdev);
466 	}
467 
468 	return pa ? &pa->pdev : NULL;
469 }
470 EXPORT_SYMBOL_GPL(platform_device_alloc);
471 
472 /**
473  * platform_device_add_resources - add resources to a platform device
474  * @pdev: platform device allocated by platform_device_alloc to add resources to
475  * @res: set of resources that needs to be allocated for the device
476  * @num: number of resources
477  *
478  * Add a copy of the resources to the platform device.  The memory
479  * associated with the resources will be freed when the platform device is
480  * released.
481  */
482 int platform_device_add_resources(struct platform_device *pdev,
483 				  const struct resource *res, unsigned int num)
484 {
485 	struct resource *r = NULL;
486 
487 	if (res) {
488 		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
489 		if (!r)
490 			return -ENOMEM;
491 	}
492 
493 	kfree(pdev->resource);
494 	pdev->resource = r;
495 	pdev->num_resources = num;
496 	return 0;
497 }
498 EXPORT_SYMBOL_GPL(platform_device_add_resources);
499 
500 /**
501  * platform_device_add_data - add platform-specific data to a platform device
502  * @pdev: platform device allocated by platform_device_alloc to add resources to
503  * @data: platform specific data for this platform device
504  * @size: size of platform specific data
505  *
506  * Add a copy of platform specific data to the platform device's
507  * platform_data pointer.  The memory associated with the platform data
508  * will be freed when the platform device is released.
509  */
510 int platform_device_add_data(struct platform_device *pdev, const void *data,
511 			     size_t size)
512 {
513 	void *d = NULL;
514 
515 	if (data) {
516 		d = kmemdup(data, size, GFP_KERNEL);
517 		if (!d)
518 			return -ENOMEM;
519 	}
520 
521 	kfree(pdev->dev.platform_data);
522 	pdev->dev.platform_data = d;
523 	return 0;
524 }
525 EXPORT_SYMBOL_GPL(platform_device_add_data);
526 
/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take deep copy of @properties and attach the copy to the
 * platform device. The memory associated with properties will be freed when the
 * platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	/* Thin wrapper: the copy and lifetime handling live in the core. */
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
542 
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	/* Default to the platform pseudo-bus as parent. */
	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim each resource, parenting it under iomem/ioport as needed. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Return the auto-allocated ID and restore the AUTO marker. */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/* Release only the resources claimed above (indices < i). */
	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
631 
632 /**
633  * platform_device_del - remove a platform-level device
634  * @pdev: platform device we're removing
635  *
636  * Note that this function will also release all memory- and port-based
637  * resources owned by the device (@dev->resource).  This function must
638  * _only_ be externally called in error cases.  All other usage is a bug.
639  */
640 void platform_device_del(struct platform_device *pdev)
641 {
642 	u32 i;
643 
644 	if (!IS_ERR_OR_NULL(pdev)) {
645 		device_del(&pdev->dev);
646 
647 		if (pdev->id_auto) {
648 			ida_free(&platform_devid_ida, pdev->id);
649 			pdev->id = PLATFORM_DEVID_AUTO;
650 		}
651 
652 		for (i = 0; i < pdev->num_resources; i++) {
653 			struct resource *r = &pdev->resource[i];
654 			if (r->parent)
655 				release_resource(r);
656 		}
657 	}
658 }
659 EXPORT_SYMBOL_GPL(platform_device_del);
660 
/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * Initializes the embedded struct device and default DMA masks, then
 * performs the actual registration via platform_device_add().
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
672 
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	/* May free pdev if this was the last reference. */
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
687 
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* Take a reference on the OF node (if the fwnode wraps one). */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
		/* Shared error path: earlier gotos jump into this branch. */
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		/* put frees pdev and everything attached above */
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
745 
/*
 * Bus-level probe shim: applies DT clock defaults, attaches the PM
 * domain, then calls the driver's own probe(), detaching the domain
 * again if probe fails.
 */
static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	/*
	 * Drivers registered via __platform_driver_probe() cannot be
	 * re-probed, so deferral is converted into a hard failure.
	 */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}
774 
/* Probe stub installed by __platform_driver_probe() to refuse late binds. */
static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}
779 
780 static int platform_drv_remove(struct device *_dev)
781 {
782 	struct platform_driver *drv = to_platform_driver(_dev->driver);
783 	struct platform_device *dev = to_platform_device(_dev);
784 	int ret = 0;
785 
786 	if (drv->remove)
787 		ret = drv->remove(dev);
788 	dev_pm_domain_detach(_dev, true);
789 
790 	return ret;
791 }
792 
793 static void platform_drv_shutdown(struct device *_dev)
794 {
795 	struct platform_driver *drv = to_platform_driver(_dev->driver);
796 	struct platform_device *dev = to_platform_device(_dev);
797 
798 	if (drv->shutdown)
799 		drv->shutdown(dev);
800 }
801 
/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 *
 * Wires the platform bus shims (probe/remove/shutdown) into the embedded
 * struct device_driver before handing it to the driver core.
 */
int __platform_driver_register(struct platform_driver *drv,
				struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
819 
/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
829 
/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices.  Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	/* No device bound during registration means there's nothing to drive. */
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	/* Undo the registration if nothing bound (or it failed outright). */
	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
901 
/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	/* id -1 (PLATFORM_DEVID_NONE): single instance, no id suffix. */
	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	/* Fails (and unwinds) unless the driver binds to the new device. */
	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
958 
/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			break;
		}
	}

	if (err >= 0)
		return 0;

	/* Roll back everything registered before the failure, in reverse. */
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
1000 
/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	unsigned int i = count;

	/* Walk the array backwards to mirror registration order. */
	while (i > 0) {
		i--;
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
1019 
/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	/* Try OF first, then ACPI; fall back to the plain platform alias. */
	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	/* NOTE(review): ACPI variant is passed PAGE_SIZE - 1 while the OF
	 * variant gets PAGE_SIZE — presumably to reserve room for a
	 * terminator; confirm against acpi_device_modalias(). */
	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);
1043 
/*
 * "driver_override" sysfs store: set (or clear, with an empty write) the
 * driver name this device must bind to; see platform_match().
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* Truncate at the first newline; sysfs writes usually carry one */
	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	/*
	 * Swap the pointer under device_lock() so a concurrent
	 * driver_override_show() (which also takes the lock) can never see
	 * a freed or half-updated string.
	 */
	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		/* An empty write clears any existing override */
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	/* Free the previous override outside the lock */
	kfree(old);

	return count;
}
1077 
1078 static ssize_t driver_override_show(struct device *dev,
1079 				    struct device_attribute *attr, char *buf)
1080 {
1081 	struct platform_device *pdev = to_platform_device(dev);
1082 	ssize_t len;
1083 
1084 	device_lock(dev);
1085 	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
1086 	device_unlock(dev);
1087 
1088 	return len;
1089 }
1090 static DEVICE_ATTR_RW(driver_override);
1091 
1092 static ssize_t numa_node_show(struct device *dev,
1093 			      struct device_attribute *attr, char *buf)
1094 {
1095 	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
1096 }
1097 static DEVICE_ATTR_RO(numa_node);
1098 
1099 static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
1100 		int n)
1101 {
1102 	struct device *dev = container_of(kobj, typeof(*dev), kobj);
1103 
1104 	if (a == &dev_attr_numa_node.attr &&
1105 			dev_to_node(dev) == NUMA_NO_NODE)
1106 		return 0;
1107 
1108 	return a->mode;
1109 }
1110 
/* Default sysfs attributes created for every platform device */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	/* hides numa_node when dev_to_node() == NUMA_NO_NODE */
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);
1123 
1124 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1125 {
1126 	struct platform_device	*pdev = to_platform_device(dev);
1127 	int rc;
1128 
1129 	/* Some devices have extra OF data and an OF-style MODALIAS */
1130 	rc = of_device_uevent_modalias(dev, env);
1131 	if (rc != -ENODEV)
1132 		return rc;
1133 
1134 	rc = acpi_device_uevent_modalias(dev, env);
1135 	if (rc != -ENODEV)
1136 		return rc;
1137 
1138 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1139 			pdev->name);
1140 	return 0;
1141 }
1142 
1143 static const struct platform_device_id *platform_match_id(
1144 			const struct platform_device_id *id,
1145 			struct platform_device *pdev)
1146 {
1147 	while (id->name[0]) {
1148 		if (strcmp(pdev->name, id->name) == 0) {
1149 			pdev->id_entry = id;
1150 			return id;
1151 		}
1152 		id++;
1153 	}
1154 	return NULL;
1155 }
1156 
1157 /**
1158  * platform_match - bind platform device to platform driver.
1159  * @dev: device.
1160  * @drv: driver.
1161  *
1162  * Platform device IDs are assumed to be encoded like this:
1163  * "<name><instance>", where <name> is a short description of the type of
1164  * device, like "pci" or "floppy", and <instance> is the enumerated
1165  * instance of the device, like '0' or '42'.  Driver IDs are simply
1166  * "<name>".  So, extract the <name> from the platform_device structure,
1167  * and compare it against the name of the driver. Return whether they match
1168  * or not.
1169  */
1170 static int platform_match(struct device *dev, struct device_driver *drv)
1171 {
1172 	struct platform_device *pdev = to_platform_device(dev);
1173 	struct platform_driver *pdrv = to_platform_driver(drv);
1174 
1175 	/* When driver_override is set, only bind to the matching driver */
1176 	if (pdev->driver_override)
1177 		return !strcmp(pdev->driver_override, drv->name);
1178 
1179 	/* Attempt an OF style match first */
1180 	if (of_driver_match_device(dev, drv))
1181 		return 1;
1182 
1183 	/* Then try ACPI style match */
1184 	if (acpi_driver_match_device(dev, drv))
1185 		return 1;
1186 
1187 	/* Then try to match against the id table */
1188 	if (pdrv->id_table)
1189 		return platform_match_id(pdrv->id_table, pdev) != NULL;
1190 
1191 	/* fall-back to driver name match */
1192 	return (strcmp(pdev->name, drv->name) == 0);
1193 }
1194 
1195 #ifdef CONFIG_PM_SLEEP
1196 
1197 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1198 {
1199 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1200 	struct platform_device *pdev = to_platform_device(dev);
1201 	int ret = 0;
1202 
1203 	if (dev->driver && pdrv->suspend)
1204 		ret = pdrv->suspend(pdev, mesg);
1205 
1206 	return ret;
1207 }
1208 
1209 static int platform_legacy_resume(struct device *dev)
1210 {
1211 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1212 	struct platform_device *pdev = to_platform_device(dev);
1213 	int ret = 0;
1214 
1215 	if (dev->driver && pdrv->resume)
1216 		ret = pdrv->resume(pdev);
1217 
1218 	return ret;
1219 }
1220 
1221 #endif /* CONFIG_PM_SLEEP */
1222 
1223 #ifdef CONFIG_SUSPEND
1224 
1225 int platform_pm_suspend(struct device *dev)
1226 {
1227 	struct device_driver *drv = dev->driver;
1228 	int ret = 0;
1229 
1230 	if (!drv)
1231 		return 0;
1232 
1233 	if (drv->pm) {
1234 		if (drv->pm->suspend)
1235 			ret = drv->pm->suspend(dev);
1236 	} else {
1237 		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1238 	}
1239 
1240 	return ret;
1241 }
1242 
1243 int platform_pm_resume(struct device *dev)
1244 {
1245 	struct device_driver *drv = dev->driver;
1246 	int ret = 0;
1247 
1248 	if (!drv)
1249 		return 0;
1250 
1251 	if (drv->pm) {
1252 		if (drv->pm->resume)
1253 			ret = drv->pm->resume(dev);
1254 	} else {
1255 		ret = platform_legacy_resume(dev);
1256 	}
1257 
1258 	return ret;
1259 }
1260 
1261 #endif /* CONFIG_SUSPEND */
1262 
1263 #ifdef CONFIG_HIBERNATE_CALLBACKS
1264 
1265 int platform_pm_freeze(struct device *dev)
1266 {
1267 	struct device_driver *drv = dev->driver;
1268 	int ret = 0;
1269 
1270 	if (!drv)
1271 		return 0;
1272 
1273 	if (drv->pm) {
1274 		if (drv->pm->freeze)
1275 			ret = drv->pm->freeze(dev);
1276 	} else {
1277 		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1278 	}
1279 
1280 	return ret;
1281 }
1282 
1283 int platform_pm_thaw(struct device *dev)
1284 {
1285 	struct device_driver *drv = dev->driver;
1286 	int ret = 0;
1287 
1288 	if (!drv)
1289 		return 0;
1290 
1291 	if (drv->pm) {
1292 		if (drv->pm->thaw)
1293 			ret = drv->pm->thaw(dev);
1294 	} else {
1295 		ret = platform_legacy_resume(dev);
1296 	}
1297 
1298 	return ret;
1299 }
1300 
1301 int platform_pm_poweroff(struct device *dev)
1302 {
1303 	struct device_driver *drv = dev->driver;
1304 	int ret = 0;
1305 
1306 	if (!drv)
1307 		return 0;
1308 
1309 	if (drv->pm) {
1310 		if (drv->pm->poweroff)
1311 			ret = drv->pm->poweroff(dev);
1312 	} else {
1313 		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1314 	}
1315 
1316 	return ret;
1317 }
1318 
1319 int platform_pm_restore(struct device *dev)
1320 {
1321 	struct device_driver *drv = dev->driver;
1322 	int ret = 0;
1323 
1324 	if (!drv)
1325 		return 0;
1326 
1327 	if (drv->pm) {
1328 		if (drv->pm->restore)
1329 			ret = drv->pm->restore(dev);
1330 	} else {
1331 		ret = platform_legacy_resume(dev);
1332 	}
1333 
1334 	return ret;
1335 }
1336 
1337 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1338 
1339 int platform_dma_configure(struct device *dev)
1340 {
1341 	enum dev_dma_attr attr;
1342 	int ret = 0;
1343 
1344 	if (dev->of_node) {
1345 		ret = of_dma_configure(dev, dev->of_node, true);
1346 	} else if (has_acpi_companion(dev)) {
1347 		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1348 		ret = acpi_dma_configure(dev, attr);
1349 	}
1350 
1351 	return ret;
1352 }
1353 
/* Bus PM ops: generic runtime PM plus the platform sleep ops defined above */
static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};
1359 
/* The platform bus itself, tying together the callbacks defined above */
struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
1369 
/*
 * bus_find_device() predicate: adapt platform_match() to the iterator's
 * (struct device *, const void *) signature by casting away const.
 */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}
1374 
1375 /**
1376  * platform_find_device_by_driver - Find a platform device with a given
1377  * driver.
1378  * @start: The device to start the search from.
1379  * @drv: The device driver to look for.
1380  */
1381 struct device *platform_find_device_by_driver(struct device *start,
1382 					      const struct device_driver *drv)
1383 {
1384 	return bus_find_device(&platform_bus_type, start, drv,
1385 			       __platform_match);
1386 }
1387 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1388 
/* Weak no-op hook; arch code may override it to run before bus setup */
void __weak __init early_platform_cleanup(void) { }
1390 
/*
 * Boot-time initialization: register the root "platform" device and then
 * the platform bus type itself.
 */
int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		/* a failed device_register() still needs the final put */
		put_device(&platform_bus);
		return error;
	}
	error =  bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	/*
	 * NOTE(review): the OF reconfig notifier is registered even when
	 * bus_register() failed above — confirm this is intentional.
	 */
	of_platform_register_reconfig_notifier();
	return error;
}
1408