xref: /openbmc/linux/drivers/base/platform.c (revision e72e8bf1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * platform.c - platform 'pseudo' bus for legacy devices
4  *
5  * Copyright (c) 2002-3 Patrick Mochel
6  * Copyright (c) 2002-3 Open Source Development Labs
7  *
8  * Please see Documentation/driver-api/driver-model/platform.rst for more
9  * information.
10  */
11 
12 #include <linux/string.h>
13 #include <linux/platform_device.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pm_domain.h>
24 #include <linux/idr.h>
25 #include <linux/acpi.h>
26 #include <linux/clk/clk-conf.h>
27 #include <linux/limits.h>
28 #include <linux/property.h>
29 #include <linux/kmemleak.h>
30 #include <linux/types.h>
31 
32 #include "base.h"
33 #include "power/power.h"
34 
35 /* For automatically allocated device IDs */
36 static DEFINE_IDA(platform_devid_ida);
37 
38 struct device platform_bus = {
39 	.init_name	= "platform",
40 };
41 EXPORT_SYMBOL_GPL(platform_bus);
42 
43 /**
44  * platform_get_resource - get a resource for a device
45  * @dev: platform device
46  * @type: resource type
47  * @num: resource index
48  */
49 struct resource *platform_get_resource(struct platform_device *dev,
50 				       unsigned int type, unsigned int num)
51 {
52 	u32 i;
53 
54 	for (i = 0; i < dev->num_resources; i++) {
55 		struct resource *r = &dev->resource[i];
56 
57 		if (type == resource_type(r) && num-- == 0)
58 			return r;
59 	}
60 	return NULL;
61 }
62 EXPORT_SYMBOL_GPL(platform_get_resource);
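
/*
 * A minimal usage sketch, assuming "pdev" is the argument of a driver's
 * probe() callback:
 *
 *	struct resource *regs;
 *
 *	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	if (!regs)
 *		return -ENODEV;
 *	dev_dbg(&pdev->dev, "MMIO window %pR\n", regs);
 */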
63 
64 #ifdef CONFIG_HAS_IOMEM
65 /**
66  * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
67  *					    platform device and get resource
68  *
69  * @pdev: platform device to use both for memory resource lookup and
70  *        resource management
71  * @index: resource index
72  * @res: optional output parameter to store a pointer to the obtained resource.
73  */
74 void __iomem *
75 devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
76 				unsigned int index, struct resource **res)
77 {
78 	struct resource *r;
79 
80 	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
81 	if (res)
82 		*res = r;
83 	return devm_ioremap_resource(&pdev->dev, r);
84 }
85 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
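
/*
 * A sketch of the typical probe() pattern when the caller also needs the
 * resource itself (e.g. for resource_size()); "pdev" comes from probe():
 *
 *	struct resource *res;
 *	void __iomem *base;
 *
 *	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *	... resource_size(res) can now be used for bounds checks ...
 */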
86 
87 /**
88  * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
89  *				    device
90  *
91  * @pdev: platform device to use both for memory resource lookup and
92  *        resource management
93  * @index: resource index
94  */
95 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
96 					     unsigned int index)
97 {
98 	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
99 }
100 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
101 
102 /**
103  * devm_platform_ioremap_resource_wc - write-combined variant of
104  *                                     devm_platform_ioremap_resource()
105  *
106  * @pdev: platform device to use both for memory resource lookup and
107  *        resource management
108  * @index: resource index
109  */
110 void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
111 						unsigned int index)
112 {
113 	struct resource *res;
114 
115 	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
116 	return devm_ioremap_resource_wc(&pdev->dev, res);
117 }
118 
119 /**
120  * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
121  *					   a platform device, retrieve the
122  *					   resource by name
123  *
124  * @pdev: platform device to use both for memory resource lookup and
125  *	  resource management
126  * @name: name of the resource
127  */
128 void __iomem *
129 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
130 				      const char *name)
131 {
132 	struct resource *res;
133 
134 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
135 	return devm_ioremap_resource(&pdev->dev, res);
136 }
137 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
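
/*
 * A sketch of the by-name variant; the resource name "ctrl" is purely
 * illustrative and must match a named IORESOURCE_MEM entry of the device:
 *
 *	void __iomem *ctrl;
 *
 *	ctrl = devm_platform_ioremap_resource_byname(pdev, "ctrl");
 *	if (IS_ERR(ctrl))
 *		return PTR_ERR(ctrl);
 */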
138 #endif /* CONFIG_HAS_IOMEM */
139 
140 /**
141  * platform_get_irq_optional - get an optional IRQ for a device
142  * @dev: platform device
143  * @num: IRQ number index
144  *
145  * Gets an IRQ for a platform device. Device drivers should check the return
146  * value for errors so as to not pass a negative integer value to the
147  * request_irq() APIs. This is the same as platform_get_irq(), except that it
148  * does not print an error message if an IRQ cannot be obtained.
149  *
150  * Example:
151  *		int irq = platform_get_irq_optional(pdev, 0);
152  *		if (irq < 0)
153  *			return irq;
154  *
155  * Return: IRQ number on success, negative error number on failure.
156  */
157 int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
158 {
159 #ifdef CONFIG_SPARC
160 	/* SPARC does not have IRQs represented as IORESOURCE_IRQ resources */
161 	if (!dev || num >= dev->archdata.num_irqs)
162 		return -ENXIO;
163 	return dev->archdata.irqs[num];
164 #else
165 	struct resource *r;
166 	int ret;
167 
168 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
169 		ret = of_irq_get(dev->dev.of_node, num);
170 		if (ret > 0 || ret == -EPROBE_DEFER)
171 			return ret;
172 	}
173 
174 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
175 	if (has_acpi_companion(&dev->dev)) {
176 		if (r && r->flags & IORESOURCE_DISABLED) {
177 			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
178 			if (ret)
179 				return ret;
180 		}
181 	}
182 
183 	/*
184 	 * The resources may pass trigger flags to the irqs that need
185 	 * to be set up. It so happens that the trigger flags for
186 	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
187 	 * settings.
188 	 */
189 	if (r && r->flags & IORESOURCE_BITS) {
190 		struct irq_data *irqd;
191 
192 		irqd = irq_get_irq_data(r->start);
193 		if (!irqd)
194 			return -ENXIO;
195 		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
196 	}
197 
198 	if (r)
199 		return r->start;
200 
201 	/*
202 	 * For the index 0 interrupt, allow falling back to GpioInt
203 	 * resources. While a device could have both Interrupt and GpioInt
204 	 * resources, making this fallback ambiguous, in many common cases
205 	 * the device will only expose one IRQ, and this fallback
206 	 * allows a common code path across either kind of resource.
207 	 */
208 	if (num == 0 && has_acpi_companion(&dev->dev)) {
209 		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
210 		/* Our callers expect -ENXIO for missing IRQs. */
211 		if (ret >= 0 || ret == -EPROBE_DEFER)
212 			return ret;
213 	}
214 
215 	return -ENXIO;
216 #endif
217 }
218 EXPORT_SYMBOL_GPL(platform_get_irq_optional);
219 
220 /**
221  * platform_get_irq - get an IRQ for a device
222  * @dev: platform device
223  * @num: IRQ number index
224  *
225  * Gets an IRQ for a platform device and prints an error message if finding the
226  * IRQ fails. Device drivers should check the return value for errors so as to
227  * not pass a negative integer value to the request_irq() APIs.
228  *
229  * Example:
230  *		int irq = platform_get_irq(pdev, 0);
231  *		if (irq < 0)
232  *			return irq;
233  *
234  * Return: IRQ number on success, negative error number on failure.
235  */
236 int platform_get_irq(struct platform_device *dev, unsigned int num)
237 {
238 	int ret;
239 
240 	ret = platform_get_irq_optional(dev, num);
241 	if (ret < 0 && ret != -EPROBE_DEFER)
242 		dev_err(&dev->dev, "IRQ index %u not found\n", num);
243 
244 	return ret;
245 }
246 EXPORT_SYMBOL_GPL(platform_get_irq);
247 
248 /**
249  * platform_irq_count - Count the number of IRQs a platform device uses
250  * @dev: platform device
251  *
252  * Return: Number of IRQs a platform device uses or EPROBE_DEFER
253  */
254 int platform_irq_count(struct platform_device *dev)
255 {
256 	int ret, nr = 0;
257 
258 	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
259 		nr++;
260 
261 	if (ret == -EPROBE_DEFER)
262 		return ret;
263 
264 	return nr;
265 }
266 EXPORT_SYMBOL_GPL(platform_irq_count);
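
/*
 * A sketch of iterating over all IRQs of a device with platform_irq_count();
 * "pdev" is assumed to come from probe():
 *
 *	int i, nr;
 *
 *	nr = platform_irq_count(pdev);
 *	if (nr < 0)
 *		return nr;
 *
 *	for (i = 0; i < nr; i++) {
 *		int irq = platform_get_irq(pdev, i);
 *
 *		if (irq < 0)
 *			return irq;
 *		... request or store irq ...
 *	}
 */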
267 
268 /**
269  * platform_get_resource_byname - get a resource for a device by name
270  * @dev: platform device
271  * @type: resource type
272  * @name: resource name
273  */
274 struct resource *platform_get_resource_byname(struct platform_device *dev,
275 					      unsigned int type,
276 					      const char *name)
277 {
278 	u32 i;
279 
280 	for (i = 0; i < dev->num_resources; i++) {
281 		struct resource *r = &dev->resource[i];
282 
283 		if (unlikely(!r->name))
284 			continue;
285 
286 		if (type == resource_type(r) && !strcmp(r->name, name))
287 			return r;
288 	}
289 	return NULL;
290 }
291 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
292 
293 static int __platform_get_irq_byname(struct platform_device *dev,
294 				     const char *name)
295 {
296 	struct resource *r;
297 	int ret;
298 
299 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
300 		ret = of_irq_get_byname(dev->dev.of_node, name);
301 		if (ret > 0 || ret == -EPROBE_DEFER)
302 			return ret;
303 	}
304 
305 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
306 	if (r)
307 		return r->start;
308 
309 	return -ENXIO;
310 }
311 
312 /**
313  * platform_get_irq_byname - get an IRQ for a device by name
314  * @dev: platform device
315  * @name: IRQ name
316  *
317  * Get an IRQ like platform_get_irq(), but by name rather than by index.
318  *
319  * Return: IRQ number on success, negative error number on failure.
320  */
321 int platform_get_irq_byname(struct platform_device *dev, const char *name)
322 {
323 	int ret;
324 
325 	ret = __platform_get_irq_byname(dev, name);
326 	if (ret < 0 && ret != -EPROBE_DEFER)
327 		dev_err(&dev->dev, "IRQ %s not found\n", name);
328 
329 	return ret;
330 }
331 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
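
/*
 * A sketch of looking up named interrupts; the names "tx" and "rx" are
 * illustrative and must match the interrupt names provided for the device:
 *
 *	int tx_irq, rx_irq;
 *
 *	tx_irq = platform_get_irq_byname(pdev, "tx");
 *	if (tx_irq < 0)
 *		return tx_irq;
 *
 *	rx_irq = platform_get_irq_byname(pdev, "rx");
 *	if (rx_irq < 0)
 *		return rx_irq;
 */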
332 
333 /**
334  * platform_get_irq_byname_optional - get an optional IRQ for a device by name
335  * @dev: platform device
336  * @name: IRQ name
337  *
338  * Get an optional IRQ by name like platform_get_irq_byname(), except that it
339  * does not print an error message if an IRQ cannot be obtained.
340  *
341  * Return: IRQ number on success, negative error number on failure.
342  */
343 int platform_get_irq_byname_optional(struct platform_device *dev,
344 				     const char *name)
345 {
346 	return __platform_get_irq_byname(dev, name);
347 }
348 EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
349 
350 /**
351  * platform_add_devices - add a number of platform devices
352  * @devs: array of platform devices to add
353  * @num: number of platform devices in array
354  */
355 int platform_add_devices(struct platform_device **devs, int num)
356 {
357 	int i, ret = 0;
358 
359 	for (i = 0; i < num; i++) {
360 		ret = platform_device_register(devs[i]);
361 		if (ret) {
362 			while (--i >= 0)
363 				platform_device_unregister(devs[i]);
364 			break;
365 		}
366 	}
367 
368 	return ret;
369 }
370 EXPORT_SYMBOL_GPL(platform_add_devices);
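
/*
 * A sketch of board-setup style usage, assuming foo_device and bar_device
 * are statically defined platform devices (hypothetical names):
 *
 *	static struct platform_device *board_devs[] __initdata = {
 *		&foo_device,
 *		&bar_device,
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return platform_add_devices(board_devs,
 *					    ARRAY_SIZE(board_devs));
 *	}
 */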
371 
372 struct platform_object {
373 	struct platform_device pdev;
374 	char name[];
375 };
376 
377 /*
378  * Set up default DMA mask for platform devices if they weren't
379  * previously set by the architecture / DT.
380  */
381 static void setup_pdev_dma_masks(struct platform_device *pdev)
382 {
383 	if (!pdev->dev.coherent_dma_mask)
384 		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
385 	if (!pdev->dev.dma_mask) {
386 		pdev->platform_dma_mask = DMA_BIT_MASK(32);
387 		pdev->dev.dma_mask = &pdev->platform_dma_mask;
388 	}
389 }
390 
391 /**
392  * platform_device_put - destroy a platform device
393  * @pdev: platform device to free
394  *
395  * Free all memory associated with a platform device.  This function must
396  * _only_ be externally called in error cases.  All other usage is a bug.
397  */
398 void platform_device_put(struct platform_device *pdev)
399 {
400 	if (!IS_ERR_OR_NULL(pdev))
401 		put_device(&pdev->dev);
402 }
403 EXPORT_SYMBOL_GPL(platform_device_put);
404 
405 static void platform_device_release(struct device *dev)
406 {
407 	struct platform_object *pa = container_of(dev, struct platform_object,
408 						  pdev.dev);
409 
410 	of_device_node_put(&pa->pdev.dev);
411 	kfree(pa->pdev.dev.platform_data);
412 	kfree(pa->pdev.mfd_cell);
413 	kfree(pa->pdev.resource);
414 	kfree(pa->pdev.driver_override);
415 	kfree(pa);
416 }
417 
418 /**
419  * platform_device_alloc - create a platform device
420  * @name: base name of the device we're adding
421  * @id: instance id
422  *
423  * Create a platform device object which can have other objects attached
424  * to it, and which will have attached objects freed when it is released.
425  */
426 struct platform_device *platform_device_alloc(const char *name, int id)
427 {
428 	struct platform_object *pa;
429 
430 	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
431 	if (pa) {
432 		strcpy(pa->name, name);
433 		pa->pdev.name = pa->name;
434 		pa->pdev.id = id;
435 		device_initialize(&pa->pdev.dev);
436 		pa->pdev.dev.release = platform_device_release;
437 		setup_pdev_dma_masks(&pa->pdev);
438 	}
439 
440 	return pa ? &pa->pdev : NULL;
441 }
442 EXPORT_SYMBOL_GPL(platform_device_alloc);
443 
444 /**
445  * platform_device_add_resources - add resources to a platform device
446  * @pdev: platform device allocated by platform_device_alloc to add resources to
447  * @res: set of resources that needs to be allocated for the device
448  * @num: number of resources
449  *
450  * Add a copy of the resources to the platform device.  The memory
451  * associated with the resources will be freed when the platform device is
452  * released.
453  */
454 int platform_device_add_resources(struct platform_device *pdev,
455 				  const struct resource *res, unsigned int num)
456 {
457 	struct resource *r = NULL;
458 
459 	if (res) {
460 		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
461 		if (!r)
462 			return -ENOMEM;
463 	}
464 
465 	kfree(pdev->resource);
466 	pdev->resource = r;
467 	pdev->num_resources = num;
468 	return 0;
469 }
470 EXPORT_SYMBOL_GPL(platform_device_add_resources);
471 
472 /**
473  * platform_device_add_data - add platform-specific data to a platform device
474  * @pdev: platform device allocated by platform_device_alloc to add data to
475  * @data: platform specific data for this platform device
476  * @size: size of platform specific data
477  *
478  * Add a copy of platform specific data to the platform device's
479  * platform_data pointer.  The memory associated with the platform data
480  * will be freed when the platform device is released.
481  */
482 int platform_device_add_data(struct platform_device *pdev, const void *data,
483 			     size_t size)
484 {
485 	void *d = NULL;
486 
487 	if (data) {
488 		d = kmemdup(data, size, GFP_KERNEL);
489 		if (!d)
490 			return -ENOMEM;
491 	}
492 
493 	kfree(pdev->dev.platform_data);
494 	pdev->dev.platform_data = d;
495 	return 0;
496 }
497 EXPORT_SYMBOL_GPL(platform_device_add_data);
498 
499 /**
500  * platform_device_add_properties - add built-in properties to a platform device
501  * @pdev: platform device to add properties to
502  * @properties: null terminated array of properties to add
503  *
504  * The function will take a deep copy of @properties and attach the copy to the
505  * platform device. The memory associated with properties will be freed when the
506  * platform device is released.
507  */
508 int platform_device_add_properties(struct platform_device *pdev,
509 				   const struct property_entry *properties)
510 {
511 	return device_add_properties(&pdev->dev, properties);
512 }
513 EXPORT_SYMBOL_GPL(platform_device_add_properties);
514 
515 /**
516  * platform_device_add - add a platform device to device hierarchy
517  * @pdev: platform device we're adding
518  *
519  * This is part 2 of platform_device_register(), though may be called
520  * separately _iff_ pdev was allocated by platform_device_alloc().
521  */
522 int platform_device_add(struct platform_device *pdev)
523 {
524 	u32 i;
525 	int ret;
526 
527 	if (!pdev)
528 		return -EINVAL;
529 
530 	if (!pdev->dev.parent)
531 		pdev->dev.parent = &platform_bus;
532 
533 	pdev->dev.bus = &platform_bus_type;
534 
535 	switch (pdev->id) {
536 	default:
537 		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
538 		break;
539 	case PLATFORM_DEVID_NONE:
540 		dev_set_name(&pdev->dev, "%s", pdev->name);
541 		break;
542 	case PLATFORM_DEVID_AUTO:
543 		/*
544 		 * Automatically allocated device ID. We mark it as such so
545 		 * that we remember it must be freed, and we append a suffix
546 		 * to avoid namespace collision with explicit IDs.
547 		 */
548 		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
549 		if (ret < 0)
550 			goto err_out;
551 		pdev->id = ret;
552 		pdev->id_auto = true;
553 		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
554 		break;
555 	}
556 
557 	for (i = 0; i < pdev->num_resources; i++) {
558 		struct resource *p, *r = &pdev->resource[i];
559 
560 		if (r->name == NULL)
561 			r->name = dev_name(&pdev->dev);
562 
563 		p = r->parent;
564 		if (!p) {
565 			if (resource_type(r) == IORESOURCE_MEM)
566 				p = &iomem_resource;
567 			else if (resource_type(r) == IORESOURCE_IO)
568 				p = &ioport_resource;
569 		}
570 
571 		if (p) {
572 			ret = insert_resource(p, r);
573 			if (ret) {
574 				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
575 				goto failed;
576 			}
577 		}
578 	}
579 
580 	pr_debug("Registering platform device '%s'. Parent at %s\n",
581 		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
582 
583 	ret = device_add(&pdev->dev);
584 	if (ret == 0)
585 		return ret;
586 
587  failed:
588 	if (pdev->id_auto) {
589 		ida_simple_remove(&platform_devid_ida, pdev->id);
590 		pdev->id = PLATFORM_DEVID_AUTO;
591 	}
592 
593 	while (i--) {
594 		struct resource *r = &pdev->resource[i];
595 		if (r->parent)
596 			release_resource(r);
597 	}
598 
599  err_out:
600 	return ret;
601 }
602 EXPORT_SYMBOL_GPL(platform_device_add);
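
/*
 * A sketch of the alloc/add flow this function completes; "foo",
 * foo_resources and foo_pdata are placeholder names:
 *
 *	struct platform_device *pdev;
 *	int ret;
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_NONE);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_resources(pdev, foo_resources,
 *					    ARRAY_SIZE(foo_resources));
 *	if (!ret)
 *		ret = platform_device_add_data(pdev, &foo_pdata,
 *					       sizeof(foo_pdata));
 *	if (!ret)
 *		ret = platform_device_add(pdev);
 *	if (ret) {
 *		platform_device_put(pdev);
 *		return ret;
 *	}
 */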
603 
604 /**
605  * platform_device_del - remove a platform-level device
606  * @pdev: platform device we're removing
607  *
608  * Note that this function will also release all memory- and port-based
609  * resources owned by the device (@pdev->resource).  This function must
610  * _only_ be externally called in error cases.  All other usage is a bug.
611  */
612 void platform_device_del(struct platform_device *pdev)
613 {
614 	u32 i;
615 
616 	if (!IS_ERR_OR_NULL(pdev)) {
617 		device_del(&pdev->dev);
618 
619 		if (pdev->id_auto) {
620 			ida_simple_remove(&platform_devid_ida, pdev->id);
621 			pdev->id = PLATFORM_DEVID_AUTO;
622 		}
623 
624 		for (i = 0; i < pdev->num_resources; i++) {
625 			struct resource *r = &pdev->resource[i];
626 			if (r->parent)
627 				release_resource(r);
628 		}
629 	}
630 }
631 EXPORT_SYMBOL_GPL(platform_device_del);
632 
633 /**
634  * platform_device_register - add a platform-level device
635  * @pdev: platform device we're adding
636  */
637 int platform_device_register(struct platform_device *pdev)
638 {
639 	device_initialize(&pdev->dev);
640 	setup_pdev_dma_masks(pdev);
641 	return platform_device_add(pdev);
642 }
643 EXPORT_SYMBOL_GPL(platform_device_register);
644 
645 /**
646  * platform_device_unregister - unregister a platform-level device
647  * @pdev: platform device we're unregistering
648  *
649  * Unregistration is done in two steps. First we release all resources
650  * and remove the device from the subsystem, then we drop its reference
651  * count by calling platform_device_put().
652  */
653 void platform_device_unregister(struct platform_device *pdev)
654 {
655 	platform_device_del(pdev);
656 	platform_device_put(pdev);
657 }
658 EXPORT_SYMBOL_GPL(platform_device_unregister);
659 
660 /**
661  * platform_device_register_full - add a platform-level device with
662  * resources and platform-specific data
663  *
664  * @pdevinfo: data used to create device
665  *
666  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
667  */
668 struct platform_device *platform_device_register_full(
669 		const struct platform_device_info *pdevinfo)
670 {
671 	int ret = -ENOMEM;
672 	struct platform_device *pdev;
673 
674 	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
675 	if (!pdev)
676 		return ERR_PTR(-ENOMEM);
677 
678 	pdev->dev.parent = pdevinfo->parent;
679 	pdev->dev.fwnode = pdevinfo->fwnode;
680 	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
681 	pdev->dev.of_node_reused = pdevinfo->of_node_reused;
682 
683 	if (pdevinfo->dma_mask) {
684 		pdev->platform_dma_mask = pdevinfo->dma_mask;
685 		pdev->dev.dma_mask = &pdev->platform_dma_mask;
686 		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
687 	}
688 
689 	ret = platform_device_add_resources(pdev,
690 			pdevinfo->res, pdevinfo->num_res);
691 	if (ret)
692 		goto err;
693 
694 	ret = platform_device_add_data(pdev,
695 			pdevinfo->data, pdevinfo->size_data);
696 	if (ret)
697 		goto err;
698 
699 	if (pdevinfo->properties) {
700 		ret = platform_device_add_properties(pdev,
701 						     pdevinfo->properties);
702 		if (ret)
703 			goto err;
704 	}
705 
706 	ret = platform_device_add(pdev);
707 	if (ret) {
708 err:
709 		ACPI_COMPANION_SET(&pdev->dev, NULL);
710 		platform_device_put(pdev);
711 		return ERR_PTR(ret);
712 	}
713 
714 	return pdev;
715 }
716 EXPORT_SYMBOL_GPL(platform_device_register_full);
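
/*
 * A sketch using struct platform_device_info; the field values are
 * placeholders and "parent" is assumed to be a valid struct device pointer:
 *
 *	struct platform_device_info pdevinfo = {
 *		.parent = parent,
 *		.name = "foo",
 *		.id = PLATFORM_DEVID_AUTO,
 *		.res = foo_resources,
 *		.num_res = ARRAY_SIZE(foo_resources),
 *		.data = &foo_pdata,
 *		.size_data = sizeof(foo_pdata),
 *	};
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_full(&pdevinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */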
717 
718 static int platform_drv_probe(struct device *_dev)
719 {
720 	struct platform_driver *drv = to_platform_driver(_dev->driver);
721 	struct platform_device *dev = to_platform_device(_dev);
722 	int ret;
723 
724 	ret = of_clk_set_defaults(_dev->of_node, false);
725 	if (ret < 0)
726 		return ret;
727 
728 	ret = dev_pm_domain_attach(_dev, true);
729 	if (ret)
730 		goto out;
731 
732 	if (drv->probe) {
733 		ret = drv->probe(dev);
734 		if (ret)
735 			dev_pm_domain_detach(_dev, true);
736 	}
737 
738 out:
739 	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
740 		dev_warn(_dev, "probe deferral not supported\n");
741 		ret = -ENXIO;
742 	}
743 
744 	return ret;
745 }
746 
747 static int platform_drv_probe_fail(struct device *_dev)
748 {
749 	return -ENXIO;
750 }
751 
752 static int platform_drv_remove(struct device *_dev)
753 {
754 	struct platform_driver *drv = to_platform_driver(_dev->driver);
755 	struct platform_device *dev = to_platform_device(_dev);
756 	int ret = 0;
757 
758 	if (drv->remove)
759 		ret = drv->remove(dev);
760 	dev_pm_domain_detach(_dev, true);
761 
762 	return ret;
763 }
764 
765 static void platform_drv_shutdown(struct device *_dev)
766 {
767 	struct platform_driver *drv = to_platform_driver(_dev->driver);
768 	struct platform_device *dev = to_platform_device(_dev);
769 
770 	if (drv->shutdown)
771 		drv->shutdown(dev);
772 }
773 
774 /**
775  * __platform_driver_register - register a driver for platform-level devices
776  * @drv: platform driver structure
777  * @owner: owning module/driver
778  */
779 int __platform_driver_register(struct platform_driver *drv,
780 				struct module *owner)
781 {
782 	drv->driver.owner = owner;
783 	drv->driver.bus = &platform_bus_type;
784 	drv->driver.probe = platform_drv_probe;
785 	drv->driver.remove = platform_drv_remove;
786 	drv->driver.shutdown = platform_drv_shutdown;
787 
788 	return driver_register(&drv->driver);
789 }
790 EXPORT_SYMBOL_GPL(__platform_driver_register);
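
/*
 * A sketch of a minimal platform driver registration; foo_probe, foo_remove
 * and foo_of_match are placeholder symbols. Most drivers use the
 * module_platform_driver() helper rather than calling this directly:
 *
 *	static struct platform_driver foo_driver = {
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.driver = {
 *			.name = "foo",
 *			.of_match_table = foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */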
791 
792 /**
793  * platform_driver_unregister - unregister a driver for platform-level devices
794  * @drv: platform driver structure
795  */
796 void platform_driver_unregister(struct platform_driver *drv)
797 {
798 	driver_unregister(&drv->driver);
799 }
800 EXPORT_SYMBOL_GPL(platform_driver_unregister);
801 
802 /**
803  * __platform_driver_probe - register driver for non-hotpluggable device
804  * @drv: platform driver structure
805  * @probe: the driver probe routine, probably from an __init section
806  * @module: module which will be the owner of the driver
807  *
808  * Use this instead of platform_driver_register() when you know the device
809  * is not hotpluggable and has already been registered, and you want to
810  * remove its run-once probe() infrastructure from memory after the driver
811  * has bound to the device.
812  *
813  * One typical use for this would be with drivers for controllers integrated
814  * into system-on-chip processors, where the controller devices have been
815  * configured as part of board setup.
816  *
817  * Note that this is incompatible with deferred probing.
818  *
819  * Returns zero if the driver registered and bound to a device, else returns
820  * a negative error code, with the driver not registered.
821  */
822 int __init_or_module __platform_driver_probe(struct platform_driver *drv,
823 		int (*probe)(struct platform_device *), struct module *module)
824 {
825 	int retval, code;
826 
827 	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
828 		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
829 			 drv->driver.name, __func__);
830 		return -EINVAL;
831 	}
832 
833 	/*
834 	 * We have to run our probes synchronously because we check if
835 	 * we find any devices to bind to and exit with error if there
836 	 * are none.
837 	 */
838 	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
839 
840 	/*
841 	 * Prevent driver from requesting probe deferral to avoid further
842 	 * futile probe attempts.
843 	 */
844 	drv->prevent_deferred_probe = true;
845 
846 	/* make sure driver won't have bind/unbind attributes */
847 	drv->driver.suppress_bind_attrs = true;
848 
849 	/* temporary section violation during probe() */
850 	drv->probe = probe;
851 	retval = code = __platform_driver_register(drv, module);
852 
853 	/*
854 	 * Fixup that section violation, being paranoid about code scanning
855 	 * the list of drivers in order to probe new devices.  Check to see
856 	 * if the probe was successful, and make sure any forced probes of
857 	 * new devices fail.
858 	 */
859 	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
860 	drv->probe = NULL;
861 	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
862 		retval = -ENODEV;
863 	drv->driver.probe = platform_drv_probe_fail;
864 	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
865 
866 	if (code != retval)
867 		platform_driver_unregister(drv);
868 	return retval;
869 }
870 EXPORT_SYMBOL_GPL(__platform_driver_probe);
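
/*
 * A sketch of the usual calling convention via the platform_driver_probe()
 * wrapper; foo_driver and foo_probe are placeholders, and foo_probe would
 * normally live in an __init section:
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_driver_probe(&foo_driver, foo_probe);
 *	}
 *	module_init(foo_init);
 */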
871 
872 /**
873  * __platform_create_bundle - register driver and create corresponding device
874  * @driver: platform driver structure
875  * @probe: the driver probe routine, probably from an __init section
876  * @res: set of resources that needs to be allocated for the device
877  * @n_res: number of resources
878  * @data: platform specific data for this platform device
879  * @size: size of platform specific data
880  * @module: module which will be the owner of the driver
881  *
882  * Use this in legacy-style modules that probe hardware directly and
883  * register a single platform device and corresponding platform driver.
884  *
885  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
886  */
887 struct platform_device * __init_or_module __platform_create_bundle(
888 			struct platform_driver *driver,
889 			int (*probe)(struct platform_device *),
890 			struct resource *res, unsigned int n_res,
891 			const void *data, size_t size, struct module *module)
892 {
893 	struct platform_device *pdev;
894 	int error;
895 
896 	pdev = platform_device_alloc(driver->driver.name, -1);
897 	if (!pdev) {
898 		error = -ENOMEM;
899 		goto err_out;
900 	}
901 
902 	error = platform_device_add_resources(pdev, res, n_res);
903 	if (error)
904 		goto err_pdev_put;
905 
906 	error = platform_device_add_data(pdev, data, size);
907 	if (error)
908 		goto err_pdev_put;
909 
910 	error = platform_device_add(pdev);
911 	if (error)
912 		goto err_pdev_put;
913 
914 	error = __platform_driver_probe(driver, probe, module);
915 	if (error)
916 		goto err_pdev_del;
917 
918 	return pdev;
919 
920 err_pdev_del:
921 	platform_device_del(pdev);
922 err_pdev_put:
923 	platform_device_put(pdev);
924 err_out:
925 	return ERR_PTR(error);
926 }
927 EXPORT_SYMBOL_GPL(__platform_create_bundle);
928 
929 /**
930  * __platform_register_drivers - register an array of platform drivers
931  * @drivers: an array of drivers to register
932  * @count: the number of drivers to register
933  * @owner: module owning the drivers
934  *
935  * Registers platform drivers specified by an array. On failure to register a
936  * driver, all previously registered drivers will be unregistered. Callers of
937  * this API should use platform_unregister_drivers() to unregister drivers in
938  * the reverse order.
939  *
940  * Returns: 0 on success or a negative error code on failure.
941  */
942 int __platform_register_drivers(struct platform_driver * const *drivers,
943 				unsigned int count, struct module *owner)
944 {
945 	unsigned int i;
946 	int err;
947 
948 	for (i = 0; i < count; i++) {
949 		pr_debug("registering platform driver %ps\n", drivers[i]);
950 
951 		err = __platform_driver_register(drivers[i], owner);
952 		if (err < 0) {
953 			pr_err("failed to register platform driver %ps: %d\n",
954 			       drivers[i], err);
955 			goto error;
956 		}
957 	}
958 
959 	return 0;
960 
961 error:
962 	while (i--) {
963 		pr_debug("unregistering platform driver %ps\n", drivers[i]);
964 		platform_driver_unregister(drivers[i]);
965 	}
966 
967 	return err;
968 }
969 EXPORT_SYMBOL_GPL(__platform_register_drivers);
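
/*
 * A sketch of registering several related drivers at once through the
 * platform_register_drivers() wrapper; the driver names are placeholders:
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_port_driver,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_register_drivers(foo_drivers,
 *						 ARRAY_SIZE(foo_drivers));
 *	}
 */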
970 
971 /**
972  * platform_unregister_drivers - unregister an array of platform drivers
973  * @drivers: an array of drivers to unregister
974  * @count: the number of drivers to unregister
975  *
976  * Unregisters platform drivers specified by an array. This is typically used
977  * to complement an earlier call to platform_register_drivers(). Drivers are
978  * unregistered in the reverse order in which they were registered.
979  */
980 void platform_unregister_drivers(struct platform_driver * const *drivers,
981 				 unsigned int count)
982 {
983 	while (count--) {
984 		pr_debug("unregistering platform driver %ps\n", drivers[count]);
985 		platform_driver_unregister(drivers[count]);
986 	}
987 }
988 EXPORT_SYMBOL_GPL(platform_unregister_drivers);
989 
990 /* modalias support enables more hands-off userspace setup:
991  * (a) environment variable lets new-style hotplug events work once system is
992  *     fully running:  "modprobe $MODALIAS"
993  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
994  *     mishandled before system is fully running:  "modprobe $(cat modalias)"
995  */
996 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
997 			     char *buf)
998 {
999 	struct platform_device	*pdev = to_platform_device(dev);
1000 	int len;
1001 
1002 	len = of_device_modalias(dev, buf, PAGE_SIZE);
1003 	if (len != -ENODEV)
1004 		return len;
1005 
1006 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
1007 	if (len != -ENODEV)
1008 		return len;
1009 
1010 	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
1011 
1012 	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
1013 }
1014 static DEVICE_ATTR_RO(modalias);
1015 
1016 static ssize_t driver_override_store(struct device *dev,
1017 				     struct device_attribute *attr,
1018 				     const char *buf, size_t count)
1019 {
1020 	struct platform_device *pdev = to_platform_device(dev);
1021 	char *driver_override, *old, *cp;
1022 
1023 	/* We need to keep extra room for a newline */
1024 	if (count >= (PAGE_SIZE - 1))
1025 		return -EINVAL;
1026 
1027 	driver_override = kstrndup(buf, count, GFP_KERNEL);
1028 	if (!driver_override)
1029 		return -ENOMEM;
1030 
1031 	cp = strchr(driver_override, '\n');
1032 	if (cp)
1033 		*cp = '\0';
1034 
1035 	device_lock(dev);
1036 	old = pdev->driver_override;
1037 	if (strlen(driver_override)) {
1038 		pdev->driver_override = driver_override;
1039 	} else {
1040 		kfree(driver_override);
1041 		pdev->driver_override = NULL;
1042 	}
1043 	device_unlock(dev);
1044 
1045 	kfree(old);
1046 
1047 	return count;
1048 }
1049 
1050 static ssize_t driver_override_show(struct device *dev,
1051 				    struct device_attribute *attr, char *buf)
1052 {
1053 	struct platform_device *pdev = to_platform_device(dev);
1054 	ssize_t len;
1055 
1056 	device_lock(dev);
1057 	len = sprintf(buf, "%s\n", pdev->driver_override);
1058 	device_unlock(dev);
1059 	return len;
1060 }
1061 static DEVICE_ATTR_RW(driver_override);
1062 
1063 
1064 static struct attribute *platform_dev_attrs[] = {
1065 	&dev_attr_modalias.attr,
1066 	&dev_attr_driver_override.attr,
1067 	NULL,
1068 };
1069 ATTRIBUTE_GROUPS(platform_dev);
1070 
1071 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1072 {
1073 	struct platform_device	*pdev = to_platform_device(dev);
1074 	int rc;
1075 
1076 	/* Some devices have extra OF data and an OF-style MODALIAS */
1077 	rc = of_device_uevent_modalias(dev, env);
1078 	if (rc != -ENODEV)
1079 		return rc;
1080 
1081 	rc = acpi_device_uevent_modalias(dev, env);
1082 	if (rc != -ENODEV)
1083 		return rc;
1084 
1085 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1086 			pdev->name);
1087 	return 0;
1088 }
1089 
1090 static const struct platform_device_id *platform_match_id(
1091 			const struct platform_device_id *id,
1092 			struct platform_device *pdev)
1093 {
1094 	while (id->name[0]) {
1095 		if (strcmp(pdev->name, id->name) == 0) {
1096 			pdev->id_entry = id;
1097 			return id;
1098 		}
1099 		id++;
1100 	}
1101 	return NULL;
1102 }
1103 
1104 /**
1105  * platform_match - bind platform device to platform driver.
1106  * @dev: device.
1107  * @drv: driver.
1108  *
1109  * Platform device IDs are assumed to be encoded like this:
1110  * "<name><instance>", where <name> is a short description of the type of
1111  * device, like "pci" or "floppy", and <instance> is the enumerated
1112  * instance of the device, like '0' or '42'.  Driver IDs are simply
1113  * "<name>".  So, extract the <name> from the platform_device structure,
1114  * and compare it against the name of the driver. Return whether they match
1115  * or not.
1116  */
1117 static int platform_match(struct device *dev, struct device_driver *drv)
1118 {
1119 	struct platform_device *pdev = to_platform_device(dev);
1120 	struct platform_driver *pdrv = to_platform_driver(drv);
1121 
1122 	/* When driver_override is set, only bind to the matching driver */
1123 	if (pdev->driver_override)
1124 		return !strcmp(pdev->driver_override, drv->name);
1125 
1126 	/* Attempt an OF style match first */
1127 	if (of_driver_match_device(dev, drv))
1128 		return 1;
1129 
1130 	/* Then try ACPI style match */
1131 	if (acpi_driver_match_device(dev, drv))
1132 		return 1;
1133 
1134 	/* Then try to match against the id table */
1135 	if (pdrv->id_table)
1136 		return platform_match_id(pdrv->id_table, pdev) != NULL;
1137 
1138 	/* fall-back to driver name match */
1139 	return (strcmp(pdev->name, drv->name) == 0);
1140 }
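
/*
 * A sketch of the id_table fallback used by platform_match(); the names and
 * driver_data values are placeholders:
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ "foo", 0 },
 *		{ "foo-variant", 1 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 * and in the corresponding platform_driver:
 *
 *	.id_table = foo_id_table,
 */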
1141 
1142 #ifdef CONFIG_PM_SLEEP
1143 
1144 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1145 {
1146 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1147 	struct platform_device *pdev = to_platform_device(dev);
1148 	int ret = 0;
1149 
1150 	if (dev->driver && pdrv->suspend)
1151 		ret = pdrv->suspend(pdev, mesg);
1152 
1153 	return ret;
1154 }
1155 
1156 static int platform_legacy_resume(struct device *dev)
1157 {
1158 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1159 	struct platform_device *pdev = to_platform_device(dev);
1160 	int ret = 0;
1161 
1162 	if (dev->driver && pdrv->resume)
1163 		ret = pdrv->resume(pdev);
1164 
1165 	return ret;
1166 }
1167 
1168 #endif /* CONFIG_PM_SLEEP */
1169 
1170 #ifdef CONFIG_SUSPEND
1171 
1172 int platform_pm_suspend(struct device *dev)
1173 {
1174 	struct device_driver *drv = dev->driver;
1175 	int ret = 0;
1176 
1177 	if (!drv)
1178 		return 0;
1179 
1180 	if (drv->pm) {
1181 		if (drv->pm->suspend)
1182 			ret = drv->pm->suspend(dev);
1183 	} else {
1184 		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1185 	}
1186 
1187 	return ret;
1188 }
1189 
1190 int platform_pm_resume(struct device *dev)
1191 {
1192 	struct device_driver *drv = dev->driver;
1193 	int ret = 0;
1194 
1195 	if (!drv)
1196 		return 0;
1197 
1198 	if (drv->pm) {
1199 		if (drv->pm->resume)
1200 			ret = drv->pm->resume(dev);
1201 	} else {
1202 		ret = platform_legacy_resume(dev);
1203 	}
1204 
1205 	return ret;
1206 }
1207 
1208 #endif /* CONFIG_SUSPEND */
1209 
1210 #ifdef CONFIG_HIBERNATE_CALLBACKS
1211 
1212 int platform_pm_freeze(struct device *dev)
1213 {
1214 	struct device_driver *drv = dev->driver;
1215 	int ret = 0;
1216 
1217 	if (!drv)
1218 		return 0;
1219 
1220 	if (drv->pm) {
1221 		if (drv->pm->freeze)
1222 			ret = drv->pm->freeze(dev);
1223 	} else {
1224 		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1225 	}
1226 
1227 	return ret;
1228 }
1229 
1230 int platform_pm_thaw(struct device *dev)
1231 {
1232 	struct device_driver *drv = dev->driver;
1233 	int ret = 0;
1234 
1235 	if (!drv)
1236 		return 0;
1237 
1238 	if (drv->pm) {
1239 		if (drv->pm->thaw)
1240 			ret = drv->pm->thaw(dev);
1241 	} else {
1242 		ret = platform_legacy_resume(dev);
1243 	}
1244 
1245 	return ret;
1246 }
1247 
1248 int platform_pm_poweroff(struct device *dev)
1249 {
1250 	struct device_driver *drv = dev->driver;
1251 	int ret = 0;
1252 
1253 	if (!drv)
1254 		return 0;
1255 
1256 	if (drv->pm) {
1257 		if (drv->pm->poweroff)
1258 			ret = drv->pm->poweroff(dev);
1259 	} else {
1260 		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1261 	}
1262 
1263 	return ret;
1264 }
1265 
1266 int platform_pm_restore(struct device *dev)
1267 {
1268 	struct device_driver *drv = dev->driver;
1269 	int ret = 0;
1270 
1271 	if (!drv)
1272 		return 0;
1273 
1274 	if (drv->pm) {
1275 		if (drv->pm->restore)
1276 			ret = drv->pm->restore(dev);
1277 	} else {
1278 		ret = platform_legacy_resume(dev);
1279 	}
1280 
1281 	return ret;
1282 }
1283 
1284 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1285 
1286 int platform_dma_configure(struct device *dev)
1287 {
1288 	enum dev_dma_attr attr;
1289 	int ret = 0;
1290 
1291 	if (dev->of_node) {
1292 		ret = of_dma_configure(dev, dev->of_node, true);
1293 	} else if (has_acpi_companion(dev)) {
1294 		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1295 		ret = acpi_dma_configure(dev, attr);
1296 	}
1297 
1298 	return ret;
1299 }
1300 
1301 static const struct dev_pm_ops platform_dev_pm_ops = {
1302 	.runtime_suspend = pm_generic_runtime_suspend,
1303 	.runtime_resume = pm_generic_runtime_resume,
1304 	USE_PLATFORM_PM_SLEEP_OPS
1305 };
1306 
1307 struct bus_type platform_bus_type = {
1308 	.name		= "platform",
1309 	.dev_groups	= platform_dev_groups,
1310 	.match		= platform_match,
1311 	.uevent		= platform_uevent,
1312 	.dma_configure	= platform_dma_configure,
1313 	.pm		= &platform_dev_pm_ops,
1314 };
1315 EXPORT_SYMBOL_GPL(platform_bus_type);
1316 
1317 static inline int __platform_match(struct device *dev, const void *drv)
1318 {
1319 	return platform_match(dev, (struct device_driver *)drv);
1320 }
1321 
1322 /**
1323  * platform_find_device_by_driver - Find a platform device with a given
1324  * driver.
1325  * @start: The device to start the search from.
1326  * @drv: The device driver to look for.
1327  */
1328 struct device *platform_find_device_by_driver(struct device *start,
1329 					      const struct device_driver *drv)
1330 {
1331 	return bus_find_device(&platform_bus_type, start, drv,
1332 			       __platform_match);
1333 }
1334 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1335 
1336 void __weak __init early_platform_cleanup(void) { }
1337 
1338 int __init platform_bus_init(void)
1339 {
1340 	int error;
1341 
1342 	early_platform_cleanup();
1343 
1344 	error = device_register(&platform_bus);
1345 	if (error) {
1346 		put_device(&platform_bus);
1347 		return error;
1348 	}
1349 	error = bus_register(&platform_bus_type);
1350 	if (error)
1351 		device_unregister(&platform_bus);
1352 	of_platform_register_reconfig_notifier();
1353 	return error;
1354 }
1355