xref: /openbmc/linux/drivers/base/platform.c (revision 55fd7e02)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * platform.c - platform 'pseudo' bus for legacy devices
4  *
5  * Copyright (c) 2002-3 Patrick Mochel
6  * Copyright (c) 2002-3 Open Source Development Labs
7  *
8  * Please see Documentation/driver-api/driver-model/platform.rst for more
9  * information.
10  */
11 
12 #include <linux/string.h>
13 #include <linux/platform_device.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pm_domain.h>
24 #include <linux/idr.h>
25 #include <linux/acpi.h>
26 #include <linux/clk/clk-conf.h>
27 #include <linux/limits.h>
28 #include <linux/property.h>
29 #include <linux/kmemleak.h>
30 #include <linux/types.h>
31 
32 #include "base.h"
33 #include "power/power.h"
34 
/* For automatically allocated device IDs (PLATFORM_DEVID_AUTO) */
static DEFINE_IDA(platform_devid_ida);

/*
 * Root of the platform-device hierarchy: devices added without an explicit
 * parent are reparented under this pseudo device (see platform_device_add()).
 */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
42 
43 /**
44  * platform_get_resource - get a resource for a device
45  * @dev: platform device
46  * @type: resource type
47  * @num: resource index
48  */
49 struct resource *platform_get_resource(struct platform_device *dev,
50 				       unsigned int type, unsigned int num)
51 {
52 	u32 i;
53 
54 	for (i = 0; i < dev->num_resources; i++) {
55 		struct resource *r = &dev->resource[i];
56 
57 		if (type == resource_type(r) && num-- == 0)
58 			return r;
59 	}
60 	return NULL;
61 }
62 EXPORT_SYMBOL_GPL(platform_get_resource);
63 
64 #ifdef CONFIG_HAS_IOMEM
65 /**
66  * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
67  *					    platform device and get resource
68  *
69  * @pdev: platform device to use both for memory resource lookup as well as
70  *        resource management
71  * @index: resource index
72  * @res: optional output parameter to store a pointer to the obtained resource.
73  */
74 void __iomem *
75 devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
76 				unsigned int index, struct resource **res)
77 {
78 	struct resource *r;
79 
80 	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
81 	if (res)
82 		*res = r;
83 	return devm_ioremap_resource(&pdev->dev, r);
84 }
85 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
86 
87 /**
88  * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
89  *				    device
90  *
91  * @pdev: platform device to use both for memory resource lookup as well as
92  *        resource management
93  * @index: resource index
94  */
95 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
96 					     unsigned int index)
97 {
98 	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
99 }
100 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
101 
102 /**
103  * devm_platform_ioremap_resource_wc - write-combined variant of
104  *                                     devm_platform_ioremap_resource()
105  *
106  * @pdev: platform device to use both for memory resource lookup as well as
107  *        resource management
108  * @index: resource index
109  */
110 void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
111 						unsigned int index)
112 {
113 	struct resource *res;
114 
115 	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
116 	return devm_ioremap_resource_wc(&pdev->dev, res);
117 }
118 
119 /**
120  * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
121  *					   a platform device, retrieve the
122  *					   resource by name
123  *
124  * @pdev: platform device to use both for memory resource lookup as well as
125  *	  resource management
126  * @name: name of the resource
127  */
128 void __iomem *
129 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
130 				      const char *name)
131 {
132 	struct resource *res;
133 
134 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
135 	return devm_ioremap_resource(&pdev->dev, res);
136 }
137 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
138 #endif /* CONFIG_HAS_IOMEM */
139 
/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct resource *r;

	/* Prefer the devicetree interrupt description when one exists. */
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		/* ret <= 0 other than -EPROBE_DEFER: fall through to resources */
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		/* Let ACPI re-enable an IRQ resource it left disabled. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd) {
			ret = -ENXIO;
			goto out;
		}
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		/* IRQ resources encode the linux irq number in ->start. */
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	ret = -ENXIO;
#endif
out:
	/* IRQ 0 must never leak to callers; see the function's contract above. */
	WARN(ret == 0, "0 is an invalid IRQ number\n");
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
228 
229 /**
230  * platform_get_irq - get an IRQ for a device
231  * @dev: platform device
232  * @num: IRQ number index
233  *
234  * Gets an IRQ for a platform device and prints an error message if finding the
235  * IRQ fails. Device drivers should check the return value for errors so as to
236  * not pass a negative integer value to the request_irq() APIs.
237  *
238  * For example::
239  *
240  *		int irq = platform_get_irq(pdev, 0);
241  *		if (irq < 0)
242  *			return irq;
243  *
244  * Return: non-zero IRQ number on success, negative error number on failure.
245  */
246 int platform_get_irq(struct platform_device *dev, unsigned int num)
247 {
248 	int ret;
249 
250 	ret = platform_get_irq_optional(dev, num);
251 	if (ret < 0 && ret != -EPROBE_DEFER)
252 		dev_err(&dev->dev, "IRQ index %u not found\n", num);
253 
254 	return ret;
255 }
256 EXPORT_SYMBOL_GPL(platform_get_irq);
257 
258 /**
259  * platform_irq_count - Count the number of IRQs a platform device uses
260  * @dev: platform device
261  *
262  * Return: Number of IRQs a platform device uses or EPROBE_DEFER
263  */
264 int platform_irq_count(struct platform_device *dev)
265 {
266 	int ret, nr = 0;
267 
268 	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
269 		nr++;
270 
271 	if (ret == -EPROBE_DEFER)
272 		return ret;
273 
274 	return nr;
275 }
276 EXPORT_SYMBOL_GPL(platform_irq_count);
277 
278 /**
279  * platform_get_resource_byname - get a resource for a device by name
280  * @dev: platform device
281  * @type: resource type
282  * @name: resource name
283  */
284 struct resource *platform_get_resource_byname(struct platform_device *dev,
285 					      unsigned int type,
286 					      const char *name)
287 {
288 	u32 i;
289 
290 	for (i = 0; i < dev->num_resources; i++) {
291 		struct resource *r = &dev->resource[i];
292 
293 		if (unlikely(!r->name))
294 			continue;
295 
296 		if (type == resource_type(r) && !strcmp(r->name, name))
297 			return r;
298 	}
299 	return NULL;
300 }
301 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
302 
303 static int __platform_get_irq_byname(struct platform_device *dev,
304 				     const char *name)
305 {
306 	struct resource *r;
307 	int ret;
308 
309 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
310 		ret = of_irq_get_byname(dev->dev.of_node, name);
311 		if (ret > 0 || ret == -EPROBE_DEFER)
312 			return ret;
313 	}
314 
315 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
316 	if (r) {
317 		WARN(r->start == 0, "0 is an invalid IRQ number\n");
318 		return r->start;
319 	}
320 
321 	return -ENXIO;
322 }
323 
324 /**
325  * platform_get_irq_byname - get an IRQ for a device by name
326  * @dev: platform device
327  * @name: IRQ name
328  *
329  * Get an IRQ like platform_get_irq(), but then by name rather then by index.
330  *
331  * Return: non-zero IRQ number on success, negative error number on failure.
332  */
333 int platform_get_irq_byname(struct platform_device *dev, const char *name)
334 {
335 	int ret;
336 
337 	ret = __platform_get_irq_byname(dev, name);
338 	if (ret < 0 && ret != -EPROBE_DEFER)
339 		dev_err(&dev->dev, "IRQ %s not found\n", name);
340 
341 	return ret;
342 }
343 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
344 
/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	/* Same lookup as platform_get_irq_byname(), minus the error print. */
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
361 
/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Registers @num devices; on the first failure all devices registered so
 * far are unregistered again.
 *
 * Return: 0 on success, or the first registration error.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		int ret = platform_device_register(devs[i]);

		if (ret) {
			/* Roll back everything registered before the failure. */
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
383 
/*
 * Internal wrapper pairing a platform_device with the storage for its name;
 * allocated and freed as one unit (see platform_device_alloc() and
 * platform_device_release()).
 */
struct platform_object {
	struct platform_device pdev;
	char name[];	/* flexible array member holding the device name */
};
388 
389 /*
390  * Set up default DMA mask for platform devices if the they weren't
391  * previously set by the architecture / DT.
392  */
393 static void setup_pdev_dma_masks(struct platform_device *pdev)
394 {
395 	pdev->dev.dma_parms = &pdev->dma_parms;
396 
397 	if (!pdev->dev.coherent_dma_mask)
398 		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
399 	if (!pdev->dev.dma_mask) {
400 		pdev->platform_dma_mask = DMA_BIT_MASK(32);
401 		pdev->dev.dma_mask = &pdev->platform_dma_mask;
402 	}
403 };
404 
405 /**
406  * platform_device_put - destroy a platform device
407  * @pdev: platform device to free
408  *
409  * Free all memory associated with a platform device.  This function must
410  * _only_ be externally called in error cases.  All other usage is a bug.
411  */
412 void platform_device_put(struct platform_device *pdev)
413 {
414 	if (!IS_ERR_OR_NULL(pdev))
415 		put_device(&pdev->dev);
416 }
417 EXPORT_SYMBOL_GPL(platform_device_put);
418 
/*
 * Release callback installed by platform_device_alloc(); runs when the
 * embedded struct device's last reference is dropped. Frees everything the
 * platform core attached to the device, then the containing allocation.
 */
static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	/* pa->name lives inside pa itself (flexible array), freed here too */
	kfree(pa);
}
431 
432 /**
433  * platform_device_alloc - create a platform device
434  * @name: base name of the device we're adding
435  * @id: instance id
436  *
437  * Create a platform device object which can have other objects attached
438  * to it, and which will have attached objects freed when it is released.
439  */
440 struct platform_device *platform_device_alloc(const char *name, int id)
441 {
442 	struct platform_object *pa;
443 
444 	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
445 	if (pa) {
446 		strcpy(pa->name, name);
447 		pa->pdev.name = pa->name;
448 		pa->pdev.id = id;
449 		device_initialize(&pa->pdev.dev);
450 		pa->pdev.dev.release = platform_device_release;
451 		setup_pdev_dma_masks(&pa->pdev);
452 	}
453 
454 	return pa ? &pa->pdev : NULL;
455 }
456 EXPORT_SYMBOL_GPL(platform_device_alloc);
457 
458 /**
459  * platform_device_add_resources - add resources to a platform device
460  * @pdev: platform device allocated by platform_device_alloc to add resources to
461  * @res: set of resources that needs to be allocated for the device
462  * @num: number of resources
463  *
464  * Add a copy of the resources to the platform device.  The memory
465  * associated with the resources will be freed when the platform device is
466  * released.
467  */
468 int platform_device_add_resources(struct platform_device *pdev,
469 				  const struct resource *res, unsigned int num)
470 {
471 	struct resource *r = NULL;
472 
473 	if (res) {
474 		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
475 		if (!r)
476 			return -ENOMEM;
477 	}
478 
479 	kfree(pdev->resource);
480 	pdev->resource = r;
481 	pdev->num_resources = num;
482 	return 0;
483 }
484 EXPORT_SYMBOL_GPL(platform_device_add_resources);
485 
486 /**
487  * platform_device_add_data - add platform-specific data to a platform device
488  * @pdev: platform device allocated by platform_device_alloc to add resources to
489  * @data: platform specific data for this platform device
490  * @size: size of platform specific data
491  *
492  * Add a copy of platform specific data to the platform device's
493  * platform_data pointer.  The memory associated with the platform data
494  * will be freed when the platform device is released.
495  */
496 int platform_device_add_data(struct platform_device *pdev, const void *data,
497 			     size_t size)
498 {
499 	void *d = NULL;
500 
501 	if (data) {
502 		d = kmemdup(data, size, GFP_KERNEL);
503 		if (!d)
504 			return -ENOMEM;
505 	}
506 
507 	kfree(pdev->dev.platform_data);
508 	pdev->dev.platform_data = d;
509 	return 0;
510 }
511 EXPORT_SYMBOL_GPL(platform_device_add_data);
512 
513 /**
514  * platform_device_add_properties - add built-in properties to a platform device
515  * @pdev: platform device to add properties to
516  * @properties: null terminated array of properties to add
517  *
518  * The function will take deep copy of @properties and attach the copy to the
519  * platform device. The memory associated with properties will be freed when the
520  * platform device is released.
521  */
522 int platform_device_add_properties(struct platform_device *pdev,
523 				   const struct property_entry *properties)
524 {
525 	return device_add_properties(&pdev->dev, properties);
526 }
527 EXPORT_SYMBOL_GPL(platform_device_add_properties);
528 
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 *
 * On failure every resource claimed here is released again and an
 * auto-allocated id is returned to the ida.
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	/* Devices without an explicit parent hang off the platform root. */
	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	/* Build the device name according to the id convention. */
	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim each MEM/IO resource into the global resource trees. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Unwind: return the auto id, then release claimed resources. */
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/* i is the index of the resource whose claim failed (or num on
	 * device_add() failure); release the ones before it. */
	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
617 
618 /**
619  * platform_device_del - remove a platform-level device
620  * @pdev: platform device we're removing
621  *
622  * Note that this function will also release all memory- and port-based
623  * resources owned by the device (@dev->resource).  This function must
624  * _only_ be externally called in error cases.  All other usage is a bug.
625  */
626 void platform_device_del(struct platform_device *pdev)
627 {
628 	u32 i;
629 
630 	if (!IS_ERR_OR_NULL(pdev)) {
631 		device_del(&pdev->dev);
632 
633 		if (pdev->id_auto) {
634 			ida_simple_remove(&platform_devid_ida, pdev->id);
635 			pdev->id = PLATFORM_DEVID_AUTO;
636 		}
637 
638 		for (i = 0; i < pdev->num_resources; i++) {
639 			struct resource *r = &pdev->resource[i];
640 			if (r->parent)
641 				release_resource(r);
642 		}
643 	}
644 }
645 EXPORT_SYMBOL_GPL(platform_device_del);
646 
647 /**
648  * platform_device_register - add a platform-level device
649  * @pdev: platform device we're adding
650  */
651 int platform_device_register(struct platform_device *pdev)
652 {
653 	device_initialize(&pdev->dev);
654 	setup_pdev_dma_masks(pdev);
655 	return platform_device_add(pdev);
656 }
657 EXPORT_SYMBOL_GPL(platform_device_register);
658 
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);	/* detach and release resources */
	platform_device_put(pdev);	/* drop the final reference */
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
673 
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Allocates a platform device, populates it from @pdevinfo (parent,
 * fwnode/of_node, DMA mask, resources, platform data, properties) and
 * registers it. On any failure the partially built device is released.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* Take a reference on the OF node (if the fwnode is one). */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/* Mask storage is embedded in the pdev; point dma_mask at it. */
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
		/* Shared error path: drop ACPI link, then the device itself. */
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
731 
/*
 * Bus-level probe glue: applies devicetree clock defaults, attaches the
 * device to its PM domain, then calls the driver's probe(). The PM domain
 * is detached again if probe() fails.
 */
static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	/* Drivers registered via __platform_driver_probe() must not defer. */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}
760 
761 static int platform_drv_probe_fail(struct device *_dev)
762 {
763 	return -ENXIO;
764 }
765 
766 static int platform_drv_remove(struct device *_dev)
767 {
768 	struct platform_driver *drv = to_platform_driver(_dev->driver);
769 	struct platform_device *dev = to_platform_device(_dev);
770 	int ret = 0;
771 
772 	if (drv->remove)
773 		ret = drv->remove(dev);
774 	dev_pm_domain_detach(_dev, true);
775 
776 	return ret;
777 }
778 
779 static void platform_drv_shutdown(struct device *_dev)
780 {
781 	struct platform_driver *drv = to_platform_driver(_dev->driver);
782 	struct platform_device *dev = to_platform_device(_dev);
783 
784 	if (drv->shutdown)
785 		drv->shutdown(dev);
786 }
787 
/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 *
 * Wires the bus-level probe/remove/shutdown glue into the embedded
 * struct device_driver and registers it on the platform bus.
 */
int __platform_driver_register(struct platform_driver *drv,
				struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
805 
/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 *
 * Counterpart to __platform_driver_register().
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
815 
/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	/* Async probing can't be checked synchronously below; refuse it. */
	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices.  Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	/* Registered but bound to nothing: treat as "no device". */
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	/* code == 0 && retval == -ENODEV: registration ok but no binding. */
	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
887 
/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	/* Device name mirrors the driver name; -1 == PLATFORM_DEVID_NONE. */
	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

	/* Unwind in reverse order of construction. */
err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
944 
/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		int err;

		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err >= 0)
			continue;

		pr_err("failed to register platform driver %ps: %d\n",
		       drivers[i], err);

		/* Unwind everything registered so far, in reverse order. */
		while (i--) {
			pr_debug("unregistering platform driver %ps\n",
				 drivers[i]);
			platform_driver_unregister(drivers[i]);
		}

		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
986 
/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	unsigned int i = count;

	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
1005 
/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct platform_device	*pdev = to_platform_device(dev);
	int len;

	/* Prefer an OF-style alias, then an ACPI one, then the plain name. */
	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	/* NOTE(review): OF path gets PAGE_SIZE but ACPI gets PAGE_SIZE - 1;
	 * confirm the asymmetry is intentional. */
	len = acpi_device_modalias(dev, buf, PAGE_SIZE -1);
	if (len != -ENODEV)
		return len;

	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	/* snprintf returns the would-be length; clamp on truncation. */
	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
1031 
/*
 * Sysfs "driver_override" write handler.
 *
 * Stores a driver name that restricts this device to binding only against
 * the driver of that name (see platform_match()). Writing an empty string
 * (or a lone newline) clears the override. Returns @count on success,
 * -EINVAL if the input is too large, or -ENOMEM on allocation failure.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* Strip the trailing newline a sysfs write usually carries. */
	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	/* Swap in the new value under the device lock ... */
	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		/* An empty string clears the override. */
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	/* ... and free the previous value outside the lock. */
	kfree(old);

	return count;
}
1065 
1066 static ssize_t driver_override_show(struct device *dev,
1067 				    struct device_attribute *attr, char *buf)
1068 {
1069 	struct platform_device *pdev = to_platform_device(dev);
1070 	ssize_t len;
1071 
1072 	device_lock(dev);
1073 	len = sprintf(buf, "%s\n", pdev->driver_override);
1074 	device_unlock(dev);
1075 	return len;
1076 }
1077 static DEVICE_ATTR_RW(driver_override);
1078 
1079 
/* Default sysfs attributes present on every platform device. */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(platform_dev);
1086 
1087 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1088 {
1089 	struct platform_device	*pdev = to_platform_device(dev);
1090 	int rc;
1091 
1092 	/* Some devices have extra OF data and an OF-style MODALIAS */
1093 	rc = of_device_uevent_modalias(dev, env);
1094 	if (rc != -ENODEV)
1095 		return rc;
1096 
1097 	rc = acpi_device_uevent_modalias(dev, env);
1098 	if (rc != -ENODEV)
1099 		return rc;
1100 
1101 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1102 			pdev->name);
1103 	return 0;
1104 }
1105 
1106 static const struct platform_device_id *platform_match_id(
1107 			const struct platform_device_id *id,
1108 			struct platform_device *pdev)
1109 {
1110 	while (id->name[0]) {
1111 		if (strcmp(pdev->name, id->name) == 0) {
1112 			pdev->id_entry = id;
1113 			return id;
1114 		}
1115 		id++;
1116 	}
1117 	return NULL;
1118 }
1119 
1120 /**
1121  * platform_match - bind platform device to platform driver.
1122  * @dev: device.
1123  * @drv: driver.
1124  *
1125  * Platform device IDs are assumed to be encoded like this:
1126  * "<name><instance>", where <name> is a short description of the type of
1127  * device, like "pci" or "floppy", and <instance> is the enumerated
1128  * instance of the device, like '0' or '42'.  Driver IDs are simply
1129  * "<name>".  So, extract the <name> from the platform_device structure,
1130  * and compare it against the name of the driver. Return whether they match
1131  * or not.
1132  */
1133 static int platform_match(struct device *dev, struct device_driver *drv)
1134 {
1135 	struct platform_device *pdev = to_platform_device(dev);
1136 	struct platform_driver *pdrv = to_platform_driver(drv);
1137 
1138 	/* When driver_override is set, only bind to the matching driver */
1139 	if (pdev->driver_override)
1140 		return !strcmp(pdev->driver_override, drv->name);
1141 
1142 	/* Attempt an OF style match first */
1143 	if (of_driver_match_device(dev, drv))
1144 		return 1;
1145 
1146 	/* Then try ACPI style match */
1147 	if (acpi_driver_match_device(dev, drv))
1148 		return 1;
1149 
1150 	/* Then try to match against the id table */
1151 	if (pdrv->id_table)
1152 		return platform_match_id(pdrv->id_table, pdev) != NULL;
1153 
1154 	/* fall-back to driver name match */
1155 	return (strcmp(pdev->name, drv->name) == 0);
1156 }
1157 
1158 #ifdef CONFIG_PM_SLEEP
1159 
1160 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1161 {
1162 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1163 	struct platform_device *pdev = to_platform_device(dev);
1164 	int ret = 0;
1165 
1166 	if (dev->driver && pdrv->suspend)
1167 		ret = pdrv->suspend(pdev, mesg);
1168 
1169 	return ret;
1170 }
1171 
1172 static int platform_legacy_resume(struct device *dev)
1173 {
1174 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1175 	struct platform_device *pdev = to_platform_device(dev);
1176 	int ret = 0;
1177 
1178 	if (dev->driver && pdrv->resume)
1179 		ret = pdrv->resume(pdev);
1180 
1181 	return ret;
1182 }
1183 
1184 #endif /* CONFIG_PM_SLEEP */
1185 
1186 #ifdef CONFIG_SUSPEND
1187 
1188 int platform_pm_suspend(struct device *dev)
1189 {
1190 	struct device_driver *drv = dev->driver;
1191 	int ret = 0;
1192 
1193 	if (!drv)
1194 		return 0;
1195 
1196 	if (drv->pm) {
1197 		if (drv->pm->suspend)
1198 			ret = drv->pm->suspend(dev);
1199 	} else {
1200 		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1201 	}
1202 
1203 	return ret;
1204 }
1205 
1206 int platform_pm_resume(struct device *dev)
1207 {
1208 	struct device_driver *drv = dev->driver;
1209 	int ret = 0;
1210 
1211 	if (!drv)
1212 		return 0;
1213 
1214 	if (drv->pm) {
1215 		if (drv->pm->resume)
1216 			ret = drv->pm->resume(dev);
1217 	} else {
1218 		ret = platform_legacy_resume(dev);
1219 	}
1220 
1221 	return ret;
1222 }
1223 
1224 #endif /* CONFIG_SUSPEND */
1225 
1226 #ifdef CONFIG_HIBERNATE_CALLBACKS
1227 
1228 int platform_pm_freeze(struct device *dev)
1229 {
1230 	struct device_driver *drv = dev->driver;
1231 	int ret = 0;
1232 
1233 	if (!drv)
1234 		return 0;
1235 
1236 	if (drv->pm) {
1237 		if (drv->pm->freeze)
1238 			ret = drv->pm->freeze(dev);
1239 	} else {
1240 		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1241 	}
1242 
1243 	return ret;
1244 }
1245 
1246 int platform_pm_thaw(struct device *dev)
1247 {
1248 	struct device_driver *drv = dev->driver;
1249 	int ret = 0;
1250 
1251 	if (!drv)
1252 		return 0;
1253 
1254 	if (drv->pm) {
1255 		if (drv->pm->thaw)
1256 			ret = drv->pm->thaw(dev);
1257 	} else {
1258 		ret = platform_legacy_resume(dev);
1259 	}
1260 
1261 	return ret;
1262 }
1263 
1264 int platform_pm_poweroff(struct device *dev)
1265 {
1266 	struct device_driver *drv = dev->driver;
1267 	int ret = 0;
1268 
1269 	if (!drv)
1270 		return 0;
1271 
1272 	if (drv->pm) {
1273 		if (drv->pm->poweroff)
1274 			ret = drv->pm->poweroff(dev);
1275 	} else {
1276 		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1277 	}
1278 
1279 	return ret;
1280 }
1281 
1282 int platform_pm_restore(struct device *dev)
1283 {
1284 	struct device_driver *drv = dev->driver;
1285 	int ret = 0;
1286 
1287 	if (!drv)
1288 		return 0;
1289 
1290 	if (drv->pm) {
1291 		if (drv->pm->restore)
1292 			ret = drv->pm->restore(dev);
1293 	} else {
1294 		ret = platform_legacy_resume(dev);
1295 	}
1296 
1297 	return ret;
1298 }
1299 
1300 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1301 
1302 int platform_dma_configure(struct device *dev)
1303 {
1304 	enum dev_dma_attr attr;
1305 	int ret = 0;
1306 
1307 	if (dev->of_node) {
1308 		ret = of_dma_configure(dev, dev->of_node, true);
1309 	} else if (has_acpi_companion(dev)) {
1310 		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1311 		ret = acpi_dma_configure(dev, attr);
1312 	}
1313 
1314 	return ret;
1315 }
1316 
/* Bus-level PM ops: generic runtime callbacks plus the sleep ops above. */
static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};
1322 
/* The "platform" pseudo-bus that all platform devices and drivers use. */
struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
1332 
/* Adapter so platform_match() can serve as a bus_find_device() predicate. */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}
1337 
1338 /**
1339  * platform_find_device_by_driver - Find a platform device with a given
1340  * driver.
1341  * @start: The device to start the search from.
1342  * @drv: The device driver to look for.
1343  */
1344 struct device *platform_find_device_by_driver(struct device *start,
1345 					      const struct device_driver *drv)
1346 {
1347 	return bus_find_device(&platform_bus_type, start, drv,
1348 			       __platform_match);
1349 }
1350 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1351 
/* Weak hook for architectures to tear down early platform state; no-op here. */
void __weak __init early_platform_cleanup(void) { }
1353 
/*
 * Boot-time initialization: register the root platform device and the
 * platform bus type. Returns 0 on success or a negative errno.
 */
int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		/*
		 * After a failed device_register() the device must be
		 * released with put_device(), not device_unregister().
		 */
		put_device(&platform_bus);
		return error;
	}
	error =  bus_register(&platform_bus_type);
	if (error)
		/* Roll back the root device if the bus cannot register. */
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}
1371