// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2012 Samsung Electronics Co., Ltd.
// Author: Inki Dae <inki.dae@samsung.com>
// Author: Andrzej Hajda <a.hajda@samsung.com>

#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

#include <drm/drm_print.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"

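/*
 * When CONFIG_ARM_DMA_USE_IOMMU is disabled, stub out the arm_iommu_*()
 * helpers and to_dma_iommu_mapping() so the IS_ENABLED() guarded code
 * below still compiles; the stubs either do nothing or return an error.
 */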
#if defined(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	({ NULL; })
#define arm_iommu_attach_device(...)	({ -ENODEV; })
#define arm_iommu_release_mapping(...)	({ })
#define arm_iommu_detach_device(...)	({ })
#define to_dma_iommu_mapping(dev) NULL
#endif

#if !defined(CONFIG_IOMMU_DMA)
#define iommu_dma_init_domain(...) ({ -EINVAL; })
#endif

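/*
 * Device (IOVA) address window used when an ARM dma-iommu mapping is
 * created below: 1 GiB starting at 0x20000000.
 */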
#define EXYNOS_DEV_ADDR_START	0x20000000
#define EXYNOS_DEV_ADDR_SIZE	0x40000000

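/*
 * configure_dma_max_seg_size - allocate dev->dma_parms if the device does
 * not have them yet and raise the maximum DMA segment size to
 * DMA_BIT_MASK(32), so large buffers can be mapped as a single segment.
 */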
static inline int configure_dma_max_seg_size(struct device *dev)
{
	if (!dev->dma_parms)
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	return 0;
}

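/*
 * clear_dma_max_seg_size - free the dma_parms allocated by
 * configure_dma_max_seg_size() and reset the pointer.
 */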
static inline void clear_dma_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}

/*
 * drm_iommu_attach_device - attach a device to the IOMMU mapping
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be attached
 *
 * This function should be called by sub drivers to attach themselves to
 * the IOMMU mapping.
 */
static int drm_iommu_attach_device(struct drm_device *drm_dev,
				struct device *subdrv_dev, void **dma_priv)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;
	int ret = 0;

	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
		DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
			  dev_name(subdrv_dev));
		return -EINVAL;
	}

	ret = configure_dma_max_seg_size(subdrv_dev);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		/*
		 * Keep the original DMA mapping of the sub-device and
		 * restore it on Exynos DRM detach, otherwise the DMA
		 * framework considers it as IOMMU-less during the next
		 * probe (in case of deferred probe or modular build)
		 */
		*dma_priv = to_dma_iommu_mapping(subdrv_dev);
		if (*dma_priv)
			arm_iommu_detach_device(subdrv_dev);

		ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = iommu_attach_device(priv->mapping, subdrv_dev);
	}

	if (ret)
		clear_dma_max_seg_size(subdrv_dev);

	return ret;
}

/*
 * drm_iommu_detach_device - detach a device from the IOMMU mapping
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be detached
 *
 * This function should be called by sub drivers to detach themselves from
 * the IOMMU mapping.
 */
static void drm_iommu_detach_device(struct drm_device *drm_dev,
				    struct device *subdrv_dev, void **dma_priv)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		arm_iommu_detach_device(subdrv_dev);
		arm_iommu_attach_device(subdrv_dev, *dma_priv);
	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
		iommu_detach_device(priv->mapping, subdrv_dev);
	}

	clear_dma_max_seg_size(subdrv_dev);
}

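/*
 * exynos_drm_register_dma - register a sub-device for DMA/IOMMU handling
 *
 * @drm: DRM device
 * @dev: sub-device to be registered
 * @dma_priv: storage for the sub-device's original DMA mapping
 *
 * The first registered device becomes the device used for DMA mapping
 * operations. When CONFIG_EXYNOS_IOMMU is enabled, the IOMMU mapping is
 * created (or looked up) on first use and the sub-device is attached to it.
 */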
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
			    void **dma_priv)
{
	struct exynos_drm_private *priv = drm->dev_private;

	if (!priv->dma_dev) {
		priv->dma_dev = dev;
		DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
			 dev_name(dev));
	}

	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
		return 0;

	if (!priv->mapping) {
		void *mapping;

		if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
			mapping = arm_iommu_create_mapping(&platform_bus_type,
				EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
		else if (IS_ENABLED(CONFIG_IOMMU_DMA))
			mapping = iommu_get_domain_for_dev(priv->dma_dev);
		else
			mapping = ERR_PTR(-ENODEV);

		if (IS_ERR(mapping))
			return PTR_ERR(mapping);
		priv->mapping = mapping;
	}

	return drm_iommu_attach_device(drm, dev, dma_priv);
}

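/*
 * exynos_drm_unregister_dma - detach a sub-device from the IOMMU mapping
 * set up by exynos_drm_register_dma(); a no-op when CONFIG_EXYNOS_IOMMU
 * is disabled.
 */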
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
			       void **dma_priv)
{
	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
		drm_iommu_detach_device(drm, dev, dma_priv);
}

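/*
 * exynos_drm_cleanup_dma - release the IOMMU mapping created by
 * exynos_drm_register_dma() and clear the cached DMA device.
 */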
void exynos_drm_cleanup_dma(struct drm_device *drm)
{
	struct exynos_drm_private *priv = drm->dev_private;

	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
		return;

	arm_iommu_release_mapping(priv->mapping);
	priv->mapping = NULL;
	priv->dma_dev = NULL;
}