xref: /openbmc/linux/drivers/gpu/drm/nouveau/nouveau_platform.c (revision bbde9fc1824aab58bc78c084163007dd6c03fe5b)
1 /*
2  * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include <linux/clk.h>
24 #include <linux/io.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/of.h>
28 #include <linux/reset.h>
29 #include <linux/regulator/consumer.h>
30 #include <linux/iommu.h>
31 #include <soc/tegra/fuse.h>
32 #include <soc/tegra/pmc.h>
33 
34 #include "nouveau_drm.h"
35 #include "nouveau_platform.h"
36 
37 static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
38 {
39 	int err;
40 
41 	err = regulator_enable(gpu->vdd);
42 	if (err)
43 		goto err_power;
44 
45 	err = clk_prepare_enable(gpu->clk);
46 	if (err)
47 		goto err_clk;
48 	err = clk_prepare_enable(gpu->clk_pwr);
49 	if (err)
50 		goto err_clk_pwr;
51 	clk_set_rate(gpu->clk_pwr, 204000000);
52 	udelay(10);
53 
54 	reset_control_assert(gpu->rst);
55 	udelay(10);
56 
57 	err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
58 	if (err)
59 		goto err_clamp;
60 	udelay(10);
61 
62 	reset_control_deassert(gpu->rst);
63 	udelay(10);
64 
65 	return 0;
66 
67 err_clamp:
68 	clk_disable_unprepare(gpu->clk_pwr);
69 err_clk_pwr:
70 	clk_disable_unprepare(gpu->clk);
71 err_clk:
72 	regulator_disable(gpu->vdd);
73 err_power:
74 	return err;
75 }
76 
77 static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
78 {
79 	int err;
80 
81 	reset_control_assert(gpu->rst);
82 	udelay(10);
83 
84 	clk_disable_unprepare(gpu->clk_pwr);
85 	clk_disable_unprepare(gpu->clk);
86 	udelay(10);
87 
88 	err = regulator_disable(gpu->vdd);
89 	if (err)
90 		return err;
91 
92 	return 0;
93 }
94 
95 #if IS_ENABLED(CONFIG_IOMMU_API)
96 
97 static void nouveau_platform_probe_iommu(struct device *dev,
98 					 struct nouveau_platform_gpu *gpu)
99 {
100 	int err;
101 	unsigned long pgsize_bitmap;
102 
103 	mutex_init(&gpu->iommu.mutex);
104 
105 	if (iommu_present(&platform_bus_type)) {
106 		gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
107 		if (IS_ERR(gpu->iommu.domain))
108 			goto error;
109 
110 		/*
111 		 * A IOMMU is only usable if it supports page sizes smaller
112 		 * or equal to the system's PAGE_SIZE, with a preference if
113 		 * both are equal.
114 		 */
115 		pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
116 		if (pgsize_bitmap & PAGE_SIZE) {
117 			gpu->iommu.pgshift = PAGE_SHIFT;
118 		} else {
119 			gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
120 			if (gpu->iommu.pgshift == 0) {
121 				dev_warn(dev, "unsupported IOMMU page size\n");
122 				goto free_domain;
123 			}
124 			gpu->iommu.pgshift -= 1;
125 		}
126 
127 		err = iommu_attach_device(gpu->iommu.domain, dev);
128 		if (err)
129 			goto free_domain;
130 
131 		err = nvkm_mm_init(&gpu->iommu._mm, 0,
132 				   (1ULL << 40) >> gpu->iommu.pgshift, 1);
133 		if (err)
134 			goto detach_device;
135 
136 		gpu->iommu.mm = &gpu->iommu._mm;
137 	}
138 
139 	return;
140 
141 detach_device:
142 	iommu_detach_device(gpu->iommu.domain, dev);
143 
144 free_domain:
145 	iommu_domain_free(gpu->iommu.domain);
146 
147 error:
148 	gpu->iommu.domain = NULL;
149 	gpu->iommu.pgshift = 0;
150 	dev_err(dev, "cannot initialize IOMMU MM\n");
151 }
152 
153 static void nouveau_platform_remove_iommu(struct device *dev,
154 					  struct nouveau_platform_gpu *gpu)
155 {
156 	if (gpu->iommu.domain) {
157 		nvkm_mm_fini(&gpu->iommu._mm);
158 		iommu_detach_device(gpu->iommu.domain, dev);
159 		iommu_domain_free(gpu->iommu.domain);
160 	}
161 }
162 
163 #else
164 
/* No-op stub when CONFIG_IOMMU_API is disabled. */
static void nouveau_platform_probe_iommu(struct device *dev,
					 struct nouveau_platform_gpu *gpu)
{
}
169 
/* No-op stub when CONFIG_IOMMU_API is disabled. */
static void nouveau_platform_remove_iommu(struct device *dev,
					  struct nouveau_platform_gpu *gpu)
{
}
174 
175 #endif
176 
177 static int nouveau_platform_probe(struct platform_device *pdev)
178 {
179 	struct nouveau_platform_gpu *gpu;
180 	struct nouveau_platform_device *device;
181 	struct drm_device *drm;
182 	int err;
183 
184 	gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
185 	if (!gpu)
186 		return -ENOMEM;
187 
188 	gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
189 	if (IS_ERR(gpu->vdd))
190 		return PTR_ERR(gpu->vdd);
191 
192 	gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
193 	if (IS_ERR(gpu->rst))
194 		return PTR_ERR(gpu->rst);
195 
196 	gpu->clk = devm_clk_get(&pdev->dev, "gpu");
197 	if (IS_ERR(gpu->clk))
198 		return PTR_ERR(gpu->clk);
199 
200 	gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
201 	if (IS_ERR(gpu->clk_pwr))
202 		return PTR_ERR(gpu->clk_pwr);
203 
204 	nouveau_platform_probe_iommu(&pdev->dev, gpu);
205 
206 	err = nouveau_platform_power_up(gpu);
207 	if (err)
208 		return err;
209 
210 	drm = nouveau_platform_device_create(pdev, &device);
211 	if (IS_ERR(drm)) {
212 		err = PTR_ERR(drm);
213 		goto power_down;
214 	}
215 
216 	device->gpu = gpu;
217 	device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
218 
219 	err = drm_dev_register(drm, 0);
220 	if (err < 0)
221 		goto err_unref;
222 
223 	return 0;
224 
225 err_unref:
226 	drm_dev_unref(drm);
227 
228 power_down:
229 	nouveau_platform_power_down(gpu);
230 	nouveau_platform_remove_iommu(&pdev->dev, gpu);
231 
232 	return err;
233 }
234 
/*
 * nouveau_platform_remove() - platform_driver remove callback.
 *
 * Tears down the DRM device, powers the GPU down and releases the IOMMU
 * state set up in probe.  Returns the result of the power-down step;
 * the IOMMU teardown itself cannot fail.
 */
static int nouveau_platform_remove(struct platform_device *pdev)
{
	/* Recover our gpu state from the drvdata stored at probe time. */
	struct drm_device *drm_dev = platform_get_drvdata(pdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
	int err;

	nouveau_drm_device_remove(drm_dev);

	err = nouveau_platform_power_down(gpu);

	/* Always detach the IOMMU, even if power-down reported an error. */
	nouveau_platform_remove_iommu(&pdev->dev, gpu);

	return err;
}
251 
#if IS_ENABLED(CONFIG_OF)
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id nouveau_platform_match[] = {
	{ .compatible = "nvidia,gk20a" },
	{ }
};

MODULE_DEVICE_TABLE(of, nouveau_platform_match);
#endif
260 
/* Platform driver hooks; registered by the nouveau core module code. */
struct platform_driver nouveau_platform_driver = {
	.driver = {
		.name = "nouveau",
		/* of_match_ptr() compiles to NULL when CONFIG_OF is off. */
		.of_match_table = of_match_ptr(nouveau_platform_match),
	},
	.probe = nouveau_platform_probe,
	.remove = nouveau_platform_remove,
};
269