// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/common.h>

#include "drm.h"
#include "gem.h"
#include "gr2d.h"

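/* Indices into the reset_control_bulk_data array held in struct gr2d. */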
enum {
	RST_MC,
	RST_GR2D,
	RST_GR2D_MAX,
};

struct gr2d_soc {
	unsigned int version;
};

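/*
 * Per-instance driver state. The addr_regs bitmap records which GR2D
 * register offsets carry buffer addresses; it is consulted by
 * gr2d_is_addr_reg() when command streams are validated.
 */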
struct gr2d {
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct clk *clk;

	struct reset_control_bulk_data resets[RST_GR2D_MAX];
	unsigned int nresets;

	const struct gr2d_soc *soc;

	DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
};

static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
{
	return container_of(client, struct gr2d, client);
}

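/*
 * host1x client initialization: request a channel and a syncpoint, attach
 * to the IOMMU domain (if any) and register with the Tegra DRM core so
 * that userspace can open channels on this engine.
 */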
static int gr2d_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
	struct gr2d *gr2d = to_gr2d(drm);
	int err;

	gr2d->channel = host1x_channel_request(client);
	if (!gr2d->channel)
		return -ENOMEM;

	client->syncpts[0] = host1x_syncpt_request(client, flags);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		dev_err(client->dev, "failed to request syncpoint: %d\n", err);
		goto put;
	}

	err = host1x_client_iommu_attach(client);
	if (err < 0) {
		dev_err(client->dev, "failed to attach to domain: %d\n", err);
		goto free;
	}

	err = tegra_drm_register_client(dev->dev_private, drm);
	if (err < 0) {
		dev_err(client->dev, "failed to register client: %d\n", err);
		goto detach_iommu;
	}

	return 0;

detach_iommu:
	host1x_client_iommu_detach(client);
free:
	host1x_syncpt_put(client->syncpts[0]);
put:
	host1x_channel_put(gr2d->channel);
	return err;
}

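/* Tear down everything set up in gr2d_init() and force the engine off. */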
static int gr2d_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct gr2d *gr2d = to_gr2d(drm);
	int err;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_client_iommu_detach(client);
	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(gr2d->channel);

	gr2d->channel = NULL;

	return 0;
}

static const struct host1x_client_ops gr2d_client_ops = {
	.init = gr2d_init,
	.exit = gr2d_exit,
};

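/* Hand out a reference to the shared hardware channel for a new context. */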
static int gr2d_open_channel(struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	struct gr2d *gr2d = to_gr2d(client);

	context->channel = host1x_channel_get(gr2d->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void gr2d_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

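/*
 * Tell the command stream validation code whether a register write at the
 * given class/offset carries a memory address (and therefore needs a
 * relocation). Returns 1 for address registers, 0 otherwise.
 */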
static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
{
	struct gr2d *gr2d = dev_get_drvdata(dev);

	switch (class) {
	case HOST1X_CLASS_HOST1X:
		if (offset == 0x2b)
			return 1;

		break;

	case HOST1X_CLASS_GR2D:
	case HOST1X_CLASS_GR2D_SB:
		if (offset >= GR2D_NUM_REGS)
			break;

		if (test_bit(offset, gr2d->addr_regs))
			return 1;

		break;
	}

	return 0;
}

static int gr2d_is_valid_class(u32 class)
{
	return (class == HOST1X_CLASS_GR2D ||
		class == HOST1X_CLASS_GR2D_SB);
}

static const struct tegra_drm_client_ops gr2d_ops = {
	.open_channel = gr2d_open_channel,
	.close_channel = gr2d_close_channel,
	.is_addr_reg = gr2d_is_addr_reg,
	.is_valid_class = gr2d_is_valid_class,
	.submit = tegra_drm_submit,
};

static const struct gr2d_soc tegra20_gr2d_soc = {
	.version = 0x20,
};

static const struct gr2d_soc tegra30_gr2d_soc = {
	.version = 0x30,
};

static const struct gr2d_soc tegra114_gr2d_soc = {
	.version = 0x35,
};

static const struct of_device_id gr2d_match[] = {
	{ .compatible = "nvidia,tegra114-gr2d", .data = &tegra114_gr2d_soc },
	{ .compatible = "nvidia,tegra30-gr2d", .data = &tegra30_gr2d_soc },
	{ .compatible = "nvidia,tegra20-gr2d", .data = &tegra20_gr2d_soc },
	{ },
};
MODULE_DEVICE_TABLE(of, gr2d_match);

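/*
 * GR2D register offsets that take buffer addresses; mirrored into
 * gr2d->addr_regs at probe time.
 */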
static const u32 gr2d_addr_regs[] = {
	GR2D_UA_BASE_ADDR,
	GR2D_VA_BASE_ADDR,
	GR2D_PAT_BASE_ADDR,
	GR2D_DSTA_BASE_ADDR,
	GR2D_DSTB_BASE_ADDR,
	GR2D_DSTC_BASE_ADDR,
	GR2D_SRCA_BASE_ADDR,
	GR2D_SRCB_BASE_ADDR,
	GR2D_PATBASE_ADDR,
	GR2D_SRC_BASE_ADDR_SB,
	GR2D_DSTA_BASE_ADDR_SB,
	GR2D_DSTB_BASE_ADDR_SB,
	GR2D_UA_BASE_ADDR_SB,
	GR2D_VA_BASE_ADDR_SB,
};

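/*
 * Look up the (optional) memory-client and module resets. The controls are
 * requested in "released" state so that runtime PM can acquire and release
 * them around power-gating; the module reset itself must exist.
 */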
static int gr2d_get_resets(struct device *dev, struct gr2d *gr2d)
{
	int err;

	gr2d->resets[RST_MC].id = "mc";
	gr2d->resets[RST_GR2D].id = "2d";
	gr2d->nresets = RST_GR2D_MAX;

	err = devm_reset_control_bulk_get_optional_exclusive_released(
				dev, gr2d->nresets, gr2d->resets);
	if (err) {
		dev_err(dev, "failed to get reset: %d\n", err);
		return err;
	}

	if (WARN_ON(!gr2d->resets[RST_GR2D].rstc))
		return -ENOENT;

	return 0;
}

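/*
 * Bind-time setup: clock, resets, syncpoint table, OPP table and host1x
 * client registration, plus population of the address register bitmap.
 */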
static int gr2d_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct gr2d *gr2d;
	unsigned int i;
	int err;

	gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
	if (!gr2d)
		return -ENOMEM;

	platform_set_drvdata(pdev, gr2d);

	gr2d->soc = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	gr2d->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(gr2d->clk)) {
		dev_err(dev, "cannot get clock\n");
		return PTR_ERR(gr2d->clk);
	}

	err = gr2d_get_resets(dev, gr2d);
	if (err)
		return err;

	INIT_LIST_HEAD(&gr2d->client.base.list);
	gr2d->client.base.ops = &gr2d_client_ops;
	gr2d->client.base.dev = dev;
	gr2d->client.base.class = HOST1X_CLASS_GR2D;
	gr2d->client.base.syncpts = syncpts;
	gr2d->client.base.num_syncpts = 1;

	INIT_LIST_HEAD(&gr2d->client.list);
	gr2d->client.version = gr2d->soc->version;
	gr2d->client.ops = &gr2d_ops;

	err = devm_tegra_core_dev_init_opp_table_common(dev);
	if (err)
		return err;

	err = host1x_client_register(&gr2d->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		return err;
	}

	/* initialize address register map */
	for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
		set_bit(gr2d_addr_regs[i], gr2d->addr_regs);

	return 0;
}

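/* Undo probe: disable runtime PM and unregister the host1x client. */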
static void gr2d_remove(struct platform_device *pdev)
{
	struct gr2d *gr2d = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	host1x_client_unregister(&gr2d->client.base);
}

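/*
 * Runtime suspend: stop the channel, put only the memory client part of the
 * module into reset (see the comment below) and gate the clock.
 */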
static int __maybe_unused gr2d_runtime_suspend(struct device *dev)
{
	struct gr2d *gr2d = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(gr2d->channel);
	reset_control_bulk_release(gr2d->nresets, gr2d->resets);

	/*
	 * The GR2D module shouldn't be reset while the hardware is idling,
	 * otherwise host1x's cmdproc will get stuck trying to access any G2
	 * register after reset. The GR2D module can be either hot-reset or
	 * reset after power-gating of the HEG partition. Hence we only put
	 * the memory client part of the module into reset; the HEG GENPD
	 * will take care of resetting the GR2D module across power-gating.
	 *
	 * On Tegra20 there is no HEG partition, but it's okay to have
	 * undetermined h/w state since userspace is expected to reprogram
	 * the state on each job submission anyway.
	 */
	err = reset_control_acquire(gr2d->resets[RST_MC].rstc);
	if (err) {
		dev_err(dev, "failed to acquire MC reset: %d\n", err);
		goto acquire_reset;
	}

	err = reset_control_assert(gr2d->resets[RST_MC].rstc);
	reset_control_release(gr2d->resets[RST_MC].rstc);
	if (err) {
		dev_err(dev, "failed to assert MC reset: %d\n", err);
		goto acquire_reset;
	}

	clk_disable_unprepare(gr2d->clk);

	return 0;

acquire_reset:
	reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
	reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);

	return err;
}

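/*
 * Runtime resume: reacquire the resets, ungate the clock, bring the engine
 * out of reset and (re)arm runtime PM autosuspend.
 */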
static int __maybe_unused gr2d_runtime_resume(struct device *dev)
{
	struct gr2d *gr2d = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(gr2d->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	usleep_range(2000, 4000);

	/* this is a reset array which deasserts both 2D MC and 2D itself */
	err = reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
	if (err) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 500);

	return 0;

disable_clk:
	clk_disable_unprepare(gr2d->clk);
release_reset:
	reset_control_bulk_release(gr2d->nresets, gr2d->resets);

	return err;
}

static const struct dev_pm_ops tegra_gr2d_pm = {
	SET_RUNTIME_PM_OPS(gr2d_runtime_suspend, gr2d_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver tegra_gr2d_driver = {
	.driver = {
		.name = "tegra-gr2d",
		.of_match_table = gr2d_match,
		.pm = &tegra_gr2d_pm,
	},
	.probe = gr2d_probe,
	.remove_new = gr2d_remove,
};
399