// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

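/*
 * Per-SoC configuration: the firmware image to load, the version number
 * advertised to the Tegra DRM core and whether the engine supports
 * stream ID programming for IOMMU-based isolation.
 */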
struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

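/* Driver-private state for one VIC instance. */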
struct vic {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	bool can_use_context;

	/* Platform configuration */
	const struct vic_config *config;
};

static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

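/*
 * Bring the engine up after power-on: program the TFBIF/THI stream ID
 * registers when stream IDs are supported and an IOMMU is attached,
 * configure clock gating, boot the Falcon microcontroller and, for old
 * firmware that does not set up FCE microcode by itself, point the
 * firmware at the FCE ucode using the SET_FCE_UCODE_* methods.
 */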
static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
#endif
	u32 fce_ucode_size, fce_bin_data_offset;
	void *hdr;
	int err = 0;

#ifdef CONFIG_IOMMU_API
	if (vic->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			/*
			 * STREAMID0 is used for input/output buffers.
			 * Initialize it to SID_VIC in case context isolation
			 * is not enabled, and SID_VIC is used for both firmware
			 * and data buffers.
			 *
			 * If context isolation is enabled, it will be
			 * overridden by the SETSTREAMID opcode as part of
			 * each job.
			 */
			vic_writel(vic, value, VIC_THI_STREAMID0);

			/* STREAMID1 is used for firmware loading. */
			vic_writel(vic, value, VIC_THI_STREAMID1);
		}
	}
#endif

	/* set up clock gating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);

	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		hdr = vic->falcon.firmware.virt +
			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
				      fce_ucode_size);
		falcon_execute_method(
			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
	}

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	return 0;
}

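/*
 * host1x client initialization: attach to the IOMMU domain (if any),
 * request a channel and a syncpoint, enable runtime PM with autosuspend
 * and register the engine with the Tegra DRM core.
 */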
static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

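/*
 * host1x client teardown: undo vic_init() and release the firmware
 * memory. The firmware is freed differently depending on whether it was
 * mapped through the shared IOMMU domain (client->group) or allocated
 * as coherent DMA memory.
 */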
static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	vic->channel = NULL;

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

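/*
 * Read the firmware image and place it in memory that the Falcon can
 * access: either coherent DMA memory or, when the client is part of a
 * shared IOMMU group, memory from the shared domain that is additionally
 * mapped for streaming DMA. Based on the firmware header, this also
 * determines whether context (memory) isolation can be used. Safe to
 * call multiple times; the firmware is only loaded once.
 */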
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	static DEFINE_MUTEX(lock);
	u32 fce_bin_data_offset;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	mutex_lock(&lock);

	if (vic->falcon.firmware.virt) {
		err = 0;
		goto unlock;
	}

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		goto unlock;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
		if (!virt) {
			err = -ENOMEM;
			goto unlock;
		}
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt)) {
			err = PTR_ERR(virt);
			goto unlock;
		}
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	/*
	 * Check if firmware is new enough to not require mapping firmware
	 * to data buffer domains.
	 */
	fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);

	if (!vic->config->supports_sid) {
		vic->can_use_context = false;
	} else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		/*
		 * Firmware will access FCE through STREAMID0, so context
		 * isolation cannot be used.
		 */
		vic->can_use_context = false;
		dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
	} else {
		vic->can_use_context = true;
	}

unlock:
	mutex_unlock(&lock);
	return err;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	mutex_unlock(&lock);
	return err;
}

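/*
 * Power the engine up: enable the clock, deassert reset, (re)load the
 * firmware and boot the Falcon. The short delays between steps are
 * presumably required by the hardware power-up sequencing.
 */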
static int __maybe_unused vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	err = vic_load_firmware(vic);
	if (err < 0)
		goto assert;

	err = vic_boot(vic);
	if (err < 0)
		goto assert;

	return 0;

assert:
	reset_control_assert(vic->rst);
disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

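/*
 * Power the engine down: stop the channel so no new work is submitted,
 * assert reset and disable the clock.
 */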
static int __maybe_unused vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(vic->channel);

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	return 0;
}

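/* Hand out a reference to the engine's channel for a new DRM context. */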
static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

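/*
 * Report whether per-context memory isolation can be used with the
 * currently loaded firmware (see vic_load_firmware()).
 */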
static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	struct vic *vic = to_vic(client);
	int err;

	/* This doesn't access HW so it's safe to call without powering up. */
	err = vic_load_firmware(vic);
	if (err < 0)
		return err;

	*supported = vic->can_use_context;

	return 0;
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = vic_can_use_memory_ctx,
};

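/* Per-SoC firmware images and configuration. */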
#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.version = 0x40,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_234_VIC_FIRMWARE "nvidia/tegra234/vic.bin"

static const struct vic_config vic_t234_config = {
	.firmware = NVIDIA_TEGRA_234_VIC_FIRMWARE,
	.version = 0x23,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ .compatible = "nvidia,tegra234-vic", .data = &vic_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);

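/*
 * Probe: inherit the DMA mask from the parent host1x device, look up the
 * clock, reset and register resources, initialize the Falcon helper and
 * register as a host1x client. The reset is only requested when the
 * device is not part of a power domain, presumably because the power
 * domain handles reset sequencing otherwise.
 */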
static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	vic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	err = clk_set_rate(vic->clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

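/* Unregister from host1x and tear down the Falcon helper. */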
static int vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&vic->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	falcon_exit(&vic->falcon);

	return 0;
}

static const struct dev_pm_ops vic_pm_ops = {
	RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_234_VIC_FIRMWARE);
#endif