// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com>
 *
 */

#include <linux/clk.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>

#include "vde.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

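/*
 * MMIO accessors: every register read/write is funneled through these
 * helpers so that it shows up in the vde_readl/vde_writel tracepoints.
 * Relaxed accessors are used; presumably any ordering against DMA is
 * handled elsewhere in the driver.
 */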
void tegra_vde_writel(struct tegra_vde *vde, u32 value,
		      void __iomem *base, u32 offset)
{
	trace_vde_writel(vde, base, offset, value);

	writel_relaxed(value, base + offset);
}

u32 tegra_vde_readl(struct tegra_vde *vde, void __iomem *base, u32 offset)
{
	u32 value = readl_relaxed(base + offset);

	trace_vde_readl(vde, base, offset, value);

	return value;
}

void tegra_vde_set_bits(struct tegra_vde *vde, u32 mask,
			void __iomem *base, u32 offset)
{
	u32 value = tegra_vde_readl(vde, base, offset);

	tegra_vde_writel(vde, value | mask, base, offset);
}

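/*
 * Allocate a DMA buffer object for the decoder. With an IOMMU domain the
 * buffer is mapped through it and bo->dma_addr is the resulting IOVA;
 * without one, a physically contiguous allocation is forced and the
 * address is taken straight from the SG table.
 */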
int tegra_vde_alloc_bo(struct tegra_vde *vde,
		       struct tegra_vde_bo **ret_bo,
		       enum dma_data_direction dma_dir,
		       size_t size)
{
	struct device *dev = vde->dev;
	struct tegra_vde_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return -ENOMEM;

	bo->vde = vde;
	bo->size = size;
	bo->dma_dir = dma_dir;
	bo->dma_attrs = DMA_ATTR_WRITE_COMBINE |
			DMA_ATTR_NO_KERNEL_MAPPING;

	if (!vde->domain)
		bo->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	bo->dma_cookie = dma_alloc_attrs(dev, bo->size, &bo->dma_handle,
					 GFP_KERNEL, bo->dma_attrs);
	if (!bo->dma_cookie) {
		dev_err(dev, "Failed to allocate DMA buffer of size: %zu\n",
			bo->size);
		err = -ENOMEM;
		goto free_bo;
	}

	err = dma_get_sgtable_attrs(dev, &bo->sgt, bo->dma_cookie,
				    bo->dma_handle, bo->size, bo->dma_attrs);
	if (err) {
		dev_err(dev, "Failed to get DMA buffer SG table: %d\n", err);
		goto free_attrs;
	}

	err = dma_map_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
	if (err) {
		dev_err(dev, "Failed to map DMA buffer SG table: %d\n", err);
		goto free_table;
	}

	if (vde->domain) {
		err = tegra_vde_iommu_map(vde, &bo->sgt, &bo->iova, bo->size);
		if (err) {
			dev_err(dev, "Failed to map DMA buffer IOVA: %d\n", err);
			goto unmap_sgtable;
		}

		bo->dma_addr = iova_dma_addr(&vde->iova, bo->iova);
	} else {
		bo->dma_addr = sg_dma_address(bo->sgt.sgl);
	}

	*ret_bo = bo;

	return 0;

unmap_sgtable:
	dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
free_table:
	sg_free_table(&bo->sgt);
free_attrs:
	dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
		       bo->dma_attrs);
free_bo:
	kfree(bo);

	return err;
}

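/* Tear a BO down in the reverse order of tegra_vde_alloc_bo(). */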
void tegra_vde_free_bo(struct tegra_vde_bo *bo)
{
	struct tegra_vde *vde = bo->vde;
	struct device *dev = vde->dev;

	if (vde->domain)
		tegra_vde_iommu_unmap(vde, bo->iova);

	dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);

	sg_free_table(&bo->sgt);

	dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
		       bo->dma_attrs);
	kfree(bo);
}

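/*
 * "sync-token" interrupt handler. The read-modify-write of the FRAMEID
 * register at offset 0x208 with an empty mask (effectively a write-back)
 * appears to acknowledge the interrupt before the decode completion is
 * signalled.
 */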
static irqreturn_t tegra_vde_isr(int irq, void *data)
{
	struct tegra_vde *vde = data;

	if (completion_done(&vde->decode_completion))
		return IRQ_NONE;

	tegra_vde_set_bits(vde, 0, vde->frameid, 0x208);
	complete(&vde->decode_completion);

	return IRQ_HANDLED;
}

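/*
 * Runtime PM suspend: power-gate the VDEC partition via the PMC unless a
 * generic PM domain manages the device, then gate the clock and release
 * the acquired resets.
 */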
static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	if (!dev->pm_domain) {
		err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
		if (err) {
			dev_err(dev, "Failed to power down HW: %d\n", err);
			return err;
		}
	}

	clk_disable_unprepare(vde->clk);
	reset_control_release(vde->rst);
	reset_control_release(vde->rst_mc);

	return 0;
}

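/*
 * Runtime PM resume: reacquire the resets, then bring the hardware up
 * either through the PMC power-up sequence or, when a GENPD powers the
 * partition, by just enabling the clock.
 */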
static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	err = reset_control_acquire(vde->rst_mc);
	if (err) {
		dev_err(dev, "Failed to acquire mc reset: %d\n", err);
		return err;
	}

	err = reset_control_acquire(vde->rst);
	if (err) {
		dev_err(dev, "Failed to acquire reset: %d\n", err);
		goto release_mc_reset;
	}

	if (!dev->pm_domain) {
		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
							vde->clk, vde->rst);
		if (err) {
			dev_err(dev, "Failed to power up HW: %d\n", err);
			goto release_reset;
		}
	} else {
		/*
		 * tegra_powergate_sequence_power_up() leaves the clock
		 * enabled, whereas GENPD does not.
		 */
		err = clk_prepare_enable(vde->clk);
		if (err) {
			dev_err(dev, "Failed to enable clock: %d\n", err);
			goto release_reset;
		}
	}

	return 0;

release_reset:
	reset_control_release(vde->rst);
release_mc_reset:
	reset_control_release(vde->rst_mc);

	return err;
}

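/*
 * Probe: map the per-unit register apertures, grab the clock, resets and
 * the "sync-token" interrupt, carve the decoder lists area out of the
 * IRAM pool, set up the (optional) IOMMU and runtime PM, allocate the
 * secure BO and finally register the V4L2 device.
 */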
static int tegra_vde_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_vde *vde;
	int irq, err;

	vde = devm_kzalloc(dev, sizeof(*vde), GFP_KERNEL);
	if (!vde)
		return -ENOMEM;

	platform_set_drvdata(pdev, vde);

	vde->soc = of_device_get_match_data(&pdev->dev);
	vde->dev = dev;

	vde->sxe = devm_platform_ioremap_resource_byname(pdev, "sxe");
	if (IS_ERR(vde->sxe))
		return PTR_ERR(vde->sxe);

	vde->bsev = devm_platform_ioremap_resource_byname(pdev, "bsev");
	if (IS_ERR(vde->bsev))
		return PTR_ERR(vde->bsev);

	vde->mbe = devm_platform_ioremap_resource_byname(pdev, "mbe");
	if (IS_ERR(vde->mbe))
		return PTR_ERR(vde->mbe);

	vde->ppe = devm_platform_ioremap_resource_byname(pdev, "ppe");
	if (IS_ERR(vde->ppe))
		return PTR_ERR(vde->ppe);

	vde->mce = devm_platform_ioremap_resource_byname(pdev, "mce");
	if (IS_ERR(vde->mce))
		return PTR_ERR(vde->mce);

	vde->tfe = devm_platform_ioremap_resource_byname(pdev, "tfe");
	if (IS_ERR(vde->tfe))
		return PTR_ERR(vde->tfe);

	vde->ppb = devm_platform_ioremap_resource_byname(pdev, "ppb");
	if (IS_ERR(vde->ppb))
		return PTR_ERR(vde->ppb);

	vde->vdma = devm_platform_ioremap_resource_byname(pdev, "vdma");
	if (IS_ERR(vde->vdma))
		return PTR_ERR(vde->vdma);

	vde->frameid = devm_platform_ioremap_resource_byname(pdev, "frameid");
	if (IS_ERR(vde->frameid))
		return PTR_ERR(vde->frameid);

	vde->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vde->clk)) {
		err = PTR_ERR(vde->clk);
		dev_err(dev, "Could not get VDE clk: %d\n", err);
		return err;
	}

	vde->rst = devm_reset_control_get_exclusive_released(dev, NULL);
	if (IS_ERR(vde->rst)) {
		err = PTR_ERR(vde->rst);
		dev_err(dev, "Could not get VDE reset: %d\n", err);
		return err;
	}

	vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc");
	if (IS_ERR(vde->rst_mc)) {
		err = PTR_ERR(vde->rst_mc);
		dev_err(dev, "Could not get MC reset: %d\n", err);
		return err;
	}

	irq = platform_get_irq_byname(pdev, "sync-token");
	if (irq < 0)
		return irq;

	err = devm_request_irq(dev, irq, tegra_vde_isr, 0,
			       dev_name(dev), vde);
	if (err) {
		dev_err(dev, "Could not request IRQ: %d\n", err);
		return err;
	}

	err = devm_tegra_core_dev_init_opp_table_common(dev);
	if (err) {
		dev_err(dev, "Could not initialize OPP table: %d\n", err);
		return err;
	}

	vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0);
	if (!vde->iram_pool) {
		dev_err(dev, "Could not get IRAM pool\n");
		return -EPROBE_DEFER;
	}

	vde->iram = gen_pool_dma_alloc(vde->iram_pool,
				       gen_pool_size(vde->iram_pool),
				       &vde->iram_lists_addr);
	if (!vde->iram) {
		dev_err(dev, "Could not reserve IRAM\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vde->map_list);
	mutex_init(&vde->map_lock);
	mutex_init(&vde->lock);
	init_completion(&vde->decode_completion);

	err = tegra_vde_iommu_init(vde);
	if (err) {
		dev_err(dev, "Failed to initialize IOMMU: %d\n", err);
		goto err_gen_free;
	}

	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 300);

	/*
	 * The VDE partition may be left ON by the bootloader, hence
	 * power-cycle it in order to put the hardware into a predictable
	 * lower power state.
	 */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		goto err_pm_runtime;

	pm_runtime_put(dev);

	err = tegra_vde_alloc_bo(vde, &vde->secure_bo, DMA_FROM_DEVICE, 4096);
	if (err) {
		dev_err(dev, "Failed to allocate secure BO: %d\n", err);
		goto err_pm_runtime;
	}

	err = tegra_vde_v4l2_init(vde);
	if (err) {
		dev_err(dev, "Failed to initialize V4L2: %d\n", err);
		goto err_free_secure_bo;
	}

	return 0;

err_free_secure_bo:
	tegra_vde_free_bo(vde->secure_bo);
err_pm_runtime:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);

	tegra_vde_iommu_deinit(vde);

err_gen_free:
	gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
		      gen_pool_size(vde->iram_pool));

	return err;
}

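/*
 * Driver removal: tear everything down in roughly the reverse order of
 * probe; see the comments inside for the runtime PM balancing.
 */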
static void tegra_vde_remove(struct platform_device *pdev)
{
	struct tegra_vde *vde = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	tegra_vde_v4l2_deinit(vde);
	tegra_vde_free_bo(vde->secure_bo);

	/*
	 * pm_runtime_get_sync() increments the RPM usage_count even on
	 * failure, so there is no need to check its return value here.
	 */
	pm_runtime_get_sync(dev);

	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);

	/*
	 * Balance the RPM state: the VDE power domain is left ON and the
	 * hardware is clock-gated. It's safe to reboot the machine now.
	 */
	pm_runtime_put_noidle(dev);
	clk_disable_unprepare(vde->clk);

	tegra_vde_dmabuf_cache_unmap_all(vde);
	tegra_vde_iommu_deinit(vde);

	gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
		      gen_pool_size(vde->iram_pool));
}

static void tegra_vde_shutdown(struct platform_device *pdev)
{
	/*
	 * On some devices the bootloader isn't prepared to find VDE
	 * power-gated across a warm reboot; the machine will hang in
	 * that case.
	 */
	pm_runtime_get_sync(&pdev->dev);
}

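/*
 * System sleep: vde->lock is taken here and released in
 * tegra_vde_pm_resume(), blocking decoding for as long as the system is
 * suspended. Note that the lock stays held if the forced suspend fails,
 * in which case the system suspend as a whole is aborted.
 */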
static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	mutex_lock(&vde->lock);

	err = pm_runtime_force_suspend(dev);
	if (err < 0)
		return err;

	return 0;
}

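/* Counterpart of tegra_vde_pm_suspend(): resume HW and unblock decoding. */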
static __maybe_unused int tegra_vde_pm_resume(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	err = pm_runtime_force_resume(dev);
	if (err < 0)
		return err;

	mutex_unlock(&vde->lock);

	return 0;
}

static const struct dev_pm_ops tegra_vde_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_vde_runtime_suspend,
			   tegra_vde_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_vde_pm_suspend,
				tegra_vde_pm_resume)
};

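/*
 * Per-SoC format descriptors: the coded (bitstream) formats each SoC
 * accepts, their frame-size constraints and the decoded (capture) pixel
 * formats the hardware can produce.
 */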
static const u32 tegra124_decoded_fmts[] = {
	/* TBD: T124 supports only a non-standard Tegra tiled format */
};

static const struct tegra_coded_fmt_desc tegra124_coded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_H264_SLICE,
		.frmsize = {
			.min_width = 16,
			.max_width = 1920,
			.step_width = 16,
			.min_height = 16,
			.max_height = 2032,
			.step_height = 16,
		},
		.num_decoded_fmts = ARRAY_SIZE(tegra124_decoded_fmts),
		.decoded_fmts = tegra124_decoded_fmts,
		.decode_run = tegra_vde_h264_decode_run,
		.decode_wait = tegra_vde_h264_decode_wait,
	},
};

static const u32 tegra20_decoded_fmts[] = {
	V4L2_PIX_FMT_YUV420M,
	V4L2_PIX_FMT_YVU420M,
};

static const struct tegra_coded_fmt_desc tegra20_coded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_H264_SLICE,
		.frmsize = {
			.min_width = 16,
			.max_width = 1920,
			.step_width = 16,
			.min_height = 16,
			.max_height = 2032,
			.step_height = 16,
		},
		.num_decoded_fmts = ARRAY_SIZE(tegra20_decoded_fmts),
		.decoded_fmts = tegra20_decoded_fmts,
		.decode_run = tegra_vde_h264_decode_run,
		.decode_wait = tegra_vde_h264_decode_wait,
	},
};

static const struct tegra_vde_soc tegra124_vde_soc = {
	.supports_ref_pic_marking = true,
	.coded_fmts = tegra124_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra124_coded_fmts),
};

static const struct tegra_vde_soc tegra114_vde_soc = {
	.supports_ref_pic_marking = true,
	.coded_fmts = tegra20_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
};

static const struct tegra_vde_soc tegra30_vde_soc = {
	.supports_ref_pic_marking = false,
	.coded_fmts = tegra20_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
};

static const struct tegra_vde_soc tegra20_vde_soc = {
	.supports_ref_pic_marking = false,
	.coded_fmts = tegra20_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
};

static const struct of_device_id tegra_vde_of_match[] = {
	{ .compatible = "nvidia,tegra124-vde", .data = &tegra124_vde_soc },
	{ .compatible = "nvidia,tegra114-vde", .data = &tegra114_vde_soc },
	{ .compatible = "nvidia,tegra30-vde", .data = &tegra30_vde_soc },
	{ .compatible = "nvidia,tegra20-vde", .data = &tegra20_vde_soc },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vde_of_match);

static struct platform_driver tegra_vde_driver = {
	.probe		= tegra_vde_probe,
	.remove_new	= tegra_vde_remove,
	.shutdown	= tegra_vde_shutdown,
	.driver		= {
		.name		= "tegra-vde",
		.of_match_table = tegra_vde_of_match,
		.pm		= &tegra_vde_pm_ops,
	},
};
module_platform_driver(tegra_vde_driver);

MODULE_DESCRIPTION("NVIDIA Tegra Video Decoder driver");
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_LICENSE("GPL");