// SPDX-License-Identifier: GPL-2.0
/*
 * Cedrus VPU driver
 *
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 * Copyright (C) 2018 Bootlin
 *
 * Based on the vim2m driver, that is:
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#include <linux/platform_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/soc/sunxi/sunxi_sram.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>

#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"

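/*
 * Enable the video engine for the given codec: select the decoding mode
 * and memory access parameters in the VE_MODE register and flag large
 * picture widths when the source format requires it.
 */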
int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec)
{
	u32 reg = 0;

	/*
	 * FIXME: This is only valid on 32-bit DDR; we should test
	 * it on the A13/A33.
	 */
	reg |= VE_MODE_REC_WR_MODE_2MB;
	reg |= VE_MODE_DDR_MODE_BW_128;

	switch (codec) {
	case CEDRUS_CODEC_MPEG2:
		reg |= VE_MODE_DEC_MPEG;
		break;

	case CEDRUS_CODEC_H264:
		reg |= VE_MODE_DEC_H264;
		break;

	case CEDRUS_CODEC_H265:
		reg |= VE_MODE_DEC_H265;
		break;

	default:
		return -EINVAL;
	}

	if (ctx->src_fmt.width == 4096)
		reg |= VE_MODE_PIC_WIDTH_IS_4096;
	if (ctx->src_fmt.width > 2048)
		reg |= VE_MODE_PIC_WIDTH_MORE_2048;

	cedrus_write(ctx->dev, VE_MODE, reg);

	return 0;
}

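/* Disable the video engine by writing the disabled mode to VE_MODE. */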
void cedrus_engine_disable(struct cedrus_dev *dev)
{
	cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
}

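/*
 * Program the video engine output path for the destination pixel format.
 * Untiled NV12 additionally needs the chroma buffer length and the
 * luma/chroma line strides; the 32x32 tiled NV12 format only needs the
 * output format selection.
 */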
void cedrus_dst_format_set(struct cedrus_dev *dev,
			   struct v4l2_pix_format *fmt)
{
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	u32 chroma_size;
	u32 reg;

	switch (fmt->pixelformat) {
	case V4L2_PIX_FMT_NV12:
		chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;

		reg = VE_PRIMARY_OUT_FMT_NV12;
		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

		reg = chroma_size / 2;
		cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);

		reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
		      VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
		cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);

		break;
	case V4L2_PIX_FMT_SUNXI_TILED_NV12:
	default:
		reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

		reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
		cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);

		break;
	}
}

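/*
 * Decoding interrupt handler: look up the context of the running job,
 * query the per-codec IRQ status, disable and clear the interrupt, then
 * complete the current mem2mem job with the matching buffer state.
 */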
static irqreturn_t cedrus_irq(int irq, void *data)
{
	struct cedrus_dev *dev = data;
	struct cedrus_ctx *ctx;
	enum vb2_buffer_state state;
	enum cedrus_irq_status status;

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		v4l2_err(&dev->v4l2_dev,
			 "Instance released before the end of transaction\n");
		return IRQ_NONE;
	}

	status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
	if (status == CEDRUS_IRQ_NONE)
		return IRQ_NONE;

	dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
	dev->dec_ops[ctx->current_codec]->irq_clear(ctx);

	if (status == CEDRUS_IRQ_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 state);

	return IRQ_HANDLED;
}

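/* Power down the video engine: assert its reset line and gate its clocks. */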
int cedrus_hw_suspend(struct device *device)
{
	struct cedrus_dev *dev = dev_get_drvdata(device);

	reset_control_assert(dev->rstc);

	clk_disable_unprepare(dev->ram_clk);
	clk_disable_unprepare(dev->mod_clk);
	clk_disable_unprepare(dev->ahb_clk);

	return 0;
}

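/*
 * Power up the video engine: enable the AHB, MOD and RAM clocks and take
 * the device out of reset, unwinding the clocks on failure.
 */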
int cedrus_hw_resume(struct device *device)
{
	struct cedrus_dev *dev = dev_get_drvdata(device);
	int ret;

	ret = clk_prepare_enable(dev->ahb_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable AHB clock\n");

		return ret;
	}

	ret = clk_prepare_enable(dev->mod_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable MOD clock\n");

		goto err_ahb_clk;
	}

	ret = clk_prepare_enable(dev->ram_clk);
	if (ret) {
		dev_err(dev->dev, "Failed to enable RAM clock\n");

		goto err_mod_clk;
	}

	ret = reset_control_reset(dev->rstc);
	if (ret) {
		dev_err(dev->dev, "Failed to apply reset\n");

		goto err_ram_clk;
	}

	return 0;

err_ram_clk:
	clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
	clk_disable_unprepare(dev->mod_clk);
err_ahb_clk:
	clk_disable_unprepare(dev->ahb_clk);

	return ret;
}

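/*
 * One-time hardware setup: match the SoC variant, request the decoding
 * IRQ, set up reserved memory and the SRAM region, acquire the clocks,
 * reset control and registers, set the module clock rate and enable
 * runtime PM (resuming immediately when runtime PM is not available).
 */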
int cedrus_hw_probe(struct cedrus_dev *dev)
{
	const struct cedrus_variant *variant;
	int irq_dec;
	int ret;

	variant = of_device_get_match_data(dev->dev);
	if (!variant)
		return -EINVAL;

	dev->capabilities = variant->capabilities;

	irq_dec = platform_get_irq(dev->pdev, 0);
	if (irq_dec <= 0)
		return irq_dec;
	ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
			       0, dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "Failed to request IRQ\n");

		return ret;
	}

	/*
	 * The VPU can only handle bus addresses, so we have to subtract
	 * the RAM offset from the physical addresses.
	 *
	 * This information will eventually be obtained from device-tree.
	 */

#ifdef PHYS_PFN_OFFSET
	if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
		dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
#endif

	ret = of_reserved_mem_device_init(dev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev->dev, "Failed to reserve memory\n");

		return ret;
	}

	ret = sunxi_sram_claim(dev->dev);
	if (ret) {
		dev_err(dev->dev, "Failed to claim SRAM\n");

		goto err_mem;
	}

	dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
	if (IS_ERR(dev->ahb_clk)) {
		dev_err(dev->dev, "Failed to get AHB clock\n");

		ret = PTR_ERR(dev->ahb_clk);
		goto err_sram;
	}

	dev->mod_clk = devm_clk_get(dev->dev, "mod");
	if (IS_ERR(dev->mod_clk)) {
		dev_err(dev->dev, "Failed to get MOD clock\n");

		ret = PTR_ERR(dev->mod_clk);
		goto err_sram;
	}

	dev->ram_clk = devm_clk_get(dev->dev, "ram");
	if (IS_ERR(dev->ram_clk)) {
		dev_err(dev->dev, "Failed to get RAM clock\n");

		ret = PTR_ERR(dev->ram_clk);
		goto err_sram;
	}

	dev->rstc = devm_reset_control_get(dev->dev, NULL);
	if (IS_ERR(dev->rstc)) {
		dev_err(dev->dev, "Failed to get reset control\n");

		ret = PTR_ERR(dev->rstc);
		goto err_sram;
	}

	dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
	if (IS_ERR(dev->base)) {
		dev_err(dev->dev, "Failed to map registers\n");

		ret = PTR_ERR(dev->base);
		goto err_sram;
	}

	ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
	if (ret) {
		dev_err(dev->dev, "Failed to set clock rate\n");

		goto err_sram;
	}

	pm_runtime_enable(dev->dev);
	if (!pm_runtime_enabled(dev->dev)) {
		ret = cedrus_hw_resume(dev->dev);
		if (ret)
			goto err_pm;
	}

	return 0;

err_pm:
	pm_runtime_disable(dev->dev);
err_sram:
	sunxi_sram_release(dev->dev);
err_mem:
	of_reserved_mem_device_release(dev->dev);

	return ret;
}

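/*
 * Teardown counterpart of cedrus_hw_probe(): disable runtime PM
 * (suspending the hardware if it is still active) and release the SRAM
 * and reserved memory regions.
 */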
void cedrus_hw_remove(struct cedrus_dev *dev)
{
	pm_runtime_disable(dev->dev);
	if (!pm_runtime_status_suspended(dev->dev))
		cedrus_hw_suspend(dev->dev);

	sunxi_sram_release(dev->dev);

	of_reserved_mem_device_release(dev->dev);
}