xref: /openbmc/linux/drivers/gpu/drm/sti/sti_hqvdp.c (revision f2a89d3b)
1 /*
2  * Copyright (C) STMicroelectronics SA 2014
3  * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4  * License terms:  GNU General Public License (GPL), version 2
5  */
6 
7 #include <linux/component.h>
8 #include <linux/firmware.h>
9 #include <linux/reset.h>
10 #include <linux/seq_file.h>
11 
12 #include <drm/drm_atomic.h>
13 #include <drm/drm_fb_cma_helper.h>
14 #include <drm/drm_gem_cma_helper.h>
15 
16 #include "sti_compositor.h"
17 #include "sti_hqvdp_lut.h"
18 #include "sti_plane.h"
19 #include "sti_vtg.h"
20 
21 /* Firmware name */
22 #define HQVDP_FMW_NAME          "hqvdp-stih407.bin"
23 
24 /* Regs address */
25 #define HQVDP_DMEM              0x00000000               /* 0x00000000 */
26 #define HQVDP_PMEM              0x00040000               /* 0x00040000 */
27 #define HQVDP_RD_PLUG           0x000E0000               /* 0x000E0000 */
28 #define HQVDP_RD_PLUG_CONTROL   (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
29 #define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
30 #define HQVDP_RD_PLUG_MIN_OPC   (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
31 #define HQVDP_RD_PLUG_MAX_OPC   (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
32 #define HQVDP_RD_PLUG_MAX_CHK   (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
33 #define HQVDP_RD_PLUG_MAX_MSG   (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
34 #define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
35 #define HQVDP_WR_PLUG           0x000E2000               /* 0x000E2000 */
36 #define HQVDP_WR_PLUG_CONTROL   (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
37 #define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
38 #define HQVDP_WR_PLUG_MIN_OPC   (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
39 #define HQVDP_WR_PLUG_MAX_OPC   (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
40 #define HQVDP_WR_PLUG_MAX_CHK   (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
41 #define HQVDP_WR_PLUG_MAX_MSG   (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
42 #define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
43 #define HQVDP_MBX               0x000E4000               /* 0x000E4000 */
44 #define HQVDP_MBX_IRQ_TO_XP70   (HQVDP_MBX + 0x0000)     /* 0x000E4000 */
45 #define HQVDP_MBX_INFO_HOST     (HQVDP_MBX + 0x0004)     /* 0x000E4004 */
46 #define HQVDP_MBX_IRQ_TO_HOST   (HQVDP_MBX + 0x0008)     /* 0x000E4008 */
47 #define HQVDP_MBX_INFO_XP70     (HQVDP_MBX + 0x000C)     /* 0x000E400C */
48 #define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010)     /* 0x000E4010 */
49 #define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014)     /* 0x000E4014 */
50 #define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018)     /* 0x000E4018 */
51 #define HQVDP_MBX_GP_STATUS     (HQVDP_MBX + 0x001C)     /* 0x000E401C */
52 #define HQVDP_MBX_NEXT_CMD      (HQVDP_MBX + 0x0020)     /* 0x000E4020 */
53 #define HQVDP_MBX_CURRENT_CMD   (HQVDP_MBX + 0x0024)     /* 0x000E4024 */
54 #define HQVDP_MBX_SOFT_VSYNC    (HQVDP_MBX + 0x0028)     /* 0x000E4028 */
55 
56 /* Plugs config */
57 #define PLUG_CONTROL_ENABLE     0x00000001
58 #define PLUG_PAGE_SIZE_256      0x00000002
59 #define PLUG_MIN_OPC_8          0x00000003
60 #define PLUG_MAX_OPC_64         0x00000006
61 #define PLUG_MAX_CHK_2X         0x00000001
62 #define PLUG_MAX_MSG_1X         0x00000000
63 #define PLUG_MIN_SPACE_1        0x00000000
64 
65 /* SW reset CTRL */
66 #define SW_RESET_CTRL_FULL      BIT(0)
67 #define SW_RESET_CTRL_CORE      BIT(1)
68 
69 /* Startup ctrl 1 */
70 #define STARTUP_CTRL1_RST_DONE  BIT(0)
71 #define STARTUP_CTRL1_AUTH_IDLE BIT(2)
72 
73 /* Startup ctrl 2 */
74 #define STARTUP_CTRL2_FETCH_EN  BIT(1)
75 
76 /* Info xP70 */
77 #define INFO_XP70_FW_READY      BIT(15)
78 #define INFO_XP70_FW_PROCESSING BIT(14)
79 #define INFO_XP70_FW_INITQUEUES BIT(13)
80 
81 /* SOFT_VSYNC */
82 #define SOFT_VSYNC_HW           0x00000000
83 #define SOFT_VSYNC_SW_CMD       0x00000001
84 #define SOFT_VSYNC_SW_CTRL_IRQ  0x00000003
85 
86 /* Reset & boot poll config */
87 #define POLL_MAX_ATTEMPT        50
88 #define POLL_DELAY_MS           20
89 
90 #define SCALE_FACTOR            8192
91 #define SCALE_MAX_FOR_LEG_LUT_F 4096
92 #define SCALE_MAX_FOR_LEG_LUT_E 4915
93 #define SCALE_MAX_FOR_LEG_LUT_D 6654
94 #define SCALE_MAX_FOR_LEG_LUT_C 8192
95 
/* Scaler direction selector for sti_hqvdp_update_hvsrc() */
enum sti_hvsrc_orient {
	HVSRC_HORI,
	HVSRC_VERT
};
100 
101 /* Command structures */
/*
 * TOP stage parameters of a firmware command.
 * Field order mirrors the xP70 firmware ABI — do not reorder or resize.
 * Addresses/pitches are given to the firmware as 32-bit physical values.
 */
struct sti_hqvdp_top {
	u32 config;
	u32 mem_format;
	u32 current_luma;
	u32 current_enh_luma;
	u32 current_right_luma;
	u32 current_enh_right_luma;
	u32 current_chroma;
	u32 current_enh_chroma;
	u32 current_right_chroma;
	u32 current_enh_right_chroma;
	u32 output_luma;
	u32 output_chroma;
	u32 luma_src_pitch;
	u32 luma_enh_src_pitch;
	u32 luma_right_src_pitch;
	u32 luma_enh_right_src_pitch;
	u32 chroma_src_pitch;
	u32 chroma_enh_src_pitch;
	u32 chroma_right_src_pitch;
	u32 chroma_enh_right_src_pitch;
	u32 luma_processed_pitch;
	u32 chroma_processed_pitch;
	u32 input_frame_size;
	u32 input_viewport_ori;
	u32 input_viewport_ori_right;
	u32 input_viewport_size;
	u32 left_view_border_width;
	u32 right_view_border_width;
	u32 left_view_3d_offset_width;
	u32 right_view_3d_offset_width;
	u32 side_stripe_color;
	u32 crc_reset_ctrl;
};
136 
137 /* Configs for interlaced : no IT, no pass thru, 3 fields */
138 #define TOP_CONFIG_INTER_BTM            0x00000000
139 #define TOP_CONFIG_INTER_TOP            0x00000002
140 
141 /* Config for progressive : no IT, no pass thru, 3 fields */
142 #define TOP_CONFIG_PROGRESSIVE          0x00000001
143 
144 /* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
145 #define TOP_MEM_FORMAT_DFLT             0x00018060
146 
147 /* Min/Max size */
148 #define MAX_WIDTH                       0x1FFF
149 #define MAX_HEIGHT                      0x0FFF
150 #define MIN_WIDTH                       0x0030
151 #define MIN_HEIGHT                      0x0010
152 
/* VC1 range-expansion stage parameters (firmware ABI layout) */
struct sti_hqvdp_vc1re {
	u32 ctrl_prv_csdi;
	u32 ctrl_cur_csdi;
	u32 ctrl_nxt_csdi;
	u32 ctrl_cur_fmd;
	u32 ctrl_nxt_fmd;
};

/* Film-mode-detection stage parameters (firmware ABI layout) */
struct sti_hqvdp_fmd {
	u32 config;
	u32 viewport_ori;
	u32 viewport_size;
	u32 next_next_luma;
	u32 next_next_right_luma;
	u32 next_next_next_luma;
	u32 next_next_next_right_luma;
	u32 threshold_scd;
	u32 threshold_rfd;
	u32 threshold_move;
	u32 threshold_cfd;
};

/*
 * CSDI (deinterlacer) stage parameters (firmware ABI layout).
 * prev/cur/next buffers cover the 3-field window used for deinterlacing.
 */
struct sti_hqvdp_csdi {
	u32 config;
	u32 config2;
	u32 dcdi_config;
	u32 prev_luma;
	u32 prev_enh_luma;
	u32 prev_right_luma;
	u32 prev_enh_right_luma;
	u32 next_luma;
	u32 next_enh_luma;
	u32 next_right_luma;
	u32 next_enh_right_luma;
	u32 prev_chroma;
	u32 prev_enh_chroma;
	u32 prev_right_chroma;
	u32 prev_enh_right_chroma;
	u32 next_chroma;
	u32 next_enh_chroma;
	u32 next_right_chroma;
	u32 next_enh_right_chroma;
	u32 prev_motion;
	u32 prev_right_motion;
	u32 cur_motion;
	u32 cur_right_motion;
	u32 next_motion;
	u32 next_right_motion;
};
202 
203 /* Config for progressive: by pass */
204 #define CSDI_CONFIG_PROG                0x00000000
205 /* Config for directional deinterlacing without motion */
206 #define CSDI_CONFIG_INTER_DIR           0x00000016
207 /* Additional configs for fader, blender, motion,... deinterlace algorithms */
208 #define CSDI_CONFIG2_DFLT               0x000001B3
209 #define CSDI_DCDI_CONFIG_DFLT           0x00203803
210 
/*
 * HVSRC (horizontal/vertical scaler) stage parameters (firmware ABI layout).
 * The four coef tables are filled by sti_hqvdp_update_hvsrc() from the
 * legacy LUTs according to the zoom factor.
 */
struct sti_hqvdp_hvsrc {
	u32 hor_panoramic_ctrl;
	u32 output_picture_size;
	u32 init_horizontal;
	u32 init_vertical;
	u32 param_ctrl;
	u32 yh_coef[NB_COEF];
	u32 ch_coef[NB_COEF];
	u32 yv_coef[NB_COEF];
	u32 cv_coef[NB_COEF];
	u32 hori_shift;
	u32 vert_shift;
};

/* Default ParamCtrl: all controls enabled */
#define HVSRC_PARAM_CTRL_DFLT           0xFFFFFFFF

/* IQI (image quality improvement) stage parameters (firmware ABI layout) */
struct sti_hqvdp_iqi {
	u32 config;
	u32 demo_wind_size;
	u32 pk_config;
	u32 coeff0_coeff1;
	u32 coeff2_coeff3;
	u32 coeff4;
	u32 pk_lut;
	u32 pk_gain;
	u32 pk_coring_level;
	u32 cti_config;
	u32 le_config;
	u32 le_lut[64];
	u32 con_bri;
	u32 sat_gain;
	u32 pxf_conf;
	u32 default_color;
};
246 
247 /* Default Config : IQI bypassed */
248 #define IQI_CONFIG_DFLT                 0x00000001
249 /* Default Contrast & Brightness gain = 256 */
250 #define IQI_CON_BRI_DFLT                0x00000100
251 /* Default Saturation gain = 256 */
252 #define IQI_SAT_GAIN_DFLT               0x00000100
253 /* Default PxfConf : P2I bypassed */
254 #define IQI_PXF_CONF_DFLT               0x00000001
255 
/*
 * Status blocks written back by the firmware after processing a command.
 * Layouts mirror the xP70 firmware ABI; CRC fields are presumably for
 * firmware-side validation/debug (not interpreted by this driver).
 */
struct sti_hqvdp_top_status {
	u32 processing_time;
	u32 input_y_crc;
	u32 input_uv_crc;
};

struct sti_hqvdp_fmd_status {
	u32 fmd_repeat_move_status;
	u32 fmd_scene_count_status;
	u32 cfd_sum;
	u32 field_sum;
	u32 next_y_fmd_crc;
	u32 next_next_y_fmd_crc;
	u32 next_next_next_y_fmd_crc;
};

struct sti_hqvdp_csdi_status {
	u32 prev_y_csdi_crc;
	u32 cur_y_csdi_crc;
	u32 next_y_csdi_crc;
	u32 prev_uv_csdi_crc;
	u32 cur_uv_csdi_crc;
	u32 next_uv_csdi_crc;
	u32 y_csdi_crc;
	u32 uv_csdi_crc;
	u32 uv_cup_crc;
	u32 mot_csdi_crc;
	u32 mot_cur_csdi_crc;
	u32 mot_prev_csdi_crc;
};

struct sti_hqvdp_hvsrc_status {
	u32 y_hvsrc_crc;
	u32 u_hvsrc_crc;
	u32 v_hvsrc_crc;
};

struct sti_hqvdp_iqi_status {
	u32 pxf_it_status;
	u32 y_iqi_crc;
	u32 u_iqi_crc;
	u32 v_iqi_crc;
};
299 
/* Main commands. We use 2 commands: one being processed by the firmware and
 * one ready to be fetched upon the next Vsync. */
302 #define NB_VDP_CMD	2
303 
/*
 * One complete firmware command: all processing-stage parameter blocks
 * followed by the firmware-written status blocks. The whole structure is
 * shared with the xP70 via DMA; field order is part of the firmware ABI.
 */
struct sti_hqvdp_cmd {
	struct sti_hqvdp_top top;
	struct sti_hqvdp_vc1re vc1re;
	struct sti_hqvdp_fmd fmd;
	struct sti_hqvdp_csdi csdi;
	struct sti_hqvdp_hvsrc hvsrc;
	struct sti_hqvdp_iqi iqi;
	struct sti_hqvdp_top_status top_status;
	struct sti_hqvdp_fmd_status fmd_status;
	struct sti_hqvdp_csdi_status csdi_status;
	struct sti_hqvdp_hvsrc_status hvsrc_status;
	struct sti_hqvdp_iqi_status iqi_status;
};
317 
/*
 * STI HQVDP structure
 *
 * @dev:               driver device
 * @drm_dev:           the drm device
 * @regs:              registers
 * @plane:             plane structure for hqvdp itself
 * @clk:               IP clock
 * @clk_pix_main:      pix main clock
 * @reset:             reset control
 * @vtg_nb:            notifier to handle VTG Vsync
 * @btm_field_pending: is there any bottom field (interlaced frame) to display
 * @hqvdp_cmd:         buffer of commands
 * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd (truncated to 32 bits,
 *                     as expected by the firmware mailbox registers)
 * @vtg:               vtg for main data path
 * @xp70_initialized:  true if xp70 is already initialized
 */
struct sti_hqvdp {
	struct device *dev;
	struct drm_device *drm_dev;
	void __iomem *regs;
	struct sti_plane plane;
	struct clk *clk;
	struct clk *clk_pix_main;
	struct reset_control *reset;
	struct notifier_block vtg_nb;
	bool btm_field_pending;
	void *hqvdp_cmd;
	u32 hqvdp_cmd_paddr;
	struct sti_vtg *vtg;
	bool xp70_initialized;
};
350 
351 #define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
352 
/* Pixel formats the HQVDP plane accepts (NV12 only) */
static const uint32_t hqvdp_supported_formats[] = {
	DRM_FORMAT_NV12,
};
356 
357 /**
358  * sti_hqvdp_get_free_cmd
359  * @hqvdp: hqvdp structure
360  *
361  * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
362  *
363  * RETURNS:
364  * the offset of the command to be used.
365  * -1 in error cases
366  */
367 static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
368 {
369 	u32 curr_cmd, next_cmd;
370 	u32 cmd = hqvdp->hqvdp_cmd_paddr;
371 	int i;
372 
373 	curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
374 	next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
375 
376 	for (i = 0; i < NB_VDP_CMD; i++) {
377 		if ((cmd != curr_cmd) && (cmd != next_cmd))
378 			return i * sizeof(struct sti_hqvdp_cmd);
379 		cmd += sizeof(struct sti_hqvdp_cmd);
380 	}
381 
382 	return -1;
383 }
384 
385 /**
386  * sti_hqvdp_get_curr_cmd
387  * @hqvdp: hqvdp structure
388  *
389  * Look for the hqvdp_cmd that is being used by the FW.
390  *
391  * RETURNS:
392  *  the offset of the command to be used.
393  * -1 in error cases
394  */
395 static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
396 {
397 	u32 curr_cmd;
398 	u32 cmd = hqvdp->hqvdp_cmd_paddr;
399 	unsigned int i;
400 
401 	curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
402 
403 	for (i = 0; i < NB_VDP_CMD; i++) {
404 		if (cmd == curr_cmd)
405 			return i * sizeof(struct sti_hqvdp_cmd);
406 
407 		cmd += sizeof(struct sti_hqvdp_cmd);
408 	}
409 
410 	return -1;
411 }
412 
413 /**
414  * sti_hqvdp_get_next_cmd
415  * @hqvdp: hqvdp structure
416  *
417  * Look for the next hqvdp_cmd that will be used by the FW.
418  *
419  * RETURNS:
420  *  the offset of the next command that will be used.
421  * -1 in error cases
422  */
423 static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
424 {
425 	int next_cmd;
426 	dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
427 	unsigned int i;
428 
429 	next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
430 
431 	for (i = 0; i < NB_VDP_CMD; i++) {
432 		if (cmd == next_cmd)
433 			return i * sizeof(struct sti_hqvdp_cmd);
434 
435 		cmd += sizeof(struct sti_hqvdp_cmd);
436 	}
437 
438 	return -1;
439 }
440 
441 #define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
442 				   readl(hqvdp->regs + reg))
443 
444 static const char *hqvdp_dbg_get_lut(u32 *coef)
445 {
446 	if (!memcmp(coef, coef_lut_a_legacy, 16))
447 		return "LUT A";
448 	if (!memcmp(coef, coef_lut_b, 16))
449 		return "LUT B";
450 	if (!memcmp(coef, coef_lut_c_y_legacy, 16))
451 		return "LUT C Y";
452 	if (!memcmp(coef, coef_lut_c_c_legacy, 16))
453 		return "LUT C C";
454 	if (!memcmp(coef, coef_lut_d_y_legacy, 16))
455 		return "LUT D Y";
456 	if (!memcmp(coef, coef_lut_d_c_legacy, 16))
457 		return "LUT D C";
458 	if (!memcmp(coef, coef_lut_e_y_legacy, 16))
459 		return "LUT E Y";
460 	if (!memcmp(coef, coef_lut_e_c_legacy, 16))
461 		return "LUT E C";
462 	if (!memcmp(coef, coef_lut_f_y_legacy, 16))
463 		return "LUT F Y";
464 	if (!memcmp(coef, coef_lut_f_c_legacy, 16))
465 		return "LUT F C";
466 	return "<UNKNOWN>";
467 }
468 
469 static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
470 {
471 	int src_w, src_h, dst_w, dst_h;
472 
473 	seq_puts(s, "\n\tTOP:");
474 	seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
475 	switch (c->top.config) {
476 	case TOP_CONFIG_PROGRESSIVE:
477 		seq_puts(s, "\tProgressive");
478 		break;
479 	case TOP_CONFIG_INTER_TOP:
480 		seq_puts(s, "\tInterlaced, top field");
481 		break;
482 	case TOP_CONFIG_INTER_BTM:
483 		seq_puts(s, "\tInterlaced, bottom field");
484 		break;
485 	default:
486 		seq_puts(s, "\t<UNKNOWN>");
487 		break;
488 	}
489 
490 	seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
491 	seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
492 	seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
493 	seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
494 	seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
495 		   c->top.chroma_src_pitch);
496 	seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
497 		   c->top.input_frame_size);
498 	seq_printf(s, "\t%dx%d",
499 		   c->top.input_frame_size & 0x0000FFFF,
500 		   c->top.input_frame_size >> 16);
501 	seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
502 		   c->top.input_viewport_size);
503 	src_w = c->top.input_viewport_size & 0x0000FFFF;
504 	src_h = c->top.input_viewport_size >> 16;
505 	seq_printf(s, "\t%dx%d", src_w, src_h);
506 
507 	seq_puts(s, "\n\tHVSRC:");
508 	seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
509 		   c->hvsrc.output_picture_size);
510 	dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
511 	dst_h = c->hvsrc.output_picture_size >> 16;
512 	seq_printf(s, "\t%dx%d", dst_w, dst_h);
513 	seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
514 
515 	seq_printf(s, "\n\t %-20s %s", "yh_coef",
516 		   hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
517 	seq_printf(s, "\n\t %-20s %s", "ch_coef",
518 		   hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
519 	seq_printf(s, "\n\t %-20s %s", "yv_coef",
520 		   hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
521 	seq_printf(s, "\n\t %-20s %s", "cv_coef",
522 		   hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
523 
524 	seq_printf(s, "\n\t %-20s", "ScaleH");
525 	if (dst_w > src_w)
526 		seq_printf(s, " %d/1", dst_w / src_w);
527 	else
528 		seq_printf(s, " 1/%d", src_w / dst_w);
529 
530 	seq_printf(s, "\n\t %-20s", "tScaleV");
531 	if (dst_h > src_h)
532 		seq_printf(s, " %d/1", dst_h / src_h);
533 	else
534 		seq_printf(s, " 1/%d", src_h / dst_h);
535 
536 	seq_puts(s, "\n\tCSDI:");
537 	seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
538 	switch (c->csdi.config) {
539 	case CSDI_CONFIG_PROG:
540 		seq_puts(s, "Bypass");
541 		break;
542 	case CSDI_CONFIG_INTER_DIR:
543 		seq_puts(s, "Deinterlace, directional");
544 		break;
545 	default:
546 		seq_puts(s, "<UNKNOWN>");
547 		break;
548 	}
549 
550 	seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
551 	seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
552 }
553 
/*
 * debugfs "hqvdp" entry: dump mailbox registers, decode the firmware
 * state bits, and pretty-print the last and next commands (looked up by
 * matching the mailbox physical addresses against the driver's command
 * buffer).
 */
static int hqvdp_dbg_show(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
	int cmd, cmd_offset, infoxp70;
	void *virt;

	seq_printf(s, "%s: (vaddr = 0x%p)",
		   sti_plane_to_str(&hqvdp->plane), hqvdp->regs);

	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
	DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
	DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
	/* Decode the firmware-state bits of INFO_XP70 */
	infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
	seq_puts(s, "\tFirmware state: ");
	if (infoxp70 & INFO_XP70_FW_READY)
		seq_puts(s, "idle and ready");
	else if (infoxp70 & INFO_XP70_FW_PROCESSING)
		seq_puts(s, "processing a picture");
	else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
		seq_puts(s, "programming queues");
	else
		seq_puts(s, "NOT READY");

	DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
					& STARTUP_CTRL1_RST_DONE)
		seq_puts(s, "\tReset is done");
	else
		seq_puts(s, "\tReset is NOT done");
	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
					& STARTUP_CTRL2_FETCH_EN)
		seq_puts(s, "\tFetch is enabled");
	else
		seq_puts(s, "\tFetch is NOT enabled");
	DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
	DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
	DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
	DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
	/* Low 2 bits of SOFT_VSYNC select SW vsync; should be HW in nominal use */
	if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
		seq_puts(s, "\tHW Vsync");
	else
		seq_puts(s, "\tSW Vsync ?!?!");

	/* Last command */
	cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
	cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
	if (cmd_offset == -1) {
		seq_puts(s, "\n\n  Last command: unknown");
	} else {
		virt = hqvdp->hqvdp_cmd + cmd_offset;
		seq_printf(s, "\n\n  Last command: address @ 0x%x (0x%p)",
			   cmd, virt);
		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
	}

	/* Next command */
	cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
	cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
	if (cmd_offset == -1) {
		seq_puts(s, "\n\n  Next command: unknown");
	} else {
		virt = hqvdp->hqvdp_cmd + cmd_offset;
		seq_printf(s, "\n\n  Next command address: @ 0x%x (0x%p)",
			   cmd, virt);
		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
	}

	seq_puts(s, "\n");

	return 0;
}
629 
/* debugfs entries; .data is patched to the hqvdp context at init time */
static struct drm_info_list hqvdp_debugfs_files[] = {
	{ "hqvdp", hqvdp_dbg_show, 0, NULL },
};
633 
634 static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
635 {
636 	unsigned int i;
637 
638 	for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
639 		hqvdp_debugfs_files[i].data = hqvdp;
640 
641 	return drm_debugfs_create_files(hqvdp_debugfs_files,
642 					ARRAY_SIZE(hqvdp_debugfs_files),
643 					minor->debugfs_root, minor);
644 }
645 
646 /**
647  * sti_hqvdp_update_hvsrc
648  * @orient: horizontal or vertical
649  * @scale:  scaling/zoom factor
650  * @hvsrc:  the structure containing the LUT coef
651  *
652  * Update the Y and C Lut coef, as well as the shift param
653  *
654  * RETURNS:
655  * None.
656  */
657 static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
658 		struct sti_hqvdp_hvsrc *hvsrc)
659 {
660 	const int *coef_c, *coef_y;
661 	int shift_c, shift_y;
662 
663 	/* Get the appropriate coef tables */
664 	if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
665 		coef_y = coef_lut_f_y_legacy;
666 		coef_c = coef_lut_f_c_legacy;
667 		shift_y = SHIFT_LUT_F_Y_LEGACY;
668 		shift_c = SHIFT_LUT_F_C_LEGACY;
669 	} else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
670 		coef_y = coef_lut_e_y_legacy;
671 		coef_c = coef_lut_e_c_legacy;
672 		shift_y = SHIFT_LUT_E_Y_LEGACY;
673 		shift_c = SHIFT_LUT_E_C_LEGACY;
674 	} else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
675 		coef_y = coef_lut_d_y_legacy;
676 		coef_c = coef_lut_d_c_legacy;
677 		shift_y = SHIFT_LUT_D_Y_LEGACY;
678 		shift_c = SHIFT_LUT_D_C_LEGACY;
679 	} else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
680 		coef_y = coef_lut_c_y_legacy;
681 		coef_c = coef_lut_c_c_legacy;
682 		shift_y = SHIFT_LUT_C_Y_LEGACY;
683 		shift_c = SHIFT_LUT_C_C_LEGACY;
684 	} else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
685 		coef_y = coef_c = coef_lut_b;
686 		shift_y = shift_c = SHIFT_LUT_B;
687 	} else {
688 		coef_y = coef_c = coef_lut_a_legacy;
689 		shift_y = shift_c = SHIFT_LUT_A_LEGACY;
690 	}
691 
692 	if (orient == HVSRC_HORI) {
693 		hvsrc->hori_shift = (shift_c << 16) | shift_y;
694 		memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
695 		memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
696 	} else {
697 		hvsrc->vert_shift = (shift_c << 16) | shift_y;
698 		memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
699 		memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
700 	}
701 }
702 
703 /**
704  * sti_hqvdp_check_hw_scaling
705  * @hqvdp: hqvdp pointer
706  * @mode: display mode with timing constraints
707  * @src_w: source width
708  * @src_h: source height
709  * @dst_w: destination width
710  * @dst_h: destination height
711  *
712  * Check if the HW is able to perform the scaling request
713  * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
714  *   Zy = OutputHeight / InputHeight
715  *   LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
716  *     Tx : Total video mode horizontal resolution
717  *     IPClock : HQVDP IP clock (Mhz)
718  *     MaxNbCycles: max(InputWidth, OutputWidth)
719  *     Cp: Video mode pixel clock (Mhz)
720  *
721  * RETURNS:
722  * True if the HW can scale.
723  */
724 static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
725 				       struct drm_display_mode *mode,
726 				       int src_w, int src_h,
727 				       int dst_w, int dst_h)
728 {
729 	unsigned long lfw;
730 	unsigned int inv_zy;
731 
732 	lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
733 	lfw /= max(src_w, dst_w) * mode->clock / 1000;
734 
735 	inv_zy = DIV_ROUND_UP(src_h, dst_h);
736 
737 	return (inv_zy <= lfw) ? true : false;
738 }
739 
/**
 * sti_hqvdp_disable
 * @hqvdp: hqvdp pointer
 *
 * Disables the HQVDP plane: stop Vsync notifications, clear the next
 * command in the mailbox, wait for the firmware to go idle, then stop
 * the pixel clock.
 */
static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
{
	int i;

	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));

	/* Unregister VTG Vsync callback */
	if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

	/* Set next cmd to NULL */
	writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);

	/* Poll until the firmware reports idle (up to ~1s) */
	for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
		if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
				& INFO_XP70_FW_READY)
			break;
		msleep(POLL_DELAY_MS);
	}

	/* VTG can stop now */
	clk_disable_unprepare(hqvdp->clk_pix_main);

	/* i only reaches POLL_MAX_ATTEMPT if the loop never saw FW_READY */
	if (i == POLL_MAX_ATTEMPT)
		DRM_ERROR("XP70 could not revert to idle\n");

	hqvdp->plane.status = STI_PLANE_DISABLED;
}
774 
775 /**
776  * sti_vdp_vtg_cb
777  * @nb: notifier block
778  * @evt: event message
779  * @data: private data
780  *
781  * Handle VTG Vsync event, display pending bottom field
782  *
783  * RETURNS:
784  * 0 on success.
785  */
786 int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
787 {
788 	struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
789 	int btm_cmd_offset, top_cmd_offest;
790 	struct sti_hqvdp_cmd *btm_cmd, *top_cmd;
791 
792 	if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
793 		DRM_DEBUG_DRIVER("Unknown event\n");
794 		return 0;
795 	}
796 
797 	if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
798 		/* disable need to be synchronize on vsync event */
799 		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
800 				 sti_plane_to_str(&hqvdp->plane));
801 
802 		sti_hqvdp_disable(hqvdp);
803 	}
804 
805 	if (hqvdp->btm_field_pending) {
806 		/* Create the btm field command from the current one */
807 		btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
808 		top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
809 		if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
810 			DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
811 			return -EBUSY;
812 		}
813 
814 		btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
815 		top_cmd = hqvdp->hqvdp_cmd + top_cmd_offest;
816 
817 		memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));
818 
819 		btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
820 		btm_cmd->top.current_luma +=
821 				btm_cmd->top.luma_src_pitch / 2;
822 		btm_cmd->top.current_chroma +=
823 				btm_cmd->top.chroma_src_pitch / 2;
824 
825 		/* Post the command to mailbox */
826 		writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
827 				hqvdp->regs + HQVDP_MBX_NEXT_CMD);
828 
829 		hqvdp->btm_field_pending = false;
830 
831 		dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
832 				__func__, hqvdp->hqvdp_cmd_paddr);
833 
834 		sti_plane_update_fps(&hqvdp->plane, false, true);
835 	}
836 
837 	return 0;
838 }
839 
840 static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
841 {
842 	int size;
843 	dma_addr_t dma_addr;
844 
845 	hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
846 
847 	/* Allocate memory for the VDP commands */
848 	size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
849 	hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
850 					&dma_addr,
851 					GFP_KERNEL | GFP_DMA);
852 	if (!hqvdp->hqvdp_cmd) {
853 		DRM_ERROR("Failed to allocate memory for VDP cmd\n");
854 		return;
855 	}
856 
857 	hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
858 	memset(hqvdp->hqvdp_cmd, 0, size);
859 }
860 
/*
 * Program the memory-interface plugs (read then write side) with the
 * same configuration. The CONTROL register is written last on each side
 * so the plug is only enabled once fully configured — keep that order.
 */
static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
{
	/* Configure Plugs (same for RD & WR) */
	writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
	writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
	writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
	writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
	writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
	writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
	writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);

	writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
	writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
	writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
	writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
	writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
	writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
	writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
}
880 
881 /**
882  * sti_hqvdp_start_xp70
883  * @hqvdp: hqvdp pointer
884  *
885  * Run the xP70 initialization sequence
886  */
887 static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
888 {
889 	const struct firmware *firmware;
890 	u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
891 	u8 *data;
892 	int i;
893 	struct fw_header {
894 		int rd_size;
895 		int wr_size;
896 		int pmem_size;
897 		int dmem_size;
898 	} *header;
899 
900 	DRM_DEBUG_DRIVER("\n");
901 
902 	if (hqvdp->xp70_initialized) {
903 		DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
904 		return;
905 	}
906 
907 	/* Request firmware */
908 	if (request_firmware(&firmware, HQVDP_FMW_NAME, hqvdp->dev)) {
909 		DRM_ERROR("Can't get HQVDP firmware\n");
910 		return;
911 	}
912 
913 	/* Check firmware parts */
914 	if (!firmware) {
915 		DRM_ERROR("Firmware not available\n");
916 		return;
917 	}
918 
919 	header = (struct fw_header *)firmware->data;
920 	if (firmware->size < sizeof(*header)) {
921 		DRM_ERROR("Invalid firmware size (%d)\n", firmware->size);
922 		goto out;
923 	}
924 	if ((sizeof(*header) + header->rd_size + header->wr_size +
925 		header->pmem_size + header->dmem_size) != firmware->size) {
926 		DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n",
927 			  sizeof(*header), header->rd_size, header->wr_size,
928 			  header->pmem_size, header->dmem_size,
929 			  firmware->size);
930 		goto out;
931 	}
932 
933 	data = (u8 *)firmware->data;
934 	data += sizeof(*header);
935 	fw_rd_plug = (void *)data;
936 	data += header->rd_size;
937 	fw_wr_plug = (void *)data;
938 	data += header->wr_size;
939 	fw_pmem = (void *)data;
940 	data += header->pmem_size;
941 	fw_dmem = (void *)data;
942 
943 	/* Enable clock */
944 	if (clk_prepare_enable(hqvdp->clk))
945 		DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
946 
947 	/* Reset */
948 	writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
949 
950 	for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
951 		if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
952 				& STARTUP_CTRL1_RST_DONE)
953 			break;
954 		msleep(POLL_DELAY_MS);
955 	}
956 	if (i == POLL_MAX_ATTEMPT) {
957 		DRM_ERROR("Could not reset\n");
958 		goto out;
959 	}
960 
961 	/* Init Read & Write plugs */
962 	for (i = 0; i < header->rd_size / 4; i++)
963 		writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
964 	for (i = 0; i < header->wr_size / 4; i++)
965 		writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
966 
967 	sti_hqvdp_init_plugs(hqvdp);
968 
969 	/* Authorize Idle Mode */
970 	writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
971 
972 	/* Prevent VTG interruption during the boot */
973 	writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
974 	writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
975 
976 	/* Download PMEM & DMEM */
977 	for (i = 0; i < header->pmem_size / 4; i++)
978 		writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
979 	for (i = 0; i < header->dmem_size / 4; i++)
980 		writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
981 
982 	/* Enable fetch */
983 	writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
984 
985 	/* Wait end of boot */
986 	for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
987 		if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
988 				& INFO_XP70_FW_READY)
989 			break;
990 		msleep(POLL_DELAY_MS);
991 	}
992 	if (i == POLL_MAX_ATTEMPT) {
993 		DRM_ERROR("Could not boot\n");
994 		goto out;
995 	}
996 
997 	/* Launch Vsync */
998 	writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
999 
1000 	DRM_INFO("HQVDP XP70 initialized\n");
1001 
1002 	hqvdp->xp70_initialized = true;
1003 
1004 out:
1005 	release_firmware(firmware);
1006 }
1007 
1008 static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
1009 				  struct drm_plane_state *state)
1010 {
1011 	struct sti_plane *plane = to_sti_plane(drm_plane);
1012 	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
1013 	struct drm_crtc *crtc = state->crtc;
1014 	struct drm_framebuffer *fb = state->fb;
1015 	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
1016 	struct drm_crtc_state *crtc_state;
1017 	struct drm_display_mode *mode;
1018 	int dst_x, dst_y, dst_w, dst_h;
1019 	int src_x, src_y, src_w, src_h;
1020 
1021 	/* no need for further checks if the plane is being disabled */
1022 	if (!crtc || !fb)
1023 		return 0;
1024 
1025 	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
1026 	mode = &crtc_state->mode;
1027 	dst_x = state->crtc_x;
1028 	dst_y = state->crtc_y;
1029 	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
1030 	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
1031 	/* src_x are in 16.16 format */
1032 	src_x = state->src_x >> 16;
1033 	src_y = state->src_y >> 16;
1034 	src_w = state->src_w >> 16;
1035 	src_h = state->src_h >> 16;
1036 
1037 	if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
1038 					src_w, src_h,
1039 					dst_w, dst_h)) {
1040 		DRM_ERROR("Scaling beyond HW capabilities\n");
1041 		return -EINVAL;
1042 	}
1043 
1044 	if (!drm_fb_cma_get_gem_obj(fb, 0)) {
1045 		DRM_ERROR("Can't get CMA GEM object for fb\n");
1046 		return -EINVAL;
1047 	}
1048 
1049 	/*
1050 	 * Input / output size
1051 	 * Align to upper even value
1052 	 */
1053 	dst_w = ALIGN(dst_w, 2);
1054 	dst_h = ALIGN(dst_h, 2);
1055 
1056 	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
1057 	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
1058 	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
1059 	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
1060 		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
1061 			  src_w, src_h,
1062 			  dst_w, dst_h);
1063 		return -EINVAL;
1064 	}
1065 
1066 	if (first_prepare) {
1067 		/* Start HQVDP XP70 coprocessor */
1068 		sti_hqvdp_start_xp70(hqvdp);
1069 
1070 		/* Prevent VTG shutdown */
1071 		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
1072 			DRM_ERROR("Failed to prepare/enable pix main clk\n");
1073 			return -EINVAL;
1074 		}
1075 
1076 		/* Register VTG Vsync callback to handle bottom fields */
1077 		if (sti_vtg_register_client(hqvdp->vtg,
1078 					    &hqvdp->vtg_nb,
1079 					    crtc)) {
1080 			DRM_ERROR("Cannot register VTG notifier\n");
1081 			return -EINVAL;
1082 		}
1083 	}
1084 
1085 	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
1086 		      crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
1087 		      drm_plane->base.id, sti_plane_to_str(plane));
1088 	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
1089 		      sti_plane_to_str(plane),
1090 		      dst_w, dst_h, dst_x, dst_y,
1091 		      src_w, src_h, src_x, src_y);
1092 
1093 	return 0;
1094 }
1095 
/*
 * sti_hqvdp_atomic_update - plane atomic update (commit) callback
 *
 * Builds an XP70 command (buffer addresses, pitches, viewport, scaling
 * LUTs) in a free slot of the shared command area and posts its physical
 * address to the mailbox NEXT_CMD register, to be fetched at next vsync.
 * If no free command slot is available, the frame is silently skipped.
 */
static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
				    struct drm_plane_state *oldstate)
{
	struct drm_plane_state *state = drm_plane->state;
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;
	struct drm_gem_cma_object *cma_obj;
	struct sti_hqvdp_cmd *cmd;
	int scale_h, scale_v;
	int cmd_offset;

	/* Nothing to commit when the plane is being disabled */
	if (!crtc || !fb)
		return;

	mode = &crtc->mode;
	dst_x = state->crtc_x;
	dst_y = state->crtc_y;
	/* Clip the destination rectangle to the active display area */
	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = state->src_x >> 16;
	src_y = state->src_y >> 16;
	src_w = state->src_w >> 16;
	src_h = state->src_h >> 16;

	/* Grab a free slot in the shared command area; skip frame if none */
	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
	if (cmd_offset == -1) {
		DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
		return;
	}
	cmd = hqvdp->hqvdp_cmd + cmd_offset;

	/* Static parameters, defaulting to progressive mode */
	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
	cmd->csdi.config = CSDI_CONFIG_PROG;

	/* VC1RE, FMD bypassed : keep everything set to 0
	 * IQI/P2I bypassed */
	cmd->iqi.config = IQI_CONFIG_DFLT;
	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;

	/* NOTE(review): atomic_check already verified the CMA GEM object
	 * exists, so cma_obj is presumably non-NULL here — confirm */
	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);

	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
			 (char *)&fb->pixel_format,
			 (unsigned long)cma_obj->paddr);

	/* Buffer planes address */
	/* NOTE(review): assumes a two-plane (luma/chroma) framebuffer
	 * layout using fb->offsets[0]/[1] — verify against the formats in
	 * hqvdp_supported_formats */
	cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
	cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];

	/* Pitches */
	cmd->top.luma_processed_pitch = fb->pitches[0];
	cmd->top.luma_src_pitch = fb->pitches[0];
	cmd->top.chroma_processed_pitch = fb->pitches[1];
	cmd->top.chroma_src_pitch = fb->pitches[1];

	/* Input / output size
	 * Align to upper even value */
	dst_w = ALIGN(dst_w, 2);
	dst_h = ALIGN(dst_h, 2);

	/* Sizes and origins are packed as (height << 16 | width) and
	 * (y << 16 | x) respectively */
	cmd->top.input_viewport_size = src_h << 16 | src_w;
	cmd->top.input_frame_size = src_h << 16 | src_w;
	cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
	cmd->top.input_viewport_ori = src_y << 16 | src_x;

	/* Handle interlaced */
	if (fb->flags & DRM_MODE_FB_INTERLACED) {
		/* Top field to display */
		cmd->top.config = TOP_CONFIG_INTER_TOP;

		/* Update pitches and vert size */
		cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
		cmd->top.luma_processed_pitch *= 2;
		cmd->top.luma_src_pitch *= 2;
		cmd->top.chroma_processed_pitch *= 2;
		cmd->top.chroma_src_pitch *= 2;

		/* Enable directional deinterlacing processing */
		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
	}

	/* Update hvsrc lut coef */
	/* NOTE(review): src_w/src_h are presumably >= MIN_WIDTH/MIN_HEIGHT
	 * (enforced in atomic_check), so no division by zero — confirm */
	scale_h = SCALE_FACTOR * dst_w / src_w;
	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);

	scale_v = SCALE_FACTOR * dst_h / src_h;
	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);

	/* Post the command's physical address to the XP70 mailbox */
	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);

	/* Interlaced : get ready to display the bottom field at next Vsync */
	if (fb->flags & DRM_MODE_FB_INTERLACED)
		hqvdp->btm_field_pending = true;

	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
		__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);

	sti_plane_update_fps(plane, true, true);

	plane->status = STI_PLANE_UPDATED;
}
1211 
1212 static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
1213 				     struct drm_plane_state *oldstate)
1214 {
1215 	struct sti_plane *plane = to_sti_plane(drm_plane);
1216 
1217 	if (!drm_plane->crtc) {
1218 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
1219 				 drm_plane->base.id);
1220 		return;
1221 	}
1222 
1223 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
1224 			 drm_plane->crtc->base.id,
1225 			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
1226 			 drm_plane->base.id, sti_plane_to_str(plane));
1227 
1228 	plane->status = STI_PLANE_DISABLING;
1229 }
1230 
/* Atomic helper callbacks for the HQVDP plane (check/update/disable) */
static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
	.atomic_check = sti_hqvdp_atomic_check,
	.atomic_update = sti_hqvdp_atomic_update,
	.atomic_disable = sti_hqvdp_atomic_disable,
};
1236 
/*
 * sti_hqvdp_destroy - plane .destroy callback
 *
 * Disables the plane through the atomic helper before releasing the
 * DRM plane resources; the order of the two calls matters.
 */
static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
{
	DRM_DEBUG_DRIVER("\n");

	drm_plane_helper_disable(drm_plane);
	drm_plane_cleanup(drm_plane);
}
1244 
1245 static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
1246 {
1247 	struct sti_plane *plane = to_sti_plane(drm_plane);
1248 	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
1249 
1250 	return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
1251 }
1252 
/*
 * DRM plane ops for the HQVDP plane; most entries delegate to the
 * atomic helpers.
 * NOTE(review): only referenced from this file as far as visible here —
 * could likely be made "static const"; confirm there are no external
 * users before changing the linkage.
 */
struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = sti_hqvdp_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.reset = sti_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.late_register = sti_hqvdp_late_register,
};
1263 
/*
 * sti_hqvdp_create - create and register the HQVDP overlay plane
 * @drm_dev: DRM device the plane belongs to
 * @dev: HQVDP platform device (drvdata holds the sti_hqvdp context)
 * @desc: sti plane descriptor value (e.g. STI_HQVDP_0)
 *
 * Initializes the HQVDP context, registers a universal overlay plane,
 * attaches the atomic helpers and the sti plane properties.
 *
 * Return: the created DRM plane, or NULL on failure.
 */
static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
					  struct device *dev, int desc)
{
	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
	int res;

	hqvdp->plane.desc = desc;
	hqvdp->plane.status = STI_PLANE_DISABLED;

	sti_hqvdp_init(hqvdp);

	res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
				       &sti_hqvdp_plane_helpers_funcs,
				       hqvdp_supported_formats,
				       ARRAY_SIZE(hqvdp_supported_formats),
				       DRM_PLANE_TYPE_OVERLAY, NULL);
	if (res) {
		DRM_ERROR("Failed to initialize universal plane\n");
		return NULL;
	}

	drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);

	sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);

	return &hqvdp->plane.drm_plane;
}
1291 
/*
 * sti_hqvdp_bind - component bind callback
 *
 * Creates the HQVDP plane on the master DRM device.
 * NOTE(review): a plane creation failure is only logged and 0 is still
 * returned, leaving the rest of the display stack functional without
 * this plane — presumably deliberate best-effort; confirm before
 * turning this into an error return.
 */
int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
{
	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct drm_plane *plane;

	DRM_DEBUG_DRIVER("\n");

	hqvdp->drm_dev = drm_dev;

	/* Create HQVDP plane once xp70 is initialized */
	plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
	if (!plane)
		DRM_ERROR("Can't create HQVDP plane\n");

	return 0;
}
1309 
/* Component unbind callback: no per-bind resources to release */
static void sti_hqvdp_unbind(struct device *dev,
		struct device *master, void *data)
{
	/* do nothing */
}
1315 
/* Component framework hooks used to attach to the sti DRM master */
static const struct component_ops sti_hqvdp_ops = {
	.bind = sti_hqvdp_bind,
	.unbind = sti_hqvdp_unbind,
};
1320 
1321 static int sti_hqvdp_probe(struct platform_device *pdev)
1322 {
1323 	struct device *dev = &pdev->dev;
1324 	struct device_node *vtg_np;
1325 	struct sti_hqvdp *hqvdp;
1326 	struct resource *res;
1327 
1328 	DRM_DEBUG_DRIVER("\n");
1329 
1330 	hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
1331 	if (!hqvdp) {
1332 		DRM_ERROR("Failed to allocate HQVDP context\n");
1333 		return -ENOMEM;
1334 	}
1335 
1336 	hqvdp->dev = dev;
1337 
1338 	/* Get Memory resources */
1339 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1340 	if (res == NULL) {
1341 		DRM_ERROR("Get memory resource failed\n");
1342 		return -ENXIO;
1343 	}
1344 	hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
1345 	if (hqvdp->regs == NULL) {
1346 		DRM_ERROR("Register mapping failed\n");
1347 		return -ENXIO;
1348 	}
1349 
1350 	/* Get clock resources */
1351 	hqvdp->clk = devm_clk_get(dev, "hqvdp");
1352 	hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
1353 	if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
1354 		DRM_ERROR("Cannot get clocks\n");
1355 		return -ENXIO;
1356 	}
1357 
1358 	/* Get reset resources */
1359 	hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
1360 	if (!IS_ERR(hqvdp->reset))
1361 		reset_control_deassert(hqvdp->reset);
1362 
1363 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
1364 	if (vtg_np)
1365 		hqvdp->vtg = of_vtg_find(vtg_np);
1366 	of_node_put(vtg_np);
1367 
1368 	platform_set_drvdata(pdev, hqvdp);
1369 
1370 	return component_add(&pdev->dev, &sti_hqvdp_ops);
1371 }
1372 
/* Platform remove: detach from the component framework (devm frees the rest) */
static int sti_hqvdp_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sti_hqvdp_ops);
	return 0;
}
1378 
1379 static struct of_device_id hqvdp_of_match[] = {
1380 	{ .compatible = "st,stih407-hqvdp", },
1381 	{ /* end node */ }
1382 };
1383 MODULE_DEVICE_TABLE(of, hqvdp_of_match);
1384 
1385 struct platform_driver sti_hqvdp_driver = {
1386 	.driver = {
1387 		.name = "sti-hqvdp",
1388 		.owner = THIS_MODULE,
1389 		.of_match_table = hqvdp_of_match,
1390 	},
1391 	.probe = sti_hqvdp_probe,
1392 	.remove = sti_hqvdp_remove,
1393 };
1394 
1395 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
1396 MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
1397 MODULE_LICENSE("GPL");
1398