xref: /openbmc/linux/drivers/gpu/drm/sti/sti_hqvdp.c (revision f4284724)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) STMicroelectronics SA 2014
4  * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
5  */
6 
7 #include <linux/component.h>
8 #include <linux/delay.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/firmware.h>
11 #include <linux/io.h>
12 #include <linux/module.h>
13 #include <linux/reset.h>
14 #include <linux/seq_file.h>
15 
16 #include <drm/drm_atomic.h>
17 #include <drm/drm_device.h>
18 #include <drm/drm_fb_cma_helper.h>
19 #include <drm/drm_fourcc.h>
20 #include <drm/drm_framebuffer.h>
21 #include <drm/drm_gem_cma_helper.h>
22 
23 #include "sti_compositor.h"
24 #include "sti_drv.h"
25 #include "sti_hqvdp_lut.h"
26 #include "sti_plane.h"
27 #include "sti_vtg.h"
28 
29 /* Firmware name */
30 #define HQVDP_FMW_NAME          "hqvdp-stih407.bin"
31 
32 /* Regs address */
33 #define HQVDP_DMEM              0x00000000               /* 0x00000000 */
34 #define HQVDP_PMEM              0x00040000               /* 0x00040000 */
35 #define HQVDP_RD_PLUG           0x000E0000               /* 0x000E0000 */
36 #define HQVDP_RD_PLUG_CONTROL   (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
37 #define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
38 #define HQVDP_RD_PLUG_MIN_OPC   (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
39 #define HQVDP_RD_PLUG_MAX_OPC   (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
40 #define HQVDP_RD_PLUG_MAX_CHK   (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
41 #define HQVDP_RD_PLUG_MAX_MSG   (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
42 #define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
43 #define HQVDP_WR_PLUG           0x000E2000               /* 0x000E2000 */
44 #define HQVDP_WR_PLUG_CONTROL   (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
45 #define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
46 #define HQVDP_WR_PLUG_MIN_OPC   (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
47 #define HQVDP_WR_PLUG_MAX_OPC   (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
48 #define HQVDP_WR_PLUG_MAX_CHK   (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
49 #define HQVDP_WR_PLUG_MAX_MSG   (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
50 #define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
51 #define HQVDP_MBX               0x000E4000               /* 0x000E4000 */
52 #define HQVDP_MBX_IRQ_TO_XP70   (HQVDP_MBX + 0x0000)     /* 0x000E4000 */
53 #define HQVDP_MBX_INFO_HOST     (HQVDP_MBX + 0x0004)     /* 0x000E4004 */
54 #define HQVDP_MBX_IRQ_TO_HOST   (HQVDP_MBX + 0x0008)     /* 0x000E4008 */
55 #define HQVDP_MBX_INFO_XP70     (HQVDP_MBX + 0x000C)     /* 0x000E400C */
56 #define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010)     /* 0x000E4010 */
57 #define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014)     /* 0x000E4014 */
58 #define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018)     /* 0x000E4018 */
59 #define HQVDP_MBX_GP_STATUS     (HQVDP_MBX + 0x001C)     /* 0x000E401C */
60 #define HQVDP_MBX_NEXT_CMD      (HQVDP_MBX + 0x0020)     /* 0x000E4020 */
61 #define HQVDP_MBX_CURRENT_CMD   (HQVDP_MBX + 0x0024)     /* 0x000E4024 */
62 #define HQVDP_MBX_SOFT_VSYNC    (HQVDP_MBX + 0x0028)     /* 0x000E4028 */
63 
64 /* Plugs config */
65 #define PLUG_CONTROL_ENABLE     0x00000001
66 #define PLUG_PAGE_SIZE_256      0x00000002
67 #define PLUG_MIN_OPC_8          0x00000003
68 #define PLUG_MAX_OPC_64         0x00000006
69 #define PLUG_MAX_CHK_2X         0x00000001
70 #define PLUG_MAX_MSG_1X         0x00000000
71 #define PLUG_MIN_SPACE_1        0x00000000
72 
73 /* SW reset CTRL */
74 #define SW_RESET_CTRL_FULL      BIT(0)
75 #define SW_RESET_CTRL_CORE      BIT(1)
76 
77 /* Startup ctrl 1 */
78 #define STARTUP_CTRL1_RST_DONE  BIT(0)
79 #define STARTUP_CTRL1_AUTH_IDLE BIT(2)
80 
81 /* Startup ctrl 2 */
82 #define STARTUP_CTRL2_FETCH_EN  BIT(1)
83 
84 /* Info xP70 */
85 #define INFO_XP70_FW_READY      BIT(15)
86 #define INFO_XP70_FW_PROCESSING BIT(14)
87 #define INFO_XP70_FW_INITQUEUES BIT(13)
88 
89 /* SOFT_VSYNC */
90 #define SOFT_VSYNC_HW           0x00000000
91 #define SOFT_VSYNC_SW_CMD       0x00000001
92 #define SOFT_VSYNC_SW_CTRL_IRQ  0x00000003
93 
94 /* Reset & boot poll config */
95 #define POLL_MAX_ATTEMPT        50
96 #define POLL_DELAY_MS           20
97 
98 #define SCALE_FACTOR            8192
99 #define SCALE_MAX_FOR_LEG_LUT_F 4096
100 #define SCALE_MAX_FOR_LEG_LUT_E 4915
101 #define SCALE_MAX_FOR_LEG_LUT_D 6654
102 #define SCALE_MAX_FOR_LEG_LUT_C 8192
103 
/* Scaler (HVSRC) direction selector, used by sti_hqvdp_update_hvsrc() */
enum sti_hvsrc_orient {
	HVSRC_HORI,
	HVSRC_VERT
};
108 
109 /* Command structures */
/*
 * TOP stage parameters of an xP70 firmware command.
 * NOTE(review): this structure mirrors the firmware command memory layout
 * (it is copied to DMA memory and fetched by the xP70) — field order and
 * sizes must not be changed; confirm against the firmware ABI document.
 */
struct sti_hqvdp_top {
	u32 config;
	u32 mem_format;
	u32 current_luma;
	u32 current_enh_luma;
	u32 current_right_luma;
	u32 current_enh_right_luma;
	u32 current_chroma;
	u32 current_enh_chroma;
	u32 current_right_chroma;
	u32 current_enh_right_chroma;
	u32 output_luma;
	u32 output_chroma;
	u32 luma_src_pitch;
	u32 luma_enh_src_pitch;
	u32 luma_right_src_pitch;
	u32 luma_enh_right_src_pitch;
	u32 chroma_src_pitch;
	u32 chroma_enh_src_pitch;
	u32 chroma_right_src_pitch;
	u32 chroma_enh_right_src_pitch;
	u32 luma_processed_pitch;
	u32 chroma_processed_pitch;
	u32 input_frame_size;
	u32 input_viewport_ori;
	u32 input_viewport_ori_right;
	u32 input_viewport_size;
	u32 left_view_border_width;
	u32 right_view_border_width;
	u32 left_view_3d_offset_width;
	u32 right_view_3d_offset_width;
	u32 side_stripe_color;
	u32 crc_reset_ctrl;
};
144 
145 /* Configs for interlaced : no IT, no pass thru, 3 fields */
146 #define TOP_CONFIG_INTER_BTM            0x00000000
147 #define TOP_CONFIG_INTER_TOP            0x00000002
148 
149 /* Config for progressive : no IT, no pass thru, 3 fields */
150 #define TOP_CONFIG_PROGRESSIVE          0x00000001
151 
152 /* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
153 #define TOP_MEM_FORMAT_DFLT             0x00018060
154 
155 /* Min/Max size */
156 #define MAX_WIDTH                       0x1FFF
157 #define MAX_HEIGHT                      0x0FFF
158 #define MIN_WIDTH                       0x0030
159 #define MIN_HEIGHT                      0x0010
160 
/*
 * VC1 range-expansion stage parameters of a firmware command.
 * NOTE(review): layout assumed to match the firmware ABI — do not reorder.
 */
struct sti_hqvdp_vc1re {
	u32 ctrl_prv_csdi;
	u32 ctrl_cur_csdi;
	u32 ctrl_nxt_csdi;
	u32 ctrl_cur_fmd;
	u32 ctrl_nxt_fmd;
};
168 
/*
 * FMD (film mode detection) stage parameters of a firmware command.
 * NOTE(review): layout assumed to match the firmware ABI — do not reorder.
 */
struct sti_hqvdp_fmd {
	u32 config;
	u32 viewport_ori;
	u32 viewport_size;
	u32 next_next_luma;
	u32 next_next_right_luma;
	u32 next_next_next_luma;
	u32 next_next_next_right_luma;
	u32 threshold_scd;
	u32 threshold_rfd;
	u32 threshold_move;
	u32 threshold_cfd;
};
182 
/*
 * CSDI (deinterlacer) stage parameters of a firmware command: previous /
 * next field buffer addresses plus motion buffers.
 * NOTE(review): layout assumed to match the firmware ABI — do not reorder.
 */
struct sti_hqvdp_csdi {
	u32 config;
	u32 config2;
	u32 dcdi_config;
	u32 prev_luma;
	u32 prev_enh_luma;
	u32 prev_right_luma;
	u32 prev_enh_right_luma;
	u32 next_luma;
	u32 next_enh_luma;
	u32 next_right_luma;
	u32 next_enh_right_luma;
	u32 prev_chroma;
	u32 prev_enh_chroma;
	u32 prev_right_chroma;
	u32 prev_enh_right_chroma;
	u32 next_chroma;
	u32 next_enh_chroma;
	u32 next_right_chroma;
	u32 next_enh_right_chroma;
	u32 prev_motion;
	u32 prev_right_motion;
	u32 cur_motion;
	u32 cur_right_motion;
	u32 next_motion;
	u32 next_right_motion;
};
210 
211 /* Config for progressive: by pass */
212 #define CSDI_CONFIG_PROG                0x00000000
213 /* Config for directional deinterlacing without motion */
214 #define CSDI_CONFIG_INTER_DIR           0x00000016
215 /* Additional configs for fader, blender, motion,... deinterlace algorithms */
216 #define CSDI_CONFIG2_DFLT               0x000001B3
217 #define CSDI_DCDI_CONFIG_DFLT           0x00203803
218 
/*
 * HVSRC (horizontal/vertical scaler) stage parameters of a firmware
 * command, including the four filter coefficient LUTs.
 * NOTE(review): layout assumed to match the firmware ABI — do not reorder.
 */
struct sti_hqvdp_hvsrc {
	u32 hor_panoramic_ctrl;
	u32 output_picture_size;
	u32 init_horizontal;
	u32 init_vertical;
	u32 param_ctrl;
	u32 yh_coef[NB_COEF];	/* luma horizontal coefficients */
	u32 ch_coef[NB_COEF];	/* chroma horizontal coefficients */
	u32 yv_coef[NB_COEF];	/* luma vertical coefficients */
	u32 cv_coef[NB_COEF];	/* chroma vertical coefficients */
	u32 hori_shift;
	u32 vert_shift;
};
232 
233 /* Default ParamCtrl: all controls enabled */
234 #define HVSRC_PARAM_CTRL_DFLT           0xFFFFFFFF
235 
/*
 * IQI (image quality improvement) stage parameters of a firmware command.
 * NOTE(review): layout assumed to match the firmware ABI — do not reorder.
 */
struct sti_hqvdp_iqi {
	u32 config;
	u32 demo_wind_size;
	u32 pk_config;
	u32 coeff0_coeff1;
	u32 coeff2_coeff3;
	u32 coeff4;
	u32 pk_lut;
	u32 pk_gain;
	u32 pk_coring_level;
	u32 cti_config;
	u32 le_config;
	u32 le_lut[64];
	u32 con_bri;
	u32 sat_gain;
	u32 pxf_conf;
	u32 default_color;
};
254 
255 /* Default Config : IQI bypassed */
256 #define IQI_CONFIG_DFLT                 0x00000001
257 /* Default Contrast & Brightness gain = 256 */
258 #define IQI_CON_BRI_DFLT                0x00000100
259 /* Default Saturation gain = 256 */
260 #define IQI_SAT_GAIN_DFLT               0x00000100
261 /* Default PxfConf : P2I bypassed */
262 #define IQI_PXF_CONF_DFLT               0x00000001
263 
/*
 * TOP stage status — presumably written back by the firmware after
 * processing (part of the command's status section); confirm with ABI.
 */
struct sti_hqvdp_top_status {
	u32 processing_time;
	u32 input_y_crc;
	u32 input_uv_crc;
};
269 
/* FMD stage status — presumably firmware-written; confirm with ABI */
struct sti_hqvdp_fmd_status {
	u32 fmd_repeat_move_status;
	u32 fmd_scene_count_status;
	u32 cfd_sum;
	u32 field_sum;
	u32 next_y_fmd_crc;
	u32 next_next_y_fmd_crc;
	u32 next_next_next_y_fmd_crc;
};
279 
/* CSDI stage status — presumably firmware-written; confirm with ABI */
struct sti_hqvdp_csdi_status {
	u32 prev_y_csdi_crc;
	u32 cur_y_csdi_crc;
	u32 next_y_csdi_crc;
	u32 prev_uv_csdi_crc;
	u32 cur_uv_csdi_crc;
	u32 next_uv_csdi_crc;
	u32 y_csdi_crc;
	u32 uv_csdi_crc;
	u32 uv_cup_crc;
	u32 mot_csdi_crc;
	u32 mot_cur_csdi_crc;
	u32 mot_prev_csdi_crc;
};
294 
/* HVSRC stage status — presumably firmware-written; confirm with ABI */
struct sti_hqvdp_hvsrc_status {
	u32 y_hvsrc_crc;
	u32 u_hvsrc_crc;
	u32 v_hvsrc_crc;
};
300 
/* IQI stage status — presumably firmware-written; confirm with ABI */
struct sti_hqvdp_iqi_status {
	u32 pxf_it_status;
	u32 y_iqi_crc;
	u32 u_iqi_crc;
	u32 v_iqi_crc;
};
307 
/* Main commands. We use 2 commands: one being processed by the firmware,
 * one ready to be fetched upon next Vsync */
310 #define NB_VDP_CMD	2
311 
/*
 * A complete command exchanged with the xP70: per-stage parameters first,
 * followed by the per-stage status sections. NB_VDP_CMD of these are
 * allocated contiguously in DMA memory (see sti_hqvdp_init()); the mailbox
 * registers hold their physical addresses.
 * NOTE(review): layout assumed to match the firmware ABI — do not reorder.
 */
struct sti_hqvdp_cmd {
	struct sti_hqvdp_top top;
	struct sti_hqvdp_vc1re vc1re;
	struct sti_hqvdp_fmd fmd;
	struct sti_hqvdp_csdi csdi;
	struct sti_hqvdp_hvsrc hvsrc;
	struct sti_hqvdp_iqi iqi;
	struct sti_hqvdp_top_status top_status;
	struct sti_hqvdp_fmd_status fmd_status;
	struct sti_hqvdp_csdi_status csdi_status;
	struct sti_hqvdp_hvsrc_status hvsrc_status;
	struct sti_hqvdp_iqi_status iqi_status;
};
325 
326 /*
327  * STI HQVDP structure
328  *
329  * @dev:               driver device
330  * @drm_dev:           the drm device
331  * @regs:              registers
 * @plane:             plane structure for hqvdp itself
333  * @clk:               IP clock
334  * @clk_pix_main:      pix main clock
335  * @reset:             reset control
336  * @vtg_nb:            notifier to handle VTG Vsync
337  * @btm_field_pending: is there any bottom field (interlaced frame) to display
338  * @hqvdp_cmd:         buffer of commands
339  * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd
340  * @vtg:               vtg for main data path
341  * @xp70_initialized:  true if xp70 is already initialized
342  * @vtg_registered:    true if registered to VTG
343  */
struct sti_hqvdp {
	struct device *dev;
	struct drm_device *drm_dev;
	void __iomem *regs;
	struct sti_plane plane;
	struct clk *clk;
	struct clk *clk_pix_main;
	struct reset_control *reset;
	struct notifier_block vtg_nb;
	bool btm_field_pending;
	void *hqvdp_cmd;		/* CPU address of the command buffer */
	u32 hqvdp_cmd_paddr;		/* 32-bit DMA address of hqvdp_cmd, as
					 * posted to the mailbox registers */
	struct sti_vtg *vtg;
	bool xp70_initialized;
	bool vtg_registered;
};
360 
361 #define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
362 
/* Pixel formats accepted by the HQVDP plane (NV12 only) */
static const uint32_t hqvdp_supported_formats[] = {
	DRM_FORMAT_NV12,
};
366 
367 /**
368  * sti_hqvdp_get_free_cmd
369  * @hqvdp: hqvdp structure
370  *
371  * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
372  *
373  * RETURNS:
374  * the offset of the command to be used.
375  * -1 in error cases
376  */
377 static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
378 {
379 	u32 curr_cmd, next_cmd;
380 	u32 cmd = hqvdp->hqvdp_cmd_paddr;
381 	int i;
382 
383 	curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
384 	next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
385 
386 	for (i = 0; i < NB_VDP_CMD; i++) {
387 		if ((cmd != curr_cmd) && (cmd != next_cmd))
388 			return i * sizeof(struct sti_hqvdp_cmd);
389 		cmd += sizeof(struct sti_hqvdp_cmd);
390 	}
391 
392 	return -1;
393 }
394 
395 /**
396  * sti_hqvdp_get_curr_cmd
397  * @hqvdp: hqvdp structure
398  *
399  * Look for the hqvdp_cmd that is being used by the FW.
400  *
401  * RETURNS:
 * the offset of the command currently being used.
403  * -1 in error cases
404  */
405 static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
406 {
407 	u32 curr_cmd;
408 	u32 cmd = hqvdp->hqvdp_cmd_paddr;
409 	unsigned int i;
410 
411 	curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
412 
413 	for (i = 0; i < NB_VDP_CMD; i++) {
414 		if (cmd == curr_cmd)
415 			return i * sizeof(struct sti_hqvdp_cmd);
416 
417 		cmd += sizeof(struct sti_hqvdp_cmd);
418 	}
419 
420 	return -1;
421 }
422 
423 /**
424  * sti_hqvdp_get_next_cmd
425  * @hqvdp: hqvdp structure
426  *
427  * Look for the next hqvdp_cmd that will be used by the FW.
428  *
429  * RETURNS:
430  *  the offset of the next command that will be used.
431  * -1 in error cases
432  */
433 static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
434 {
435 	int next_cmd;
436 	dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
437 	unsigned int i;
438 
439 	next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
440 
441 	for (i = 0; i < NB_VDP_CMD; i++) {
442 		if (cmd == next_cmd)
443 			return i * sizeof(struct sti_hqvdp_cmd);
444 
445 		cmd += sizeof(struct sti_hqvdp_cmd);
446 	}
447 
448 	return -1;
449 }
450 
451 #define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
452 				   readl(hqvdp->regs + reg))
453 
454 static const char *hqvdp_dbg_get_lut(u32 *coef)
455 {
456 	if (!memcmp(coef, coef_lut_a_legacy, 16))
457 		return "LUT A";
458 	if (!memcmp(coef, coef_lut_b, 16))
459 		return "LUT B";
460 	if (!memcmp(coef, coef_lut_c_y_legacy, 16))
461 		return "LUT C Y";
462 	if (!memcmp(coef, coef_lut_c_c_legacy, 16))
463 		return "LUT C C";
464 	if (!memcmp(coef, coef_lut_d_y_legacy, 16))
465 		return "LUT D Y";
466 	if (!memcmp(coef, coef_lut_d_c_legacy, 16))
467 		return "LUT D C";
468 	if (!memcmp(coef, coef_lut_e_y_legacy, 16))
469 		return "LUT E Y";
470 	if (!memcmp(coef, coef_lut_e_c_legacy, 16))
471 		return "LUT E C";
472 	if (!memcmp(coef, coef_lut_f_y_legacy, 16))
473 		return "LUT F Y";
474 	if (!memcmp(coef, coef_lut_f_c_legacy, 16))
475 		return "LUT F C";
476 	return "<UNKNOWN>";
477 }
478 
479 static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
480 {
481 	int src_w, src_h, dst_w, dst_h;
482 
483 	seq_puts(s, "\n\tTOP:");
484 	seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
485 	switch (c->top.config) {
486 	case TOP_CONFIG_PROGRESSIVE:
487 		seq_puts(s, "\tProgressive");
488 		break;
489 	case TOP_CONFIG_INTER_TOP:
490 		seq_puts(s, "\tInterlaced, top field");
491 		break;
492 	case TOP_CONFIG_INTER_BTM:
493 		seq_puts(s, "\tInterlaced, bottom field");
494 		break;
495 	default:
496 		seq_puts(s, "\t<UNKNOWN>");
497 		break;
498 	}
499 
500 	seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
501 	seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
502 	seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
503 	seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
504 	seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
505 		   c->top.chroma_src_pitch);
506 	seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
507 		   c->top.input_frame_size);
508 	seq_printf(s, "\t%dx%d",
509 		   c->top.input_frame_size & 0x0000FFFF,
510 		   c->top.input_frame_size >> 16);
511 	seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
512 		   c->top.input_viewport_size);
513 	src_w = c->top.input_viewport_size & 0x0000FFFF;
514 	src_h = c->top.input_viewport_size >> 16;
515 	seq_printf(s, "\t%dx%d", src_w, src_h);
516 
517 	seq_puts(s, "\n\tHVSRC:");
518 	seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
519 		   c->hvsrc.output_picture_size);
520 	dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
521 	dst_h = c->hvsrc.output_picture_size >> 16;
522 	seq_printf(s, "\t%dx%d", dst_w, dst_h);
523 	seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
524 
525 	seq_printf(s, "\n\t %-20s %s", "yh_coef",
526 		   hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
527 	seq_printf(s, "\n\t %-20s %s", "ch_coef",
528 		   hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
529 	seq_printf(s, "\n\t %-20s %s", "yv_coef",
530 		   hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
531 	seq_printf(s, "\n\t %-20s %s", "cv_coef",
532 		   hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
533 
534 	seq_printf(s, "\n\t %-20s", "ScaleH");
535 	if (dst_w > src_w)
536 		seq_printf(s, " %d/1", dst_w / src_w);
537 	else
538 		seq_printf(s, " 1/%d", src_w / dst_w);
539 
540 	seq_printf(s, "\n\t %-20s", "tScaleV");
541 	if (dst_h > src_h)
542 		seq_printf(s, " %d/1", dst_h / src_h);
543 	else
544 		seq_printf(s, " 1/%d", src_h / dst_h);
545 
546 	seq_puts(s, "\n\tCSDI:");
547 	seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
548 	switch (c->csdi.config) {
549 	case CSDI_CONFIG_PROG:
550 		seq_puts(s, "Bypass");
551 		break;
552 	case CSDI_CONFIG_INTER_DIR:
553 		seq_puts(s, "Deinterlace, directional");
554 		break;
555 	default:
556 		seq_puts(s, "<UNKNOWN>");
557 		break;
558 	}
559 
560 	seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
561 	seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
562 }
563 
/*
 * Debugfs show handler: dump the mailbox registers, decode the firmware
 * state bits, then decode the current and next commands when their
 * mailbox addresses can be mapped back to a local command slot.
 */
static int hqvdp_dbg_show(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
	int cmd, cmd_offset, infoxp70;
	void *virt;

	seq_printf(s, "%s: (vaddr = 0x%p)",
		   sti_plane_to_str(&hqvdp->plane), hqvdp->regs);

	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
	DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
	DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
	/* Decode the xP70 state bits (ready / processing / init) */
	infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
	seq_puts(s, "\tFirmware state: ");
	if (infoxp70 & INFO_XP70_FW_READY)
		seq_puts(s, "idle and ready");
	else if (infoxp70 & INFO_XP70_FW_PROCESSING)
		seq_puts(s, "processing a picture");
	else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
		seq_puts(s, "programming queues");
	else
		seq_puts(s, "NOT READY");

	DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
					& STARTUP_CTRL1_RST_DONE)
		seq_puts(s, "\tReset is done");
	else
		seq_puts(s, "\tReset is NOT done");
	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
					& STARTUP_CTRL2_FETCH_EN)
		seq_puts(s, "\tFetch is enabled");
	else
		seq_puts(s, "\tFetch is NOT enabled");
	DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
	DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
	DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
	DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
	/* Low two bits select SW vsync modes; 0 means HW vsync */
	if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
		seq_puts(s, "\tHW Vsync");
	else
		seq_puts(s, "\tSW Vsync ?!?!");

	/* Last command */
	cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
	cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
	if (cmd_offset == -1) {
		seq_puts(s, "\n\n  Last command: unknown");
	} else {
		virt = hqvdp->hqvdp_cmd + cmd_offset;
		seq_printf(s, "\n\n  Last command: address @ 0x%x (0x%p)",
			   cmd, virt);
		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
	}

	/* Next command */
	cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
	cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
	if (cmd_offset == -1) {
		seq_puts(s, "\n\n  Next command: unknown");
	} else {
		virt = hqvdp->hqvdp_cmd + cmd_offset;
		seq_printf(s, "\n\n  Next command address: @ 0x%x (0x%p)",
			   cmd, virt);
		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
	}

	seq_putc(s, '\n');
	return 0;
}
638 
/* Debugfs entries; .data is filled in by hqvdp_debugfs_init() with the
 * sti_hqvdp instance before registration */
static struct drm_info_list hqvdp_debugfs_files[] = {
	{ "hqvdp", hqvdp_dbg_show, 0, NULL },
};
642 
643 static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
644 {
645 	unsigned int i;
646 
647 	for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
648 		hqvdp_debugfs_files[i].data = hqvdp;
649 
650 	drm_debugfs_create_files(hqvdp_debugfs_files,
651 				 ARRAY_SIZE(hqvdp_debugfs_files),
652 				 minor->debugfs_root, minor);
653 }
654 
655 /**
656  * sti_hqvdp_update_hvsrc
657  * @orient: horizontal or vertical
658  * @scale:  scaling/zoom factor
659  * @hvsrc:  the structure containing the LUT coef
660  *
661  * Update the Y and C Lut coef, as well as the shift param
662  *
663  * RETURNS:
664  * None.
665  */
666 static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
667 		struct sti_hqvdp_hvsrc *hvsrc)
668 {
669 	const int *coef_c, *coef_y;
670 	int shift_c, shift_y;
671 
672 	/* Get the appropriate coef tables */
673 	if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
674 		coef_y = coef_lut_f_y_legacy;
675 		coef_c = coef_lut_f_c_legacy;
676 		shift_y = SHIFT_LUT_F_Y_LEGACY;
677 		shift_c = SHIFT_LUT_F_C_LEGACY;
678 	} else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
679 		coef_y = coef_lut_e_y_legacy;
680 		coef_c = coef_lut_e_c_legacy;
681 		shift_y = SHIFT_LUT_E_Y_LEGACY;
682 		shift_c = SHIFT_LUT_E_C_LEGACY;
683 	} else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
684 		coef_y = coef_lut_d_y_legacy;
685 		coef_c = coef_lut_d_c_legacy;
686 		shift_y = SHIFT_LUT_D_Y_LEGACY;
687 		shift_c = SHIFT_LUT_D_C_LEGACY;
688 	} else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
689 		coef_y = coef_lut_c_y_legacy;
690 		coef_c = coef_lut_c_c_legacy;
691 		shift_y = SHIFT_LUT_C_Y_LEGACY;
692 		shift_c = SHIFT_LUT_C_C_LEGACY;
693 	} else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
694 		coef_y = coef_c = coef_lut_b;
695 		shift_y = shift_c = SHIFT_LUT_B;
696 	} else {
697 		coef_y = coef_c = coef_lut_a_legacy;
698 		shift_y = shift_c = SHIFT_LUT_A_LEGACY;
699 	}
700 
701 	if (orient == HVSRC_HORI) {
702 		hvsrc->hori_shift = (shift_c << 16) | shift_y;
703 		memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
704 		memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
705 	} else {
706 		hvsrc->vert_shift = (shift_c << 16) | shift_y;
707 		memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
708 		memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
709 	}
710 }
711 
712 /**
713  * sti_hqvdp_check_hw_scaling
714  * @hqvdp: hqvdp pointer
715  * @mode: display mode with timing constraints
716  * @src_w: source width
717  * @src_h: source height
718  * @dst_w: destination width
719  * @dst_h: destination height
720  *
721  * Check if the HW is able to perform the scaling request
722  * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
723  *   Zy = OutputHeight / InputHeight
724  *   LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
725  *     Tx : Total video mode horizontal resolution
726  *     IPClock : HQVDP IP clock (Mhz)
727  *     MaxNbCycles: max(InputWidth, OutputWidth)
728  *     Cp: Video mode pixel clock (Mhz)
729  *
730  * RETURNS:
731  * True if the HW can scale.
732  */
733 static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
734 				       struct drm_display_mode *mode,
735 				       int src_w, int src_h,
736 				       int dst_w, int dst_h)
737 {
738 	unsigned long lfw;
739 	unsigned int inv_zy;
740 
741 	lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
742 	lfw /= max(src_w, dst_w) * mode->clock / 1000;
743 
744 	inv_zy = DIV_ROUND_UP(src_h, dst_h);
745 
746 	return (inv_zy <= lfw) ? true : false;
747 }
748 
749 /**
750  * sti_hqvdp_disable
751  * @hqvdp: hqvdp pointer
752  *
753  * Disables the HQVDP plane
754  */
755 static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
756 {
757 	int i;
758 
759 	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
760 
761 	/* Unregister VTG Vsync callback */
762 	if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
763 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
764 
765 	/* Set next cmd to NULL */
766 	writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
767 
768 	for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
769 		if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
770 				& INFO_XP70_FW_READY)
771 			break;
772 		msleep(POLL_DELAY_MS);
773 	}
774 
775 	/* VTG can stop now */
776 	clk_disable_unprepare(hqvdp->clk_pix_main);
777 
778 	if (i == POLL_MAX_ATTEMPT)
779 		DRM_ERROR("XP70 could not revert to idle\n");
780 
781 	hqvdp->plane.status = STI_PLANE_DISABLED;
782 	hqvdp->vtg_registered = false;
783 }
784 
785 /**
786  * sti_hqvdp_vtg_cb
787  * @nb: notifier block
788  * @evt: event message
789  * @data: private data
790  *
791  * Handle VTG Vsync event, display pending bottom field
792  *
793  * RETURNS:
794  * 0 on success.
795  */
static int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
{
	struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
	int btm_cmd_offset, top_cmd_offest;
	struct sti_hqvdp_cmd *btm_cmd, *top_cmd;

	/* Only top/bottom field events are handled here */
	if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
		DRM_DEBUG_DRIVER("Unknown event\n");
		return 0;
	}

	if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
		/* disabling must be synchronized on a vsync event */
		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
				 sti_plane_to_str(&hqvdp->plane));

		sti_hqvdp_disable(hqvdp);
	}

	if (hqvdp->btm_field_pending) {
		/* Create the btm field command from the current one */
		btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
		top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
		if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
			DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
			return -EBUSY;
		}

		btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
		top_cmd = hqvdp->hqvdp_cmd + top_cmd_offest;

		/* Clone the top-field command, then patch it for bottom */
		memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));

		btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
		/* Advance luma/chroma base addresses by half a pitch —
		 * presumably to point at the bottom field's first line;
		 * TODO(review): confirm pitch covers two field lines */
		btm_cmd->top.current_luma +=
				btm_cmd->top.luma_src_pitch / 2;
		btm_cmd->top.current_chroma +=
				btm_cmd->top.chroma_src_pitch / 2;

		/* Post the command to mailbox */
		writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
				hqvdp->regs + HQVDP_MBX_NEXT_CMD);

		hqvdp->btm_field_pending = false;

		dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
				__func__, hqvdp->hqvdp_cmd_paddr);

		sti_plane_update_fps(&hqvdp->plane, false, true);
	}

	return 0;
}
849 
850 static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
851 {
852 	int size;
853 	dma_addr_t dma_addr;
854 
855 	hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
856 
857 	/* Allocate memory for the VDP commands */
858 	size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
859 	hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
860 					&dma_addr,
861 					GFP_KERNEL | GFP_DMA);
862 	if (!hqvdp->hqvdp_cmd) {
863 		DRM_ERROR("Failed to allocate memory for VDP cmd\n");
864 		return;
865 	}
866 
867 	hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
868 	memset(hqvdp->hqvdp_cmd, 0, size);
869 }
870 
/*
 * Program the read and write memory plugs with identical parameters.
 * The CONTROL register is written last in each group — presumably so the
 * plug is enabled only after its parameters are programmed; keep the
 * write order unchanged.
 */
static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
{
	/* Configure Plugs (same for RD & WR) */
	writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
	writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
	writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
	writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
	writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
	writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
	writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);

	writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
	writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
	writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
	writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
	writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
	writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
	writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
}
890 
891 /**
892  * sti_hqvdp_start_xp70
893  * @hqvdp: hqvdp pointer
894  *
895  * Run the xP70 initialization sequence
896  */
static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
{
	const struct firmware *firmware;
	u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
	u8 *data;
	int i;
	/* Layout of the firmware blob: four section sizes followed by the
	 * sections themselves (RD plug, WR plug, PMEM, DMEM), in order */
	struct fw_header {
		int rd_size;
		int wr_size;
		int pmem_size;
		int dmem_size;
	} *header;

	DRM_DEBUG_DRIVER("\n");

	/* Boot is done once; subsequent calls are no-ops */
	if (hqvdp->xp70_initialized) {
		DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
		return;
	}

	/* Request firmware */
	if (request_firmware(&firmware, HQVDP_FMW_NAME, hqvdp->dev)) {
		DRM_ERROR("Can't get HQVDP firmware\n");
		return;
	}

	/* Check firmware parts */
	/* NOTE(review): request_firmware() returning 0 already guarantees
	 * a non-NULL firmware pointer; this check appears to be dead code */
	if (!firmware) {
		DRM_ERROR("Firmware not available\n");
		return;
	}

	header = (struct fw_header *)firmware->data;
	if (firmware->size < sizeof(*header)) {
		DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size);
		goto out;
	}
	/* NOTE(review): the section sizes are signed ints taken from the
	 * blob and not checked for negative values — the firmware file is
	 * assumed trusted here */
	if ((sizeof(*header) + header->rd_size + header->wr_size +
		header->pmem_size + header->dmem_size) != firmware->size) {
		DRM_ERROR("Invalid fmw structure (%zu+%d+%d+%d+%d != %zu)\n",
			  sizeof(*header), header->rd_size, header->wr_size,
			  header->pmem_size, header->dmem_size,
			  firmware->size);
		goto out;
	}

	/* Slice the blob into its four sections, in header order */
	data = (u8 *)firmware->data;
	data += sizeof(*header);
	fw_rd_plug = (void *)data;
	data += header->rd_size;
	fw_wr_plug = (void *)data;
	data += header->wr_size;
	fw_pmem = (void *)data;
	data += header->pmem_size;
	fw_dmem = (void *)data;

	/* Enable clock */
	/* NOTE(review): boot continues even if the clock enable failed */
	if (clk_prepare_enable(hqvdp->clk))
		DRM_ERROR("Failed to prepare/enable HQVDP clk\n");

	/* Reset */
	writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);

	/* Poll for reset completion (up to 50 x 20 ms) */
	for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
		if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
				& STARTUP_CTRL1_RST_DONE)
			break;
		msleep(POLL_DELAY_MS);
	}
	if (i == POLL_MAX_ATTEMPT) {
		DRM_ERROR("Could not reset\n");
		clk_disable_unprepare(hqvdp->clk);
		goto out;
	}

	/* Init Read & Write plugs */
	for (i = 0; i < header->rd_size / 4; i++)
		writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
	for (i = 0; i < header->wr_size / 4; i++)
		writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);

	sti_hqvdp_init_plugs(hqvdp);

	/* Authorize Idle Mode */
	writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);

	/* Prevent VTG interruption during the boot */
	writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
	writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);

	/* Download PMEM & DMEM */
	for (i = 0; i < header->pmem_size / 4; i++)
		writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
	for (i = 0; i < header->dmem_size / 4; i++)
		writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);

	/* Enable fetch */
	writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);

	/* Wait end of boot */
	for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
		if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
				& INFO_XP70_FW_READY)
			break;
		msleep(POLL_DELAY_MS);
	}
	if (i == POLL_MAX_ATTEMPT) {
		DRM_ERROR("Could not boot\n");
		clk_disable_unprepare(hqvdp->clk);
		goto out;
	}

	/* Launch Vsync */
	writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);

	DRM_INFO("HQVDP XP70 initialized\n");

	hqvdp->xp70_initialized = true;

out:
	release_firmware(firmware);
}
1019 
1020 static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
1021 				  struct drm_atomic_state *state)
1022 {
1023 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1024 										 drm_plane);
1025 	struct sti_plane *plane = to_sti_plane(drm_plane);
1026 	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
1027 	struct drm_crtc *crtc = new_plane_state->crtc;
1028 	struct drm_framebuffer *fb = new_plane_state->fb;
1029 	struct drm_crtc_state *crtc_state;
1030 	struct drm_display_mode *mode;
1031 	int dst_x, dst_y, dst_w, dst_h;
1032 	int src_x, src_y, src_w, src_h;
1033 
1034 	/* no need for further checks if the plane is being disabled */
1035 	if (!crtc || !fb)
1036 		return 0;
1037 
1038 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
1039 	mode = &crtc_state->mode;
1040 	dst_x = new_plane_state->crtc_x;
1041 	dst_y = new_plane_state->crtc_y;
1042 	dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x);
1043 	dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y);
1044 	/* src_x are in 16.16 format */
1045 	src_x = new_plane_state->src_x >> 16;
1046 	src_y = new_plane_state->src_y >> 16;
1047 	src_w = new_plane_state->src_w >> 16;
1048 	src_h = new_plane_state->src_h >> 16;
1049 
1050 	if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode,
1051 						       src_w, src_h,
1052 						       dst_w, dst_h)) {
1053 		DRM_ERROR("Scaling beyond HW capabilities\n");
1054 		return -EINVAL;
1055 	}
1056 
1057 	if (!drm_fb_cma_get_gem_obj(fb, 0)) {
1058 		DRM_ERROR("Can't get CMA GEM object for fb\n");
1059 		return -EINVAL;
1060 	}
1061 
1062 	/*
1063 	 * Input / output size
1064 	 * Align to upper even value
1065 	 */
1066 	dst_w = ALIGN(dst_w, 2);
1067 	dst_h = ALIGN(dst_h, 2);
1068 
1069 	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
1070 	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
1071 	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
1072 	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
1073 		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
1074 			  src_w, src_h,
1075 			  dst_w, dst_h);
1076 		return -EINVAL;
1077 	}
1078 
1079 	if (!hqvdp->xp70_initialized)
1080 		/* Start HQVDP XP70 coprocessor */
1081 		sti_hqvdp_start_xp70(hqvdp);
1082 
1083 	if (!hqvdp->vtg_registered) {
1084 		/* Prevent VTG shutdown */
1085 		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
1086 			DRM_ERROR("Failed to prepare/enable pix main clk\n");
1087 			return -EINVAL;
1088 		}
1089 
1090 		/* Register VTG Vsync callback to handle bottom fields */
1091 		if (sti_vtg_register_client(hqvdp->vtg,
1092 					    &hqvdp->vtg_nb,
1093 					    crtc)) {
1094 			DRM_ERROR("Cannot register VTG notifier\n");
1095 			clk_disable_unprepare(hqvdp->clk_pix_main);
1096 			return -EINVAL;
1097 		}
1098 		hqvdp->vtg_registered = true;
1099 	}
1100 
1101 	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
1102 		      crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
1103 		      drm_plane->base.id, sti_plane_to_str(plane));
1104 	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
1105 		      sti_plane_to_str(plane),
1106 		      dst_w, dst_h, dst_x, dst_y,
1107 		      src_w, src_h, src_x, src_y);
1108 
1109 	return 0;
1110 }
1111 
/*
 * sti_hqvdp_atomic_update - build and post a command to the XP70 firmware
 *
 * Fills a free slot of the shared command pool with the top/IQI/HVSRC/
 * CSDI parameters derived from the new plane state, then writes the
 * command's physical address to the NEXT_CMD mailbox so the firmware
 * picks it up on the next vsync. If nothing relevant changed since the
 * previous state, no command is posted; if no free slot is available,
 * the frame is skipped with a debug message.
 */
static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
									  drm_plane);
	struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
									  drm_plane);
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
	struct drm_crtc *crtc = newstate->crtc;
	struct drm_framebuffer *fb = newstate->fb;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;
	struct drm_gem_cma_object *cma_obj;
	struct sti_hqvdp_cmd *cmd;
	int scale_h, scale_v;
	int cmd_offset;

	/* nothing to display: the disable path handles this case */
	if (!crtc || !fb)
		return;

	if ((oldstate->fb == newstate->fb) &&
	    (oldstate->crtc_x == newstate->crtc_x) &&
	    (oldstate->crtc_y == newstate->crtc_y) &&
	    (oldstate->crtc_w == newstate->crtc_w) &&
	    (oldstate->crtc_h == newstate->crtc_h) &&
	    (oldstate->src_x == newstate->src_x) &&
	    (oldstate->src_y == newstate->src_y) &&
	    (oldstate->src_w == newstate->src_w) &&
	    (oldstate->src_h == newstate->src_h)) {
		/* No change since last update, do not post cmd */
		DRM_DEBUG_DRIVER("No change, not posting cmd\n");
		plane->status = STI_PLANE_UPDATED;
		return;
	}

	mode = &crtc->mode;
	dst_x = newstate->crtc_x;
	dst_y = newstate->crtc_y;
	/* destination is clipped against the active display area */
	dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x);
	dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = newstate->src_x >> 16;
	src_y = newstate->src_y >> 16;
	src_w = newstate->src_w >> 16;
	src_h = newstate->src_h >> 16;

	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
	if (cmd_offset == -1) {
		/* all command slots busy: drop this frame rather than block */
		DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
		return;
	}
	cmd = hqvdp->hqvdp_cmd + cmd_offset;

	/* Static parameters, defaulting to progressive mode */
	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
	cmd->csdi.config = CSDI_CONFIG_PROG;

	/* VC1RE, FMD bypassed : keep everything set to 0
	 * IQI/P2I bypassed */
	cmd->iqi.config = IQI_CONFIG_DFLT;
	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;

	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);

	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
			 (char *)&fb->format->format,
			 (unsigned long)cma_obj->paddr);

	/* Buffer planes address */
	/* NOTE(review): paddr is truncated to u32 — presumably the XP70 only
	 * takes 32-bit addresses; confirm CMA is below 4 GiB on this SoC */
	cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
	cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];

	/* Pitches */
	cmd->top.luma_processed_pitch = fb->pitches[0];
	cmd->top.luma_src_pitch = fb->pitches[0];
	cmd->top.chroma_processed_pitch = fb->pitches[1];
	cmd->top.chroma_src_pitch = fb->pitches[1];

	/* Input / output size
	 * Align to upper even value */
	dst_w = ALIGN(dst_w, 2);
	dst_h = ALIGN(dst_h, 2);

	cmd->top.input_viewport_size = src_h << 16 | src_w;
	cmd->top.input_frame_size = src_h << 16 | src_w;
	cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
	cmd->top.input_viewport_ori = src_y << 16 | src_x;

	/* Handle interlaced */
	if (fb->flags & DRM_MODE_FB_INTERLACED) {
		/* Top field to display */
		cmd->top.config = TOP_CONFIG_INTER_TOP;

		/* Update pitches and vert size */
		cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
		cmd->top.luma_processed_pitch *= 2;
		cmd->top.luma_src_pitch *= 2;
		cmd->top.chroma_processed_pitch *= 2;
		cmd->top.chroma_src_pitch *= 2;

		/* Enable directional deinterlacing processing */
		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
	}

	/* Update hvsrc lut coef */
	/* src_w/src_h are nonzero here: atomic_check enforces MIN_WIDTH /
	 * MIN_HEIGHT, so these divisions are safe */
	scale_h = SCALE_FACTOR * dst_w / src_w;
	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);

	scale_v = SCALE_FACTOR * dst_h / src_h;
	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);

	/* hand the command over to the firmware via the mailbox */
	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);

	/* Interlaced : get ready to display the bottom field at next Vsync */
	if (fb->flags & DRM_MODE_FB_INTERLACED)
		hqvdp->btm_field_pending = true;

	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
		__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);

	sti_plane_update_fps(plane, true, true);

	plane->status = STI_PLANE_UPDATED;
}
1245 
1246 static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
1247 				     struct drm_atomic_state *state)
1248 {
1249 	struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
1250 									  drm_plane);
1251 	struct sti_plane *plane = to_sti_plane(drm_plane);
1252 
1253 	if (!oldstate->crtc) {
1254 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
1255 				 drm_plane->base.id);
1256 		return;
1257 	}
1258 
1259 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
1260 			 oldstate->crtc->base.id,
1261 			 sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
1262 			 drm_plane->base.id, sti_plane_to_str(plane));
1263 
1264 	plane->status = STI_PLANE_DISABLING;
1265 }
1266 
/* Atomic helper hooks for the HQVDP plane */
static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
	.atomic_check = sti_hqvdp_atomic_check,
	.atomic_update = sti_hqvdp_atomic_update,
	.atomic_disable = sti_hqvdp_atomic_disable,
};
1272 
1273 static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
1274 {
1275 	struct sti_plane *plane = to_sti_plane(drm_plane);
1276 	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
1277 
1278 	hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
1279 
1280 	return 0;
1281 }
1282 
/* Plane functions: standard atomic helpers plus debugfs registration */
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.late_register = sti_hqvdp_late_register,
};
1292 
1293 static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
1294 					  struct device *dev, int desc)
1295 {
1296 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
1297 	int res;
1298 
1299 	hqvdp->plane.desc = desc;
1300 	hqvdp->plane.status = STI_PLANE_DISABLED;
1301 
1302 	sti_hqvdp_init(hqvdp);
1303 
1304 	res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
1305 				       &sti_hqvdp_plane_helpers_funcs,
1306 				       hqvdp_supported_formats,
1307 				       ARRAY_SIZE(hqvdp_supported_formats),
1308 				       NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
1309 	if (res) {
1310 		DRM_ERROR("Failed to initialize universal plane\n");
1311 		return NULL;
1312 	}
1313 
1314 	drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
1315 
1316 	sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
1317 
1318 	return &hqvdp->plane.drm_plane;
1319 }
1320 
1321 static int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
1322 {
1323 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
1324 	struct drm_device *drm_dev = data;
1325 	struct drm_plane *plane;
1326 
1327 	DRM_DEBUG_DRIVER("\n");
1328 
1329 	hqvdp->drm_dev = drm_dev;
1330 
1331 	/* Create HQVDP plane once xp70 is initialized */
1332 	plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
1333 	if (!plane)
1334 		DRM_ERROR("Can't create HQVDP plane\n");
1335 
1336 	return 0;
1337 }
1338 
/* Component unbind callback: all resources are device-managed, so
 * there is nothing to release here. */
static void sti_hqvdp_unbind(struct device *dev,
		struct device *master, void *data)
{
	/* do nothing */
}
1344 
/* Component framework hooks, registered from probe() */
static const struct component_ops sti_hqvdp_ops = {
	.bind = sti_hqvdp_bind,
	.unbind = sti_hqvdp_unbind,
};
1349 
1350 static int sti_hqvdp_probe(struct platform_device *pdev)
1351 {
1352 	struct device *dev = &pdev->dev;
1353 	struct device_node *vtg_np;
1354 	struct sti_hqvdp *hqvdp;
1355 	struct resource *res;
1356 
1357 	DRM_DEBUG_DRIVER("\n");
1358 
1359 	hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
1360 	if (!hqvdp) {
1361 		DRM_ERROR("Failed to allocate HQVDP context\n");
1362 		return -ENOMEM;
1363 	}
1364 
1365 	hqvdp->dev = dev;
1366 
1367 	/* Get Memory resources */
1368 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1369 	if (!res) {
1370 		DRM_ERROR("Get memory resource failed\n");
1371 		return -ENXIO;
1372 	}
1373 	hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
1374 	if (!hqvdp->regs) {
1375 		DRM_ERROR("Register mapping failed\n");
1376 		return -ENXIO;
1377 	}
1378 
1379 	/* Get clock resources */
1380 	hqvdp->clk = devm_clk_get(dev, "hqvdp");
1381 	hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
1382 	if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
1383 		DRM_ERROR("Cannot get clocks\n");
1384 		return -ENXIO;
1385 	}
1386 
1387 	/* Get reset resources */
1388 	hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
1389 	if (!IS_ERR(hqvdp->reset))
1390 		reset_control_deassert(hqvdp->reset);
1391 
1392 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
1393 	if (vtg_np)
1394 		hqvdp->vtg = of_vtg_find(vtg_np);
1395 	of_node_put(vtg_np);
1396 
1397 	platform_set_drvdata(pdev, hqvdp);
1398 
1399 	return component_add(&pdev->dev, &sti_hqvdp_ops);
1400 }
1401 
/* Unregister from the component framework; devm handles the rest. */
static int sti_hqvdp_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sti_hqvdp_ops);
	return 0;
}
1407 
/* Device-tree match table */
static const struct of_device_id hqvdp_of_match[] = {
	{ .compatible = "st,stih407-hqvdp", },
	{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, hqvdp_of_match);
1413 
1414 struct platform_driver sti_hqvdp_driver = {
1415 	.driver = {
1416 		.name = "sti-hqvdp",
1417 		.owner = THIS_MODULE,
1418 		.of_match_table = hqvdp_of_match,
1419 	},
1420 	.probe = sti_hqvdp_probe,
1421 	.remove = sti_hqvdp_remove,
1422 };
1423 
/* Module metadata */
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");
1427