1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors:
4  *	Eunchul Kim <chulspro.kim@samsung.com>
5  *	Jinyoung Jeon <jy0.jeon@samsung.com>
6  *	Sangmin Lee <lsmin.lee@samsung.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
19 #include <plat/map-base.h>
20 
21 #include <drm/drmP.h>
22 #include <drm/exynos_drm.h>
23 #include "regs-gsc.h"
24 #include "exynos_drm_ipp.h"
25 #include "exynos_drm_gsc.h"
26 
27 /*
28  * GSC stands for General SCaler and
29  * supports image scaler/rotator and input/output DMA operations.
 * Input DMA reads image data from memory.
 * Output DMA writes image data to memory.
32  * GSC supports image rotation and image effect functions.
33  *
 * M2M operation : supports crop/scale/rotation/csc and so on.
35  * Memory ----> GSC H/W ----> Memory.
36  * Writeback operation : supports cloned screen with FIMD.
37  * FIMD ----> GSC H/W ----> Memory.
38  * Output operation : supports direct display using local path.
39  * Memory ----> GSC H/W ----> FIMD, Mixer.
40  */
41 
42 /*
43  * TODO
44  * 1. check suspend/resume api if needed.
45  * 2. need to check use case platform_device_id.
 * 3. check src/dst size width, height.
 * 4. add check_prepare api for the right register setup.
48  * 5. need to add supported list in prop_list.
49  * 6. check prescaler/scaler optimization.
50  */
51 
52 #define GSC_MAX_DEVS	4
53 #define GSC_MAX_SRC		4
54 #define GSC_MAX_DST		16
55 #define GSC_RESET_TIMEOUT	50
56 #define GSC_BUF_STOP	1
57 #define GSC_BUF_START	2
58 #define GSC_REG_SZ		16
59 #define GSC_WIDTH_ITU_709	1280
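/*
 * Scaling ratio limits are src/dst in 16.16 fixed point,
 * e.g. an 8:7 downscale is (8 << 16) / 7 = 74898.
 */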
60 #define GSC_SC_UP_MAX_RATIO		65536
61 #define GSC_SC_DOWN_RATIO_7_8		74898
62 #define GSC_SC_DOWN_RATIO_6_8		87381
63 #define GSC_SC_DOWN_RATIO_5_8		104857
64 #define GSC_SC_DOWN_RATIO_4_8		131072
65 #define GSC_SC_DOWN_RATIO_3_8		174762
66 #define GSC_SC_DOWN_RATIO_2_8		262144
67 #define GSC_REFRESH_MIN	12
68 #define GSC_REFRESH_MAX	60
69 #define GSC_CROP_MAX	8192
70 #define GSC_CROP_MIN	32
71 #define GSC_SCALE_MAX	4224
72 #define GSC_SCALE_MIN	32
73 #define GSC_COEF_RATIO	7
74 #define GSC_COEF_PHASE	9
75 #define GSC_COEF_ATTR	16
76 #define GSC_COEF_H_8T	8
77 #define GSC_COEF_V_4T	4
78 #define GSC_COEF_DEPTH	3
79 
80 #define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
					struct gsc_context, ippdrv)
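/* gsc_read()/gsc_write() expect a 'struct gsc_context *ctx' in scope. */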
83 #define gsc_read(offset)		readl(ctx->regs + (offset))
84 #define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
85 
86 /*
87  * A structure of scaler.
88  *
89  * @range: narrow, wide.
 * @pre_shfactor: pre-scaler shift factor.
91  * @pre_hratio: horizontal ratio of the prescaler.
92  * @pre_vratio: vertical ratio of the prescaler.
93  * @main_hratio: the main scaler's horizontal ratio.
94  * @main_vratio: the main scaler's vertical ratio.
95  */
96 struct gsc_scaler {
97 	bool	range;
98 	u32	pre_shfactor;
99 	u32	pre_hratio;
100 	u32	pre_vratio;
101 	unsigned long main_hratio;
102 	unsigned long main_vratio;
103 };
104 
105 /*
106  * A structure of scaler capability.
107  *
 * See section 49.2 (features) of the user manual.
109  * @tile_w: tile mode or rotation width.
110  * @tile_h: tile mode or rotation height.
111  * @w: other cases width.
112  * @h: other cases height.
113  */
114 struct gsc_capability {
115 	/* tile or rotation */
116 	u32	tile_w;
117 	u32	tile_h;
118 	/* other cases */
119 	u32	w;
120 	u32	h;
121 };
122 
123 /*
124  * A structure of gsc context.
125  *
 * @ippdrv: ipp driver data registered with the ipp core.
127  * @regs_res: register resources.
128  * @regs: memory mapped io registers.
129  * @lock: locking of operations.
130  * @gsc_clk: gsc gate clock.
 * @sc: scaler information.
132  * @id: gsc id.
133  * @irq: irq number.
 * @rotation: source is rotated by 90 or 270 degrees, so width/height swap.
 * @suspended: clock is gated and the device is suspended.
136  */
137 struct gsc_context {
138 	struct exynos_drm_ippdrv	ippdrv;
139 	struct resource	*regs_res;
140 	void __iomem	*regs;
141 	struct mutex	lock;
142 	struct clk	*gsc_clk;
143 	struct gsc_scaler	sc;
144 	int	id;
145 	int	irq;
146 	bool	rotation;
147 	bool	suspended;
148 };
149 
150 /* 8-tap Filter Coefficient */
151 static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
152 	{	/* Ratio <= 65536 (~8:8) */
153 		{  0,  0,   0, 128,   0,   0,  0,  0 },
154 		{ -1,  2,  -6, 127,   7,  -2,  1,  0 },
155 		{ -1,  4, -12, 125,  16,  -5,  1,  0 },
156 		{ -1,  5, -15, 120,  25,  -8,  2,  0 },
157 		{ -1,  6, -18, 114,  35, -10,  3, -1 },
158 		{ -1,  6, -20, 107,  46, -13,  4, -1 },
159 		{ -2,  7, -21,  99,  57, -16,  5, -1 },
160 		{ -1,  6, -20,  89,  68, -18,  5, -1 },
161 		{ -1,  6, -20,  79,  79, -20,  6, -1 },
162 		{ -1,  5, -18,  68,  89, -20,  6, -1 },
163 		{ -1,  5, -16,  57,  99, -21,  7, -2 },
164 		{ -1,  4, -13,  46, 107, -20,  6, -1 },
165 		{ -1,  3, -10,  35, 114, -18,  6, -1 },
166 		{  0,  2,  -8,  25, 120, -15,  5, -1 },
167 		{  0,  1,  -5,  16, 125, -12,  4, -1 },
168 		{  0,  1,  -2,   7, 127,  -6,  2, -1 }
169 	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
170 		{  3, -8,  14, 111,  13,  -8,  3,  0 },
171 		{  2, -6,   7, 112,  21, -10,  3, -1 },
172 		{  2, -4,   1, 110,  28, -12,  4, -1 },
173 		{  1, -2,  -3, 106,  36, -13,  4, -1 },
174 		{  1, -1,  -7, 103,  44, -15,  4, -1 },
175 		{  1,  1, -11,  97,  53, -16,  4, -1 },
176 		{  0,  2, -13,  91,  61, -16,  4, -1 },
177 		{  0,  3, -15,  85,  69, -17,  4, -1 },
178 		{  0,  3, -16,  77,  77, -16,  3,  0 },
179 		{ -1,  4, -17,  69,  85, -15,  3,  0 },
180 		{ -1,  4, -16,  61,  91, -13,  2,  0 },
181 		{ -1,  4, -16,  53,  97, -11,  1,  1 },
182 		{ -1,  4, -15,  44, 103,  -7, -1,  1 },
183 		{ -1,  4, -13,  36, 106,  -3, -2,  1 },
184 		{ -1,  4, -12,  28, 110,   1, -4,  2 },
185 		{ -1,  3, -10,  21, 112,   7, -6,  2 }
186 	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
187 		{ 2, -11,  25,  96, 25, -11,   2,  0 },
188 		{ 2, -10,  19,  96, 31, -12,   2,  0 },
189 		{ 2,  -9,  14,  94, 37, -12,   2,  0 },
190 		{ 2,  -8,  10,  92, 43, -12,   1,  0 },
191 		{ 2,  -7,   5,  90, 49, -12,   1,  0 },
192 		{ 2,  -5,   1,  86, 55, -12,   0,  1 },
193 		{ 2,  -4,  -2,  82, 61, -11,  -1,  1 },
194 		{ 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
195 		{ 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
196 		{ 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
197 		{ 1,  -1, -11,  61, 82,  -2,  -4,  2 },
198 		{ 1,   0, -12,  55, 86,   1,  -5,  2 },
199 		{ 0,   1, -12,  49, 90,   5,  -7,  2 },
200 		{ 0,   1, -12,  43, 92,  10,  -8,  2 },
201 		{ 0,   2, -12,  37, 94,  14,  -9,  2 },
202 		{ 0,   2, -12,  31, 96,  19, -10,  2 }
203 	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
204 		{ -1,  -8, 33,  80, 33,  -8,  -1,  0 },
205 		{ -1,  -8, 28,  80, 37,  -7,  -2,  1 },
206 		{  0,  -8, 24,  79, 41,  -7,  -2,  1 },
207 		{  0,  -8, 20,  78, 46,  -6,  -3,  1 },
208 		{  0,  -8, 16,  76, 50,  -4,  -3,  1 },
209 		{  0,  -7, 13,  74, 54,  -3,  -4,  1 },
210 		{  1,  -7, 10,  71, 58,  -1,  -5,  1 },
211 		{  1,  -6,  6,  68, 62,   1,  -5,  1 },
212 		{  1,  -6,  4,  65, 65,   4,  -6,  1 },
213 		{  1,  -5,  1,  62, 68,   6,  -6,  1 },
214 		{  1,  -5, -1,  58, 71,  10,  -7,  1 },
215 		{  1,  -4, -3,  54, 74,  13,  -7,  0 },
216 		{  1,  -3, -4,  50, 76,  16,  -8,  0 },
217 		{  1,  -3, -6,  46, 78,  20,  -8,  0 },
218 		{  1,  -2, -7,  41, 79,  24,  -8,  0 },
219 		{  1,  -2, -7,  37, 80,  28,  -8, -1 }
220 	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
221 		{ -3,   0, 35,  64, 35,   0,  -3,  0 },
222 		{ -3,  -1, 32,  64, 38,   1,  -3,  0 },
223 		{ -2,  -2, 29,  63, 41,   2,  -3,  0 },
224 		{ -2,  -3, 27,  63, 43,   4,  -4,  0 },
225 		{ -2,  -3, 24,  61, 46,   6,  -4,  0 },
226 		{ -2,  -3, 21,  60, 49,   7,  -4,  0 },
227 		{ -1,  -4, 19,  59, 51,   9,  -4, -1 },
228 		{ -1,  -4, 16,  57, 53,  12,  -4, -1 },
229 		{ -1,  -4, 14,  55, 55,  14,  -4, -1 },
230 		{ -1,  -4, 12,  53, 57,  16,  -4, -1 },
231 		{ -1,  -4,  9,  51, 59,  19,  -4, -1 },
232 		{  0,  -4,  7,  49, 60,  21,  -3, -2 },
233 		{  0,  -4,  6,  46, 61,  24,  -3, -2 },
234 		{  0,  -4,  4,  43, 63,  27,  -3, -2 },
235 		{  0,  -3,  2,  41, 63,  29,  -2, -2 },
236 		{  0,  -3,  1,  38, 64,  32,  -1, -3 }
237 	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
238 		{ -1,   8, 33,  48, 33,   8,  -1,  0 },
239 		{ -1,   7, 31,  49, 35,   9,  -1, -1 },
240 		{ -1,   6, 30,  49, 36,  10,  -1, -1 },
241 		{ -1,   5, 28,  48, 38,  12,  -1, -1 },
242 		{ -1,   4, 26,  48, 39,  13,   0, -1 },
243 		{ -1,   3, 24,  47, 41,  15,   0, -1 },
244 		{ -1,   2, 23,  47, 42,  16,   0, -1 },
245 		{ -1,   2, 21,  45, 43,  18,   1, -1 },
246 		{ -1,   1, 19,  45, 45,  19,   1, -1 },
247 		{ -1,   1, 18,  43, 45,  21,   2, -1 },
248 		{ -1,   0, 16,  42, 47,  23,   2, -1 },
249 		{ -1,   0, 15,  41, 47,  24,   3, -1 },
250 		{ -1,   0, 13,  39, 48,  26,   4, -1 },
251 		{ -1,  -1, 12,  38, 48,  28,   5, -1 },
252 		{ -1,  -1, 10,  36, 49,  30,   6, -1 },
253 		{ -1,  -1,  9,  35, 49,  31,   7, -1 }
254 	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
255 		{  2,  13, 30,  38, 30,  13,   2,  0 },
256 		{  2,  12, 29,  38, 30,  14,   3,  0 },
257 		{  2,  11, 28,  38, 31,  15,   3,  0 },
258 		{  2,  10, 26,  38, 32,  16,   4,  0 },
259 		{  1,  10, 26,  37, 33,  17,   4,  0 },
260 		{  1,   9, 24,  37, 34,  18,   5,  0 },
261 		{  1,   8, 24,  37, 34,  19,   5,  0 },
262 		{  1,   7, 22,  36, 35,  20,   6,  1 },
263 		{  1,   6, 21,  36, 36,  21,   6,  1 },
264 		{  1,   6, 20,  35, 36,  22,   7,  1 },
265 		{  0,   5, 19,  34, 37,  24,   8,  1 },
266 		{  0,   5, 18,  34, 37,  24,   9,  1 },
267 		{  0,   4, 17,  33, 37,  26,  10,  1 },
268 		{  0,   4, 16,  32, 38,  26,  10,  2 },
269 		{  0,   3, 15,  31, 38,  28,  11,  2 },
270 		{  0,   3, 14,  30, 38,  29,  12,  2 }
271 	}
272 };
273 
274 /* 4-tap Filter Coefficient */
275 static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
276 	{	/* Ratio <= 65536 (~8:8) */
277 		{  0, 128,   0,  0 },
278 		{ -4, 127,   5,  0 },
279 		{ -6, 124,  11, -1 },
280 		{ -8, 118,  19, -1 },
281 		{ -8, 111,  27, -2 },
282 		{ -8, 102,  37, -3 },
283 		{ -8,  92,  48, -4 },
284 		{ -7,  81,  59, -5 },
285 		{ -6,  70,  70, -6 },
286 		{ -5,  59,  81, -7 },
287 		{ -4,  48,  92, -8 },
288 		{ -3,  37, 102, -8 },
289 		{ -2,  27, 111, -8 },
290 		{ -1,  19, 118, -8 },
291 		{ -1,  11, 124, -6 },
292 		{  0,   5, 127, -4 }
293 	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
294 		{  8, 112,   8,  0 },
295 		{  4, 111,  14, -1 },
296 		{  1, 109,  20, -2 },
297 		{ -2, 105,  27, -2 },
298 		{ -3, 100,  34, -3 },
299 		{ -5,  93,  43, -3 },
300 		{ -5,  86,  51, -4 },
301 		{ -5,  77,  60, -4 },
302 		{ -5,  69,  69, -5 },
303 		{ -4,  60,  77, -5 },
304 		{ -4,  51,  86, -5 },
305 		{ -3,  43,  93, -5 },
306 		{ -3,  34, 100, -3 },
307 		{ -2,  27, 105, -2 },
308 		{ -2,  20, 109,  1 },
309 		{ -1,  14, 111,  4 }
310 	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
311 		{ 16,  96,  16,  0 },
312 		{ 12,  97,  21, -2 },
313 		{  8,  96,  26, -2 },
314 		{  5,  93,  32, -2 },
315 		{  2,  89,  39, -2 },
316 		{  0,  84,  46, -2 },
317 		{ -1,  79,  53, -3 },
318 		{ -2,  73,  59, -2 },
319 		{ -2,  66,  66, -2 },
320 		{ -2,  59,  73, -2 },
321 		{ -3,  53,  79, -1 },
322 		{ -2,  46,  84,  0 },
323 		{ -2,  39,  89,  2 },
324 		{ -2,  32,  93,  5 },
325 		{ -2,  26,  96,  8 },
326 		{ -2,  21,  97, 12 }
327 	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
328 		{ 22,  84,  22,  0 },
329 		{ 18,  85,  26, -1 },
330 		{ 14,  84,  31, -1 },
331 		{ 11,  82,  36, -1 },
332 		{  8,  79,  42, -1 },
333 		{  6,  76,  47, -1 },
334 		{  4,  72,  52,  0 },
335 		{  2,  68,  58,  0 },
336 		{  1,  63,  63,  1 },
337 		{  0,  58,  68,  2 },
338 		{  0,  52,  72,  4 },
339 		{ -1,  47,  76,  6 },
340 		{ -1,  42,  79,  8 },
341 		{ -1,  36,  82, 11 },
342 		{ -1,  31,  84, 14 },
343 		{ -1,  26,  85, 18 }
344 	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
345 		{ 26,  76,  26,  0 },
346 		{ 22,  76,  30,  0 },
347 		{ 19,  75,  34,  0 },
348 		{ 16,  73,  38,  1 },
349 		{ 13,  71,  43,  1 },
350 		{ 10,  69,  47,  2 },
351 		{  8,  66,  51,  3 },
352 		{  6,  63,  55,  4 },
353 		{  5,  59,  59,  5 },
354 		{  4,  55,  63,  6 },
355 		{  3,  51,  66,  8 },
356 		{  2,  47,  69, 10 },
357 		{  1,  43,  71, 13 },
358 		{  1,  38,  73, 16 },
359 		{  0,  34,  75, 19 },
360 		{  0,  30,  76, 22 }
361 	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
362 		{ 29,  70,  29,  0 },
363 		{ 26,  68,  32,  2 },
364 		{ 23,  67,  36,  2 },
365 		{ 20,  66,  39,  3 },
366 		{ 17,  65,  43,  3 },
367 		{ 15,  63,  46,  4 },
368 		{ 12,  61,  50,  5 },
369 		{ 10,  58,  53,  7 },
370 		{  8,  56,  56,  8 },
371 		{  7,  53,  58, 10 },
372 		{  5,  50,  61, 12 },
373 		{  4,  46,  63, 15 },
374 		{  3,  43,  65, 17 },
375 		{  3,  39,  66, 20 },
376 		{  2,  36,  67, 23 },
377 		{  2,  32,  68, 26 }
378 	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
379 		{ 32,  64,  32,  0 },
380 		{ 28,  63,  34,  3 },
381 		{ 25,  62,  37,  4 },
382 		{ 22,  62,  40,  4 },
383 		{ 19,  61,  43,  5 },
384 		{ 17,  59,  46,  6 },
385 		{ 15,  58,  48,  7 },
386 		{ 13,  55,  51,  9 },
387 		{ 11,  53,  53, 11 },
388 		{  9,  51,  55, 13 },
389 		{  7,  48,  58, 15 },
390 		{  6,  46,  59, 17 },
391 		{  5,  43,  61, 19 },
392 		{  4,  40,  62, 22 },
393 		{  4,  37,  62, 25 },
394 		{  3,  34,  63, 28 }
395 	}
396 };
397 
398 static int gsc_sw_reset(struct gsc_context *ctx)
399 {
400 	u32 cfg;
401 	int count = GSC_RESET_TIMEOUT;
402 
403 	DRM_DEBUG_KMS("%s\n", __func__);
404 
405 	/* s/w reset */
406 	cfg = (GSC_SW_RESET_SRESET);
407 	gsc_write(cfg, GSC_SW_RESET);
408 
409 	/* wait s/w reset complete */
410 	while (count--) {
411 		cfg = gsc_read(GSC_SW_RESET);
412 		if (!cfg)
413 			break;
414 		usleep_range(1000, 2000);
415 	}
416 
417 	if (cfg) {
418 		DRM_ERROR("failed to reset gsc h/w.\n");
419 		return -EBUSY;
420 	}
421 
422 	/* reset sequence */
423 	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
424 	cfg |= (GSC_IN_BASE_ADDR_MASK |
425 		GSC_IN_BASE_ADDR_PINGPONG(0));
426 	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
427 	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
428 	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
429 
430 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
431 	cfg |= (GSC_OUT_BASE_ADDR_MASK |
432 		GSC_OUT_BASE_ADDR_PINGPONG(0));
433 	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
434 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
435 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
436 
437 	return 0;
438 }
439 
440 static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
441 {
442 	u32 gscblk_cfg;
443 
444 	DRM_DEBUG_KMS("%s\n", __func__);
445 
446 	gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
447 
448 	if (enable)
449 		gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
450 				GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
451 				GSC_BLK_SW_RESET_WB_DEST(ctx->id);
452 	else
453 		gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
454 
455 	writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
456 }
457 
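/*
 * Enable or disable the GSC interrupt. When @overflow or @done is true
 * the corresponding interrupt source (overflow, frame done) is unmasked,
 * otherwise it stays masked.
 */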
458 static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
459 		bool overflow, bool done)
460 {
461 	u32 cfg;
462 
	DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]done[%d]\n", __func__,
464 			enable, overflow, done);
465 
466 	cfg = gsc_read(GSC_IRQ);
467 	cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
468 
469 	if (enable)
470 		cfg |= GSC_IRQ_ENABLE;
471 	else
472 		cfg &= ~GSC_IRQ_ENABLE;
473 
474 	if (overflow)
475 		cfg &= ~GSC_IRQ_OR_MASK;
476 	else
477 		cfg |= GSC_IRQ_OR_MASK;
478 
479 	if (done)
480 		cfg &= ~GSC_IRQ_FRMDONE_MASK;
481 	else
482 		cfg |= GSC_IRQ_FRMDONE_MASK;
483 
484 	gsc_write(cfg, GSC_IRQ);
485 }
486 
487 
488 static int gsc_src_set_fmt(struct device *dev, u32 fmt)
489 {
490 	struct gsc_context *ctx = get_gsc_context(dev);
491 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
492 	u32 cfg;
493 
494 	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
495 
496 	cfg = gsc_read(GSC_IN_CON);
497 	cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
498 		 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
499 		 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
500 		 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
501 
502 	switch (fmt) {
503 	case DRM_FORMAT_RGB565:
504 		cfg |= GSC_IN_RGB565;
505 		break;
506 	case DRM_FORMAT_XRGB8888:
507 		cfg |= GSC_IN_XRGB8888;
508 		break;
509 	case DRM_FORMAT_BGRX8888:
510 		cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
511 		break;
512 	case DRM_FORMAT_YUYV:
513 		cfg |= (GSC_IN_YUV422_1P |
514 			GSC_IN_YUV422_1P_ORDER_LSB_Y |
515 			GSC_IN_CHROMA_ORDER_CBCR);
516 		break;
517 	case DRM_FORMAT_YVYU:
518 		cfg |= (GSC_IN_YUV422_1P |
519 			GSC_IN_YUV422_1P_ORDER_LSB_Y |
520 			GSC_IN_CHROMA_ORDER_CRCB);
521 		break;
522 	case DRM_FORMAT_UYVY:
523 		cfg |= (GSC_IN_YUV422_1P |
524 			GSC_IN_YUV422_1P_OEDER_LSB_C |
525 			GSC_IN_CHROMA_ORDER_CBCR);
526 		break;
527 	case DRM_FORMAT_VYUY:
528 		cfg |= (GSC_IN_YUV422_1P |
529 			GSC_IN_YUV422_1P_OEDER_LSB_C |
530 			GSC_IN_CHROMA_ORDER_CRCB);
531 		break;
532 	case DRM_FORMAT_NV21:
533 	case DRM_FORMAT_NV61:
534 		cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
535 			GSC_IN_YUV420_2P);
536 		break;
537 	case DRM_FORMAT_YUV422:
538 		cfg |= GSC_IN_YUV422_3P;
539 		break;
540 	case DRM_FORMAT_YUV420:
541 	case DRM_FORMAT_YVU420:
542 		cfg |= GSC_IN_YUV420_3P;
543 		break;
544 	case DRM_FORMAT_NV12:
545 	case DRM_FORMAT_NV16:
546 		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
547 			GSC_IN_YUV420_2P);
548 		break;
549 	case DRM_FORMAT_NV12MT:
550 		cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
551 		break;
552 	default:
		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
554 		return -EINVAL;
555 	}
556 
557 	gsc_write(cfg, GSC_IN_CON);
558 
559 	return 0;
560 }
561 
562 static int gsc_src_set_transf(struct device *dev,
563 		enum drm_exynos_degree degree,
564 		enum drm_exynos_flip flip, bool *swap)
565 {
566 	struct gsc_context *ctx = get_gsc_context(dev);
567 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
568 	u32 cfg;
569 
570 	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
571 		degree, flip);
572 
573 	cfg = gsc_read(GSC_IN_CON);
574 	cfg &= ~GSC_IN_ROT_MASK;
575 
576 	switch (degree) {
577 	case EXYNOS_DRM_DEGREE_0:
578 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
579 			cfg |= GSC_IN_ROT_XFLIP;
580 		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
581 			cfg |= GSC_IN_ROT_YFLIP;
582 		break;
583 	case EXYNOS_DRM_DEGREE_90:
584 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
585 			cfg |= GSC_IN_ROT_90_XFLIP;
586 		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
587 			cfg |= GSC_IN_ROT_90_YFLIP;
588 		else
589 			cfg |= GSC_IN_ROT_90;
590 		break;
591 	case EXYNOS_DRM_DEGREE_180:
592 		cfg |= GSC_IN_ROT_180;
593 		break;
594 	case EXYNOS_DRM_DEGREE_270:
595 		cfg |= GSC_IN_ROT_270;
596 		break;
597 	default:
		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
599 		return -EINVAL;
600 	}
601 
602 	gsc_write(cfg, GSC_IN_CON);
603 
604 	ctx->rotation = cfg &
605 		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
606 	*swap = ctx->rotation;
607 
608 	return 0;
609 }
610 
611 static int gsc_src_set_size(struct device *dev, int swap,
612 		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
613 {
614 	struct gsc_context *ctx = get_gsc_context(dev);
615 	struct drm_exynos_pos img_pos = *pos;
616 	struct gsc_scaler *sc = &ctx->sc;
617 	u32 cfg;
618 
619 	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
620 		__func__, swap, pos->x, pos->y, pos->w, pos->h);
621 
622 	if (swap) {
623 		img_pos.w = pos->h;
624 		img_pos.h = pos->w;
625 	}
626 
627 	/* pixel offset */
628 	cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
629 		GSC_SRCIMG_OFFSET_Y(img_pos.y));
630 	gsc_write(cfg, GSC_SRCIMG_OFFSET);
631 
632 	/* cropped size */
633 	cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
634 		GSC_CROPPED_HEIGHT(img_pos.h));
635 	gsc_write(cfg, GSC_CROPPED_SIZE);
636 
637 	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
638 		__func__, sz->hsize, sz->vsize);
639 
640 	/* original size */
641 	cfg = gsc_read(GSC_SRCIMG_SIZE);
642 	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
643 		GSC_SRCIMG_WIDTH_MASK);
644 
645 	cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
646 		GSC_SRCIMG_HEIGHT(sz->vsize));
647 
648 	gsc_write(cfg, GSC_SRCIMG_SIZE);
649 
650 	cfg = gsc_read(GSC_IN_CON);
651 	cfg &= ~GSC_IN_RGB_TYPE_MASK;
652 
653 	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
654 		__func__, pos->w, sc->range);
655 
	if (pos->w >= GSC_WIDTH_ITU_709) {
		if (sc->range)
			cfg |= GSC_IN_RGB_HD_WIDE;
		else
			cfg |= GSC_IN_RGB_HD_NARROW;
	} else {
		if (sc->range)
			cfg |= GSC_IN_RGB_SD_WIDE;
		else
			cfg |= GSC_IN_RGB_SD_NARROW;
	}
666 
667 	gsc_write(cfg, GSC_IN_CON);
668 
669 	return 0;
670 }
671 
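/*
 * Mark a source buffer slot as enqueued (mask bit cleared) or dequeued
 * (mask bit set) in the Y/CB/CR base address mask registers.
 */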
672 static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
673 		enum drm_exynos_ipp_buf_type buf_type)
674 {
675 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
676 	bool masked;
677 	u32 cfg;
678 	u32 mask = 0x00000001 << buf_id;
679 
680 	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
681 		buf_id, buf_type);
682 
683 	/* mask register set */
684 	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
685 
686 	switch (buf_type) {
687 	case IPP_BUF_ENQUEUE:
688 		masked = false;
689 		break;
690 	case IPP_BUF_DEQUEUE:
691 		masked = true;
692 		break;
693 	default:
694 		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
695 		return -EINVAL;
696 	}
697 
698 	/* sequence id */
699 	cfg &= ~mask;
700 	cfg |= masked << buf_id;
701 	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
702 	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
703 	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
704 
705 	return 0;
706 }
707 
708 static int gsc_src_set_addr(struct device *dev,
709 		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
710 		enum drm_exynos_ipp_buf_type buf_type)
711 {
712 	struct gsc_context *ctx = get_gsc_context(dev);
713 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
714 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
715 	struct drm_exynos_ipp_property *property;
716 
717 	if (!c_node) {
718 		DRM_ERROR("failed to get c_node.\n");
719 		return -EFAULT;
720 	}
721 
722 	property = &c_node->property;
723 
724 	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
725 		property->prop_id, buf_id, buf_type);
726 
727 	if (buf_id > GSC_MAX_SRC) {
		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
729 		return -EINVAL;
730 	}
731 
732 	/* address register set */
733 	switch (buf_type) {
734 	case IPP_BUF_ENQUEUE:
735 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
736 			GSC_IN_BASE_ADDR_Y(buf_id));
737 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
738 			GSC_IN_BASE_ADDR_CB(buf_id));
739 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
740 			GSC_IN_BASE_ADDR_CR(buf_id));
741 		break;
742 	case IPP_BUF_DEQUEUE:
743 		gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
744 		gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
745 		gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
746 		break;
747 	default:
748 		/* bypass */
749 		break;
750 	}
751 
752 	return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
753 }
754 
755 static struct exynos_drm_ipp_ops gsc_src_ops = {
756 	.set_fmt = gsc_src_set_fmt,
757 	.set_transf = gsc_src_set_transf,
758 	.set_size = gsc_src_set_size,
759 	.set_addr = gsc_src_set_addr,
760 };
761 
762 static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
763 {
764 	struct gsc_context *ctx = get_gsc_context(dev);
765 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
766 	u32 cfg;
767 
768 	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
769 
770 	cfg = gsc_read(GSC_OUT_CON);
771 	cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
772 		 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
773 		 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
774 		 GSC_OUT_GLOBAL_ALPHA_MASK);
775 
776 	switch (fmt) {
777 	case DRM_FORMAT_RGB565:
778 		cfg |= GSC_OUT_RGB565;
779 		break;
780 	case DRM_FORMAT_XRGB8888:
781 		cfg |= GSC_OUT_XRGB8888;
782 		break;
783 	case DRM_FORMAT_BGRX8888:
784 		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
785 		break;
786 	case DRM_FORMAT_YUYV:
787 		cfg |= (GSC_OUT_YUV422_1P |
788 			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
789 			GSC_OUT_CHROMA_ORDER_CBCR);
790 		break;
791 	case DRM_FORMAT_YVYU:
792 		cfg |= (GSC_OUT_YUV422_1P |
793 			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
794 			GSC_OUT_CHROMA_ORDER_CRCB);
795 		break;
796 	case DRM_FORMAT_UYVY:
797 		cfg |= (GSC_OUT_YUV422_1P |
798 			GSC_OUT_YUV422_1P_OEDER_LSB_C |
799 			GSC_OUT_CHROMA_ORDER_CBCR);
800 		break;
801 	case DRM_FORMAT_VYUY:
802 		cfg |= (GSC_OUT_YUV422_1P |
803 			GSC_OUT_YUV422_1P_OEDER_LSB_C |
804 			GSC_OUT_CHROMA_ORDER_CRCB);
805 		break;
806 	case DRM_FORMAT_NV21:
807 	case DRM_FORMAT_NV61:
808 		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
809 		break;
810 	case DRM_FORMAT_YUV422:
811 	case DRM_FORMAT_YUV420:
812 	case DRM_FORMAT_YVU420:
813 		cfg |= GSC_OUT_YUV420_3P;
814 		break;
815 	case DRM_FORMAT_NV12:
816 	case DRM_FORMAT_NV16:
817 		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
818 			GSC_OUT_YUV420_2P);
819 		break;
820 	case DRM_FORMAT_NV12MT:
821 		cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
822 		break;
823 	default:
		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
825 		return -EINVAL;
826 	}
827 
828 	gsc_write(cfg, GSC_OUT_CON);
829 
830 	return 0;
831 }
832 
833 static int gsc_dst_set_transf(struct device *dev,
834 		enum drm_exynos_degree degree,
835 		enum drm_exynos_flip flip, bool *swap)
836 {
837 	struct gsc_context *ctx = get_gsc_context(dev);
838 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
839 	u32 cfg;
840 
841 	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
842 		degree, flip);
843 
844 	cfg = gsc_read(GSC_IN_CON);
845 	cfg &= ~GSC_IN_ROT_MASK;
846 
847 	switch (degree) {
848 	case EXYNOS_DRM_DEGREE_0:
849 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
850 			cfg |= GSC_IN_ROT_XFLIP;
851 		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
852 			cfg |= GSC_IN_ROT_YFLIP;
853 		break;
854 	case EXYNOS_DRM_DEGREE_90:
855 		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
856 			cfg |= GSC_IN_ROT_90_XFLIP;
857 		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
858 			cfg |= GSC_IN_ROT_90_YFLIP;
859 		else
860 			cfg |= GSC_IN_ROT_90;
861 		break;
862 	case EXYNOS_DRM_DEGREE_180:
863 		cfg |= GSC_IN_ROT_180;
864 		break;
865 	case EXYNOS_DRM_DEGREE_270:
866 		cfg |= GSC_IN_ROT_270;
867 		break;
868 	default:
		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
870 		return -EINVAL;
871 	}
872 
873 	gsc_write(cfg, GSC_IN_CON);
874 
875 	ctx->rotation = cfg &
876 		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
877 	*swap = ctx->rotation;
878 
879 	return 0;
880 }
881 
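/*
 * Select the pre-scaler down ratio (1, 2 or 4). The pre-scaler only
 * reduces by powers of two; the remaining fraction is left to the main
 * scaler. Downscaling by 8x or more is out of range and rejected.
 */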
882 static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
883 {
884 	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
885 
886 	if (src >= dst * 8) {
887 		DRM_ERROR("failed to make ratio and shift.\n");
888 		return -EINVAL;
889 	} else if (src >= dst * 4)
890 		*ratio = 4;
891 	else if (src >= dst * 2)
892 		*ratio = 2;
893 	else
894 		*ratio = 1;
895 
896 	return 0;
897 }
898 
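/* The pre-scaler shift factor is log2(pre_hratio * pre_vratio). */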
899 static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
900 {
901 	if (hratio == 4 && vratio == 4)
902 		*shfactor = 4;
903 	else if ((hratio == 4 && vratio == 2) ||
904 		 (hratio == 2 && vratio == 4))
905 		*shfactor = 3;
906 	else if ((hratio == 4 && vratio == 1) ||
907 		 (hratio == 1 && vratio == 4) ||
908 		 (hratio == 2 && vratio == 2))
909 		*shfactor = 2;
910 	else if (hratio == 1 && vratio == 1)
911 		*shfactor = 0;
912 	else
913 		*shfactor = 1;
914 }
915 
916 static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
917 		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
918 {
919 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
920 	u32 cfg;
921 	u32 src_w, src_h, dst_w, dst_h;
922 	int ret = 0;
923 
924 	src_w = src->w;
925 	src_h = src->h;
926 
927 	if (ctx->rotation) {
928 		dst_w = dst->h;
929 		dst_h = dst->w;
930 	} else {
931 		dst_w = dst->w;
932 		dst_h = dst->h;
933 	}
934 
935 	ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
936 	if (ret) {
		dev_err(ippdrv->dev, "failed to get horizontal ratio.\n");
938 		return ret;
939 	}
940 
941 	ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
942 	if (ret) {
		dev_err(ippdrv->dev, "failed to get vertical ratio.\n");
944 		return ret;
945 	}
946 
947 	DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
948 		__func__, sc->pre_hratio, sc->pre_vratio);
949 
950 	sc->main_hratio = (src_w << 16) / dst_w;
951 	sc->main_vratio = (src_h << 16) / dst_h;
952 
953 	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
954 		__func__, sc->main_hratio, sc->main_vratio);
955 
956 	gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
957 		&sc->pre_shfactor);
958 
959 	DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
960 		sc->pre_shfactor);
961 
962 	cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
963 		GSC_PRESC_H_RATIO(sc->pre_hratio) |
964 		GSC_PRESC_V_RATIO(sc->pre_vratio));
965 	gsc_write(cfg, GSC_PRE_SCALE_RATIO);
966 
967 	return ret;
968 }
969 
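/*
 * Program the 8-tap horizontal filter coefficients that match the
 * main horizontal scaling ratio band.
 */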
970 static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
971 {
972 	int i, j, k, sc_ratio;
973 
974 	if (main_hratio <= GSC_SC_UP_MAX_RATIO)
975 		sc_ratio = 0;
976 	else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
977 		sc_ratio = 1;
978 	else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
979 		sc_ratio = 2;
980 	else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
981 		sc_ratio = 3;
982 	else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
983 		sc_ratio = 4;
984 	else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
985 		sc_ratio = 5;
986 	else
987 		sc_ratio = 6;
988 
989 	for (i = 0; i < GSC_COEF_PHASE; i++)
990 		for (j = 0; j < GSC_COEF_H_8T; j++)
991 			for (k = 0; k < GSC_COEF_DEPTH; k++)
992 				gsc_write(h_coef_8t[sc_ratio][i][j],
993 					GSC_HCOEF(i, j, k));
994 }
995 
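/* Same as gsc_set_h_coef() but for the 4-tap vertical filter. */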
996 static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
997 {
998 	int i, j, k, sc_ratio;
999 
1000 	if (main_vratio <= GSC_SC_UP_MAX_RATIO)
1001 		sc_ratio = 0;
1002 	else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
1003 		sc_ratio = 1;
1004 	else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
1005 		sc_ratio = 2;
1006 	else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
1007 		sc_ratio = 3;
1008 	else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
1009 		sc_ratio = 4;
1010 	else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
1011 		sc_ratio = 5;
1012 	else
1013 		sc_ratio = 6;
1014 
1015 	for (i = 0; i < GSC_COEF_PHASE; i++)
1016 		for (j = 0; j < GSC_COEF_V_4T; j++)
1017 			for (k = 0; k < GSC_COEF_DEPTH; k++)
1018 				gsc_write(v_coef_4t[sc_ratio][i][j],
1019 					GSC_VCOEF(i, j, k));
1020 }
1021 
1022 static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1023 {
1024 	u32 cfg;
1025 
1026 	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
1027 		__func__, sc->main_hratio, sc->main_vratio);
1028 
1029 	gsc_set_h_coef(ctx, sc->main_hratio);
1030 	cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
1031 	gsc_write(cfg, GSC_MAIN_H_RATIO);
1032 
1033 	gsc_set_v_coef(ctx, sc->main_vratio);
1034 	cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
1035 	gsc_write(cfg, GSC_MAIN_V_RATIO);
1036 }
1037 
1038 static int gsc_dst_set_size(struct device *dev, int swap,
1039 		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1040 {
1041 	struct gsc_context *ctx = get_gsc_context(dev);
1042 	struct drm_exynos_pos img_pos = *pos;
1043 	struct gsc_scaler *sc = &ctx->sc;
1044 	u32 cfg;
1045 
1046 	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1047 		__func__, swap, pos->x, pos->y, pos->w, pos->h);
1048 
1049 	if (swap) {
1050 		img_pos.w = pos->h;
1051 		img_pos.h = pos->w;
1052 	}
1053 
1054 	/* pixel offset */
1055 	cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
1056 		GSC_DSTIMG_OFFSET_Y(pos->y));
1057 	gsc_write(cfg, GSC_DSTIMG_OFFSET);
1058 
1059 	/* scaled size */
1060 	cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
1061 	gsc_write(cfg, GSC_SCALED_SIZE);
1062 
1063 	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
1064 		__func__, sz->hsize, sz->vsize);
1065 
1066 	/* original size */
1067 	cfg = gsc_read(GSC_DSTIMG_SIZE);
1068 	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
1069 		GSC_DSTIMG_WIDTH_MASK);
1070 	cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
1071 		GSC_DSTIMG_HEIGHT(sz->vsize));
1072 	gsc_write(cfg, GSC_DSTIMG_SIZE);
1073 
1074 	cfg = gsc_read(GSC_OUT_CON);
1075 	cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1076 
1077 	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
1078 		__func__, pos->w, sc->range);
1079 
	if (pos->w >= GSC_WIDTH_ITU_709) {
		if (sc->range)
			cfg |= GSC_OUT_RGB_HD_WIDE;
		else
			cfg |= GSC_OUT_RGB_HD_NARROW;
	} else {
		if (sc->range)
			cfg |= GSC_OUT_RGB_SD_WIDE;
		else
			cfg |= GSC_OUT_RGB_SD_NARROW;
	}
1090 
1091 	gsc_write(cfg, GSC_OUT_CON);
1092 
1093 	return 0;
1094 }
1095 
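/*
 * Return the number of destination buffers currently enqueued, i.e.
 * the number of unmasked slots in GSC_OUT_BASE_ADDR_Y_MASK.
 */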
1096 static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1097 {
1098 	u32 cfg, i, buf_num = GSC_REG_SZ;
1099 	u32 mask = 0x00000001;
1100 
1101 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1102 
1103 	for (i = 0; i < GSC_REG_SZ; i++)
1104 		if (cfg & (mask << i))
1105 			buf_num--;
1106 
1107 	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1108 
1109 	return buf_num;
1110 }
1111 
1112 static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1113 		enum drm_exynos_ipp_buf_type buf_type)
1114 {
1115 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1116 	bool masked;
1117 	u32 cfg;
1118 	u32 mask = 0x00000001 << buf_id;
1119 	int ret = 0;
1120 
1121 	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
1122 		buf_id, buf_type);
1123 
1124 	mutex_lock(&ctx->lock);
1125 
1126 	/* mask register set */
1127 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1128 
1129 	switch (buf_type) {
1130 	case IPP_BUF_ENQUEUE:
1131 		masked = false;
1132 		break;
1133 	case IPP_BUF_DEQUEUE:
1134 		masked = true;
1135 		break;
1136 	default:
1137 		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
		ret = -EINVAL;
1139 		goto err_unlock;
1140 	}
1141 
1142 	/* sequence id */
1143 	cfg &= ~mask;
1144 	cfg |= masked << buf_id;
1145 	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
1146 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
1147 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
1148 
1149 	/* interrupt enable */
1150 	if (buf_type == IPP_BUF_ENQUEUE &&
1151 	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
1152 		gsc_handle_irq(ctx, true, false, true);
1153 
1154 	/* interrupt disable */
1155 	if (buf_type == IPP_BUF_DEQUEUE &&
1156 	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
1157 		gsc_handle_irq(ctx, false, false, true);
1158 
1159 err_unlock:
1160 	mutex_unlock(&ctx->lock);
1161 	return ret;
1162 }
1163 
1164 static int gsc_dst_set_addr(struct device *dev,
1165 		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1166 		enum drm_exynos_ipp_buf_type buf_type)
1167 {
1168 	struct gsc_context *ctx = get_gsc_context(dev);
1169 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1170 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1171 	struct drm_exynos_ipp_property *property;
1172 
1173 	if (!c_node) {
1174 		DRM_ERROR("failed to get c_node.\n");
1175 		return -EFAULT;
1176 	}
1177 
1178 	property = &c_node->property;
1179 
1180 	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1181 		property->prop_id, buf_id, buf_type);
1182 
1183 	if (buf_id > GSC_MAX_DST) {
		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1185 		return -EINVAL;
1186 	}
1187 
1188 	/* address register set */
1189 	switch (buf_type) {
1190 	case IPP_BUF_ENQUEUE:
1191 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1192 			GSC_OUT_BASE_ADDR_Y(buf_id));
1193 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1194 			GSC_OUT_BASE_ADDR_CB(buf_id));
1195 		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1196 			GSC_OUT_BASE_ADDR_CR(buf_id));
1197 		break;
1198 	case IPP_BUF_DEQUEUE:
1199 		gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
1200 		gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
1201 		gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
1202 		break;
1203 	default:
1204 		/* bypass */
1205 		break;
1206 	}
1207 
1208 	return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
1209 }
1210 
1211 static struct exynos_drm_ipp_ops gsc_dst_ops = {
1212 	.set_fmt = gsc_dst_set_fmt,
1213 	.set_transf = gsc_dst_set_transf,
1214 	.set_size = gsc_dst_set_size,
1215 	.set_addr = gsc_dst_set_addr,
1216 };
1217 
1218 static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1219 {
1220 	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1221 
1222 	if (enable) {
1223 		clk_enable(ctx->gsc_clk);
1224 		ctx->suspended = false;
1225 	} else {
1226 		clk_disable(ctx->gsc_clk);
1227 		ctx->suspended = true;
1228 	}
1229 
1230 	return 0;
1231 }
1232 
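/*
 * Find the first enqueued source buffer at or after the hardware's
 * current index, dequeue it and return its buffer id.
 */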
1233 static int gsc_get_src_buf_index(struct gsc_context *ctx)
1234 {
1235 	u32 cfg, curr_index, i;
1236 	u32 buf_id = GSC_MAX_SRC;
1237 	int ret;
1238 
1239 	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1240 
1241 	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
1242 	curr_index = GSC_IN_CURR_GET_INDEX(cfg);
1243 
1244 	for (i = curr_index; i < GSC_MAX_SRC; i++) {
1245 		if (!((cfg >> i) & 0x1)) {
1246 			buf_id = i;
1247 			break;
1248 		}
1249 	}
1250 
1251 	if (buf_id == GSC_MAX_SRC) {
1252 		DRM_ERROR("failed to get in buffer index.\n");
1253 		return -EINVAL;
1254 	}
1255 
1256 	ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1257 	if (ret < 0) {
1258 		DRM_ERROR("failed to dequeue.\n");
1259 		return ret;
1260 	}
1261 
1262 	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1263 		curr_index, buf_id);
1264 
1265 	return buf_id;
1266 }
1267 
1268 static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1269 {
1270 	u32 cfg, curr_index, i;
1271 	u32 buf_id = GSC_MAX_DST;
1272 	int ret;
1273 
1274 	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1275 
1276 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1277 	curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
1278 
1279 	for (i = curr_index; i < GSC_MAX_DST; i++) {
1280 		if (!((cfg >> i) & 0x1)) {
1281 			buf_id = i;
1282 			break;
1283 		}
1284 	}
1285 
1286 	if (buf_id == GSC_MAX_DST) {
1287 		DRM_ERROR("failed to get out buffer index.\n");
1288 		return -EINVAL;
1289 	}
1290 
1291 	ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1292 	if (ret < 0) {
1293 		DRM_ERROR("failed to dequeue.\n");
1294 		return ret;
1295 	}
1296 
1297 	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1298 		curr_index, buf_id);
1299 
1300 	return buf_id;
1301 }
1302 
1303 static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1304 {
1305 	struct gsc_context *ctx = dev_id;
1306 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1307 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1308 	struct drm_exynos_ipp_event_work *event_work =
1309 		c_node->event_work;
1310 	u32 status;
1311 	int buf_id[EXYNOS_DRM_OPS_MAX];
1312 
1313 	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1314 
1315 	status = gsc_read(GSC_IRQ);
1316 	if (status & GSC_IRQ_STATUS_OR_IRQ) {
		dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
1318 			ctx->id, status);
1319 		return IRQ_NONE;
1320 	}
1321 
1322 	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
		dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
1324 			ctx->id, status);
1325 
1326 		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
1327 		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
1328 			return IRQ_HANDLED;
1329 
1330 		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
1331 		if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
1332 			return IRQ_HANDLED;
1333 
1334 		DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
1335 			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
1336 
1337 		event_work->ippdrv = ippdrv;
1338 		event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
1339 			buf_id[EXYNOS_DRM_OPS_SRC];
1340 		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1341 			buf_id[EXYNOS_DRM_OPS_DST];
1342 		queue_work(ippdrv->event_workq,
1343 			(struct work_struct *)event_work);
1344 	}
1345 
1346 	return IRQ_HANDLED;
1347 }
1348 
1349 static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1350 {
1351 	struct drm_exynos_ipp_prop_list *prop_list;
1352 
1353 	DRM_DEBUG_KMS("%s\n", __func__);
1354 
1355 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1356 	if (!prop_list) {
1357 		DRM_ERROR("failed to alloc property list.\n");
1358 		return -ENOMEM;
1359 	}
1360 
1361 	prop_list->version = 1;
1362 	prop_list->writeback = 1;
1363 	prop_list->refresh_min = GSC_REFRESH_MIN;
1364 	prop_list->refresh_max = GSC_REFRESH_MAX;
1365 	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1366 				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1367 	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1368 				(1 << EXYNOS_DRM_DEGREE_90) |
1369 				(1 << EXYNOS_DRM_DEGREE_180) |
1370 				(1 << EXYNOS_DRM_DEGREE_270);
1371 	prop_list->csc = 1;
1372 	prop_list->crop = 1;
1373 	prop_list->crop_max.hsize = GSC_CROP_MAX;
1374 	prop_list->crop_max.vsize = GSC_CROP_MAX;
1375 	prop_list->crop_min.hsize = GSC_CROP_MIN;
1376 	prop_list->crop_min.vsize = GSC_CROP_MIN;
1377 	prop_list->scale = 1;
1378 	prop_list->scale_max.hsize = GSC_SCALE_MAX;
1379 	prop_list->scale_max.vsize = GSC_SCALE_MAX;
1380 	prop_list->scale_min.hsize = GSC_SCALE_MIN;
1381 	prop_list->scale_min.vsize = GSC_SCALE_MIN;
1382 
1383 	ippdrv->prop_list = prop_list;
1384 
1385 	return 0;
1386 }
1387 
1388 static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1389 {
1390 	switch (flip) {
1391 	case EXYNOS_DRM_FLIP_NONE:
1392 	case EXYNOS_DRM_FLIP_VERTICAL:
1393 	case EXYNOS_DRM_FLIP_HORIZONTAL:
1394 	case EXYNOS_DRM_FLIP_BOTH:
1395 		return true;
1396 	default:
1397 		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1398 		return false;
1399 	}
1400 }
1401 
1402 static int gsc_ippdrv_check_property(struct device *dev,
1403 		struct drm_exynos_ipp_property *property)
1404 {
1405 	struct gsc_context *ctx = get_gsc_context(dev);
1406 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1407 	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
1408 	struct drm_exynos_ipp_config *config;
1409 	struct drm_exynos_pos *pos;
1410 	struct drm_exynos_sz *sz;
1411 	bool swap;
1412 	int i;
1413 
1414 	DRM_DEBUG_KMS("%s\n", __func__);
1415 
1416 	for_each_ipp_ops(i) {
1417 		if ((i == EXYNOS_DRM_OPS_SRC) &&
1418 			(property->cmd == IPP_CMD_WB))
1419 			continue;
1420 
1421 		config = &property->config[i];
1422 		pos = &config->pos;
1423 		sz = &config->sz;
1424 
1425 		/* check for flip */
1426 		if (!gsc_check_drm_flip(config->flip)) {
1427 			DRM_ERROR("invalid flip.\n");
1428 			goto err_property;
1429 		}
1430 
1431 		/* check for degree */
1432 		switch (config->degree) {
1433 		case EXYNOS_DRM_DEGREE_90:
1434 		case EXYNOS_DRM_DEGREE_270:
1435 			swap = true;
1436 			break;
1437 		case EXYNOS_DRM_DEGREE_0:
1438 		case EXYNOS_DRM_DEGREE_180:
1439 			swap = false;
1440 			break;
1441 		default:
1442 			DRM_ERROR("invalid degree.\n");
1443 			goto err_property;
1444 		}
1445 
1446 		/* check for buffer bound */
1447 		if ((pos->x + pos->w > sz->hsize) ||
1448 			(pos->y + pos->h > sz->vsize)) {
1449 			DRM_ERROR("out of buf bound.\n");
1450 			goto err_property;
1451 		}
1452 
1453 		/* check for crop */
1454 		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
1455 			if (swap) {
1456 				if ((pos->h < pp->crop_min.hsize) ||
1457 					(sz->vsize > pp->crop_max.hsize) ||
1458 					(pos->w < pp->crop_min.vsize) ||
1459 					(sz->hsize > pp->crop_max.vsize)) {
1460 					DRM_ERROR("out of crop size.\n");
1461 					goto err_property;
1462 				}
1463 			} else {
1464 				if ((pos->w < pp->crop_min.hsize) ||
1465 					(sz->hsize > pp->crop_max.hsize) ||
1466 					(pos->h < pp->crop_min.vsize) ||
1467 					(sz->vsize > pp->crop_max.vsize)) {
1468 					DRM_ERROR("out of crop size.\n");
1469 					goto err_property;
1470 				}
1471 			}
1472 		}
1473 
1474 		/* check for scale */
1475 		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1476 			if (swap) {
1477 				if ((pos->h < pp->scale_min.hsize) ||
1478 					(sz->vsize > pp->scale_max.hsize) ||
1479 					(pos->w < pp->scale_min.vsize) ||
1480 					(sz->hsize > pp->scale_max.vsize)) {
1481 					DRM_ERROR("out of scale size.\n");
1482 					goto err_property;
1483 				}
1484 			} else {
1485 				if ((pos->w < pp->scale_min.hsize) ||
1486 					(sz->hsize > pp->scale_max.hsize) ||
1487 					(pos->h < pp->scale_min.vsize) ||
1488 					(sz->vsize > pp->scale_max.vsize)) {
1489 					DRM_ERROR("out of scale size.\n");
1490 					goto err_property;
1491 				}
1492 			}
1493 		}
1494 	}
1495 
1496 	return 0;
1497 
1498 err_property:
1499 	for_each_ipp_ops(i) {
1500 		if ((i == EXYNOS_DRM_OPS_SRC) &&
1501 			(property->cmd == IPP_CMD_WB))
1502 			continue;
1503 
1504 		config = &property->config[i];
1505 		pos = &config->pos;
1506 		sz = &config->sz;
1507 
1508 		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1509 			i ? "dst" : "src", config->flip, config->degree,
1510 			pos->x, pos->y, pos->w, pos->h,
1511 			sz->hsize, sz->vsize);
1512 	}
1513 
1514 	return -EINVAL;
1515 }
1516 
1517 
1518 static int gsc_ippdrv_reset(struct device *dev)
1519 {
1520 	struct gsc_context *ctx = get_gsc_context(dev);
1521 	struct gsc_scaler *sc = &ctx->sc;
1522 	int ret;
1523 
1524 	DRM_DEBUG_KMS("%s\n", __func__);
1525 
1526 	/* reset h/w block */
1527 	ret = gsc_sw_reset(ctx);
1528 	if (ret < 0) {
1529 		dev_err(dev, "failed to reset hardware.\n");
1530 		return ret;
1531 	}
1532 
1533 	/* scaler setting */
1534 	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1535 	sc->range = true;
1536 
1537 	return 0;
1538 }
1539 
1540 static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1541 {
1542 	struct gsc_context *ctx = get_gsc_context(dev);
1543 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1544 	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1545 	struct drm_exynos_ipp_property *property;
1546 	struct drm_exynos_ipp_config *config;
1547 	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
1548 	struct drm_exynos_ipp_set_wb set_wb;
1549 	u32 cfg;
1550 	int ret, i;
1551 
1552 	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1553 
1554 	if (!c_node) {
1555 		DRM_ERROR("failed to get c_node.\n");
1556 		return -EINVAL;
1557 	}
1558 
1559 	property = &c_node->property;
1560 
1561 	gsc_handle_irq(ctx, true, false, true);
1562 
1563 	for_each_ipp_ops(i) {
1564 		config = &property->config[i];
1565 		img_pos[i] = config->pos;
1566 	}
1567 
1568 	switch (cmd) {
1569 	case IPP_CMD_M2M:
1570 		/* enable one shot */
1571 		cfg = gsc_read(GSC_ENABLE);
1572 		cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
1573 			GSC_ENABLE_CLK_GATE_MODE_MASK);
1574 		cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1575 		gsc_write(cfg, GSC_ENABLE);
1576 
1577 		/* src dma memory */
1578 		cfg = gsc_read(GSC_IN_CON);
1579 		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1580 		cfg |= GSC_IN_PATH_MEMORY;
1581 		gsc_write(cfg, GSC_IN_CON);
1582 
1583 		/* dst dma memory */
1584 		cfg = gsc_read(GSC_OUT_CON);
1585 		cfg |= GSC_OUT_PATH_MEMORY;
1586 		gsc_write(cfg, GSC_OUT_CON);
1587 		break;
1588 	case IPP_CMD_WB:
1589 		set_wb.enable = 1;
1590 		set_wb.refresh = property->refresh_rate;
1591 		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1592 		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1593 
1594 		/* src local path */
1595 		cfg = gsc_read(GSC_IN_CON);
1596 		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1597 		cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1598 		gsc_write(cfg, GSC_IN_CON);
1599 
1600 		/* dst dma memory */
1601 		cfg = gsc_read(GSC_OUT_CON);
1602 		cfg |= GSC_OUT_PATH_MEMORY;
1603 		gsc_write(cfg, GSC_OUT_CON);
1604 		break;
1605 	case IPP_CMD_OUTPUT:
1606 		/* src dma memory */
1607 		cfg = gsc_read(GSC_IN_CON);
1608 		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1609 		cfg |= GSC_IN_PATH_MEMORY;
1610 		gsc_write(cfg, GSC_IN_CON);
1611 
1612 		/* dst local path */
1613 		cfg = gsc_read(GSC_OUT_CON);
1614 		cfg |= GSC_OUT_PATH_MEMORY;
1615 		gsc_write(cfg, GSC_OUT_CON);
1616 		break;
1617 	default:
1618 		ret = -EINVAL;
1619 		dev_err(dev, "invalid operations.\n");
1620 		return ret;
1621 	}
1622 
1623 	ret = gsc_set_prescaler(ctx, &ctx->sc,
1624 		&img_pos[EXYNOS_DRM_OPS_SRC],
1625 		&img_pos[EXYNOS_DRM_OPS_DST]);
1626 	if (ret) {
		dev_err(dev, "failed to set prescaler.\n");
1628 		return ret;
1629 	}
1630 
1631 	gsc_set_scaler(ctx, &ctx->sc);
1632 
1633 	cfg = gsc_read(GSC_ENABLE);
1634 	cfg |= GSC_ENABLE_ON;
1635 	gsc_write(cfg, GSC_ENABLE);
1636 
1637 	return 0;
1638 }
1639 
1640 static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1641 {
1642 	struct gsc_context *ctx = get_gsc_context(dev);
1643 	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1644 	u32 cfg;
1645 
1646 	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1647 
1648 	switch (cmd) {
1649 	case IPP_CMD_M2M:
1650 		/* bypass */
1651 		break;
1652 	case IPP_CMD_WB:
1653 		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1654 		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1655 		break;
1656 	case IPP_CMD_OUTPUT:
1657 	default:
1658 		dev_err(dev, "invalid operations.\n");
1659 		break;
1660 	}
1661 
1662 	gsc_handle_irq(ctx, false, false, true);
1663 
1664 	/* reset sequence */
1665 	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
1666 	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
1667 	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
1668 
1669 	cfg = gsc_read(GSC_ENABLE);
1670 	cfg &= ~GSC_ENABLE_ON;
1671 	gsc_write(cfg, GSC_ENABLE);
1672 }
1673 
1674 static int gsc_probe(struct platform_device *pdev)
1675 {
1676 	struct device *dev = &pdev->dev;
1677 	struct gsc_context *ctx;
1678 	struct resource *res;
1679 	struct exynos_drm_ippdrv *ippdrv;
1680 	int ret;
1681 
1682 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1683 	if (!ctx)
1684 		return -ENOMEM;
1685 
1686 	/* clock control */
1687 	ctx->gsc_clk = devm_clk_get(dev, "gscl");
1688 	if (IS_ERR(ctx->gsc_clk)) {
1689 		dev_err(dev, "failed to get gsc clock.\n");
1690 		return PTR_ERR(ctx->gsc_clk);
1691 	}
1692 
1693 	/* resource memory */
1694 	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1695 	ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
1696 	if (IS_ERR(ctx->regs))
1697 		return PTR_ERR(ctx->regs);
1698 
1699 	/* resource irq */
1700 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1701 	if (!res) {
1702 		dev_err(dev, "failed to request irq resource.\n");
1703 		return -ENOENT;
1704 	}
1705 
1706 	ctx->irq = res->start;
1707 	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
1708 		IRQF_ONESHOT, "drm_gsc", ctx);
1709 	if (ret < 0) {
1710 		dev_err(dev, "failed to request irq.\n");
1711 		return ret;
1712 	}
1713 
	/* context initialization */
1715 	ctx->id = pdev->id;
1716 
1717 	ippdrv = &ctx->ippdrv;
1718 	ippdrv->dev = dev;
1719 	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
1720 	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
1721 	ippdrv->check_property = gsc_ippdrv_check_property;
1722 	ippdrv->reset = gsc_ippdrv_reset;
1723 	ippdrv->start = gsc_ippdrv_start;
1724 	ippdrv->stop = gsc_ippdrv_stop;
1725 	ret = gsc_init_prop_list(ippdrv);
1726 	if (ret < 0) {
1727 		dev_err(dev, "failed to init property list.\n");
1728 		return ret;
1729 	}
1730 
1731 	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
1732 		(int)ippdrv);
1733 
1734 	mutex_init(&ctx->lock);
1735 	platform_set_drvdata(pdev, ctx);
1736 
1737 	pm_runtime_set_active(dev);
1738 	pm_runtime_enable(dev);
1739 
1740 	ret = exynos_drm_ippdrv_register(ippdrv);
1741 	if (ret < 0) {
1742 		dev_err(dev, "failed to register drm gsc device.\n");
1743 		goto err_ippdrv_register;
1744 	}
1745 
1746 	dev_info(dev, "drm gsc registered successfully.\n");
1747 
1748 	return 0;
1749 
1750 err_ippdrv_register:
1751 	pm_runtime_disable(dev);
1752 	return ret;
1753 }
1754 
1755 static int gsc_remove(struct platform_device *pdev)
1756 {
1757 	struct device *dev = &pdev->dev;
1758 	struct gsc_context *ctx = get_gsc_context(dev);
1759 	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1760 
1761 	exynos_drm_ippdrv_unregister(ippdrv);
1762 	mutex_destroy(&ctx->lock);
1763 
1764 	pm_runtime_set_suspended(dev);
1765 	pm_runtime_disable(dev);
1766 
1767 	return 0;
1768 }
1769 
1770 #ifdef CONFIG_PM_SLEEP
1771 static int gsc_suspend(struct device *dev)
1772 {
1773 	struct gsc_context *ctx = get_gsc_context(dev);
1774 
1775 	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1776 
1777 	if (pm_runtime_suspended(dev))
1778 		return 0;
1779 
1780 	return gsc_clk_ctrl(ctx, false);
1781 }
1782 
1783 static int gsc_resume(struct device *dev)
1784 {
1785 	struct gsc_context *ctx = get_gsc_context(dev);
1786 
1787 	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1788 
1789 	if (!pm_runtime_suspended(dev))
1790 		return gsc_clk_ctrl(ctx, true);
1791 
1792 	return 0;
1793 }
1794 #endif
1795 
1796 #ifdef CONFIG_PM_RUNTIME
1797 static int gsc_runtime_suspend(struct device *dev)
1798 {
1799 	struct gsc_context *ctx = get_gsc_context(dev);
1800 
1801 	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1802 
1803 	return  gsc_clk_ctrl(ctx, false);
1804 }
1805 
1806 static int gsc_runtime_resume(struct device *dev)
1807 {
1808 	struct gsc_context *ctx = get_gsc_context(dev);
1809 
	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1811 
1812 	return  gsc_clk_ctrl(ctx, true);
1813 }
1814 #endif
1815 
1816 static const struct dev_pm_ops gsc_pm_ops = {
1817 	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
1818 	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1819 };
1820 
1821 struct platform_driver gsc_driver = {
1822 	.probe		= gsc_probe,
1823 	.remove		= gsc_remove,
1824 	.driver		= {
1825 		.name	= "exynos-drm-gsc",
1826 		.owner	= THIS_MODULE,
1827 		.pm	= &gsc_pm_ops,
1828 	},
1829 };
1830 
1831