#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <media/saa7146_vv.h>

static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
{
	/* clear out the necessary bits */
	*clip_format &= 0x0000ffff;
	/* set the new bits */
	*clip_format |=  (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16));
}
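
/*
 * Illustration only (0x123 is a made-up value, not a real palette code from
 * the format table): for palette == 0x123 the helper above would pack
 *   (0x1 << 30) | (0x3 << 24) | (0x2 << 16)
 * into the upper half of the clip format register, i.e. the three nibbles of
 * the palette code end up in bits 31:30, 27:24 and 19:16 while the lower
 * word of *clip_format is preserved.
 */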

static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl)
{
	*hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
	*hps_ctrl |= (source << 30) | (sync << 28);
}

static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl)
{
	int hyo = 0, hxo = 0;

	hyo = vv->standard->v_offset;
	hxo = vv->standard->h_offset;

	*hps_h_scale	&= ~(MASK_B0 | 0xf00);
	*hps_h_scale	|= (hxo <<  0);

	*hps_ctrl	&= ~(MASK_W0 | MASK_B2);
	*hps_ctrl	|= (hyo << 12);
}
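
/*
 * Rough sketch of the bit layout used above, assuming the usual saa7146
 * MASK_Bn/MASK_Wn byte/word masks: the horizontal offset (h_offset) lands in
 * the low bits of HPS_H_SCALE (bits 11:0 are cleared first), while the
 * vertical offset (v_offset) is shifted into bits 23:12 of HPS_CTRL after
 * the lower three bytes have been cleared.
 */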

/* helper functions for the calculation of the horizontal and vertical
   scaling registers, the clip-format register etc.
   These functions take pointers to the (most likely read-out original)
   values and manipulate them according to the requested changes.
*/

/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab [] = {
	{0x00,   2}, {0x02,   4}, {0x00,   4}, {0x06,   8}, {0x02,   8},
	{0x08,   8}, {0x00,   8}, {0x1E,  16}, {0x0E,   8}, {0x26,   8},
	{0x06,   8}, {0x42,   8}, {0x02,   8}, {0x80,   8}, {0x00,   8},
	{0xFE,  16}, {0xFE,   8}, {0x7E,   8}, {0x7E,   8}, {0x3E,   8},
	{0x3E,   8}, {0x1E,   8}, {0x1E,   8}, {0x0E,   8}, {0x0E,   8},
	{0x06,   8}, {0x06,   8}, {0x02,   8}, {0x02,   8}, {0x00,   8},
	{0x00,   8}, {0xFE,  16}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8},
	{0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8},
	{0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8},
	{0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0x7E,   8},
	{0x7E,   8}, {0x3E,   8}, {0x3E,   8}, {0x1E,   8}, {0x1E,   8},
	{0x0E,   8}, {0x0E,   8}, {0x06,   8}, {0x06,   8}, {0x02,   8},
	{0x02,   8}, {0x00,   8}, {0x00,   8}, {0xFE,  16}
};

/* table of attenuation values for horizontal scaling */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0};

/* calculate horizontal scale registers */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci = 0;
	/* vertical scale & gain */
	u32 pfuv = 0;

	/* helper variables */
	u32 h_atten = 0, i = 0;

	if ( 0 == out_x ) {
		return -EINVAL;
	}

	/* mask out vanity-bit */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xpsc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						...		*/
	if (in_x > out_x) {
		xpsc = in_x / out_x;
	}
	else {
		/* zooming */
		xpsc = 1;
	}

	/* if the flip_lr-bit is set, the number of pixels after
	   horizontal prescaling must be < 384 */
	if ( 0 != flip_lr ) {

		/* set vanity bit */
		*hps_ctrl |= MASK_29;

		while (in_x / xpsc >= 384 )
			xpsc++;
	}
	/* otherwise the number of pixels after
	   horizontal prescaling must be < 768 */
	else {
		while ( in_x / xpsc >= 768 )
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if ( xpsc > 64 )
		xpsc = 64;

	/* keep xacm clear */
	xacm = 0;

	/* set horizontal filter parameters (CXY = CXUV) */
	cxy = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ( (in_x == out_x) && ( 1 == xpsc ) )
		xsci = 0x400;
	else
		xsci = ( (1024 * in_x) / (out_x * xpsc) ) + xpsc;

	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if ( 0x400 == xsci )
		xim = 1;
	else
		xim = 0;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if( 1 == xpsc ) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling */
		h_atten = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}

		dcgx = i;
	}

	/* the horizontal scaling increment controls the UV filter,
	   which reduces the bandwidth to improve the display quality,
	   so set it accordingly ... */
	if ( xsci == 0x400)
		pfuv = 0x00;
	else if ( xsci < 0x600)
		pfuv = 0x01;
	else if ( xsci < 0x680)
		pfuv = 0x11;
	else if ( xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;

	*hps_v_gain  &= MASK_W0|MASK_B2;
	*hps_v_gain  |= (pfuv << 24);

	*hps_h_scale	&= ~(MASK_W1 | 0xf000);
	*hps_h_scale	|= (xim << 31) | (xp << 24) | (xsci << 12);

	*hps_h_prescale	|= (dcgx << 27) | ((xpsc-1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}
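
/*
 * Illustrative walk-through of the horizontal path above (numbers are made
 * up, not taken from a real standard): for in_x = 720, out_x = 240 and
 * flip_lr = 0, the prescaler picks xpsc = 720 / 240 = 3, which stays below
 * the 768-pixel limit.  The filter coefficient then comes from
 * hps_h_coeff_tab[2], the fine scale becomes
 * xsci = (1024 * 720) / (240 * 3) + 3 = 1027, so the scaler is not bypassed
 * (xim = 0), pfuv ends up as 0x01 (xsci < 0x600), and dcgx is the index of
 * the first h_attenuation[] entry >= that table entry's weight_sum.
 */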

static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab [] = {
 {0x0100,   2},  {0x0102,   4},  {0x0300,   4},  {0x0106,   8},  {0x0502,   8},
 {0x0708,   8},  {0x0F00,   8},  {0x011E,  16},  {0x110E,  16},  {0x1926,  16},
 {0x3906,  16},  {0x3D42,  16},  {0x7D02,  16},  {0x7F80,  16},  {0xFF00,  16},
 {0x01FE,  32},  {0x01FE,  32},  {0x817E,  32},  {0x817E,  32},  {0xC13E,  32},
 {0xC13E,  32},  {0xE11E,  32},  {0xE11E,  32},  {0xF10E,  32},  {0xF10E,  32},
 {0xF906,  32},  {0xF906,  32},  {0xFD02,  32},  {0xFD02,  32},  {0xFF00,  32},
 {0xFF00,  32},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},
 {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},
 {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},
 {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x817E,  64},
 {0x817E,  64},  {0xC13E,  64},  {0xC13E,  64},  {0xE11E,  64},  {0xE11E,  64},
 {0xF10E,  64},  {0xF10E,  64},  {0xF906,  64},  {0xF906,  64},  {0xFD02,  64},
 {0xFD02,  64},  {0xFF00,  64},  {0xFF00,  64},  {0x01FE, 128}
};

/* table of attenuation values for vertical scaling */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0};

/* calculate vertical scale registers */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;

	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error out if vertical zooming is requested */
	if ( in_y < out_y ) {
		return -EINVAL;
	}

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */

	if (V4L2_FIELD_HAS_BOTH(field)) {
		if( 2*out_y >= in_y) {
			lpi = 1;
		}
	} else if (field == V4L2_FIELD_TOP
		|| field == V4L2_FIELD_ALTERNATE
		|| field == V4L2_FIELD_BOTTOM) {
		if( 4*out_y >= in_y ) {
			lpi = 1;
		}
		out_y *= 2;
	}
	if( 0 != lpi ) {

		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if ( in_y > out_y )
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		dcgy = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);

	} else {
		yacm = 1;

		/* calculate scaling increment */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						... */
		if ( ysci < 512) {
			yacl = 0;
		} else {
			yacl = ( ysci / (1024 - ysci) );
		}

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
		cya_cyb = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}

		dcgy = i;
	}

	/* ypo and ype swapped in spec ? */
	*hps_v_scale	|= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8 ) | (ype << 1);

	*hps_v_gain	&= ~(MASK_W0|MASK_B2);
	*hps_v_gain	|= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}
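
/*
 * Illustrative numbers for the vertical path above (made up, not from a real
 * standard): with an interlaced field, in_y = 576 and out_y = 480, both
 * fields are used and 2 * 480 >= 576, so linear phase interpolation is
 * chosen (lpi = 1) and ysci = (1024 * 576) / 481 - 1024 = 202, giving
 * ype = 202 / 16 = 12 and ypo = 12 + 202 / 64 = 15.  When lpi is not
 * possible, ysci is derived from the line-drop ratio instead and yacl,
 * cya_cyb and dcgy are looked up in hps_v_coeff_tab / v_attenuation.
 */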

/* simple bubble-sort algorithm with duplicate elimination */
static int sort_and_eliminate(u32* values, int* count)
{
	int low = 0, high = 0, top = 0;
	int cur = 0, next = 0;

	/* sanity checks */
	if( (0 > *count) || (NULL == values) ) {
		return -EINVAL;
	}

	/* bubble sort the first @count items of the array @values */
	for( top = *count; top > 0; top--) {
		for( low = 0, high = 1; high < top; low++, high++) {
			if( values[low] > values[high] )
				swap(values[low], values[high]);
		}
	}

	/* remove duplicate items */
	for( cur = 0, next = 1; next < *count; next++) {
		if( values[cur] != values[next])
			values[++cur] = values[next];
	}

	*count = cur + 1;

	return 0;
}
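
/*
 * Example of what sort_and_eliminate() does (illustrative values): given
 * values[] = { 20, 5, 20, 3 } and *count == 4, the array is sorted in place
 * to { 3, 5, 20, 20 } and the duplicate is dropped, leaving
 * values[] = { 3, 5, 20 } with *count updated to 3.  Note that *count == 0
 * passes the sanity check but leaves *count set to 1, so callers are
 * expected to pass at least one element.
 */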
336 
337 static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct saa7146_fh *fh,
338 	struct saa7146_video_dma *vdma2, u32* clip_format, u32* arbtr_ctrl, enum v4l2_field field)
339 {
340 	struct saa7146_vv *vv = dev->vv_data;
341 	__le32 *clipping = vv->d_clipping.cpu_addr;
342 
343 	int width = vv->ov.win.w.width;
344 	int height =  vv->ov.win.w.height;
345 	int clipcount = vv->ov.nclips;
346 
347 	u32 line_list[32];
348 	u32 pixel_list[32];
349 	int numdwords = 0;
350 
351 	int i = 0, j = 0;
352 	int cnt_line = 0, cnt_pixel = 0;
353 
354 	int x[32], y[32], w[32], h[32];
355 
356 	/* clear out memory */
357 	memset(&line_list[0],  0x00, sizeof(u32)*32);
358 	memset(&pixel_list[0], 0x00, sizeof(u32)*32);
359 	memset(clipping,  0x00, SAA7146_CLIPPING_MEM);
360 
361 	/* fill the line and pixel-lists */
362 	for(i = 0; i < clipcount; i++) {
363 		int l = 0, r = 0, t = 0, b = 0;
364 
365 		x[i] = vv->ov.clips[i].c.left;
366 		y[i] = vv->ov.clips[i].c.top;
367 		w[i] = vv->ov.clips[i].c.width;
368 		h[i] = vv->ov.clips[i].c.height;
369 
370 		if( w[i] < 0) {
371 			x[i] += w[i]; w[i] = -w[i];
372 		}
373 		if( h[i] < 0) {
374 			y[i] += h[i]; h[i] = -h[i];
375 		}
376 		if( x[i] < 0) {
377 			w[i] += x[i]; x[i] = 0;
378 		}
379 		if( y[i] < 0) {
380 			h[i] += y[i]; y[i] = 0;
381 		}
382 		if( 0 != vv->vflip ) {
383 			y[i] = height - y[i] - h[i];
384 		}
385 
386 		l = x[i];
387 		r = x[i]+w[i];
388 		t = y[i];
389 		b = y[i]+h[i];
390 
391 		/* insert left/right coordinates */
392 		pixel_list[ 2*i   ] = min_t(int, l, width);
393 		pixel_list[(2*i)+1] = min_t(int, r, width);
394 		/* insert top/bottom coordinates */
395 		line_list[ 2*i   ] = min_t(int, t, height);
396 		line_list[(2*i)+1] = min_t(int, b, height);
397 	}
398 
399 	/* sort and eliminate lists */
400 	cnt_line = cnt_pixel = 2*clipcount;
401 	sort_and_eliminate( &pixel_list[0], &cnt_pixel );
402 	sort_and_eliminate( &line_list[0], &cnt_line );
403 
404 	/* calculate the number of used u32s */
405 	numdwords = max_t(int, (cnt_line+1), (cnt_pixel+1))*2;
406 	numdwords = max_t(int, 4, numdwords);
407 	numdwords = min_t(int, 64, numdwords);
408 
409 	/* fill up cliptable */
410 	for(i = 0; i < cnt_pixel; i++) {
411 		clipping[2*i] |= cpu_to_le32(pixel_list[i] << 16);
412 	}
413 	for(i = 0; i < cnt_line; i++) {
414 		clipping[(2*i)+1] |= cpu_to_le32(line_list[i] << 16);
415 	}
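	/*
	 * At this point the cliptable interleaves the two sorted lists:
	 * even dwords carry a pixel (x) boundary in their upper halfword,
	 * odd dwords carry a line (y) boundary.  The loops below then set
	 * bit j in every boundary dword whose coordinate lies inside clip
	 * window j.  Illustrative example with a single clip window at
	 * (x=10, y=20, w=100, h=50): pixel_list = { 10, 110 },
	 * line_list = { 20, 70 }, and bit 0 is set only for the boundaries
	 * at 10 and 20, since 110 and 70 are not less than x+w and y+h.
	 */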

	/* fill up cliptable with the display infos */
	for(j = 0; j < clipcount; j++) {

		for(i = 0; i < cnt_pixel; i++) {

			if( x[j] < 0)
				x[j] = 0;

			if( pixel_list[i] < (x[j] + w[j])) {

				if ( pixel_list[i] >= x[j] ) {
					clipping[2*i] |= cpu_to_le32(1 << j);
				}
			}
		}
		for(i = 0; i < cnt_line; i++) {

			if( y[j] < 0)
				y[j] = 0;

			if( line_list[i] < (y[j] + h[j]) ) {

				if( line_list[i] >= y[j] ) {
					clipping[(2*i)+1] |= cpu_to_le32(1 << j);
				}
			}
		}
	}

	/* adjust arbitration control register */
	*arbtr_ctrl &= 0xffff00ff;
	*arbtr_ctrl |= 0x00001c00;

	vdma2->base_even	= vv->d_clipping.dma_handle;
	vdma2->base_odd		= vv->d_clipping.dma_handle;
	vdma2->prot_addr	= vv->d_clipping.dma_handle+((sizeof(u32))*(numdwords));
	vdma2->base_page	= 0x04;
	vdma2->pitch		= 0x00;
	vdma2->num_line_byte	= (0 << 16 | (sizeof(u32))*(numdwords-1) );

	/* set clipping-mode. this depends on the field(s) used */
	*clip_format &= 0xfffffff7;
	if (V4L2_FIELD_HAS_BOTH(field)) {
		*clip_format |= 0x00000008;
	} else {
		*clip_format |= 0x00000000;
	}
}

/* disable clipping */
static void saa7146_disable_clipping(struct saa7146_dev *dev)
{
	u32 clip_format	= saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* mask out relevant bits (= lower word) */
	clip_format &= MASK_W1;

	/* upload clipping-registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	/* disable video dma2 */
	saa7146_write(dev, MC1, MASK_21);
}

static void saa7146_set_clipping_rect(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;
	enum v4l2_field field = vv->ov.win.field;
	struct	saa7146_video_dma vdma2;
	u32 clip_format;
	u32 arbtr_ctrl;

	/* check clipcount, disable clipping if clipcount == 0 */
	if (vv->ov.nclips == 0) {
		saa7146_disable_clipping(dev);
		return;
	}

	clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);
	arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);

	calculate_clipping_registers_rect(dev, fh, &vdma2, &clip_format, &arbtr_ctrl, field);

	/* set clipping format */
	clip_format &= 0xffff0008;
	clip_format |= (SAA7146_CLIPPING_RECT << 4);

	/* prepare video dma2 */
	saa7146_write(dev, BASE_EVEN2,		vdma2.base_even);
	saa7146_write(dev, BASE_ODD2,		vdma2.base_odd);
	saa7146_write(dev, PROT_ADDR2,		vdma2.prot_addr);
	saa7146_write(dev, BASE_PAGE2,		vdma2.base_page);
	saa7146_write(dev, PITCH2,		vdma2.pitch);
	saa7146_write(dev, NUM_LINE_BYTE2,	vdma2.num_line_byte);

	/* prepare the rest */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);

	/* upload clip_control-register, clipping-registers, enable video dma2 */
	saa7146_write(dev, MC2, (MASK_05 | MASK_21 | MASK_03 | MASK_19));
	saa7146_write(dev, MC1, (MASK_05 | MASK_21));
}

static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;

	int source = vv->current_hps_source;
	int sync = vv->current_hps_sync;

	u32 hps_v_scale = 0, hps_v_gain  = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0;

	/* set vertical scale */
	hps_v_scale = 0; /* all bits get set by the function-call */
	hps_v_gain  = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN);*/
	calculate_v_scale_registers(dev, field, vv->standard->v_field*2, height, &hps_v_scale, &hps_v_gain);

	/* set horizontal scale */
	hps_ctrl	= 0;
	hps_h_prescale	= 0; /* all bits get set in the function */
	hps_h_scale	= 0;
	calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale);

	/* set hyo and hxo */
	calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl);
	calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl);

	/* write out new register contents */
	saa7146_write(dev, HPS_V_SCALE,	hps_v_scale);
	saa7146_write(dev, HPS_V_GAIN,	hps_v_gain);
	saa7146_write(dev, HPS_CTRL,	hps_ctrl);
	saa7146_write(dev, HPS_H_PRESCALE,hps_h_prescale);
	saa7146_write(dev, HPS_H_SCALE,	hps_h_scale);

	/* upload shadow-ram registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22) );
}

/* calculate the new memory offsets for a desired position */
static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat);

	int b_depth = vv->ov_fmt->depth;
	int b_bpl = vv->ov_fb.fmt.bytesperline;
	/* The unsigned long cast is to remove a 64-bit compile warning since
	   it looks like a 64-bit address is cast to a 32-bit value, even
	   though the base pointer is really a 32-bit physical address that
	   goes into a 32-bit DMA register.
	   FIXME: might not work on some 64-bit platforms, but see the FIXME
	   in struct v4l2_framebuffer (videodev2.h) for that.
	 */
	u32 base = (u32)(unsigned long)vv->ov_fb.base;

	struct	saa7146_video_dma vdma1;

	/* calculate the memory offsets for the picture and check whether we must flip top-down */
	vdma1.pitch	= 2*b_bpl;
	if ( 0 == vv->vflip ) {
		vdma1.base_even = base + (w_y * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
		vdma1.base_odd  = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_even + (w_height * (vdma1.pitch / 2));
	}
	else {
		vdma1.base_even = base + ((w_y+w_height) * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
		vdma1.base_odd  = vdma1.base_even - (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_odd - (w_height * (vdma1.pitch / 2));
	}
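
	/*
	 * Illustrative numbers only: with a 32 bpp overlay format,
	 * bytesperline = 2560 and a window at (w_x = 10, w_y = 20), the
	 * non-flipped case above yields
	 *   base_even = base + 20 * 2560 + 10 * 4 = base + 51240,
	 *   base_odd  = base_even + 2560,
	 * and prot_addr marks the first byte after the last displayed line.
	 */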

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if ( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
	}

	vdma1.base_page = sfmt->swap;
	vdma1.num_line_byte = (vv->standard->v_field<<16)+vv->standard->h_pixels;

	saa7146_write_out_dma(dev, 1, &vdma1);
}

static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* call helper function */
	calculate_output_format_register(dev,palette,&clip_format);

	/* update the hps registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));
}

/* select input-source */
void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 hps_ctrl = 0;

	/* read old state */
	hps_ctrl = saa7146_read(dev, HPS_CTRL);

	hps_ctrl &= ~( MASK_31 | MASK_30 | MASK_28 );
	hps_ctrl |= (source << 30) | (sync << 28);

	/* write back & upload register */
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	vv->current_hps_source = source;
	vv->current_hps_sync = sync;
}
EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync);

int saa7146_enable_overlay(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;

	saa7146_set_window(dev, vv->ov.win.w.width, vv->ov.win.w.height, vv->ov.win.field);
	saa7146_set_position(dev, vv->ov.win.w.left, vv->ov.win.w.top, vv->ov.win.w.height, vv->ov.win.field, vv->ov_fmt->pixelformat);
	saa7146_set_output_format(dev, vv->ov_fmt->trans);
	saa7146_set_clipping_rect(fh);

	/* enable video dma1 */
	saa7146_write(dev, MC1, (MASK_06 | MASK_22));
	return 0;
}

void saa7146_disable_overlay(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;

	/* disable clipping + video dma1 */
	saa7146_disable_clipping(dev);
	saa7146_write(dev, MC1, MASK_22);
}

void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma)
{
	int where = 0;

	if( which < 1 || which > 3) {
		return;
	}

	/* calculate starting address */
	where  = (which-1)*0x18;
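	/*
	 * Each of the three video DMA channels occupies a 0x18-byte block of
	 * registers, so e.g. which == 2 starts writing at offset 0x18
	 * (BASE_ODD2) and, assuming the usual single-bit MASK_xx definitions,
	 * the MC2 upload below shifts MASK_02/MASK_18 up to the bits for
	 * channel 2.
	 */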

	saa7146_write(dev, where,	vdma->base_odd);
	saa7146_write(dev, where+0x04,	vdma->base_even);
	saa7146_write(dev, where+0x08,	vdma->prot_addr);
	saa7146_write(dev, where+0x0c,	vdma->pitch);
	saa7146_write(dev, where+0x10,	vdma->base_page);
	saa7146_write(dev, where+0x14,	vdma->num_line_byte);

	/* upload */
	saa7146_write(dev, MC2, (MASK_02<<(which-1))|(MASK_18<<(which-1)));
/*
	printk("vdma%d.base_even:     0x%08x\n", which,vdma->base_even);
	printk("vdma%d.base_odd:      0x%08x\n", which,vdma->base_odd);
	printk("vdma%d.prot_addr:     0x%08x\n", which,vdma->prot_addr);
	printk("vdma%d.base_page:     0x%08x\n", which,vdma->base_page);
	printk("vdma%d.pitch:         0x%08x\n", which,vdma->pitch);
	printk("vdma%d.num_line_byte: 0x%08x\n", which,vdma->num_line_byte);
*/
}

static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;

	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	int bytesperline = buf->fmt->bytesperline;
	enum v4l2_field field = buf->fmt->field;

	int depth = sfmt->depth;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	if( bytesperline != 0) {
		vdma1.pitch = bytesperline*2;
	} else {
		vdma1.pitch = (width*depth*2)/8;
	}
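	/*
	 * The pitch covers two lines here because both fields are written
	 * interleaved; illustrative numbers: a 720-pixel wide, 16 bpp packed
	 * format with bytesperline == 0 gives pitch = (720 * 16 * 2) / 8 =
	 * 2880 bytes, which is halved again below whenever only a single
	 * field is captured.
	 */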
	vdma1.num_line_byte	= ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page		= buf->pt[0].dma | ME1 | sfmt->swap;

	if( 0 != vv->vflip ) {
		vdma1.prot_addr	= buf->pt[0].offset;
		vdma1.base_even	= buf->pt[0].offset+(vdma1.pitch/2)*height;
		vdma1.base_odd	= vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even	= buf->pt[0].offset;
		vdma1.base_odd	= vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr	= buf->pt[0].offset+(vdma1.pitch/2)*height;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		if ( vv->last_field == V4L2_FIELD_TOP ) {
			vdma1.base_odd	= vdma1.prot_addr;
			vdma1.pitch /= 2;
		} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
			vdma1.base_odd	= vdma1.base_even;
			vdma1.base_even = vdma1.prot_addr;
			vdma1.pitch /= 2;
		}
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd	= vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd	= vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	return 0;
}

static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch	= width;
	vdma3->pitch	= width;

	/* fixme: look at bytesperline! */

	if( 0 != vv->vflip ) {
		vdma2->prot_addr	= buf->pt[1].offset;
		vdma2->base_even	= ((vdma2->pitch/2)*height)+buf->pt[1].offset;
		vdma2->base_odd		= vdma2->base_even - (vdma2->pitch/2);

		vdma3->prot_addr	= buf->pt[2].offset;
		vdma3->base_even	= ((vdma3->pitch/2)*height)+buf->pt[2].offset;
		vdma3->base_odd		= vdma3->base_even - (vdma3->pitch/2);
	} else {
		vdma3->base_even	= buf->pt[2].offset;
		vdma3->base_odd		= vdma3->base_even + (vdma3->pitch/2);
		vdma3->prot_addr	= (vdma3->pitch/2)*height+buf->pt[2].offset;

		vdma2->base_even	= buf->pt[1].offset;
		vdma2->base_odd		= vdma2->base_even + (vdma2->pitch/2);
		vdma2->prot_addr	= (vdma2->pitch/2)*height+buf->pt[1].offset;
	}

	return 0;
}

static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch	= width/2;
	vdma3->pitch	= width/2;
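	/*
	 * Rough sketch of the 4:2:0 layout assumed here: each chroma plane is
	 * width/2 bytes wide and height/2 lines tall, so e.g. width = 320
	 * gives a chroma pitch of 160, and in the non-flipped branch below
	 * the odd-field base starts one full chroma pitch after the even one.
	 */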

	if( 0 != vv->vflip ) {
		vdma2->prot_addr	= buf->pt[2].offset;
		vdma2->base_even	= ((vdma2->pitch/2)*height)+buf->pt[2].offset;
		vdma2->base_odd		= vdma2->base_even - (vdma2->pitch/2);

		vdma3->prot_addr	= buf->pt[1].offset;
		vdma3->base_even	= ((vdma3->pitch/2)*height)+buf->pt[1].offset;
		vdma3->base_odd		= vdma3->base_even - (vdma3->pitch/2);

	} else {
		vdma3->base_even	= buf->pt[2].offset;
		vdma3->base_odd		= vdma3->base_even + (vdma3->pitch);
		vdma3->prot_addr	= (vdma3->pitch/2)*height+buf->pt[2].offset;

		vdma2->base_even	= buf->pt[1].offset;
		vdma2->base_odd		= vdma2->base_even + (vdma2->pitch);
		vdma2->prot_addr	= (vdma2->pitch/2)*height+buf->pt[1].offset;
	}
	return 0;
}

static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;
	struct saa7146_video_dma vdma2;
	struct saa7146_video_dma vdma3;

	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	enum v4l2_field field = buf->fmt->field;

	BUG_ON(0 == buf->pt[0].dma);
	BUG_ON(0 == buf->pt[1].dma);
	BUG_ON(0 == buf->pt[2].dma);

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* fixme: look at bytesperline! */

	/* fixme: what happens for user space buffers here? The offsets are
	   most likely wrong; this version only works for page-aligned
	   buffers, modifications to the pagetable-functions are necessary... */

	vdma1.pitch		= width*2;
	vdma1.num_line_byte	= ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page		= buf->pt[0].dma | ME1;

	if( 0 != vv->vflip ) {
		vdma1.prot_addr	= buf->pt[0].offset;
		vdma1.base_even	= ((vdma1.pitch/2)*height)+buf->pt[0].offset;
		vdma1.base_odd	= vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even	= buf->pt[0].offset;
		vdma1.base_odd	= vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr	= (vdma1.pitch/2)*height+buf->pt[0].offset;
	}

	vdma2.num_line_byte	= 0; /* unused */
	vdma2.base_page		= buf->pt[1].dma | ME1;

	vdma3.num_line_byte	= 0; /* unused */
	vdma3.base_page		= buf->pt[2].dma | ME1;

	switch( sfmt->depth ) {
		case 12: {
			calc_planar_420(vv,buf,&vdma2,&vdma3);
			break;
		}
		case 16: {
			calc_planar_422(vv,buf,&vdma2,&vdma3);
			break;
		}
		default: {
			return -1;
		}
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd	= vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd	= vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd	= vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd	= vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd	= vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd	= vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd	= vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd	= vdma2.base_even;
		vdma2.base_even = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd	= vdma3.base_even;
		vdma3.base_even = vdma3.prot_addr;
		vdma3.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
		vdma2.pitch *= -1;
		vdma3.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	if( (sfmt->flags & FORMAT_BYTE_SWAP) != 0 ) {
		saa7146_write_out_dma(dev, 3, &vdma2);
		saa7146_write_out_dma(dev, 2, &vdma3);
	} else {
		saa7146_write_out_dma(dev, 2, &vdma2);
		saa7146_write_out_dma(dev, 3, &vdma3);
	}
	return 0;
}

static void program_capture_engine(struct saa7146_dev *dev, int planar)
{
	struct saa7146_vv *vv = dev->vv_data;
	int count = 0;

	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set */
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait);
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait);

	/* set rps register 0 */
	WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2/4));
	WRITE_RPS0(MASK_27 | MASK_11);

	/* turn on video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_06 | MASK_22);			/* => mask */
	WRITE_RPS0(MASK_06 | MASK_22);			/* => values */
	if( 0 != planar ) {
		/* turn on video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21);			/* => mask */
		WRITE_RPS0(MASK_05 | MASK_21);			/* => values */

		/* turn on video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20);			/* => mask */
		WRITE_RPS0(MASK_04 | MASK_20);			/* => values */
	}

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
		WRITE_RPS0(CMD_PAUSE | o_wait);
		WRITE_RPS0(CMD_PAUSE | e_wait);
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | o_wait);
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_22 | MASK_06);			/* => mask */
	WRITE_RPS0(MASK_22);				/* => values */
	if( 0 != planar ) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21);			/* => mask */
		WRITE_RPS0(MASK_21);				/* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20);			/* => mask */
		WRITE_RPS0(MASK_20);				/* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}

void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
	struct saa7146_vv *vv = dev->vv_data;
	u32 vdma1_prot_addr;

	DEB_CAP("buf:%p, next:%p\n", buf, next);

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if( 0 == vdma1_prot_addr ) {
		/* clear out beginning of streaming bit (rps register 0) */
		DEB_CAP("forcing sync to new frame\n");
		saa7146_write(dev, MC2, MASK_27 );
	}

	saa7146_set_window(dev, buf->fmt->width, buf->fmt->height, buf->fmt->field);
	saa7146_set_output_format(dev, sfmt->trans);
	saa7146_disable_clipping(dev);

	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if( 0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev,1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev,0);
	}

/*
	printk("vdma%d.base_even:     0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1));
	printk("vdma%d.base_odd:      0x%08x\n", 1,saa7146_read(dev,BASE_ODD1));
	printk("vdma%d.prot_addr:     0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1));
	printk("vdma%d.base_page:     0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1));
	printk("vdma%d.pitch:         0x%08x\n", 1,saa7146_read(dev,PITCH1));
	printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1));
	printk("vdma%d => vptr      : 0x%08x\n", 1,saa7146_read(dev,PCI_VDP1));
*/

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}