1 /*
2  *  Driver for the Conexant CX23885 PCIe bridge
3  *
4  *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
16  */
17 
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kmod.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/div64.h>
28 #include <linux/firmware.h>
29 
30 #include "cx23885.h"
31 #include "cimax2.h"
32 #include "altera-ci.h"
33 #include "cx23888-ir.h"
34 #include "cx23885-ir.h"
35 #include "cx23885-av.h"
36 #include "cx23885-input.h"
37 
38 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
39 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION(CX23885_VERSION);
42 
/* Module option: verbosity level for dprintk() below (0 = silent). */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Module option: per-device board type override (UNSET = autodetect). */
static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card,  int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Debug printk gated on the 'debug' module parameter; every user must
 * have a 'dev' (struct cx23885_dev *) in scope for the name prefix. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)

/* Count of devices probed so far; used to number each instance's name. */
static unsigned int cx23885_devcount;

/* Sentinel line number meaning "emit no RISC resync instruction". */
#define NO_SYNC_LINE (-1U)
59 
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
62  * CX23887 Assumptions
63  * 1 line = 16 bytes of CDT
64  * cmds size = 80
65  * cdt size = 16 * linesize
66  * iqsize = 64
67  * maxlines = 6
68  *
69  * Address Space:
70  * 0x00000000 0x00008fff FIFO clusters
71  * 0x00010000 0x000104af Channel Management Data Structures
72  * 0x000104b0 0x000104ff Free
73  * 0x00010500 0x000108bf 15 channels * iqsize
74  * 0x000108c0 0x000108ff Free
75  * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76  *                       15 channels * (iqsize + (maxlines * linesize))
77  * 0x00010ea0 0x00010xxx Free
78  */
79 
/*
 * SRAM/DMA channel map for the original CX23885 bridge.  Entries whose
 * cmds/ctrl/cdt/fifo fields are all zero are unused on this silicon and
 * are merely erased by cx23885_sram_channel_setup().
 *
 * NOTE(review): SRAM_CH06 ("TS2 C") uses the DMA5_* register set, the
 * same registers as SRAM_CH05 — looks like it should be DMA4_*/DMA6_*,
 * but this matches the shipped register map; confirm against the
 * datasheet before changing.
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		/* unused on the 885 */
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		/* unused on the 885 */
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		/* unused on the 885 */
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		/* unused on the 885 */
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		/* unused on the 885 */
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
190 
/*
 * SRAM/DMA channel map for the CX23887/CX23888 bridge.  Differs from the
 * 885 table in the command/CDT addresses and in having a live VBI
 * channel (SRAM_CH02).  All-zero entries are unused and simply erased
 * by cx23885_sram_channel_setup().
 *
 * NOTE(review): as in the 885 table, SRAM_CH06 uses the DMA5_* register
 * set — confirm against the datasheet before "correcting" it.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		/* unused */
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		/* unused */
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		/* unused */
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		/* unused */
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
301 
302 static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303 {
304 	unsigned long flags;
305 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306 
307 	dev->pci_irqmask |= mask;
308 
309 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310 }
311 
312 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313 {
314 	unsigned long flags;
315 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316 
317 	dev->pci_irqmask |= mask;
318 	cx_set(PCI_INT_MSK, mask);
319 
320 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321 }
322 
323 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324 {
325 	u32 v;
326 	unsigned long flags;
327 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328 
329 	v = mask & dev->pci_irqmask;
330 	if (v)
331 		cx_set(PCI_INT_MSK, v);
332 
333 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334 }
335 
/* Enable every PCI interrupt bit the driver has claimed so far. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, ~0U);
}
340 
341 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342 {
343 	unsigned long flags;
344 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345 
346 	cx_clear(PCI_INT_MSK, mask);
347 
348 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349 }
350 
/* Mask every PCI interrupt source without dropping driver ownership. */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, ~0U);
}
355 
356 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357 {
358 	unsigned long flags;
359 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360 
361 	dev->pci_irqmask &= ~mask;
362 	cx_clear(PCI_INT_MSK, mask);
363 
364 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365 }
366 
367 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368 {
369 	u32 v;
370 	unsigned long flags;
371 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372 
373 	v = cx_read(PCI_INT_MSK);
374 
375 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 	return v;
377 }
378 
379 static int cx23885_risc_decode(u32 risc)
380 {
381 	static char *instr[16] = {
382 		[RISC_SYNC    >> 28] = "sync",
383 		[RISC_WRITE   >> 28] = "write",
384 		[RISC_WRITEC  >> 28] = "writec",
385 		[RISC_READ    >> 28] = "read",
386 		[RISC_READC   >> 28] = "readc",
387 		[RISC_JUMP    >> 28] = "jump",
388 		[RISC_SKIP    >> 28] = "skip",
389 		[RISC_WRITERM >> 28] = "writerm",
390 		[RISC_WRITECM >> 28] = "writecm",
391 		[RISC_WRITECR >> 28] = "writecr",
392 	};
393 	static int incr[16] = {
394 		[RISC_WRITE   >> 28] = 3,
395 		[RISC_JUMP    >> 28] = 3,
396 		[RISC_SKIP    >> 28] = 1,
397 		[RISC_SYNC    >> 28] = 1,
398 		[RISC_WRITERM >> 28] = 3,
399 		[RISC_WRITECM >> 28] = 3,
400 		[RISC_WRITECR >> 28] = 4,
401 	};
402 	static char *bits[] = {
403 		"12",   "13",   "14",   "resync",
404 		"cnt0", "cnt1", "18",   "19",
405 		"20",   "21",   "22",   "23",
406 		"irq1", "irq2", "eol",  "sol",
407 	};
408 	int i;
409 
410 	printk("0x%08x [ %s", risc,
411 	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
412 	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
413 		if (risc & (1 << (i + 12)))
414 			printk(" %s", bits[i]);
415 	printk(" count=%d ]\n", risc & 0xfff);
416 	return incr[risc >> 28] ? incr[risc >> 28] : 1;
417 }
418 
/*
 * Complete the oldest buffer on a transport port's active DMA queue:
 * timestamp it, assign the next sequence number, unlink it from the
 * queue and return it to videobuf2 marked DONE.  @count is the hardware
 * general-purpose counter value, logged for debugging only.
 * NOTE(review): no locking here — presumably the caller serializes
 * access to @q (e.g. holds port->slock in the IRQ path); confirm.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	/* Nothing queued: nothing to complete. */
	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	v4l2_get_timestamp(&buf->vb.timestamp);
	buf->vb.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
		buf->vb.vb2_buf.index,
		count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
438 
/*
 * Program one SRAM channel: build its Cluster Descriptor Table (CDT)
 * covering the channel FIFO, initialise the 80-byte CMDS block and load
 * the DMA pointer/count registers.  @bpl is rounded up to an 8-byte
 * multiple; the number of CDT lines is fifo_size/bpl clamped to [2,6].
 * Channels with cmds_start == 0 are unused and are simply erased.
 * Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
				      struct sram_channel *ch,
				      unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* Self-jump instruction at SRAM address 8, used as the parking
	 * target when a channel is set up "jumponly" (see below). */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte descriptor per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS: initial RISC PC (or the parking jump at addr 8),
	 * CDT base/size, IQ base/size, rest zeroed */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
510 
511 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
512 				      struct sram_channel *ch)
513 {
514 	static char *name[] = {
515 		"init risc lo",
516 		"init risc hi",
517 		"cdt base",
518 		"cdt size",
519 		"iq base",
520 		"iq size",
521 		"risc pc lo",
522 		"risc pc hi",
523 		"iq wr ptr",
524 		"iq rd ptr",
525 		"cdt current",
526 		"pci target lo",
527 		"pci target hi",
528 		"line / byte",
529 	};
530 	u32 risc;
531 	unsigned int i, j, n;
532 
533 	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
534 	       dev->name, ch->name);
535 	for (i = 0; i < ARRAY_SIZE(name); i++)
536 		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
537 		       dev->name, name[i],
538 		       cx_read(ch->cmds_start + 4*i));
539 
540 	for (i = 0; i < 4; i++) {
541 		risc = cx_read(ch->cmds_start + 4 * (i + 14));
542 		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
543 		cx23885_risc_decode(risc);
544 	}
545 	for (i = 0; i < (64 >> 2); i += n) {
546 		risc = cx_read(ch->ctrl_start + 4 * i);
547 		/* No consideration for bits 63-32 */
548 
549 		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
550 		       ch->ctrl_start + 4 * i, i);
551 		n = cx23885_risc_decode(risc);
552 		for (j = 1; j < n; j++) {
553 			risc = cx_read(ch->ctrl_start + 4 * (i + j));
554 			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
555 			       dev->name, i+j, risc, j);
556 		}
557 	}
558 
559 	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
560 	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
561 	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
562 	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
563 	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
564 	       dev->name, cx_read(ch->ptr1_reg));
565 	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
566 	       dev->name, cx_read(ch->ptr2_reg));
567 	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
568 	       dev->name, cx_read(ch->cnt1_reg));
569 	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
570 	       dev->name, cx_read(ch->cnt2_reg));
571 }
572 
573 static void cx23885_risc_disasm(struct cx23885_tsport *port,
574 				struct cx23885_riscmem *risc)
575 {
576 	struct cx23885_dev *dev = port->dev;
577 	unsigned int i, j, n;
578 
579 	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
580 	       dev->name, risc->cpu, (unsigned long)risc->dma);
581 	for (i = 0; i < (risc->size >> 2); i += n) {
582 		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
583 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
584 		for (j = 1; j < n; j++)
585 			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
586 			       dev->name, i + j, risc->cpu[i + j], j);
587 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
588 			break;
589 	}
590 }
591 
/*
 * Quiesce the bridge: stop the RISC controller and every DMA engine,
 * then mask all interrupt sources.  Used before (re)programming the
 * SRAM channels and on teardown.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B/C activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
621 
/*
 * Full hardware reset: quiesce everything, acknowledge all pending
 * interrupt status, restore clock/pad defaults, then (re)program every
 * SRAM channel from dev->sram_channels and re-run the board GPIO setup.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Writing 1s clears latched interrupt status bits. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* Let the part settle before reprogramming SRAM. */
	mdelay(100);

	/* Buffer sizes: video lines are 720 pixels * 4 bytes, transport
	 * streams move 4 x 188-byte packets per line; unused channels
	 * get a nominal 128 and are erased by the setup helper. */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
654 
655 
656 static int cx23885_pci_quirks(struct cx23885_dev *dev)
657 {
658 	dprintk(1, "%s()\n", __func__);
659 
660 	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
661 	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
662 	 * occur on the cx23887 bridge.
663 	 */
664 	if (dev->bridge == CX23885_BRIDGE_885)
665 		cx_clear(RDR_TLCTL0, 1 << 4);
666 
667 	return 0;
668 }
669 
670 static int get_resources(struct cx23885_dev *dev)
671 {
672 	if (request_mem_region(pci_resource_start(dev->pci, 0),
673 			       pci_resource_len(dev->pci, 0),
674 			       dev->name))
675 		return 0;
676 
677 	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
678 		dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
679 
680 	return -EBUSY;
681 }
682 
/*
 * Initialise one MPEG transport-stream port (@portno 1 = VID_B,
 * 2 = VID_C): common DMA/interrupt defaults, the per-port locks and
 * frontend list, and the port-specific register map, SRAM channel and
 * PCI IRQ bit.  Any other port number is a driver bug (BUG()).
 * Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	/* 0x47 is the MPEG-TS sync byte; 188 the TS packet length. */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		/* VID_C has no source-select register. */
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
753 
754 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
755 {
756 	switch (cx_read(RDR_CFG2) & 0xff) {
757 	case 0x00:
758 		/* cx23885 */
759 		dev->hwrevision = 0xa0;
760 		break;
761 	case 0x01:
762 		/* CX23885-12Z */
763 		dev->hwrevision = 0xa1;
764 		break;
765 	case 0x02:
766 		/* CX23885-13Z/14Z */
767 		dev->hwrevision = 0xb0;
768 		break;
769 	case 0x03:
770 		if (dev->pci->device == 0x8880) {
771 			/* CX23888-21Z/22Z */
772 			dev->hwrevision = 0xc0;
773 		} else {
774 			/* CX23885-14Z */
775 			dev->hwrevision = 0xa4;
776 		}
777 		break;
778 	case 0x04:
779 		if (dev->pci->device == 0x8880) {
780 			/* CX23888-31Z */
781 			dev->hwrevision = 0xd0;
782 		} else {
783 			/* CX23885-15Z, CX23888-31Z */
784 			dev->hwrevision = 0xa5;
785 		}
786 		break;
787 	case 0x0e:
788 		/* CX23887-15Z */
789 		dev->hwrevision = 0xc0;
790 		break;
791 	case 0x0f:
792 		/* CX23887-14Z */
793 		dev->hwrevision = 0xb1;
794 		break;
795 	default:
796 		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
797 			__func__, dev->hwrevision);
798 	}
799 	if (dev->hwrevision)
800 		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
801 			__func__, dev->hwrevision);
802 	else
803 		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
804 			__func__, dev->hwrevision);
805 }
806 
807 /* Find the first v4l2_subdev member of the group id in hw */
808 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
809 {
810 	struct v4l2_subdev *result = NULL;
811 	struct v4l2_subdev *sd;
812 
813 	spin_lock(&dev->v4l2_dev.lock);
814 	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
815 		if (sd->grp_id == hw) {
816 			result = sd;
817 			break;
818 		}
819 	}
820 	spin_unlock(&dev->v4l2_dev.lock);
821 	return result;
822 }
823 
824 static int cx23885_dev_setup(struct cx23885_dev *dev)
825 {
826 	int i;
827 
828 	spin_lock_init(&dev->pci_irqmask_lock);
829 	spin_lock_init(&dev->slock);
830 
831 	mutex_init(&dev->lock);
832 	mutex_init(&dev->gpio_lock);
833 
834 	atomic_inc(&dev->refcount);
835 
836 	dev->nr = cx23885_devcount++;
837 	sprintf(dev->name, "cx23885[%d]", dev->nr);
838 
839 	/* Configure the internal memory */
840 	if (dev->pci->device == 0x8880) {
841 		/* Could be 887 or 888, assume a default */
842 		dev->bridge = CX23885_BRIDGE_887;
843 		/* Apply a sensible clock frequency for the PCIe bridge */
844 		dev->clk_freq = 25000000;
845 		dev->sram_channels = cx23887_sram_channels;
846 	} else
847 	if (dev->pci->device == 0x8852) {
848 		dev->bridge = CX23885_BRIDGE_885;
849 		/* Apply a sensible clock frequency for the PCIe bridge */
850 		dev->clk_freq = 28000000;
851 		dev->sram_channels = cx23885_sram_channels;
852 	} else
853 		BUG();
854 
855 	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
856 		__func__, dev->bridge);
857 
858 	/* board config */
859 	dev->board = UNSET;
860 	if (card[dev->nr] < cx23885_bcount)
861 		dev->board = card[dev->nr];
862 	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
863 		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
864 		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
865 			dev->board = cx23885_subids[i].card;
866 	if (UNSET == dev->board) {
867 		dev->board = CX23885_BOARD_UNKNOWN;
868 		cx23885_card_list(dev);
869 	}
870 
871 	/* If the user specific a clk freq override, apply it */
872 	if (cx23885_boards[dev->board].clk_freq > 0)
873 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
874 
875 	dev->pci_bus  = dev->pci->bus->number;
876 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
877 	cx23885_irq_add(dev, 0x001f00);
878 
879 	/* External Master 1 Bus */
880 	dev->i2c_bus[0].nr = 0;
881 	dev->i2c_bus[0].dev = dev;
882 	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
883 	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
884 	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
885 	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
886 	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
887 	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
888 
889 	/* External Master 2 Bus */
890 	dev->i2c_bus[1].nr = 1;
891 	dev->i2c_bus[1].dev = dev;
892 	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
893 	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
894 	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
895 	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
896 	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
897 	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
898 
899 	/* Internal Master 3 Bus */
900 	dev->i2c_bus[2].nr = 2;
901 	dev->i2c_bus[2].dev = dev;
902 	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
903 	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
904 	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
905 	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
906 	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
907 	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
908 
909 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
910 		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
911 		cx23885_init_tsport(dev, &dev->ts1, 1);
912 
913 	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
914 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
915 		cx23885_init_tsport(dev, &dev->ts2, 2);
916 
917 	if (get_resources(dev) < 0) {
918 		printk(KERN_ERR "CORE %s No more PCIe resources for "
919 		       "subsystem: %04x:%04x\n",
920 		       dev->name, dev->pci->subsystem_vendor,
921 		       dev->pci->subsystem_device);
922 
923 		cx23885_devcount--;
924 		return -ENODEV;
925 	}
926 
927 	/* PCIe stuff */
928 	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
929 			     pci_resource_len(dev->pci, 0));
930 
931 	dev->bmmio = (u8 __iomem *)dev->lmmio;
932 
933 	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
934 	       dev->name, dev->pci->subsystem_vendor,
935 	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
936 	       dev->board, card[dev->nr] == dev->board ?
937 	       "insmod option" : "autodetected");
938 
939 	cx23885_pci_quirks(dev);
940 
941 	/* Assume some sensible defaults */
942 	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
943 	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
944 	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
945 	dev->radio_type = cx23885_boards[dev->board].radio_type;
946 	dev->radio_addr = cx23885_boards[dev->board].radio_addr;
947 
948 	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
949 		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
950 	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
951 		__func__, dev->radio_type, dev->radio_addr);
952 
953 	/* The cx23417 encoder has GPIO's that need to be initialised
954 	 * before DVB, so that demodulators and tuners are out of
955 	 * reset before DVB uses them.
956 	 */
957 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
958 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
959 			cx23885_mc417_init(dev);
960 
961 	/* init hardware */
962 	cx23885_reset(dev);
963 
964 	cx23885_i2c_register(&dev->i2c_bus[0]);
965 	cx23885_i2c_register(&dev->i2c_bus[1]);
966 	cx23885_i2c_register(&dev->i2c_bus[2]);
967 	cx23885_card_setup(dev);
968 	call_all(dev, core, s_power, 0);
969 	cx23885_ir_init(dev);
970 
971 	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
972 		if (cx23885_video_register(dev) < 0) {
973 			printk(KERN_ERR "%s() Failed to register analog "
974 				"video adapters on VID_A\n", __func__);
975 		}
976 	}
977 
978 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
979 		if (cx23885_boards[dev->board].num_fds_portb)
980 			dev->ts1.num_frontends =
981 				cx23885_boards[dev->board].num_fds_portb;
982 		if (cx23885_dvb_register(&dev->ts1) < 0) {
983 			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
984 			       __func__);
985 		}
986 	} else
987 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
988 		if (cx23885_417_register(dev) < 0) {
989 			printk(KERN_ERR
990 				"%s() Failed to register 417 on VID_B\n",
991 			       __func__);
992 		}
993 	}
994 
995 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
996 		if (cx23885_boards[dev->board].num_fds_portc)
997 			dev->ts2.num_frontends =
998 				cx23885_boards[dev->board].num_fds_portc;
999 		if (cx23885_dvb_register(&dev->ts2) < 0) {
1000 			printk(KERN_ERR
1001 				"%s() Failed to register dvb on VID_C\n",
1002 			       __func__);
1003 		}
1004 	} else
1005 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1006 		if (cx23885_417_register(dev) < 0) {
1007 			printk(KERN_ERR
1008 				"%s() Failed to register 417 on VID_C\n",
1009 			       __func__);
1010 		}
1011 	}
1012 
1013 	cx23885_dev_checkrevision(dev);
1014 
1015 	/* disable MSI for NetUP cards, otherwise CI is not working */
1016 	if (cx23885_boards[dev->board].ci_type > 0)
1017 		cx_clear(RDR_RDRCTL1, 1 << 8);
1018 
1019 	switch (dev->board) {
1020 	case CX23885_BOARD_TEVII_S470:
1021 	case CX23885_BOARD_TEVII_S471:
1022 		cx_clear(RDR_RDRCTL1, 1 << 8);
1023 		break;
1024 	}
1025 
1026 	return 0;
1027 }
1028 
/*
 * Tear down a device: give back the BAR0 region, then — only when the
 * last reference is dropped — unregister every sub-function registered
 * by cx23885_dev_setup() (in reverse order) and unmap the MMIO window.
 * NOTE(review): the mem region is released before the refcount check,
 * i.e. even while other references still exist — looks deliberate
 * (mirrors setup's claim-before-inc ordering) but worth confirming.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1058 
/*
 * Emit the RISC DMA instructions that transfer one field (or a generic
 * data buffer) into a scatter/gather list.
 *
 * @rp:        current write pointer into the RISC program
 * @sglist:    destination scatter/gather list
 * @offset:    byte offset into the S/G list where this field starts
 * @sync_line: line to emit a RESYNC for, or NO_SYNC_LINE to skip it
 * @bpl:       bytes per line
 * @padding:   bytes skipped after each line (0 for packed data)
 * @lines:     number of lines to transfer
 * @lpi:       lines per IRQ; 0 disables the periodic per-line interrupt
 * @jump:      emit a leading 3-dword JUMP (its target is patched later
 *             by the buffer-queueing code)
 *
 * Returns the advanced write pointer, one past the last emitted dword.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines,  unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		/* Placeholder jump: address dwords are filled in by
		 * cx23885_buf_queue() when the buffer is chained. */
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* Skip whole S/G entries consumed by the running offset. */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* Raise an IRQ (and bump the counter) every lpi lines. */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split across S/G entries:
			 * first the tail of the current entry, then any
			 * number of full entries, then the remainder with
			 * the EOL flag set. */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		/* Inter-line padding advances the offset without a write. */
		offset += padding;
	}

	return rp;
}
1125 
/*
 * Build a RISC program for an interlaced video buffer.
 *
 * @pci:           PCI device used for the coherent DMA allocation
 * @risc:          RISC memory descriptor to fill in (cpu/dma/size/jmp)
 * @sglist:        destination scatter/gather list
 * @top_offset:    byte offset of the top field, or UNSET to skip it
 * @bottom_offset: byte offset of the bottom field, or UNSET to skip it
 * @bpl:           bytes per line
 * @padding:       bytes skipped after each line
 * @lines:         lines per field
 *
 * Returns 0 on success or -ENOMEM if the program cannot be allocated.
 * The caller later patches the jump at risc->jmp to chain buffers.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		/* sync value 0x200 selects the bottom field; only emit the
		 * leading jump here if the top field didn't already. */
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1167 
1168 int cx23885_risc_databuffer(struct pci_dev *pci,
1169 				   struct cx23885_riscmem *risc,
1170 				   struct scatterlist *sglist,
1171 				   unsigned int bpl,
1172 				   unsigned int lines, unsigned int lpi)
1173 {
1174 	u32 instructions;
1175 	__le32 *rp;
1176 
1177 	/* estimate risc mem: worst case is one write per page border +
1178 	   one write per scan line + syncs + jump (all 2 dwords).  Here
1179 	   there is no padding and no sync.  First DMA region may be smaller
1180 	   than PAGE_SIZE */
1181 	/* Jump and write need an extra dword */
1182 	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
1183 	instructions += 4;
1184 
1185 	risc->size = instructions * 12;
1186 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1187 	if (risc->cpu == NULL)
1188 		return -ENOMEM;
1189 
1190 	/* write risc instructions */
1191 	rp = risc->cpu;
1192 	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1193 				bpl, 0, lines, lpi, lpi == 0);
1194 
1195 	/* save pointer to jmp instruction address */
1196 	risc->jmp = rp;
1197 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1198 	return 0;
1199 }
1200 
/*
 * Build a RISC program for a VBI buffer.  Identical in structure to
 * cx23885_risc_buffer(); kept separate for the VBI-specific sync
 * handling documented below.  Returns 0 or -ENOMEM.
 */
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);



	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1247 
1248 
1249 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1250 {
1251 	struct cx23885_riscmem *risc = &buf->risc;
1252 
1253 	BUG_ON(in_interrupt());
1254 	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
1255 }
1256 
/* Dump the interrupt, DMA and per-port transport-stream registers for a
 * TS port at debug level 1.  Purely diagnostic; no registers are written. */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	/* reg_src_sel is 0 on ports without a source-select register */
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1308 
1309 int cx23885_start_dma(struct cx23885_tsport *port,
1310 			     struct cx23885_dmaqueue *q,
1311 			     struct cx23885_buffer   *buf)
1312 {
1313 	struct cx23885_dev *dev = port->dev;
1314 	u32 reg;
1315 
1316 	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1317 		dev->width, dev->height, dev->field);
1318 
1319 	/* Stop the fifo and risc engine for this port */
1320 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1321 
1322 	/* setup fifo + format */
1323 	cx23885_sram_channel_setup(dev,
1324 				   &dev->sram_channels[port->sram_chno],
1325 				   port->ts_packet_size, buf->risc.dma);
1326 	if (debug > 5) {
1327 		cx23885_sram_channel_dump(dev,
1328 			&dev->sram_channels[port->sram_chno]);
1329 		cx23885_risc_disasm(port, &buf->risc);
1330 	}
1331 
1332 	/* write TS length to chip */
1333 	cx_write(port->reg_lngth, port->ts_packet_size);
1334 
1335 	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1336 		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1337 		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1338 			__func__,
1339 			cx23885_boards[dev->board].portb,
1340 			cx23885_boards[dev->board].portc);
1341 		return -EINVAL;
1342 	}
1343 
1344 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1345 		cx23885_av_clk(dev, 0);
1346 
1347 	udelay(100);
1348 
1349 	/* If the port supports SRC SELECT, configure it */
1350 	if (port->reg_src_sel)
1351 		cx_write(port->reg_src_sel, port->src_sel_val);
1352 
1353 	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1354 	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1355 	cx_write(port->reg_vld_misc, port->vld_misc_val);
1356 	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1357 	udelay(100);
1358 
1359 	/* NOTE: this is 2 (reserved) for portb, does it matter? */
1360 	/* reset counter to zero */
1361 	cx_write(port->reg_gpcnt_ctl, 3);
1362 	q->count = 0;
1363 
1364 	/* Set VIDB pins to input */
1365 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1366 		reg = cx_read(PAD_CTRL);
1367 		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1368 		cx_write(PAD_CTRL, reg);
1369 	}
1370 
1371 	/* Set VIDC pins to input */
1372 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1373 		reg = cx_read(PAD_CTRL);
1374 		reg &= ~0x4; /* Clear TS2_SOP_OE */
1375 		cx_write(PAD_CTRL, reg);
1376 	}
1377 
1378 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1379 
1380 		reg = cx_read(PAD_CTRL);
1381 		reg = reg & ~0x1;    /* Clear TS1_OE */
1382 
1383 		/* FIXME, bit 2 writing here is questionable */
1384 		/* set TS1_SOP_OE and TS1_OE_HI */
1385 		reg = reg | 0xa;
1386 		cx_write(PAD_CTRL, reg);
1387 
1388 		/* FIXME and these two registers should be documented. */
1389 		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1390 		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1391 	}
1392 
1393 	switch (dev->bridge) {
1394 	case CX23885_BRIDGE_885:
1395 	case CX23885_BRIDGE_887:
1396 	case CX23885_BRIDGE_888:
1397 		/* enable irqs */
1398 		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1399 		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
1400 		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1401 		cx23885_irq_add(dev, port->pci_irqmask);
1402 		cx23885_irq_enable_all(dev);
1403 		break;
1404 	default:
1405 		BUG();
1406 	}
1407 
1408 	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1409 
1410 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1411 		cx23885_av_clk(dev, 1);
1412 
1413 	if (debug > 4)
1414 		cx23885_tsport_reg_dump(port);
1415 
1416 	return 0;
1417 }
1418 
1419 static int cx23885_stop_dma(struct cx23885_tsport *port)
1420 {
1421 	struct cx23885_dev *dev = port->dev;
1422 	u32 reg;
1423 
1424 	dprintk(1, "%s()\n", __func__);
1425 
1426 	/* Stop interrupts and DMA */
1427 	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1428 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1429 
1430 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1431 
1432 		reg = cx_read(PAD_CTRL);
1433 
1434 		/* Set TS1_OE */
1435 		reg = reg | 0x1;
1436 
1437 		/* clear TS1_SOP_OE and TS1_OE_HI */
1438 		reg = reg & ~0xa;
1439 		cx_write(PAD_CTRL, reg);
1440 		cx_write(port->reg_src_sel, 0);
1441 		cx_write(port->reg_gen_ctrl, 8);
1442 
1443 	}
1444 
1445 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1446 		cx23885_av_clk(dev, 0);
1447 
1448 	return 0;
1449 }
1450 
1451 /* ------------------------------------------------------------------ */
1452 
1453 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1454 {
1455 	struct cx23885_dev *dev = port->dev;
1456 	int size = port->ts_packet_size * port->ts_packet_count;
1457 	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1458 
1459 	dprintk(1, "%s: %p\n", __func__, buf);
1460 	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1461 		return -EINVAL;
1462 	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1463 
1464 	cx23885_risc_databuffer(dev->pci, &buf->risc,
1465 				sgt->sgl,
1466 				port->ts_packet_size, port->ts_packet_count, 0);
1467 	return 0;
1468 }
1469 
1470 /*
1471  * The risc program for each buffer works as follows: it starts with a simple
1472  * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1473  * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1474  * the initial JUMP).
1475  *
1476  * This is the risc program of the first buffer to be queued if the active list
1477  * is empty and it just keeps DMAing this buffer without generating any
1478  * interrupts.
1479  *
1480  * If a new buffer is added then the initial JUMP in the code for that buffer
1481  * will generate an interrupt which signals that the previous buffer has been
1482  * DMAed successfully and that it can be returned to userspace.
1483  *
1484  * It also sets the final jump of the previous buffer to the start of the new
1485  * buffer, thus chaining the new buffer into the DMA chain. This is a single
1486  * atomic u32 write, so there is no race condition.
1487  *
1488  * The end-result of all this that you only get an interrupt when a buffer
1489  * is ready, so the control flow is very easy.
1490  */
/* Queue a prepared buffer, chaining its RISC program into the running
 * DMA program as described in the comment block above.  Safe against the
 * interrupt handler via dev->slock. */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	/* Initial JUMP targets this buffer's own program + 12 (a NOP),
	 * and the trailing JUMP loops back the same way with the
	 * completion counter incremented. */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* Arm the new buffer's leading JUMP to raise an IRQ (it
		 * signals the previous buffer is done), then patch the
		 * previous buffer's trailing JUMP to this buffer.  That
		 * patch is a single u32 write, hence race-free. */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1519 
1520 /* ----------------------------------------------------------- */
1521 
1522 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1523 {
1524 	struct cx23885_dev *dev = port->dev;
1525 	struct cx23885_dmaqueue *q = &port->mpegq;
1526 	struct cx23885_buffer *buf;
1527 	unsigned long flags;
1528 
1529 	spin_lock_irqsave(&port->slock, flags);
1530 	while (!list_empty(&q->active)) {
1531 		buf = list_entry(q->active.next, struct cx23885_buffer,
1532 				 queue);
1533 		list_del(&buf->queue);
1534 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1535 		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1536 			buf, buf->vb.vb2_buf.index, reason,
1537 			(unsigned long)buf->risc.dma);
1538 	}
1539 	spin_unlock_irqrestore(&port->slock, flags);
1540 }
1541 
/* Stop the port's DMA, then return all queued buffers to vb2 as errors. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1550 
/*
 * Interrupt service for a port running the cx23417 MPEG encoder.
 * On RISC op-code/sync/overflow errors the DMA is stopped and the
 * encoder state checked; on RISCI1 the completed buffer is handed back.
 * Returns 1 if any status bit was handled (and acked), else 0.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		/* Stop the DMA engine on error before probing further. */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* Write-back acks the interrupt status bits. */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1606 
/*
 * Interrupt service for a DVB transport-stream port.  Error conditions
 * halt the DMA and dump the SRAM channel; a RISCI1 completion wakes the
 * finished buffer.  Returns 1 if any status bit was handled, else 0.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* Stop the DMA engine; the stream is no longer coherent. */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		/* Buffer complete: hand it back under the port lock. */
		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* Write-back acks the interrupt status bits. */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1657 
/*
 * Top-level (shared) interrupt handler.  Gathers PCI bridge, video,
 * audio and both TS port statuses, dispatches to the per-subsystem
 * handlers, and finally acks PCI_INT_STAT if anything was handled.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	/* Snapshot all status/mask registers up front. */
	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* Nothing pending for us on this shared line. */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Verbose decode of the bridge status bits (debug only). */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* Board-specific CAM (conditional access) interrupt routing. */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* Per-port dispatch depends on how the board wired each port. */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	/* Let the IR subdevice service its own interrupt. */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* A/V core work can't run in IRQ context: mask it and defer. */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1814 
1815 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1816 				    unsigned int notification, void *arg)
1817 {
1818 	struct cx23885_dev *dev;
1819 
1820 	if (sd == NULL)
1821 		return;
1822 
1823 	dev = to_cx23885(sd->v4l2_dev);
1824 
1825 	switch (notification) {
1826 	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1827 		if (sd == dev->sd_ir)
1828 			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1829 		break;
1830 	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1831 		if (sd == dev->sd_ir)
1832 			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1833 		break;
1834 	}
1835 }
1836 
/* Set up the deferred-work handlers and install the notify callback so
 * subdevice notifications (possibly in IRQ context) can be dispatched. */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1844 
1845 static inline int encoder_on_portb(struct cx23885_dev *dev)
1846 {
1847 	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1848 }
1849 
1850 static inline int encoder_on_portc(struct cx23885_dev *dev)
1851 {
1852 	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1853 }
1854 
/* Mask represents 32 different GPIOs. The GPIOs are split across multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
1867 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1868 {
1869 	if (mask & 0x7)
1870 		cx_set(GP0_IO, mask & 0x7);
1871 
1872 	if (mask & 0x0007fff8) {
1873 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1874 			printk(KERN_ERR
1875 				"%s: Setting GPIO on encoder ports\n",
1876 				dev->name);
1877 		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1878 	}
1879 
1880 	/* TODO: 23-19 */
1881 	if (mask & 0x00f80000)
1882 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1883 }
1884 
1885 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1886 {
1887 	if (mask & 0x00000007)
1888 		cx_clear(GP0_IO, mask & 0x7);
1889 
1890 	if (mask & 0x0007fff8) {
1891 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1892 			printk(KERN_ERR
1893 				"%s: Clearing GPIO moving on encoder ports\n",
1894 				dev->name);
1895 		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1896 	}
1897 
1898 	/* TODO: 23-19 */
1899 	if (mask & 0x00f80000)
1900 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1901 }
1902 
/* Read back GPIO input levels for the bits selected by @mask.
 * NOTE(review): only the lowest bank matched by @mask is read — a mask
 * spanning both the bridge (2..0) and host-bus (18..3) ranges returns
 * only the bridge bits.  Presumably callers never mix banks; confirm
 * against call sites before relying on multi-bank masks. */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
1922 
1923 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1924 {
1925 	if ((mask & 0x00000007) && asoutput)
1926 		cx_set(GP0_IO, (mask & 0x7) << 16);
1927 	else if ((mask & 0x00000007) && !asoutput)
1928 		cx_clear(GP0_IO, (mask & 0x7) << 16);
1929 
1930 	if (mask & 0x0007fff8) {
1931 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1932 			printk(KERN_ERR
1933 				"%s: Enabling GPIO on encoder ports\n",
1934 				dev->name);
1935 	}
1936 
1937 	/* MC417_OEN is active low for output, write 1 for an input */
1938 	if ((mask & 0x0007fff8) && asoutput)
1939 		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1940 
1941 	else if ((mask & 0x0007fff8) && !asoutput)
1942 		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1943 
1944 	/* TODO: 23-19 */
1945 }
1946 
1947 static int cx23885_initdev(struct pci_dev *pci_dev,
1948 			   const struct pci_device_id *pci_id)
1949 {
1950 	struct cx23885_dev *dev;
1951 	struct v4l2_ctrl_handler *hdl;
1952 	int err;
1953 
1954 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1955 	if (NULL == dev)
1956 		return -ENOMEM;
1957 
1958 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1959 	if (err < 0)
1960 		goto fail_free;
1961 
1962 	hdl = &dev->ctrl_handler;
1963 	v4l2_ctrl_handler_init(hdl, 6);
1964 	if (hdl->error) {
1965 		err = hdl->error;
1966 		goto fail_ctrl;
1967 	}
1968 	dev->v4l2_dev.ctrl_handler = hdl;
1969 
1970 	/* Prepare to handle notifications from subdevices */
1971 	cx23885_v4l2_dev_notify_init(dev);
1972 
1973 	/* pci init */
1974 	dev->pci = pci_dev;
1975 	if (pci_enable_device(pci_dev)) {
1976 		err = -EIO;
1977 		goto fail_ctrl;
1978 	}
1979 
1980 	if (cx23885_dev_setup(dev) < 0) {
1981 		err = -EINVAL;
1982 		goto fail_ctrl;
1983 	}
1984 
1985 	/* print pci info */
1986 	dev->pci_rev = pci_dev->revision;
1987 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
1988 	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1989 	       "latency: %d, mmio: 0x%llx\n", dev->name,
1990 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1991 	       dev->pci_lat,
1992 		(unsigned long long)pci_resource_start(pci_dev, 0));
1993 
1994 	pci_set_master(pci_dev);
1995 	err = pci_set_dma_mask(pci_dev, 0xffffffff);
1996 	if (err) {
1997 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1998 		goto fail_context;
1999 	}
2000 
2001 	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
2002 	if (IS_ERR(dev->alloc_ctx)) {
2003 		err = PTR_ERR(dev->alloc_ctx);
2004 		goto fail_context;
2005 	}
2006 	err = request_irq(pci_dev->irq, cx23885_irq,
2007 			  IRQF_SHARED, dev->name, dev);
2008 	if (err < 0) {
2009 		printk(KERN_ERR "%s: can't get IRQ %d\n",
2010 		       dev->name, pci_dev->irq);
2011 		goto fail_irq;
2012 	}
2013 
2014 	switch (dev->board) {
2015 	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2016 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2017 		break;
2018 	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2019 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2020 		break;
2021 	}
2022 
2023 	/*
2024 	 * The CX2388[58] IR controller can start firing interrupts when
2025 	 * enabled, so these have to take place after the cx23885_irq() handler
2026 	 * is hooked up by the call to request_irq() above.
2027 	 */
2028 	cx23885_ir_pci_int_enable(dev);
2029 	cx23885_input_init(dev);
2030 
2031 	return 0;
2032 
2033 fail_irq:
2034 	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2035 fail_context:
2036 	cx23885_dev_unregister(dev);
2037 fail_ctrl:
2038 	v4l2_ctrl_handler_free(hdl);
2039 	v4l2_device_unregister(&dev->v4l2_dev);
2040 fail_free:
2041 	kfree(dev);
2042 	return err;
2043 }
2044 
/*
 * PCI remove callback: tear down the device in roughly the reverse order
 * of cx23885_initdev().  The order is significant — IR/input and the
 * hardware are quiesced before the IRQ is released, and the IRQ is freed
 * before the device state is unregistered.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	/* Stop IR/input first: they may still generate interrupts */
	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	/* Quiesce the hardware before releasing the IRQ line */
	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	/* Release driver state, then the V4L2 control/device framework */
	cx23885_dev_unregister(dev);
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2066 
2067 static struct pci_device_id cx23885_pci_tbl[] = {
2068 	{
2069 		/* CX23885 */
2070 		.vendor       = 0x14f1,
2071 		.device       = 0x8852,
2072 		.subvendor    = PCI_ANY_ID,
2073 		.subdevice    = PCI_ANY_ID,
2074 	}, {
2075 		/* CX23887 Rev 2 */
2076 		.vendor       = 0x14f1,
2077 		.device       = 0x8880,
2078 		.subvendor    = PCI_ANY_ID,
2079 		.subdevice    = PCI_ANY_ID,
2080 	}, {
2081 		/* --- end of list --- */
2082 	}
2083 };
2084 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2085 
/* PCI driver glue: binds cx23885_pci_tbl IDs to the probe/remove hooks. */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2095 
2096 static int __init cx23885_init(void)
2097 {
2098 	printk(KERN_INFO "cx23885 driver version %s loaded\n",
2099 		CX23885_VERSION);
2100 	return pci_register_driver(&cx23885_pci_driver);
2101 }
2102 
/* Module exit point: unbind from the PCI core (invokes cx23885_finidev). */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2107 
2108 module_init(cx23885_init);
2109 module_exit(cx23885_fini);
2110