1 /*
2  *  Driver for the Conexant CX23885 PCIe bridge
3  *
4  *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *
15  *  GNU General Public License for more details.
16  */
17 
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kmod.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/div64.h>
28 #include <linux/firmware.h>
29 
30 #include "cx23885.h"
31 #include "cimax2.h"
32 #include "altera-ci.h"
33 #include "cx23888-ir.h"
34 #include "cx23885-ir.h"
35 #include "cx23885-av.h"
36 #include "cx23885-input.h"
37 
38 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
39 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION(CX23885_VERSION);
42 
/* Debug verbosity; higher values enable more verbose dprintk() output */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Per-device board type override; UNSET entries are autodetected later
 * from the PCI subsystem vendor/device ids */
static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card,  int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Emit a KERN_DEBUG message when 'debug' is at least 'level'.
 * Requires a 'dev' (struct cx23885_dev *) in the calling scope. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)

/* Number of cx23885 devices probed so far; used to number device names */
static unsigned int cx23885_devcount;

/* Sentinel meaning "emit no RISC resync instruction" */
#define NO_SYNC_LINE (-1U)
59 
60 /* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
62  * CX23887 Assumptions
63  * 1 line = 16 bytes of CDT
64  * cmds size = 80
65  * cdt size = 16 * linesize
66  * iqsize = 64
67  * maxlines = 6
68  *
69  * Address Space:
70  * 0x00000000 0x00008fff FIFO clusters
71  * 0x00010000 0x000104af Channel Management Data Structures
72  * 0x000104b0 0x000104ff Free
73  * 0x00010500 0x000108bf 15 channels * iqsize
74  * 0x000108c0 0x000108ff Free
75  * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76  *                       15 channels * (iqsize + (maxlines * linesize))
77  * 0x00010ea0 0x00010xxx Free
78  */
79 
/*
 * SRAM DMA channel layout for the cx23885 bridge.  Entries named "chN"
 * with all-zero addresses are unused placeholders;
 * cx23885_sram_channel_setup() disables a channel whose cmds_start is 0.
 * NOTE(review): SRAM_CH06 programs the DMA5_* registers, the same set
 * as SRAM_CH05 — present in the original table, confirm vs. datasheet.
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
190 
/*
 * SRAM DMA channel layout for the cx23887/8 bridge (different SRAM
 * addresses than the cx23885 table above; adds a VBI channel on CH02).
 * "chN" entries with all-zero addresses are unused placeholders.
 * NOTE(review): as in the cx23885 table, SRAM_CH06 reuses the DMA5_*
 * registers — confirm against the datasheet.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
301 
302 static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303 {
304 	unsigned long flags;
305 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306 
307 	dev->pci_irqmask |= mask;
308 
309 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310 }
311 
312 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313 {
314 	unsigned long flags;
315 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316 
317 	dev->pci_irqmask |= mask;
318 	cx_set(PCI_INT_MSK, mask);
319 
320 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321 }
322 
323 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324 {
325 	u32 v;
326 	unsigned long flags;
327 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328 
329 	v = mask & dev->pci_irqmask;
330 	if (v)
331 		cx_set(PCI_INT_MSK, v);
332 
333 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334 }
335 
/* Enable every interrupt bit registered in the software shadow mask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, ~0U);
}
340 
341 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342 {
343 	unsigned long flags;
344 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345 
346 	cx_clear(PCI_INT_MSK, mask);
347 
348 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349 }
350 
/* Disable every interrupt source in hardware (shadow mask preserved). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, ~0U);
}
355 
356 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357 {
358 	unsigned long flags;
359 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360 
361 	dev->pci_irqmask &= ~mask;
362 	cx_clear(PCI_INT_MSK, mask);
363 
364 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365 }
366 
367 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368 {
369 	u32 v;
370 	unsigned long flags;
371 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372 
373 	v = cx_read(PCI_INT_MSK);
374 
375 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 	return v;
377 }
378 
/*
 * Pretty-print one RISC instruction word to the kernel log and return
 * the number of 32-bit words it occupies (opcode plus arguments), so a
 * caller can step through a program.  Unknown opcodes print "INVALID"
 * and advance by 1 word.
 */
static int cx23885_risc_decode(u32 risc)
{
	/* Opcode mnemonics, indexed by the top 4 bits of the word */
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	/* Instruction length in dwords per opcode; zero entries fall
	 * back to 1 in the return statement below */
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	/* Names for flag bits 12..27, printed highest index first */
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	/* Continuation printk (no KERN_* level): appends to the line
	 * the caller has already started */
	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
418 
/*
 * Complete the oldest buffer on the port's active queue: timestamp it,
 * give it the next sequence number, unlink it and hand it back to
 * videobuf2 as DONE.  Does nothing if the queue is empty.
 * @count is only reported in the debug message.
 * NOTE(review): assumes the caller serialises access to @q — verify
 * against the interrupt handler's locking.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	/* oldest queued buffer is at the head of the active list */
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
		buf->vb.vb2_buf.index,
		count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
438 
/*
 * Program one SRAM DMA channel: build its cluster descriptor table
 * (CDT) and CMDS block, then load the channel's pointer/count
 * registers.  A channel whose cmds_start is 0 is erased (disabled)
 * instead.  @bpl (bytes per line) is rounded up to an 8-byte multiple;
 * at most 6 CDT lines are used, and fewer than 2 is treated as a fatal
 * programming error.  Returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
				      struct sram_channel *ch,
				      unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	/* all-zero table entry: unused channel, just clear its regs */
	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* Self-jump instruction at fixed SRAM address 8; jumponly
	 * channels point their CMDS block at it below */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte entry per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS: initial RISC PC, CDT base/size, IQ base/size */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* zero the remainder of the 80-byte CMDS block */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
510 
/*
 * Dump the full state of one SRAM DMA channel — CMDS block, embedded
 * RISC instructions, instruction queue and the DMA pointer/count
 * registers — to the kernel log at KERN_WARNING, for debugging.
 */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
				      struct sram_channel *ch)
{
	/* Field names for the first 14 dwords of the CMDS block */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	/* decode the 4 RISC words following the named CMDS fields */
	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	/* walk the instruction queue; n is the decoded opcode length */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
572 
573 static void cx23885_risc_disasm(struct cx23885_tsport *port,
574 				struct cx23885_riscmem *risc)
575 {
576 	struct cx23885_dev *dev = port->dev;
577 	unsigned int i, j, n;
578 
579 	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
580 	       dev->name, risc->cpu, (unsigned long)risc->dma);
581 	for (i = 0; i < (risc->size >> 2); i += n) {
582 		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
583 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
584 		for (j = 1; j < n; j++)
585 			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
586 			       dev->name, i + j, risc->cpu[i + j], j);
587 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
588 			break;
589 	}
590 }
591 
/*
 * Quiesce the bridge: stop the RISC controller, all IR, video and
 * audio DMA activity and the UART, then mask every interrupt source.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
621 
/*
 * Full device reset: shut everything down, acknowledge all latched
 * interrupt status, reprogram clock delay and pad control, then
 * (re)initialise every SRAM DMA channel and the board GPIOs.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* writing 1s clears pending interrupt status bits */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* busy-wait settle delay; NOTE(review): msleep() may suffice if
	 * this is never reached from atomic context — verify callers */
	mdelay(100);

	/* SRAM channels: video uses 720*4 bytes/line, TS ports 188*4;
	 * 128-byte placeholders are effectively disabled channels */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
654 
655 
656 static int cx23885_pci_quirks(struct cx23885_dev *dev)
657 {
658 	dprintk(1, "%s()\n", __func__);
659 
660 	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
661 	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
662 	 * occur on the cx23887 bridge.
663 	 */
664 	if (dev->bridge == CX23885_BRIDGE_885)
665 		cx_clear(RDR_TLCTL0, 1 << 4);
666 
667 	return 0;
668 }
669 
670 static int get_resources(struct cx23885_dev *dev)
671 {
672 	if (request_mem_region(pci_resource_start(dev->pci, 0),
673 			       pci_resource_len(dev->pci, 0),
674 			       dev->name))
675 		return 0;
676 
677 	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
678 		dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
679 
680 	return -EBUSY;
681 }
682 
/*
 * Initialise one MPEG transport-stream port (portno 1 = VID_B,
 * portno 2 = VID_C): common DMA/RISC defaults, locks and queues,
 * then the port-specific register map, SRAM channel number and PCI
 * interrupt bit.  Any other portno is a programming error (BUG()).
 * Returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	/* 0x47 is the MPEG TS sync byte; 188 the TS packet length */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded to allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
753 
754 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
755 {
756 	switch (cx_read(RDR_CFG2) & 0xff) {
757 	case 0x00:
758 		/* cx23885 */
759 		dev->hwrevision = 0xa0;
760 		break;
761 	case 0x01:
762 		/* CX23885-12Z */
763 		dev->hwrevision = 0xa1;
764 		break;
765 	case 0x02:
766 		/* CX23885-13Z/14Z */
767 		dev->hwrevision = 0xb0;
768 		break;
769 	case 0x03:
770 		if (dev->pci->device == 0x8880) {
771 			/* CX23888-21Z/22Z */
772 			dev->hwrevision = 0xc0;
773 		} else {
774 			/* CX23885-14Z */
775 			dev->hwrevision = 0xa4;
776 		}
777 		break;
778 	case 0x04:
779 		if (dev->pci->device == 0x8880) {
780 			/* CX23888-31Z */
781 			dev->hwrevision = 0xd0;
782 		} else {
783 			/* CX23885-15Z, CX23888-31Z */
784 			dev->hwrevision = 0xa5;
785 		}
786 		break;
787 	case 0x0e:
788 		/* CX23887-15Z */
789 		dev->hwrevision = 0xc0;
790 		break;
791 	case 0x0f:
792 		/* CX23887-14Z */
793 		dev->hwrevision = 0xb1;
794 		break;
795 	default:
796 		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
797 			__func__, dev->hwrevision);
798 	}
799 	if (dev->hwrevision)
800 		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
801 			__func__, dev->hwrevision);
802 	else
803 		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
804 			__func__, dev->hwrevision);
805 }
806 
807 /* Find the first v4l2_subdev member of the group id in hw */
808 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
809 {
810 	struct v4l2_subdev *result = NULL;
811 	struct v4l2_subdev *sd;
812 
813 	spin_lock(&dev->v4l2_dev.lock);
814 	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
815 		if (sd->grp_id == hw) {
816 			result = sd;
817 			break;
818 		}
819 	}
820 	spin_unlock(&dev->v4l2_dev.lock);
821 	return result;
822 }
823 
/*
 * One-time device initialisation: locks, device naming, bridge type
 * and SRAM layout selection, board identification, I2C bus
 * descriptors, transport port setup, MMIO mapping, hardware reset and
 * registration of the analog/DVB/encoder sub-drivers configured for
 * the board.  Returns 0 on success or -ENODEV when the MMIO region
 * cannot be claimed.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: insmod override first, then PCI subsystem id match */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	/* transport ports only exist for DVB/encoder boards */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff: map BAR0; bmmio is a byte-granular alias */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
			cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * these gpios are pulled high. Make sure these GPIOs are
		 * marked as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
				"%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		/* same MSI disable quirk as the NetUP CI boards above */
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}
1037 
/*
 * Tear down a device: release the MMIO region, then — only when the
 * last reference is dropped — unregister all sub-drivers, the I2C
 * buses, and unmap the registers.
 * NOTE(review): the MMIO region is released before the refcount test,
 * i.e. on every call — confirm this ordering is intentional.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* unregister I2C buses in reverse order of registration */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1067 
/*
 * Emit the RISC DMA program for one field into the instruction buffer.
 *
 * rp        - write pointer into the RISC instruction buffer
 * sglist    - scatter-gather list describing the destination memory
 * offset    - byte offset into the scatterlist where this field starts
 * sync_line - line to RESYNC on, or NO_SYNC_LINE to omit the sync opcode
 * bpl       - bytes per line to transfer
 * padding   - destination bytes skipped after each line
 * lines     - number of lines to transfer
 * lpi       - "lines per IRQ": if non-zero, every lpi-th line also raises
 *             IRQ1 and increments the line counter
 * jump      - emit a placeholder JUMP ahead of the program (its target is
 *             patched later, e.g. by cx23885_buf_queue())
 *
 * Returns the updated write pointer (one past the last emitted dword).
 * Every instruction is three dwords: opcode, address bits 31-0 and
 * address bits 63-32 (always 0 here).
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines,  unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* skip scatterlist entries wholly consumed by the offset */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* every lpi-th line: raise IRQ1 and bump the counter */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			/* first partial write up to the end of this chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			/* whole chunks in the middle of the line */
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			/* final partial write, terminated with EOL */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1134 
1135 int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1136 			struct scatterlist *sglist, unsigned int top_offset,
1137 			unsigned int bottom_offset, unsigned int bpl,
1138 			unsigned int padding, unsigned int lines)
1139 {
1140 	u32 instructions, fields;
1141 	__le32 *rp;
1142 
1143 	fields = 0;
1144 	if (UNSET != top_offset)
1145 		fields++;
1146 	if (UNSET != bottom_offset)
1147 		fields++;
1148 
1149 	/* estimate risc mem: worst case is one write per page border +
1150 	   one write per scan line + syncs + jump (all 2 dwords).  Padding
1151 	   can cause next bpl to start close to a page border.  First DMA
1152 	   region may be smaller than PAGE_SIZE */
1153 	/* write and jump need and extra dword */
1154 	instructions  = fields * (1 + ((bpl + padding) * lines)
1155 		/ PAGE_SIZE + lines);
1156 	instructions += 5;
1157 	risc->size = instructions * 12;
1158 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1159 	if (risc->cpu == NULL)
1160 		return -ENOMEM;
1161 
1162 	/* write risc instructions */
1163 	rp = risc->cpu;
1164 	if (UNSET != top_offset)
1165 		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1166 					bpl, padding, lines, 0, true);
1167 	if (UNSET != bottom_offset)
1168 		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1169 					bpl, padding, lines, 0, UNSET == top_offset);
1170 
1171 	/* save pointer to jmp instruction address */
1172 	risc->jmp = rp;
1173 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1174 	return 0;
1175 }
1176 
/*
 * Allocate consistent DMA memory and build the RISC program for a plain
 * data buffer (no field syncs, no per-line padding).
 *
 * bpl and lines size the transfer.  If lpi is non-zero an interrupt is
 * requested every lpi lines (and no leading jump is emitted); otherwise
 * the program starts with a placeholder jump.  risc->jmp is left
 * pointing at the tail so the caller can append the chaining jump (see
 * cx23885_buf_queue()).
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int cx23885_risc_databuffer(struct pci_dev *pci,
				   struct cx23885_riscmem *risc,
				   struct scatterlist *sglist,
				   unsigned int bpl,
				   unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	/* 12 bytes per instruction: opcode + 64-bit address */
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1209 
/*
 * Allocate and build the RISC program for a VBI capture buffer.
 *
 * The body of this function was an exact duplicate of
 * cx23885_risc_buffer(): identical instruction-count estimate, identical
 * allocation, and identical field emission (top field syncs on 0, bottom
 * on 0x200).  The old "Sync to line 6" comment was stale — the code it
 * described no longer existed.  Delegate instead of duplicating.
 *
 * Returns 0 on success or -ENOMEM on allocation failure; risc->jmp is
 * left pointing at the tail for the caller's chaining jump.
 */
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	return cx23885_risc_buffer(pci, risc, sglist, top_offset,
				   bottom_offset, bpl, padding, lines);
}
1256 
1257 
/* Free the consistent DMA memory that backs a buffer's RISC program. */
void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	/* pci_free_consistent() must not run in interrupt context */
	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}
1265 
/*
 * Dump the bridge and per-port transport-stream registers to the kernel
 * log at debug level 1.  Purely diagnostic: only register reads, no
 * writes.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	/* Per-port registers: addresses vary by port, so print both */
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1317 
/*
 * Start transport-stream DMA on the given port: stop any running DMA,
 * program the SRAM channel and packet length, configure pad/clock
 * routing for the port's mode (DVB input vs 417 encoder output), enable
 * the port's TS interrupts and finally the RISC controller.
 *
 * Returns 0 on success, or -EINVAL when neither port B nor port C is in
 * a DVB-capable configuration.
 */
int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer   *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	/* NOTE(review): bitwise & rather than == — accepts any port mode
	 * with the CX23885_MPEG_DVB bit set; confirm against the mode
	 * constants in cx23885.h that this is intended. */
	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* Gate the AV core clock off while reprogramming the encoder port */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Encoder output on port B: drive the TS pins instead */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	/* Re-enable the AV core clock for the encoder */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}
1427 
1428 static int cx23885_stop_dma(struct cx23885_tsport *port)
1429 {
1430 	struct cx23885_dev *dev = port->dev;
1431 	u32 reg;
1432 
1433 	dprintk(1, "%s()\n", __func__);
1434 
1435 	/* Stop interrupts and DMA */
1436 	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1437 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1438 
1439 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1440 
1441 		reg = cx_read(PAD_CTRL);
1442 
1443 		/* Set TS1_OE */
1444 		reg = reg | 0x1;
1445 
1446 		/* clear TS1_SOP_OE and TS1_OE_HI */
1447 		reg = reg & ~0xa;
1448 		cx_write(PAD_CTRL, reg);
1449 		cx_write(port->reg_src_sel, 0);
1450 		cx_write(port->reg_gen_ctrl, 8);
1451 
1452 	}
1453 
1454 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1455 		cx23885_av_clk(dev, 0);
1456 
1457 	return 0;
1458 }
1459 
1460 /* ------------------------------------------------------------------ */
1461 
/*
 * Prepare a vb2 buffer for TS capture: check the plane can hold
 * ts_packet_size * ts_packet_count bytes, set the payload size, and
 * build the buffer's RISC DMA program from its scatter-gather table.
 *
 * Returns 0 on success or -EINVAL if the plane is too small.
 */
int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}
1478 
1479 /*
1480  * The risc program for each buffer works as follows: it starts with a simple
1481  * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1482  * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1483  * the initial JUMP).
1484  *
1485  * This is the risc program of the first buffer to be queued if the active list
1486  * is empty and it just keeps DMAing this buffer without generating any
1487  * interrupts.
1488  *
1489  * If a new buffer is added then the initial JUMP in the code for that buffer
1490  * will generate an interrupt which signals that the previous buffer has been
1491  * DMAed successfully and that it can be returned to userspace.
1492  *
1493  * It also sets the final jump of the previous buffer to the start of the new
1494  * buffer, thus chaining the new buffer into the DMA chain. This is a single
1495  * atomic u32 write, so there is no race condition.
1496  *
1497  * The end-result of all this that you only get an interrupt when a buffer
1498  * is ready, so the control flow is very easy.
1499  */
/*
 * Queue a prepared buffer onto the port's active DMA chain; see the
 * block comment preceding this function for how the RISC jump chaining
 * works.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	/* Initial JUMP target: skip our own 12-byte leading JUMP; the
	 * final JUMP loops back to the same place so the buffer repeats
	 * until another buffer is chained in. */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* Our leading JUMP raises IRQ1 to signal the previous
		 * buffer's completion ... */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		/* ... and a single atomic u32 write repoints the previous
		 * buffer's tail JUMP at us, splicing us into the chain. */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1528 
1529 /* ----------------------------------------------------------- */
1530 
1531 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1532 {
1533 	struct cx23885_dev *dev = port->dev;
1534 	struct cx23885_dmaqueue *q = &port->mpegq;
1535 	struct cx23885_buffer *buf;
1536 	unsigned long flags;
1537 
1538 	spin_lock_irqsave(&port->slock, flags);
1539 	while (!list_empty(&q->active)) {
1540 		buf = list_entry(q->active.next, struct cx23885_buffer,
1541 				 queue);
1542 		list_del(&buf->queue);
1543 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1544 		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1545 			buf, buf->vb.vb2_buf.index, reason,
1546 			(unsigned long)buf->risc.dma);
1547 	}
1548 	spin_unlock_irqrestore(&port->slock, flags);
1549 }
1550 
/* Stop DMA on the port, then error-complete all queued buffers. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1559 
/*
 * Interrupt handler for the 417 encoder (VID_B).  On error conditions
 * the DMA is stopped and diagnostics are dumped; RISCI1 wakes up the
 * buffer queue.  Status bits are acked by writing them back.
 * Returns non-zero if the interrupt was handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	/* Any of the error bits: log, stop DMA, dump state */
	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		/* Normal completion: wake up the buffer queue */
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* Ack all reported bits */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1615 
/*
 * Interrupt handler for a DVB transport-stream port.  Error bits stop
 * the DMA and dump the SRAM channel; RISCI1 wakes up the buffer queue.
 * Status bits are acked by writing them back.  Returns non-zero if the
 * interrupt was handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* Stop the DMA engine and dump state for diagnosis */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		/* Normal completion: wake up the buffer queue */
		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* Ack all reported bits */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1666 
/*
 * Top-level shared interrupt handler.  Reads all status/mask registers,
 * logs them at debug level 7, then dispatches to the CI, TS (DVB or 417
 * encoder), video, audio, IR and AV-core sub-handlers.  PCI status bits
 * are acked at the end only when something was handled.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* Nothing pending on PCI/TS1/TS2: not our interrupt */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Decode the individual PCI status bits for the debug log */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CAM/CI slot interrupts (NetUP / Altera board designs) */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* TS port interrupts, dispatched by each port's configured mode */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	/* IR: forward to the IR subdevice's own service routine */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core: mask further interrupts and defer to the work handler */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1823 
1824 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1825 				    unsigned int notification, void *arg)
1826 {
1827 	struct cx23885_dev *dev;
1828 
1829 	if (sd == NULL)
1830 		return;
1831 
1832 	dev = to_cx23885(sd->v4l2_dev);
1833 
1834 	switch (notification) {
1835 	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1836 		if (sd == dev->sd_ir)
1837 			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1838 		break;
1839 	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1840 		if (sd == dev->sd_ir)
1841 			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1842 		break;
1843 	}
1844 }
1845 
/*
 * Prepare to handle notifications from subdevices: initialise the
 * deferred-work handlers used by the IRQ paths and hook up the
 * v4l2_device notify callback.
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1853 
/* True if the MPEG encoder is configured on transport port B */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1858 
/* True if the MPEG encoder is configured on transport port C */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1863 
/* The mask represents 32 different GPIOs; the GPIOs are split across
 * multiple registers depending on the board configuration (and whether
 * the 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of its
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
1876 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1877 {
1878 	if (mask & 0x7)
1879 		cx_set(GP0_IO, mask & 0x7);
1880 
1881 	if (mask & 0x0007fff8) {
1882 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1883 			printk(KERN_ERR
1884 				"%s: Setting GPIO on encoder ports\n",
1885 				dev->name);
1886 		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1887 	}
1888 
1889 	/* TODO: 23-19 */
1890 	if (mask & 0x00f80000)
1891 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1892 }
1893 
1894 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1895 {
1896 	if (mask & 0x00000007)
1897 		cx_clear(GP0_IO, mask & 0x7);
1898 
1899 	if (mask & 0x0007fff8) {
1900 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1901 			printk(KERN_ERR
1902 				"%s: Clearing GPIO moving on encoder ports\n",
1903 				dev->name);
1904 		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1905 	}
1906 
1907 	/* TODO: 23-19 */
1908 	if (mask & 0x00f80000)
1909 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1910 }
1911 
/*
 * Read back the GPIO bits selected by mask.
 *
 * NOTE(review): if the mask selects any of bits 2..0 only the bridge
 * GPIO state is returned and MC417_RWD is never consulted — a mask
 * mixing both ranges yields a partial answer; confirm this is intended.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	/* GPIO 2..0: bridge state is read back from GP0_IO bits 10..8 */
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	/* GPIO 18..3: cx23417 host bus interface */
	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
1931 
1932 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1933 {
1934 	if ((mask & 0x00000007) && asoutput)
1935 		cx_set(GP0_IO, (mask & 0x7) << 16);
1936 	else if ((mask & 0x00000007) && !asoutput)
1937 		cx_clear(GP0_IO, (mask & 0x7) << 16);
1938 
1939 	if (mask & 0x0007fff8) {
1940 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1941 			printk(KERN_ERR
1942 				"%s: Enabling GPIO on encoder ports\n",
1943 				dev->name);
1944 	}
1945 
1946 	/* MC417_OEN is active low for output, write 1 for an input */
1947 	if ((mask & 0x0007fff8) && asoutput)
1948 		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1949 
1950 	else if ((mask & 0x0007fff8) && !asoutput)
1951 		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1952 
1953 	/* TODO: 23-19 */
1954 }
1955 
1956 static int cx23885_initdev(struct pci_dev *pci_dev,
1957 			   const struct pci_device_id *pci_id)
1958 {
1959 	struct cx23885_dev *dev;
1960 	struct v4l2_ctrl_handler *hdl;
1961 	int err;
1962 
1963 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1964 	if (NULL == dev)
1965 		return -ENOMEM;
1966 
1967 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1968 	if (err < 0)
1969 		goto fail_free;
1970 
1971 	hdl = &dev->ctrl_handler;
1972 	v4l2_ctrl_handler_init(hdl, 6);
1973 	if (hdl->error) {
1974 		err = hdl->error;
1975 		goto fail_ctrl;
1976 	}
1977 	dev->v4l2_dev.ctrl_handler = hdl;
1978 
1979 	/* Prepare to handle notifications from subdevices */
1980 	cx23885_v4l2_dev_notify_init(dev);
1981 
1982 	/* pci init */
1983 	dev->pci = pci_dev;
1984 	if (pci_enable_device(pci_dev)) {
1985 		err = -EIO;
1986 		goto fail_ctrl;
1987 	}
1988 
1989 	if (cx23885_dev_setup(dev) < 0) {
1990 		err = -EINVAL;
1991 		goto fail_ctrl;
1992 	}
1993 
1994 	/* print pci info */
1995 	dev->pci_rev = pci_dev->revision;
1996 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
1997 	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
1998 	       dev->name,
1999 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2000 	       dev->pci_lat,
2001 		(unsigned long long)pci_resource_start(pci_dev, 0));
2002 
2003 	pci_set_master(pci_dev);
2004 	err = pci_set_dma_mask(pci_dev, 0xffffffff);
2005 	if (err) {
2006 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2007 		goto fail_ctrl;
2008 	}
2009 
2010 	err = request_irq(pci_dev->irq, cx23885_irq,
2011 			  IRQF_SHARED, dev->name, dev);
2012 	if (err < 0) {
2013 		printk(KERN_ERR "%s: can't get IRQ %d\n",
2014 		       dev->name, pci_dev->irq);
2015 		goto fail_irq;
2016 	}
2017 
2018 	switch (dev->board) {
2019 	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2020 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2021 		break;
2022 	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2023 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2024 		break;
2025 	}
2026 
2027 	/*
2028 	 * The CX2388[58] IR controller can start firing interrupts when
2029 	 * enabled, so these have to take place after the cx23885_irq() handler
2030 	 * is hooked up by the call to request_irq() above.
2031 	 */
2032 	cx23885_ir_pci_int_enable(dev);
2033 	cx23885_input_init(dev);
2034 
2035 	return 0;
2036 
2037 fail_irq:
2038 	cx23885_dev_unregister(dev);
2039 fail_ctrl:
2040 	v4l2_ctrl_handler_free(hdl);
2041 	v4l2_device_unregister(&dev->v4l2_dev);
2042 fail_free:
2043 	kfree(dev);
2044 	return err;
2045 }
2046 
/*
 * PCI remove entry point: undo cx23885_initdev() in reverse order.
 * The ordering matters: IR/input teardown and cx23885_shutdown() must
 * quiesce the hardware before the IRQ handler is released, and the
 * irq must be freed before cx23885_dev_unregister() tears down state
 * the handler may reference.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2067 
2068 static struct pci_device_id cx23885_pci_tbl[] = {
2069 	{
2070 		/* CX23885 */
2071 		.vendor       = 0x14f1,
2072 		.device       = 0x8852,
2073 		.subvendor    = PCI_ANY_ID,
2074 		.subdevice    = PCI_ANY_ID,
2075 	}, {
2076 		/* CX23887 Rev 2 */
2077 		.vendor       = 0x14f1,
2078 		.device       = 0x8880,
2079 		.subvendor    = PCI_ANY_ID,
2080 		.subdevice    = PCI_ANY_ID,
2081 	}, {
2082 		/* --- end of list --- */
2083 	}
2084 };
2085 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2086 
2087 static struct pci_driver cx23885_pci_driver = {
2088 	.name     = "cx23885",
2089 	.id_table = cx23885_pci_tbl,
2090 	.probe    = cx23885_initdev,
2091 	.remove   = cx23885_finidev,
2092 	/* TODO */
2093 	.suspend  = NULL,
2094 	.resume   = NULL,
2095 };
2096 
2097 static int __init cx23885_init(void)
2098 {
2099 	printk(KERN_INFO "cx23885 driver version %s loaded\n",
2100 		CX23885_VERSION);
2101 	return pci_register_driver(&cx23885_pci_driver);
2102 }
2103 
/* Module unload: detach from the PCI core, which invokes
 * cx23885_finidev() for every bound device.
 */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2108 
2109 module_init(cx23885_init);
2110 module_exit(cx23885_fini);
2111