1 /*
2  *  Driver for the Conexant CX23885 PCIe bridge
3  *
4  *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
16  */
17 
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kmod.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/div64.h>
28 #include <linux/firmware.h>
29 
30 #include "cx23885.h"
31 #include "cimax2.h"
32 #include "altera-ci.h"
33 #include "cx23888-ir.h"
34 #include "cx23885-ir.h"
35 #include "cx23885-av.h"
36 #include "cx23885-input.h"
37 
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

/* Module option: verbosity threshold for dprintk() below (0 = silent). */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Module option: per-device board type override (UNSET = autodetect). */
static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card,  int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Emit a KERN_DEBUG message when the 'debug' parameter is >= 'level'.
 * Relies on a 'dev' pointer being in scope at the call site. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)

/* Count of cx23885 devices set up so far; used to name each instance. */
static unsigned int cx23885_devcount;

/* Sentinel sync_line value meaning "emit no RISC_RESYNC instruction". */
#define NO_SYNC_LINE (-1U)
59 
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
62  * CX23887 Assumptions
63  * 1 line = 16 bytes of CDT
64  * cmds size = 80
65  * cdt size = 16 * linesize
66  * iqsize = 64
67  * maxlines = 6
68  *
69  * Address Space:
70  * 0x00000000 0x00008fff FIFO clusters
71  * 0x00010000 0x000104af Channel Management Data Structures
72  * 0x000104b0 0x000104ff Free
73  * 0x00010500 0x000108bf 15 channels * iqsize
74  * 0x000108c0 0x000108ff Free
75  * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
76  *                       15 channels * (iqsize + (maxlines * linesize))
77  * 0x00010ea0 0x00010xxx Free
78  */
79 
/*
 * SRAM channel layout for the CX23885 bridge.  Channels with all-zero
 * cmds/ctrl/cdt/fifo fields are unused on this silicon and are erased
 * by cx23885_sram_channel_setup() (cmds_start == 0 path).
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		/* NOTE(review): CH06 shares DMA5_* registers with CH05 here
		 * and in the 887 table below - looks intentional but verify
		 * against the datasheet. */
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
190 
/*
 * SRAM channel layout for the CX23887/8 bridge.  Differs from the 885
 * table in the ctrl/cdt offsets and in having a real VBI channel
 * (SRAM_CH02).  All-zero channels are unused and get erased at setup.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		/* NOTE(review): shares DMA5_* with CH05, mirroring the 885
		 * table - verify against the datasheet. */
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
301 
302 static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303 {
304 	unsigned long flags;
305 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306 
307 	dev->pci_irqmask |= mask;
308 
309 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310 }
311 
312 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313 {
314 	unsigned long flags;
315 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316 
317 	dev->pci_irqmask |= mask;
318 	cx_set(PCI_INT_MSK, mask);
319 
320 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321 }
322 
323 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324 {
325 	u32 v;
326 	unsigned long flags;
327 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328 
329 	v = mask & dev->pci_irqmask;
330 	if (v)
331 		cx_set(PCI_INT_MSK, v);
332 
333 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334 }
335 
/* Enable every registered interrupt source; the shadow mask in
 * cx23885_irq_enable() filters out bits that were never added. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
340 
341 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342 {
343 	unsigned long flags;
344 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345 
346 	cx_clear(PCI_INT_MSK, mask);
347 
348 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349 }
350 
/* Disable every interrupt source in hardware (shadow mask untouched). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
355 
356 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357 {
358 	unsigned long flags;
359 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360 
361 	dev->pci_irqmask &= ~mask;
362 	cx_clear(PCI_INT_MSK, mask);
363 
364 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365 }
366 
367 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368 {
369 	u32 v;
370 	unsigned long flags;
371 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372 
373 	v = cx_read(PCI_INT_MSK);
374 
375 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376 	return v;
377 }
378 
379 static int cx23885_risc_decode(u32 risc)
380 {
381 	static char *instr[16] = {
382 		[RISC_SYNC    >> 28] = "sync",
383 		[RISC_WRITE   >> 28] = "write",
384 		[RISC_WRITEC  >> 28] = "writec",
385 		[RISC_READ    >> 28] = "read",
386 		[RISC_READC   >> 28] = "readc",
387 		[RISC_JUMP    >> 28] = "jump",
388 		[RISC_SKIP    >> 28] = "skip",
389 		[RISC_WRITERM >> 28] = "writerm",
390 		[RISC_WRITECM >> 28] = "writecm",
391 		[RISC_WRITECR >> 28] = "writecr",
392 	};
393 	static int incr[16] = {
394 		[RISC_WRITE   >> 28] = 3,
395 		[RISC_JUMP    >> 28] = 3,
396 		[RISC_SKIP    >> 28] = 1,
397 		[RISC_SYNC    >> 28] = 1,
398 		[RISC_WRITERM >> 28] = 3,
399 		[RISC_WRITECM >> 28] = 3,
400 		[RISC_WRITECR >> 28] = 4,
401 	};
402 	static char *bits[] = {
403 		"12",   "13",   "14",   "resync",
404 		"cnt0", "cnt1", "18",   "19",
405 		"20",   "21",   "22",   "23",
406 		"irq1", "irq2", "eol",  "sol",
407 	};
408 	int i;
409 
410 	printk("0x%08x [ %s", risc,
411 	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
412 	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
413 		if (risc & (1 << (i + 12)))
414 			printk(" %s", bits[i]);
415 	printk(" count=%d ]\n", risc & 0xfff);
416 	return incr[risc >> 28] ? incr[risc >> 28] : 1;
417 }
418 
/*
 * Complete the first buffer on the queue's active list: timestamp it,
 * assign the next sequence number and hand it back to videobuf2.
 * No-op when the active list is empty.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	/* Oldest pending buffer is at the head of the list. */
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
		buf->vb.vb2_buf.index,
		count, q->count);
	/* Remove from the queue before releasing to userspace. */
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
438 
/*
 * Program one SRAM DMA channel: build its cluster descriptor table
 * (CDT) and CMDS block in chip SRAM, then load the DMA pointer/count
 * registers.  A channel whose cmds_start is 0 is considered unused and
 * is erased instead.
 *
 * @bpl:  bytes per line; rounded up to an 8-byte multiple.
 * @risc: initial RISC program address written to the CMDS block
 *        (ignored when ch->jumponly is set).
 *
 * Returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
				      struct sram_channel *ch,
				      unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	/* NOTE(review): assumes fifo_size >= 2*bpl for every configured
	 * channel; a too-large bpl would trip this BUG_ON. */
	BUG_ON(lines < 2);

	/* Tiny jump-only RISC program at SRAM address 8 (self-jump). */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte descriptor per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* Zero the remainder of the 80-byte CMDS block. */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
510 
511 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
512 				      struct sram_channel *ch)
513 {
514 	static char *name[] = {
515 		"init risc lo",
516 		"init risc hi",
517 		"cdt base",
518 		"cdt size",
519 		"iq base",
520 		"iq size",
521 		"risc pc lo",
522 		"risc pc hi",
523 		"iq wr ptr",
524 		"iq rd ptr",
525 		"cdt current",
526 		"pci target lo",
527 		"pci target hi",
528 		"line / byte",
529 	};
530 	u32 risc;
531 	unsigned int i, j, n;
532 
533 	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
534 	       dev->name, ch->name);
535 	for (i = 0; i < ARRAY_SIZE(name); i++)
536 		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
537 		       dev->name, name[i],
538 		       cx_read(ch->cmds_start + 4*i));
539 
540 	for (i = 0; i < 4; i++) {
541 		risc = cx_read(ch->cmds_start + 4 * (i + 14));
542 		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
543 		cx23885_risc_decode(risc);
544 	}
545 	for (i = 0; i < (64 >> 2); i += n) {
546 		risc = cx_read(ch->ctrl_start + 4 * i);
547 		/* No consideration for bits 63-32 */
548 
549 		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
550 		       ch->ctrl_start + 4 * i, i);
551 		n = cx23885_risc_decode(risc);
552 		for (j = 1; j < n; j++) {
553 			risc = cx_read(ch->ctrl_start + 4 * (i + j));
554 			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
555 			       dev->name, i+j, risc, j);
556 		}
557 	}
558 
559 	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
560 	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
561 	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
562 	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
563 	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
564 	       dev->name, cx_read(ch->ptr1_reg));
565 	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
566 	       dev->name, cx_read(ch->ptr2_reg));
567 	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
568 	       dev->name, cx_read(ch->cnt1_reg));
569 	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
570 	       dev->name, cx_read(ch->cnt2_reg));
571 }
572 
573 static void cx23885_risc_disasm(struct cx23885_tsport *port,
574 				struct cx23885_riscmem *risc)
575 {
576 	struct cx23885_dev *dev = port->dev;
577 	unsigned int i, j, n;
578 
579 	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
580 	       dev->name, risc->cpu, (unsigned long)risc->dma);
581 	for (i = 0; i < (risc->size >> 2); i += n) {
582 		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
583 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
584 		for (j = 1; j < n; j++)
585 			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
586 			       dev->name, i + j, risc->cpu[i + j], j);
587 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
588 			break;
589 	}
590 }
591 
/*
 * Quiesce the bridge: stop the RISC controller, all DMA engines, the
 * IR core and the UART, then mask every interrupt source.  Leaves the
 * device in a state safe for reconfiguration or removal.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
621 
/*
 * Full hardware re-initialisation: shut everything down, ack all
 * pending interrupt status, then (re)program every SRAM DMA channel
 * for the current bridge type and set up the GPIOs.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Ack any latched interrupt status by writing all-ones. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* NOTE(review): busy-waits 100ms; msleep() would be kinder if all
	 * callers are in process context - confirm before changing. */
	mdelay(100);

	/* Program each SRAM channel with its expected line size; unused
	 * channels (cmds_start == 0) are erased by the setup helper. */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
654 
655 
656 static int cx23885_pci_quirks(struct cx23885_dev *dev)
657 {
658 	dprintk(1, "%s()\n", __func__);
659 
660 	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
661 	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
662 	 * occur on the cx23887 bridge.
663 	 */
664 	if (dev->bridge == CX23885_BRIDGE_885)
665 		cx_clear(RDR_TLCTL0, 1 << 4);
666 
667 	return 0;
668 }
669 
670 static int get_resources(struct cx23885_dev *dev)
671 {
672 	if (request_mem_region(pci_resource_start(dev->pci, 0),
673 			       pci_resource_len(dev->pci, 0),
674 			       dev->name))
675 		return 0;
676 
677 	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
678 		dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
679 
680 	return -EBUSY;
681 }
682 
/*
 * Initialise one MPEG transport-stream port (TS1 = VID_B, TS2 = VID_C):
 * common DMA/interrupt settings, queue and frontend bookkeeping, and
 * the port-specific register map.  Returns 0; BUG()s on an invalid
 * port number.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	/* 0x47 is the MPEG-TS sync byte; 188 the TS packet length. */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* Default to a single frontend attached to this tsport, keeping
	 * the -dvb.c code clean and safe; boards may raise this later.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	/* Port-specific register addresses, SRAM channel and IRQ bit. */
	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
753 
754 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
755 {
756 	switch (cx_read(RDR_CFG2) & 0xff) {
757 	case 0x00:
758 		/* cx23885 */
759 		dev->hwrevision = 0xa0;
760 		break;
761 	case 0x01:
762 		/* CX23885-12Z */
763 		dev->hwrevision = 0xa1;
764 		break;
765 	case 0x02:
766 		/* CX23885-13Z/14Z */
767 		dev->hwrevision = 0xb0;
768 		break;
769 	case 0x03:
770 		if (dev->pci->device == 0x8880) {
771 			/* CX23888-21Z/22Z */
772 			dev->hwrevision = 0xc0;
773 		} else {
774 			/* CX23885-14Z */
775 			dev->hwrevision = 0xa4;
776 		}
777 		break;
778 	case 0x04:
779 		if (dev->pci->device == 0x8880) {
780 			/* CX23888-31Z */
781 			dev->hwrevision = 0xd0;
782 		} else {
783 			/* CX23885-15Z, CX23888-31Z */
784 			dev->hwrevision = 0xa5;
785 		}
786 		break;
787 	case 0x0e:
788 		/* CX23887-15Z */
789 		dev->hwrevision = 0xc0;
790 		break;
791 	case 0x0f:
792 		/* CX23887-14Z */
793 		dev->hwrevision = 0xb1;
794 		break;
795 	default:
796 		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
797 			__func__, dev->hwrevision);
798 	}
799 	if (dev->hwrevision)
800 		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
801 			__func__, dev->hwrevision);
802 	else
803 		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
804 			__func__, dev->hwrevision);
805 }
806 
807 /* Find the first v4l2_subdev member of the group id in hw */
808 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
809 {
810 	struct v4l2_subdev *result = NULL;
811 	struct v4l2_subdev *sd;
812 
813 	spin_lock(&dev->v4l2_dev.lock);
814 	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
815 		if (sd->grp_id == hw) {
816 			result = sd;
817 			break;
818 		}
819 	}
820 	spin_unlock(&dev->v4l2_dev.lock);
821 	return result;
822 }
823 
824 static int cx23885_dev_setup(struct cx23885_dev *dev)
825 {
826 	int i;
827 
828 	spin_lock_init(&dev->pci_irqmask_lock);
829 	spin_lock_init(&dev->slock);
830 
831 	mutex_init(&dev->lock);
832 	mutex_init(&dev->gpio_lock);
833 
834 	atomic_inc(&dev->refcount);
835 
836 	dev->nr = cx23885_devcount++;
837 	sprintf(dev->name, "cx23885[%d]", dev->nr);
838 
839 	/* Configure the internal memory */
840 	if (dev->pci->device == 0x8880) {
841 		/* Could be 887 or 888, assume a default */
842 		dev->bridge = CX23885_BRIDGE_887;
843 		/* Apply a sensible clock frequency for the PCIe bridge */
844 		dev->clk_freq = 25000000;
845 		dev->sram_channels = cx23887_sram_channels;
846 	} else
847 	if (dev->pci->device == 0x8852) {
848 		dev->bridge = CX23885_BRIDGE_885;
849 		/* Apply a sensible clock frequency for the PCIe bridge */
850 		dev->clk_freq = 28000000;
851 		dev->sram_channels = cx23885_sram_channels;
852 	} else
853 		BUG();
854 
855 	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
856 		__func__, dev->bridge);
857 
858 	/* board config */
859 	dev->board = UNSET;
860 	if (card[dev->nr] < cx23885_bcount)
861 		dev->board = card[dev->nr];
862 	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
863 		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
864 		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
865 			dev->board = cx23885_subids[i].card;
866 	if (UNSET == dev->board) {
867 		dev->board = CX23885_BOARD_UNKNOWN;
868 		cx23885_card_list(dev);
869 	}
870 
871 	/* If the user specific a clk freq override, apply it */
872 	if (cx23885_boards[dev->board].clk_freq > 0)
873 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
874 
875 	dev->pci_bus  = dev->pci->bus->number;
876 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
877 	cx23885_irq_add(dev, 0x001f00);
878 
879 	/* External Master 1 Bus */
880 	dev->i2c_bus[0].nr = 0;
881 	dev->i2c_bus[0].dev = dev;
882 	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
883 	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
884 	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
885 	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
886 	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
887 	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
888 
889 	/* External Master 2 Bus */
890 	dev->i2c_bus[1].nr = 1;
891 	dev->i2c_bus[1].dev = dev;
892 	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
893 	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
894 	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
895 	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
896 	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
897 	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
898 
899 	/* Internal Master 3 Bus */
900 	dev->i2c_bus[2].nr = 2;
901 	dev->i2c_bus[2].dev = dev;
902 	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
903 	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
904 	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
905 	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
906 	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
907 	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
908 
909 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
910 		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
911 		cx23885_init_tsport(dev, &dev->ts1, 1);
912 
913 	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
914 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
915 		cx23885_init_tsport(dev, &dev->ts2, 2);
916 
917 	if (get_resources(dev) < 0) {
918 		printk(KERN_ERR "CORE %s No more PCIe resources for "
919 		       "subsystem: %04x:%04x\n",
920 		       dev->name, dev->pci->subsystem_vendor,
921 		       dev->pci->subsystem_device);
922 
923 		cx23885_devcount--;
924 		return -ENODEV;
925 	}
926 
927 	/* PCIe stuff */
928 	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
929 			     pci_resource_len(dev->pci, 0));
930 
931 	dev->bmmio = (u8 __iomem *)dev->lmmio;
932 
933 	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
934 	       dev->name, dev->pci->subsystem_vendor,
935 	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
936 	       dev->board, card[dev->nr] == dev->board ?
937 	       "insmod option" : "autodetected");
938 
939 	cx23885_pci_quirks(dev);
940 
941 	/* Assume some sensible defaults */
942 	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
943 	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
944 	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
945 	dev->radio_type = cx23885_boards[dev->board].radio_type;
946 	dev->radio_addr = cx23885_boards[dev->board].radio_addr;
947 
948 	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
949 		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
950 	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
951 		__func__, dev->radio_type, dev->radio_addr);
952 
953 	/* The cx23417 encoder has GPIO's that need to be initialised
954 	 * before DVB, so that demodulators and tuners are out of
955 	 * reset before DVB uses them.
956 	 */
957 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
958 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
959 			cx23885_mc417_init(dev);
960 
961 	/* init hardware */
962 	cx23885_reset(dev);
963 
964 	cx23885_i2c_register(&dev->i2c_bus[0]);
965 	cx23885_i2c_register(&dev->i2c_bus[1]);
966 	cx23885_i2c_register(&dev->i2c_bus[2]);
967 	cx23885_card_setup(dev);
968 	call_all(dev, core, s_power, 0);
969 	cx23885_ir_init(dev);
970 
971 	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
972 		/*
973 		 * GPIOs 9/8 are input detection bits for the breakout video
974 		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
975 		 * this gpios are pulled high. Make sure these GPIOs are marked
976 		 * as inputs.
977 		 */
978 		cx23885_gpio_enable(dev, 0x300, 0);
979 	}
980 
981 	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
982 		if (cx23885_video_register(dev) < 0) {
983 			printk(KERN_ERR "%s() Failed to register analog "
984 				"video adapters on VID_A\n", __func__);
985 		}
986 	}
987 
988 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
989 		if (cx23885_boards[dev->board].num_fds_portb)
990 			dev->ts1.num_frontends =
991 				cx23885_boards[dev->board].num_fds_portb;
992 		if (cx23885_dvb_register(&dev->ts1) < 0) {
993 			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
994 			       __func__);
995 		}
996 	} else
997 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
998 		if (cx23885_417_register(dev) < 0) {
999 			printk(KERN_ERR
1000 				"%s() Failed to register 417 on VID_B\n",
1001 			       __func__);
1002 		}
1003 	}
1004 
1005 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1006 		if (cx23885_boards[dev->board].num_fds_portc)
1007 			dev->ts2.num_frontends =
1008 				cx23885_boards[dev->board].num_fds_portc;
1009 		if (cx23885_dvb_register(&dev->ts2) < 0) {
1010 			printk(KERN_ERR
1011 				"%s() Failed to register dvb on VID_C\n",
1012 			       __func__);
1013 		}
1014 	} else
1015 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1016 		if (cx23885_417_register(dev) < 0) {
1017 			printk(KERN_ERR
1018 				"%s() Failed to register 417 on VID_C\n",
1019 			       __func__);
1020 		}
1021 	}
1022 
1023 	cx23885_dev_checkrevision(dev);
1024 
1025 	/* disable MSI for NetUP cards, otherwise CI is not working */
1026 	if (cx23885_boards[dev->board].ci_type > 0)
1027 		cx_clear(RDR_RDRCTL1, 1 << 8);
1028 
1029 	switch (dev->board) {
1030 	case CX23885_BOARD_TEVII_S470:
1031 	case CX23885_BOARD_TEVII_S471:
1032 		cx_clear(RDR_RDRCTL1, 1 << 8);
1033 		break;
1034 	}
1035 
1036 	return 0;
1037 }
1038 
/*
 * Tear down a device: release the MMIO region, then - once the last
 * reference is dropped - unregister every sub-module and unmap the
 * registers.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	/* NOTE(review): the mem region is released before the refcount
	 * check, so it goes away even while other references remain -
	 * looks deliberate (PCI remove path), but verify. */
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* Unregister i2c buses in reverse order of registration. */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1068 
/*
 * Emit a RISC DMA program fragment that transfers 'lines' scan lines of
 * 'bpl' bytes each into the memory described by the scatter-gather list.
 *
 * rp:        current write position in the RISC instruction buffer
 * sglist:    scatter-gather list describing the destination memory
 * offset:    byte offset into the buffer where the first line lands
 * sync_line: argument for the RESYNC opcode, or NO_SYNC_LINE to omit it
 * bpl:       bytes per line
 * padding:   bytes skipped between consecutive lines
 * lines:     number of scan lines to emit writes for
 * lpi:       lines per IRQ; every lpi-th line also raises IRQ1 and bumps
 *            the line counter (0 disables this)
 * jump:      when true, emit a leading JUMP whose target dword is filled
 *            in later (used for buffer chaining, see cx23885_buf_queue())
 *
 * Returns the advanced write position (just past the last dword written).
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines,  unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		/* placeholder JUMP; target dword is patched when queued */
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* advance past fully-consumed sg entries */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* on every lpi-th line raise IRQ1 and increment the counter */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1135 
/*
 * Build an interlaced-video RISC DMA program covering up to two fields.
 *
 * pci:           PCI device used for the coherent allocation
 * risc:          descriptor filled in here (cpu/dma/size/jmp)
 * sglist:        scatter-gather list of the destination buffer
 * top_offset:    byte offset of the top field, or UNSET to skip it
 * bottom_offset: byte offset of the bottom field, or UNSET to skip it
 * bpl:           bytes per line
 * padding:       bytes skipped between lines
 * lines:         lines per field
 *
 * Returns 0 on success or -ENOMEM if the coherent allocation fails.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12; /* 3 dwords per instruction */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	/* sync codes 0 / 0x200 select top vs bottom field */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1177 
1178 int cx23885_risc_databuffer(struct pci_dev *pci,
1179 				   struct cx23885_riscmem *risc,
1180 				   struct scatterlist *sglist,
1181 				   unsigned int bpl,
1182 				   unsigned int lines, unsigned int lpi)
1183 {
1184 	u32 instructions;
1185 	__le32 *rp;
1186 
1187 	/* estimate risc mem: worst case is one write per page border +
1188 	   one write per scan line + syncs + jump (all 2 dwords).  Here
1189 	   there is no padding and no sync.  First DMA region may be smaller
1190 	   than PAGE_SIZE */
1191 	/* Jump and write need an extra dword */
1192 	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
1193 	instructions += 4;
1194 
1195 	risc->size = instructions * 12;
1196 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1197 	if (risc->cpu == NULL)
1198 		return -ENOMEM;
1199 
1200 	/* write risc instructions */
1201 	rp = risc->cpu;
1202 	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1203 				bpl, 0, lines, lpi, lpi == 0);
1204 
1205 	/* save pointer to jmp instruction address */
1206 	risc->jmp = rp;
1207 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1208 	return 0;
1209 }
1210 
/*
 * Like cx23885_risc_buffer() but for VBI capture: builds a RISC DMA
 * program covering up to two VBI fields.
 *
 * Returns 0 on success or -ENOMEM if the coherent allocation fails.
 */
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12; /* 3 dwords per instruction */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);



	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1257 
1258 
1259 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1260 {
1261 	struct cx23885_riscmem *risc = &buf->risc;
1262 
1263 	BUG_ON(in_interrupt());
1264 	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
1265 }
1266 
/* Dump the TS port's interrupt/DMA related registers at debug level 1. */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1318 
1319 int cx23885_start_dma(struct cx23885_tsport *port,
1320 			     struct cx23885_dmaqueue *q,
1321 			     struct cx23885_buffer   *buf)
1322 {
1323 	struct cx23885_dev *dev = port->dev;
1324 	u32 reg;
1325 
1326 	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1327 		dev->width, dev->height, dev->field);
1328 
1329 	/* Stop the fifo and risc engine for this port */
1330 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1331 
1332 	/* setup fifo + format */
1333 	cx23885_sram_channel_setup(dev,
1334 				   &dev->sram_channels[port->sram_chno],
1335 				   port->ts_packet_size, buf->risc.dma);
1336 	if (debug > 5) {
1337 		cx23885_sram_channel_dump(dev,
1338 			&dev->sram_channels[port->sram_chno]);
1339 		cx23885_risc_disasm(port, &buf->risc);
1340 	}
1341 
1342 	/* write TS length to chip */
1343 	cx_write(port->reg_lngth, port->ts_packet_size);
1344 
1345 	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1346 		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1347 		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1348 			__func__,
1349 			cx23885_boards[dev->board].portb,
1350 			cx23885_boards[dev->board].portc);
1351 		return -EINVAL;
1352 	}
1353 
1354 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1355 		cx23885_av_clk(dev, 0);
1356 
1357 	udelay(100);
1358 
1359 	/* If the port supports SRC SELECT, configure it */
1360 	if (port->reg_src_sel)
1361 		cx_write(port->reg_src_sel, port->src_sel_val);
1362 
1363 	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1364 	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1365 	cx_write(port->reg_vld_misc, port->vld_misc_val);
1366 	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1367 	udelay(100);
1368 
1369 	/* NOTE: this is 2 (reserved) for portb, does it matter? */
1370 	/* reset counter to zero */
1371 	cx_write(port->reg_gpcnt_ctl, 3);
1372 	q->count = 0;
1373 
1374 	/* Set VIDB pins to input */
1375 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1376 		reg = cx_read(PAD_CTRL);
1377 		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1378 		cx_write(PAD_CTRL, reg);
1379 	}
1380 
1381 	/* Set VIDC pins to input */
1382 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1383 		reg = cx_read(PAD_CTRL);
1384 		reg &= ~0x4; /* Clear TS2_SOP_OE */
1385 		cx_write(PAD_CTRL, reg);
1386 	}
1387 
1388 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1389 
1390 		reg = cx_read(PAD_CTRL);
1391 		reg = reg & ~0x1;    /* Clear TS1_OE */
1392 
1393 		/* FIXME, bit 2 writing here is questionable */
1394 		/* set TS1_SOP_OE and TS1_OE_HI */
1395 		reg = reg | 0xa;
1396 		cx_write(PAD_CTRL, reg);
1397 
1398 		/* FIXME and these two registers should be documented. */
1399 		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1400 		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1401 	}
1402 
1403 	switch (dev->bridge) {
1404 	case CX23885_BRIDGE_885:
1405 	case CX23885_BRIDGE_887:
1406 	case CX23885_BRIDGE_888:
1407 		/* enable irqs */
1408 		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1409 		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
1410 		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1411 		cx23885_irq_add(dev, port->pci_irqmask);
1412 		cx23885_irq_enable_all(dev);
1413 		break;
1414 	default:
1415 		BUG();
1416 	}
1417 
1418 	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1419 
1420 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1421 		cx23885_av_clk(dev, 1);
1422 
1423 	if (debug > 4)
1424 		cx23885_tsport_reg_dump(port);
1425 
1426 	return 0;
1427 }
1428 
1429 static int cx23885_stop_dma(struct cx23885_tsport *port)
1430 {
1431 	struct cx23885_dev *dev = port->dev;
1432 	u32 reg;
1433 
1434 	dprintk(1, "%s()\n", __func__);
1435 
1436 	/* Stop interrupts and DMA */
1437 	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1438 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1439 
1440 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1441 
1442 		reg = cx_read(PAD_CTRL);
1443 
1444 		/* Set TS1_OE */
1445 		reg = reg | 0x1;
1446 
1447 		/* clear TS1_SOP_OE and TS1_OE_HI */
1448 		reg = reg & ~0xa;
1449 		cx_write(PAD_CTRL, reg);
1450 		cx_write(port->reg_src_sel, 0);
1451 		cx_write(port->reg_gen_ctrl, 8);
1452 
1453 	}
1454 
1455 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1456 		cx23885_av_clk(dev, 0);
1457 
1458 	return 0;
1459 }
1460 
1461 /* ------------------------------------------------------------------ */
1462 
1463 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1464 {
1465 	struct cx23885_dev *dev = port->dev;
1466 	int size = port->ts_packet_size * port->ts_packet_count;
1467 	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1468 
1469 	dprintk(1, "%s: %p\n", __func__, buf);
1470 	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1471 		return -EINVAL;
1472 	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1473 
1474 	cx23885_risc_databuffer(dev->pci, &buf->risc,
1475 				sgt->sgl,
1476 				port->ts_packet_size, port->ts_packet_count, 0);
1477 	return 0;
1478 }
1479 
1480 /*
1481  * The risc program for each buffer works as follows: it starts with a simple
1482  * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1483  * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1484  * the initial JUMP).
1485  *
1486  * This is the risc program of the first buffer to be queued if the active list
1487  * is empty and it just keeps DMAing this buffer without generating any
1488  * interrupts.
1489  *
1490  * If a new buffer is added then the initial JUMP in the code for that buffer
1491  * will generate an interrupt which signals that the previous buffer has been
1492  * DMAed successfully and that it can be returned to userspace.
1493  *
1494  * It also sets the final jump of the previous buffer to the start of the new
1495  * buffer, thus chaining the new buffer into the DMA chain. This is a single
1496  * atomic u32 write, so there is no race condition.
1497  *
 * The end-result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
1500  */
/*
 * Queue a prepared buffer onto the port's active DMA chain.  See the
 * block comment above for the chaining scheme.  Called with interrupts
 * enabled; takes dev->slock.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	/* initial JUMP targets self+12 (a NOP); the final JUMP loops back
	 * to the same place, so a lone buffer keeps DMAing silently */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* make our initial JUMP raise an IRQ: it signals that the
		 * previous buffer completed */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		/* chain in: the previous buffer's final JUMP now targets us
		 * (single atomic u32 store, no race with the hardware) */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1529 
1530 /* ----------------------------------------------------------- */
1531 
1532 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1533 {
1534 	struct cx23885_dev *dev = port->dev;
1535 	struct cx23885_dmaqueue *q = &port->mpegq;
1536 	struct cx23885_buffer *buf;
1537 	unsigned long flags;
1538 
1539 	spin_lock_irqsave(&port->slock, flags);
1540 	while (!list_empty(&q->active)) {
1541 		buf = list_entry(q->active.next, struct cx23885_buffer,
1542 				 queue);
1543 		list_del(&buf->queue);
1544 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1545 		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1546 			buf, buf->vb.vb2_buf.index, reason,
1547 			(unsigned long)buf->risc.dma);
1548 	}
1549 	spin_unlock_irqrestore(&port->slock, flags);
1550 }
1551 
/* Stop DMA on the port and flush all queued buffers with an error. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1560 
/*
 * Service a VID_B interrupt on boards using the 417 encoder: log and
 * recover from RISC/sync/overflow errors, wake the buffer queue on
 * RISCI1, and ack the status register.  Returns non-zero when handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		/* stop DMA, dump SRAM state and poke the encoder */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack everything we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1616 
/*
 * Service a TS port interrupt: on opcode/bad-packet/sync/overflow errors
 * stop the port's DMA and dump SRAM state; on RISCI1 wake the buffer
 * queue.  Returns non-zero when the status register was acked.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		/* stop DMA on this port and dump the SRAM channel */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* ack everything we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1667 
/*
 * Top-level shared interrupt handler: snapshots the PCI/VID_A/VID_B/
 * VID_C/audio status registers and dispatches to the per-subsystem
 * handlers (TS ports, 417 encoder, analog video, audio, IR, CI, A/V
 * core).  The accumulated 'handled' count decides whether the PCI
 * status is acked and whether IRQ_HANDLED is returned.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	/* snapshot all status/mask registers up front */
	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* nothing pending for us (shared IRQ line) */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* debug-only decode of the individual PCI status bits */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CI slot status (NetUP / Altera bridges) signalled via GPIOs */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* dispatch TS port interrupts by the board's port configuration */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	/* hand IR interrupts to the IR subdevice */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* A/V core work is deferred; mask it until the work handler runs */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1824 
1825 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1826 				    unsigned int notification, void *arg)
1827 {
1828 	struct cx23885_dev *dev;
1829 
1830 	if (sd == NULL)
1831 		return;
1832 
1833 	dev = to_cx23885(sd->v4l2_dev);
1834 
1835 	switch (notification) {
1836 	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1837 		if (sd == dev->sd_ir)
1838 			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1839 		break;
1840 	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1841 		if (sd == dev->sd_ir)
1842 			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1843 		break;
1844 	}
1845 }
1846 
/* Initialize the deferred-work handlers and install the notify hook. */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1854 
/* True when the board's port B is configured as the MPEG encoder. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1859 
/* True when the board's port C is configured as the MPEG encoder. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1864 
/* Mask represents 32 different GPIOs; GPIOs are split across multiple
 * registers depending on the board configuration, and on whether the
 * 417 encoder (with its own GPIOs) is present. Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
1877 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1878 {
1879 	if (mask & 0x7)
1880 		cx_set(GP0_IO, mask & 0x7);
1881 
1882 	if (mask & 0x0007fff8) {
1883 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1884 			printk(KERN_ERR
1885 				"%s: Setting GPIO on encoder ports\n",
1886 				dev->name);
1887 		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1888 	}
1889 
1890 	/* TODO: 23-19 */
1891 	if (mask & 0x00f80000)
1892 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1893 }
1894 
1895 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1896 {
1897 	if (mask & 0x00000007)
1898 		cx_clear(GP0_IO, mask & 0x7);
1899 
1900 	if (mask & 0x0007fff8) {
1901 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1902 			printk(KERN_ERR
1903 				"%s: Clearing GPIO moving on encoder ports\n",
1904 				dev->name);
1905 		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1906 	}
1907 
1908 	/* TODO: 23-19 */
1909 	if (mask & 0x00f80000)
1910 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1911 }
1912 
1913 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1914 {
1915 	if (mask & 0x00000007)
1916 		return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1917 
1918 	if (mask & 0x0007fff8) {
1919 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1920 			printk(KERN_ERR
1921 				"%s: Reading GPIO moving on encoder ports\n",
1922 				dev->name);
1923 		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1924 	}
1925 
1926 	/* TODO: 23-19 */
1927 	if (mask & 0x00f80000)
1928 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1929 
1930 	return 0;
1931 }
1932 
1933 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1934 {
1935 	if ((mask & 0x00000007) && asoutput)
1936 		cx_set(GP0_IO, (mask & 0x7) << 16);
1937 	else if ((mask & 0x00000007) && !asoutput)
1938 		cx_clear(GP0_IO, (mask & 0x7) << 16);
1939 
1940 	if (mask & 0x0007fff8) {
1941 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1942 			printk(KERN_ERR
1943 				"%s: Enabling GPIO on encoder ports\n",
1944 				dev->name);
1945 	}
1946 
1947 	/* MC417_OEN is active low for output, write 1 for an input */
1948 	if ((mask & 0x0007fff8) && asoutput)
1949 		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1950 
1951 	else if ((mask & 0x0007fff8) && !asoutput)
1952 		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1953 
1954 	/* TODO: 23-19 */
1955 }
1956 
1957 static int cx23885_initdev(struct pci_dev *pci_dev,
1958 			   const struct pci_device_id *pci_id)
1959 {
1960 	struct cx23885_dev *dev;
1961 	struct v4l2_ctrl_handler *hdl;
1962 	int err;
1963 
1964 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1965 	if (NULL == dev)
1966 		return -ENOMEM;
1967 
1968 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1969 	if (err < 0)
1970 		goto fail_free;
1971 
1972 	hdl = &dev->ctrl_handler;
1973 	v4l2_ctrl_handler_init(hdl, 6);
1974 	if (hdl->error) {
1975 		err = hdl->error;
1976 		goto fail_ctrl;
1977 	}
1978 	dev->v4l2_dev.ctrl_handler = hdl;
1979 
1980 	/* Prepare to handle notifications from subdevices */
1981 	cx23885_v4l2_dev_notify_init(dev);
1982 
1983 	/* pci init */
1984 	dev->pci = pci_dev;
1985 	if (pci_enable_device(pci_dev)) {
1986 		err = -EIO;
1987 		goto fail_ctrl;
1988 	}
1989 
1990 	if (cx23885_dev_setup(dev) < 0) {
1991 		err = -EINVAL;
1992 		goto fail_ctrl;
1993 	}
1994 
1995 	/* print pci info */
1996 	dev->pci_rev = pci_dev->revision;
1997 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
1998 	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1999 	       "latency: %d, mmio: 0x%llx\n", dev->name,
2000 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2001 	       dev->pci_lat,
2002 		(unsigned long long)pci_resource_start(pci_dev, 0));
2003 
2004 	pci_set_master(pci_dev);
2005 	err = pci_set_dma_mask(pci_dev, 0xffffffff);
2006 	if (err) {
2007 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2008 		goto fail_context;
2009 	}
2010 
2011 	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
2012 	if (IS_ERR(dev->alloc_ctx)) {
2013 		err = PTR_ERR(dev->alloc_ctx);
2014 		goto fail_context;
2015 	}
2016 	err = request_irq(pci_dev->irq, cx23885_irq,
2017 			  IRQF_SHARED, dev->name, dev);
2018 	if (err < 0) {
2019 		printk(KERN_ERR "%s: can't get IRQ %d\n",
2020 		       dev->name, pci_dev->irq);
2021 		goto fail_irq;
2022 	}
2023 
2024 	switch (dev->board) {
2025 	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2026 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2027 		break;
2028 	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2029 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2030 		break;
2031 	}
2032 
2033 	/*
2034 	 * The CX2388[58] IR controller can start firing interrupts when
2035 	 * enabled, so these have to take place after the cx23885_irq() handler
2036 	 * is hooked up by the call to request_irq() above.
2037 	 */
2038 	cx23885_ir_pci_int_enable(dev);
2039 	cx23885_input_init(dev);
2040 
2041 	return 0;
2042 
2043 fail_irq:
2044 	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2045 fail_context:
2046 	cx23885_dev_unregister(dev);
2047 fail_ctrl:
2048 	v4l2_ctrl_handler_free(hdl);
2049 	v4l2_device_unregister(&dev->v4l2_dev);
2050 fail_free:
2051 	kfree(dev);
2052 	return err;
2053 }
2054 
/*
 * PCI remove callback: tear down one cx23885 device.
 *
 * Releases resources in the reverse order of cx23885_initdev(). The
 * sequence matters: input/IR are quiesced and the hardware shut down
 * before the IRQ is freed, and the IRQ is freed before the device
 * state (and its MMIO mappings) is unregistered.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	/* drvdata was set to the v4l2_device during device setup */
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	/* Stop IR/input first so no more interrupts are generated */
	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2076 
2077 static struct pci_device_id cx23885_pci_tbl[] = {
2078 	{
2079 		/* CX23885 */
2080 		.vendor       = 0x14f1,
2081 		.device       = 0x8852,
2082 		.subvendor    = PCI_ANY_ID,
2083 		.subdevice    = PCI_ANY_ID,
2084 	}, {
2085 		/* CX23887 Rev 2 */
2086 		.vendor       = 0x14f1,
2087 		.device       = 0x8880,
2088 		.subvendor    = PCI_ANY_ID,
2089 		.subdevice    = PCI_ANY_ID,
2090 	}, {
2091 		/* --- end of list --- */
2092 	}
2093 };
2094 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2095 
/* PCI driver glue: binds cx23885_pci_tbl IDs to probe/remove above.
 * Power management (suspend/resume) is not implemented yet. */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2105 
2106 static int __init cx23885_init(void)
2107 {
2108 	printk(KERN_INFO "cx23885 driver version %s loaded\n",
2109 		CX23885_VERSION);
2110 	return pci_register_driver(&cx23885_pci_driver);
2111 }
2112 
/* Module exit point: unregistering the driver triggers
 * cx23885_finidev() for every bound device. */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2117 
2118 module_init(cx23885_init);
2119 module_exit(cx23885_fini);
2120