/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include "cx23885.h"

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>

#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"

MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card,  int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)

static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)

/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 * CX23887 Assumptions
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */

static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};

static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};

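/*
 * IRQ mask helpers: dev->pci_irqmask is a shadow of the bits this driver
 * manages in PCI_INT_MSK, protected by pci_irqmask_lock.  The _add variant
 * only updates the shadow mask; _add_enable, _enable, _disable and _remove
 * also update the hardware register.
 */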
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}

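/*
 * Decode and print a single RISC instruction word.  Returns the number of
 * dwords the instruction occupies (opcode plus arguments) so callers can
 * step through an instruction queue.
 */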
static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk(KERN_DEBUG "0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			pr_cont(" %s", bits[i]);
	pr_cont(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}

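/*
 * Complete the buffer at the head of the active queue: timestamp it,
 * assign the next sequence number and hand it back to videobuf2.
 * Called with the port spinlock held.
 */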
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
		buf->vb.vb2_buf.index,
		count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

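/*
 * Program the SRAM channel for a DMA engine: write the cluster descriptor
 * table (CDT) and the CMDS block, then point the ptr/cnt registers at them.
 * bpl is rounded up to an 8 byte boundary and the number of FIFO lines is
 * capped at 6.  A channel with cmds_start == 0 is simply erased.
 */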
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
				      struct sram_channel *ch,
				      unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}

void cx23885_sram_channel_dump(struct cx23885_dev *dev,
				      struct sram_channel *ch)
{
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	pr_warn("%s: %s - dma channel status dump\n",
		dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		pr_warn("%s:   cmds: %-15s: 0x%08x\n",
			dev->name, name[i],
			cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		pr_warn("%s:   risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		pr_warn("%s:   (0x%08x) iq %x: ", dev->name,
			ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			pr_warn("%s:   iq %x: 0x%08x [ arg #%d ]\n",
				dev->name, i+j, risc, j);
		}
	}

	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	pr_warn("%s:   ptr1_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr1_reg));
	pr_warn("%s:   ptr2_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr2_reg));
	pr_warn("%s:   cnt1_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt1_reg));
	pr_warn("%s:   cnt2_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt2_reg));
}

static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		pr_info("%s:   %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			pr_info("%s:   %04d: 0x%08x [ arg #%d ]\n",
				dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}

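/*
 * Quiesce the bridge: stop the RISC controller, all video/audio DMA, the
 * IR block and the UART, then mask every interrupt source.
 */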
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}

static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	mdelay(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}


static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}

static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci, 0),
			       pci_resource_len(dev->pci, 0),
			       dev->name))
		return 0;

	pr_err("%s: can't get MMIO memory @ 0x%llx\n",
	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));

	return -EBUSY;
}

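/*
 * Initialise one transport-stream port (port 1 = VID_B/TS1, port 2 =
 * VID_C/TS2): common DMA/interrupt defaults, queue and frontend list
 * setup, and the per-port register map.
 */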
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded to allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}

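/*
 * Derive dev->hwrevision from the low byte of RDR_CFG2 (and, for the
 * ambiguous codes, the PCI device ID), then log the result.
 */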
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		pr_err("%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		pr_info("%s() Hardware revision = 0x%02x\n",
			__func__, dev->hwrevision);
	else
		pr_err("%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}

/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}

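/*
 * One-time device bring-up: initialise locks, identify the bridge and
 * board, describe the i2c buses and TS ports, map the MMIO BAR, reset the
 * hardware and register the i2c, analog video, DVB and/or encoder
 * sub-drivers as dictated by the board profile.
 */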
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIOs that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
			cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * these GPIOs are pulled high. Make sure these GPIOs are
		 * marked as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI does not work */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}

static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}

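/*
 * Emit the RISC write instructions for one field: walk the scatterlist,
 * generating one write per line and splitting a line into several writes
 * wherever it crosses a scatter-gather chunk boundary.  An optional
 * leading jump and resync instruction can be emitted, and lpi > 0 makes
 * every lpi-th line raise IRQ1 and bump the counter.
 */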
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines,  unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}

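/*
 * Build the RISC program for an interlaced video buffer: size the
 * instruction memory for the worst case, then emit one field program per
 * requested field (top and/or bottom, distinguished by their sync lines).
 * The trailing jump is filled in later by the queueing code.
 */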
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

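/*
 * Build the RISC program for a transport-stream data buffer: no field
 * syncs and no padding, just writes covering bpl * lines bytes, with an
 * optional IRQ every lpi lines.
 */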
int cx23885_risc_databuffer(struct pci_dev *pci,
				   struct cx23885_riscmem *risc,
				   struct scatterlist *sglist,
				   unsigned int bpl,
				   unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);



	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}


void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}

static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}

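/*
 * Start transport-stream DMA for a port: stop any running transfer,
 * re-program the SRAM channel with the buffer's RISC program, configure
 * packet length and pad/clock muxing for the board type, then enable the
 * TS interrupts, the DMA engine and the RISC controller.
 */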
int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer   *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}

static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);

	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}

/* ------------------------------------------------------------------ */

int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

/* ----------------------------------------------------------- */

static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.vb2_buf.index, reason,
			(unsigned long)buf->risc.dma);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}

void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}

int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

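/*
 * Top-level PCIe interrupt handler: snapshot the per-block status
 * registers, then dispatch to the CI, TS, analog video, audio, IR and
 * AV-core sub-handlers as appropriate and acknowledge what was handled.
 */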
1662 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1663 {
1664 	struct cx23885_dev *dev = dev_id;
1665 	struct cx23885_tsport *ts1 = &dev->ts1;
1666 	struct cx23885_tsport *ts2 = &dev->ts2;
1667 	u32 pci_status, pci_mask;
1668 	u32 vida_status, vida_mask;
1669 	u32 audint_status, audint_mask;
1670 	u32 ts1_status, ts1_mask;
1671 	u32 ts2_status, ts2_mask;
1672 	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1673 	int audint_count = 0;
1674 	bool subdev_handled;
1675 
1676 	pci_status = cx_read(PCI_INT_STAT);
1677 	pci_mask = cx23885_irq_get_mask(dev);
1678 	vida_status = cx_read(VID_A_INT_STAT);
1679 	vida_mask = cx_read(VID_A_INT_MSK);
1680 	audint_status = cx_read(AUDIO_INT_INT_STAT);
1681 	audint_mask = cx_read(AUDIO_INT_INT_MSK);
1682 	ts1_status = cx_read(VID_B_INT_STAT);
1683 	ts1_mask = cx_read(VID_B_INT_MSK);
1684 	ts2_status = cx_read(VID_C_INT_STAT);
1685 	ts2_mask = cx_read(VID_C_INT_MSK);
1686 
1687 	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1688 		goto out;
1689 
1690 	vida_count = cx_read(VID_A_GPCNT);
1691 	audint_count = cx_read(AUD_INT_A_GPCNT);
1692 	ts1_count = cx_read(ts1->reg_gpcnt);
1693 	ts2_count = cx_read(ts2->reg_gpcnt);
1694 	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
1695 		pci_status, pci_mask);
1696 	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1697 		vida_status, vida_mask, vida_count);
1698 	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1699 		audint_status, audint_mask, audint_count);
1700 	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
1701 		ts1_status, ts1_mask, ts1_count);
1702 	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
1703 		ts2_status, ts2_mask, ts2_count);
1704 
1705 	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1706 			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
1707 			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
1708 			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1709 			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
1710 			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1711 
1712 		if (pci_status & PCI_MSK_RISC_RD)
1713 			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
1714 				PCI_MSK_RISC_RD);
1715 
1716 		if (pci_status & PCI_MSK_RISC_WR)
1717 			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
1718 				PCI_MSK_RISC_WR);
1719 
1720 		if (pci_status & PCI_MSK_AL_RD)
1721 			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
1722 				PCI_MSK_AL_RD);
1723 
1724 		if (pci_status & PCI_MSK_AL_WR)
1725 			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
1726 				PCI_MSK_AL_WR);
1727 
1728 		if (pci_status & PCI_MSK_APB_DMA)
1729 			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
1730 				PCI_MSK_APB_DMA);
1731 
1732 		if (pci_status & PCI_MSK_VID_C)
1733 			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
1734 				PCI_MSK_VID_C);
1735 
1736 		if (pci_status & PCI_MSK_VID_B)
1737 			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
1738 				PCI_MSK_VID_B);
1739 
1740 		if (pci_status & PCI_MSK_VID_A)
1741 			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
1742 				PCI_MSK_VID_A);
1743 
1744 		if (pci_status & PCI_MSK_AUD_INT)
1745 			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
1746 				PCI_MSK_AUD_INT);
1747 
1748 		if (pci_status & PCI_MSK_AUD_EXT)
1749 			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
1750 				PCI_MSK_AUD_EXT);
1751 
1752 		if (pci_status & PCI_MSK_GPIO0)
1753 			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
1754 				PCI_MSK_GPIO0);
1755 
1756 		if (pci_status & PCI_MSK_GPIO1)
1757 			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
1758 				PCI_MSK_GPIO1);
1759 
1760 		if (pci_status & PCI_MSK_AV_CORE)
1761 			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
1762 				PCI_MSK_AV_CORE);
1763 
1764 		if (pci_status & PCI_MSK_IR)
1765 			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
1766 				PCI_MSK_IR);
1767 	}
1768 
1769 	if (cx23885_boards[dev->board].ci_type == 1 &&
1770 			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1771 		handled += netup_ci_slot_status(dev, pci_status);
1772 
1773 	if (cx23885_boards[dev->board].ci_type == 2 &&
1774 			(pci_status & PCI_MSK_GPIO0))
1775 		handled += altera_ci_irq(dev);
1776 
1777 	if (ts1_status) {
1778 		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1779 			handled += cx23885_irq_ts(ts1, ts1_status);
1780 		else
1781 		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1782 			handled += cx23885_irq_417(dev, ts1_status);
1783 	}
1784 
1785 	if (ts2_status) {
1786 		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1787 			handled += cx23885_irq_ts(ts2, ts2_status);
1788 		else
1789 		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1790 			handled += cx23885_irq_417(dev, ts2_status);
1791 	}
1792 
1793 	if (vida_status)
1794 		handled += cx23885_video_irq(dev, vida_status);
1795 
1796 	if (audint_status)
1797 		handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1798 
1799 	if (pci_status & PCI_MSK_IR) {
1800 		subdev_handled = false;
1801 		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1802 				 pci_status, &subdev_handled);
1803 		if (subdev_handled)
1804 			handled++;
1805 	}
1806 
1807 	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1808 		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1809 		schedule_work(&dev->cx25840_work);
1810 		handled++;
1811 	}
1812 
1813 	if (handled)
1814 		cx_write(PCI_INT_STAT, pci_status);
1815 out:
1816 	return IRQ_RETVAL(handled);
1817 }
1818 
1819 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1820 				    unsigned int notification, void *arg)
1821 {
1822 	struct cx23885_dev *dev;
1823 
1824 	if (sd == NULL)
1825 		return;
1826 
1827 	dev = to_cx23885(sd->v4l2_dev);
1828 
1829 	switch (notification) {
1830 	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1831 		if (sd == dev->sd_ir)
1832 			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1833 		break;
1834 	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1835 		if (sd == dev->sd_ir)
1836 			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1837 		break;
1838 	}
1839 }
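
/*
 * Illustrative sketch (not built): the IR subdevice is assumed to reach the
 * notify handler above through the v4l2 core's v4l2_subdev_notify() helper
 * from its own interrupt service path, roughly as follows:
 *
 *	u32 events = V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
 *
 *	v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
 *
 * The call lands in cx23885_v4l2_dev_notify() via dev->v4l2_dev.notify and
 * is deferred to the ir_rx_work/ir_tx_work handlers, since it may run in
 * IRQ context.
 */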

static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}

static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}

static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}

/* The mask represents 32 different GPIOs. GPIOs are split across multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Setting GPIO on encoder ports\n",
				dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Clearing GPIO on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Reading GPIO on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);

	return 0;
}

void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Enabling GPIO on encoder ports\n",
				dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}
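
/*
 * Illustrative sketch (not built), assuming the GPIO_0/GPIO_1 bit masks from
 * cx23885.h: a board setup path would use the helpers above along these
 * lines to drive two bridge GPIOs as outputs and pulse them, with the mask
 * routed to GP0_IO (bits 2-0) or MC417_RWD (bits 18-3) as described earlier:
 *
 *	cx23885_gpio_enable(dev, GPIO_0 | GPIO_1, 1);
 *	cx23885_gpio_set(dev, GPIO_0 | GPIO_1);
 *	msleep(20);
 *	cx23885_gpio_clear(dev, GPIO_1);
 */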

static int cx23885_initdev(struct pci_dev *pci_dev,
			   const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
		dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
		dev->pci_lat,
		(unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	err = pci_set_dma_mask(pci_dev, 0xffffffff);
	if (err) {
		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		goto fail_ctrl;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		pr_err("%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_irq;
	}

	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}

static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}

static const struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
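
/*
 * Illustrative note (usage assumption, not driver code): because the ID
 * table above is exported through MODULE_DEVICE_TABLE(), udev can autoload
 * this module when a matching device such as 14f1:8852 is discovered. An
 * unlisted ID can also be bound at runtime through the standard PCI sysfs
 * interface, e.g.:
 *
 *	echo 14f1 <device-id> > /sys/bus/pci/drivers/cx23885/new_id
 */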

static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};

static int __init cx23885_init(void)
{
	pr_info("cx23885 driver version %s loaded\n",
		CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}

static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);