1 /*
2  *  Driver for the Conexant CX23885 PCIe bridge
3  *
4  *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
16  */
17 
18 #include "cx23885.h"
19 
20 #include <linux/init.h>
21 #include <linux/list.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/kmod.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <asm/div64.h>
30 #include <linux/firmware.h>
31 
32 #include "cimax2.h"
33 #include "altera-ci.h"
34 #include "cx23888-ir.h"
35 #include "cx23885-ir.h"
36 #include "cx23885-av.h"
37 #include "cx23885-input.h"
38 
39 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
40 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
41 MODULE_LICENSE("GPL");
42 MODULE_VERSION(CX23885_VERSION);
43 
/* Module load-time debug level; higher values produce more verbose output. */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Per-device card type override; UNSET means autodetect from PCI subsystem IDs. */
static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card,  int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Emit a debug message only when the module 'debug' level is >= 'level'. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)

/* Number of cx23885 devices probed so far; indexes dev->name and card[]. */
static unsigned int cx23885_devcount;

/* Sentinel passed as sync_line when a RISC program needs no sync instruction. */
#define NO_SYNC_LINE (-1U)
61 
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
64  * CX23887 Assumptions
65  * 1 line = 16 bytes of CDT
66  * cmds size = 80
67  * cdt size = 16 * linesize
68  * iqsize = 64
69  * maxlines = 6
70  *
71  * Address Space:
72  * 0x00000000 0x00008fff FIFO clusters
73  * 0x00010000 0x000104af Channel Management Data Structures
74  * 0x000104b0 0x000104ff Free
75  * 0x00010500 0x000108bf 15 channels * iqsize
76  * 0x000108c0 0x000108ff Free
77  * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
78  *                       15 channels * (iqsize + (maxlines * linesize))
79  * 0x00010ea0 0x00010xxx Free
80  */
81 
/* SRAM channel map for the original CX23885 bridge.  Entries whose
 * cmds_start is 0 are unused: cx23885_sram_channel_setup() erases
 * (disables) a channel when cmds_start == 0.
 * NOTE(review): SRAM_CH06 names the same DMA5_* registers as
 * SRAM_CH05 -- verify against the datasheet register map.
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
192 
/* SRAM channel map for the CX23887/888 bridge variant.  Same layout as
 * the cx23885 table but with different SRAM addresses; additionally the
 * VBI channel (SRAM_CH02) is populated here.  Entries with
 * cmds_start == 0 are unused and get erased at setup time.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
303 
304 static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
305 {
306 	unsigned long flags;
307 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
308 
309 	dev->pci_irqmask |= mask;
310 
311 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
312 }
313 
314 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
315 {
316 	unsigned long flags;
317 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
318 
319 	dev->pci_irqmask |= mask;
320 	cx_set(PCI_INT_MSK, mask);
321 
322 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
323 }
324 
325 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
326 {
327 	u32 v;
328 	unsigned long flags;
329 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
330 
331 	v = mask & dev->pci_irqmask;
332 	if (v)
333 		cx_set(PCI_INT_MSK, v);
334 
335 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
336 }
337 
/* Enable every interrupt source previously registered in pci_irqmask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
342 
343 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
344 {
345 	unsigned long flags;
346 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
347 
348 	cx_clear(PCI_INT_MSK, mask);
349 
350 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
351 }
352 
/* Disable every interrupt source in the PCI interrupt mask register. */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
357 
358 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
359 {
360 	unsigned long flags;
361 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
362 
363 	dev->pci_irqmask &= ~mask;
364 	cx_clear(PCI_INT_MSK, mask);
365 
366 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
367 }
368 
369 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
370 {
371 	u32 v;
372 	unsigned long flags;
373 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
374 
375 	v = cx_read(PCI_INT_MSK);
376 
377 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
378 	return v;
379 }
380 
381 static int cx23885_risc_decode(u32 risc)
382 {
383 	static char *instr[16] = {
384 		[RISC_SYNC    >> 28] = "sync",
385 		[RISC_WRITE   >> 28] = "write",
386 		[RISC_WRITEC  >> 28] = "writec",
387 		[RISC_READ    >> 28] = "read",
388 		[RISC_READC   >> 28] = "readc",
389 		[RISC_JUMP    >> 28] = "jump",
390 		[RISC_SKIP    >> 28] = "skip",
391 		[RISC_WRITERM >> 28] = "writerm",
392 		[RISC_WRITECM >> 28] = "writecm",
393 		[RISC_WRITECR >> 28] = "writecr",
394 	};
395 	static int incr[16] = {
396 		[RISC_WRITE   >> 28] = 3,
397 		[RISC_JUMP    >> 28] = 3,
398 		[RISC_SKIP    >> 28] = 1,
399 		[RISC_SYNC    >> 28] = 1,
400 		[RISC_WRITERM >> 28] = 3,
401 		[RISC_WRITECM >> 28] = 3,
402 		[RISC_WRITECR >> 28] = 4,
403 	};
404 	static char *bits[] = {
405 		"12",   "13",   "14",   "resync",
406 		"cnt0", "cnt1", "18",   "19",
407 		"20",   "21",   "22",   "23",
408 		"irq1", "irq2", "eol",  "sol",
409 	};
410 	int i;
411 
412 	printk(KERN_DEBUG "0x%08x [ %s", risc,
413 	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
414 	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
415 		if (risc & (1 << (i + 12)))
416 			pr_cont(" %s", bits[i]);
417 	pr_cont(" count=%d ]\n", risc & 0xfff);
418 	return incr[risc >> 28] ? incr[risc >> 28] : 1;
419 }
420 
/*
 * Complete the oldest buffer on the DMA queue: timestamp it, assign the
 * next sequence number, remove it from the active list and hand it back
 * to videobuf2 as DONE.  @count is used for debug logging only.
 * Caller is expected to hold the appropriate queue lock.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			 struct cx23885_buffer, queue);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = q->count++;
	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
		buf->vb.vb2_buf.index,
		count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
439 
/*
 * Program one SRAM/DMA channel: build its cluster descriptor table
 * (CDT, 16 bytes per line) and CMDS block, then load the channel's
 * pointer/count registers.
 *
 * @dev:  bridge device
 * @ch:   SRAM channel description (SRAM addresses and DMA registers)
 * @bpl:  bytes per line; rounded up here to an 8-byte multiple
 * @risc: bus address of the RISC program to run (ignored when
 *        ch->jumponly is set)
 *
 * A channel whose cmds_start is 0 is considered unused and is erased
 * (all four DMA registers zeroed).  Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
				      struct sram_channel *ch,
				      unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;	/* hardware supports at most 6 CDT lines */
	BUG_ON(lines < 2);

	/* Tiny RISC program at SRAM address 8: a self-referencing jump,
	 * used as the initial program for jumponly channels (the CMDS
	 * initial-PC word below points at address 8 in that case).
	 */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
511 
512 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
513 				      struct sram_channel *ch)
514 {
515 	static char *name[] = {
516 		"init risc lo",
517 		"init risc hi",
518 		"cdt base",
519 		"cdt size",
520 		"iq base",
521 		"iq size",
522 		"risc pc lo",
523 		"risc pc hi",
524 		"iq wr ptr",
525 		"iq rd ptr",
526 		"cdt current",
527 		"pci target lo",
528 		"pci target hi",
529 		"line / byte",
530 	};
531 	u32 risc;
532 	unsigned int i, j, n;
533 
534 	pr_warn("%s: %s - dma channel status dump\n",
535 		dev->name, ch->name);
536 	for (i = 0; i < ARRAY_SIZE(name); i++)
537 		pr_warn("%s:   cmds: %-15s: 0x%08x\n",
538 			dev->name, name[i],
539 			cx_read(ch->cmds_start + 4*i));
540 
541 	for (i = 0; i < 4; i++) {
542 		risc = cx_read(ch->cmds_start + 4 * (i + 14));
543 		pr_warn("%s:   risc%d: ", dev->name, i);
544 		cx23885_risc_decode(risc);
545 	}
546 	for (i = 0; i < (64 >> 2); i += n) {
547 		risc = cx_read(ch->ctrl_start + 4 * i);
548 		/* No consideration for bits 63-32 */
549 
550 		pr_warn("%s:   (0x%08x) iq %x: ", dev->name,
551 			ch->ctrl_start + 4 * i, i);
552 		n = cx23885_risc_decode(risc);
553 		for (j = 1; j < n; j++) {
554 			risc = cx_read(ch->ctrl_start + 4 * (i + j));
555 			pr_warn("%s:   iq %x: 0x%08x [ arg #%d ]\n",
556 				dev->name, i+j, risc, j);
557 		}
558 	}
559 
560 	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
561 		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
562 	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
563 		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
564 	pr_warn("%s:   ptr1_reg: 0x%08x\n",
565 		dev->name, cx_read(ch->ptr1_reg));
566 	pr_warn("%s:   ptr2_reg: 0x%08x\n",
567 		dev->name, cx_read(ch->ptr2_reg));
568 	pr_warn("%s:   cnt1_reg: 0x%08x\n",
569 		dev->name, cx_read(ch->cnt1_reg));
570 	pr_warn("%s:   cnt2_reg: 0x%08x\n",
571 		dev->name, cx_read(ch->cnt2_reg));
572 }
573 
574 static void cx23885_risc_disasm(struct cx23885_tsport *port,
575 				struct cx23885_riscmem *risc)
576 {
577 	struct cx23885_dev *dev = port->dev;
578 	unsigned int i, j, n;
579 
580 	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
581 	       dev->name, risc->cpu, (unsigned long)risc->dma);
582 	for (i = 0; i < (risc->size >> 2); i += n) {
583 		pr_info("%s:   %04d: ", dev->name, i);
584 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
585 		for (j = 1; j < n; j++)
586 			pr_info("%s:   %04d: 0x%08x [ arg #%d ]\n",
587 				dev->name, i + j, risc->cpu[i + j], j);
588 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
589 			break;
590 	}
591 }
592 
/*
 * Quiesce the whole chip: stop the RISC controller, all DMA engines
 * (video, audio), IR and UART activity, and mask every interrupt
 * source.  Used before reset and on teardown.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
622 
/*
 * Full hardware reset: quiesce the chip, clear all latched interrupt
 * status, reprogram every SRAM/DMA channel with default line sizes
 * (704/188-byte payloads for video/TS, 128 for unused channels) and
 * reapply the GPIO configuration.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Writing 1s clears the latched interrupt status bits. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* NOTE(review): mdelay() busy-waits for 100ms; if all callers can
	 * sleep, msleep() would be preferable -- confirm calling context.
	 */
	mdelay(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
655 
656 
/* Apply PCI(e) bridge errata workarounds.  Always returns 0. */
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}
670 
671 static int get_resources(struct cx23885_dev *dev)
672 {
673 	if (request_mem_region(pci_resource_start(dev->pci, 0),
674 			       pci_resource_len(dev->pci, 0),
675 			       dev->name))
676 		return 0;
677 
678 	pr_err("%s: can't get MMIO memory @ 0x%llx\n",
679 	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
680 
681 	return -EBUSY;
682 }
683 
/*
 * Initialise a transport-stream port (VID_B for portno 1, VID_C for
 * portno 2): common DMA/interrupt defaults, locks and lists, then the
 * per-port register map, SRAM channel and PCI IRQ bit.
 * Only port numbers 1 and 2 are valid; anything else is a BUG().
 * Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	/* 0x47 = MPEG-TS sync byte; 188 = TS packet length */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* Default to a single frontend attachment on this tsport,
	 * keeping the -dvb.c code clean and safe.  Boards may raise
	 * num_frontends before this point.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0; /* VID_C has no source select */
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
754 
755 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
756 {
757 	switch (cx_read(RDR_CFG2) & 0xff) {
758 	case 0x00:
759 		/* cx23885 */
760 		dev->hwrevision = 0xa0;
761 		break;
762 	case 0x01:
763 		/* CX23885-12Z */
764 		dev->hwrevision = 0xa1;
765 		break;
766 	case 0x02:
767 		/* CX23885-13Z/14Z */
768 		dev->hwrevision = 0xb0;
769 		break;
770 	case 0x03:
771 		if (dev->pci->device == 0x8880) {
772 			/* CX23888-21Z/22Z */
773 			dev->hwrevision = 0xc0;
774 		} else {
775 			/* CX23885-14Z */
776 			dev->hwrevision = 0xa4;
777 		}
778 		break;
779 	case 0x04:
780 		if (dev->pci->device == 0x8880) {
781 			/* CX23888-31Z */
782 			dev->hwrevision = 0xd0;
783 		} else {
784 			/* CX23885-15Z, CX23888-31Z */
785 			dev->hwrevision = 0xa5;
786 		}
787 		break;
788 	case 0x0e:
789 		/* CX23887-15Z */
790 		dev->hwrevision = 0xc0;
791 		break;
792 	case 0x0f:
793 		/* CX23887-14Z */
794 		dev->hwrevision = 0xb1;
795 		break;
796 	default:
797 		pr_err("%s() New hardware revision found 0x%x\n",
798 		       __func__, dev->hwrevision);
799 	}
800 	if (dev->hwrevision)
801 		pr_info("%s() Hardware revision = 0x%02x\n",
802 			__func__, dev->hwrevision);
803 	else
804 		pr_err("%s() Hardware revision unknown 0x%x\n",
805 		       __func__, dev->hwrevision);
806 }
807 
808 /* Find the first v4l2_subdev member of the group id in hw */
809 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
810 {
811 	struct v4l2_subdev *result = NULL;
812 	struct v4l2_subdev *sd;
813 
814 	spin_lock(&dev->v4l2_dev.lock);
815 	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
816 		if (sd->grp_id == hw) {
817 			result = sd;
818 			break;
819 		}
820 	}
821 	spin_unlock(&dev->v4l2_dev.lock);
822 	return result;
823 }
824 
/*
 * One-time device initialisation: identify the bridge variant (885 vs
 * 887/888), detect the board (insmod override or PCI subsystem IDs),
 * reserve and map MMIO, set up the three i2c buses and the transport
 * ports, reset the hardware, and register the sub-drivers (analog
 * video, DVB, 417 encoder, IR) according to the board's port layout.
 *
 * Returns 0 on success, or -ENODEV when the MMIO region cannot be
 * reserved (in which case the devcount increment is rolled back).
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume an 888 default */
		dev->bridge = CX23885_BRIDGE_888;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 50000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: insmod 'card' parameter wins, otherwise match
	 * PCI subsystem IDs, otherwise fall back to UNKNOWN. */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	if (dev->pci->device == 0x8852) {
		/* no DIF on cx23885, so no analog tuner support possible */
		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
	}

	/* If the board specifies a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
		dev->pci->subsystem_device == 0x7137) {
		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
		 * with an 888, and a 25Mhz crystal, instead of the
		 * usual third overtone 50Mhz. The default clock rate must
		 * be overridden so the cx25840 is properly configured
		 */
		dev->clk_freq = 25000000;
	}

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
			cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, tuner, standby);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * these GPIOs are pulled high. Make sure these GPIOs are
		 * marked as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		/* These boards also need MSI disabled (RDR_RDRCTL1 bit8). */
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}
1053 
/* Tear down a cx23885 device.  The MMIO region is released on every call,
 * but the sub-driver/bus teardown only runs when the last reference to the
 * device is dropped. */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	/* Remaining teardown waits for the final reference holder */
	if (!atomic_dec_and_test(&dev->refcount))
		return;

	/* Unregister per-port sub-drivers according to the board's
	 * port configuration in cx23885_boards[] */
	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* I2C busses come down in reverse order of registration */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1083 
/*
 * Emit the RISC DMA instructions for one field into the program buffer.
 *
 * @rp:        write cursor into the RISC instruction buffer
 * @sglist:    scatter-gather list describing the destination memory
 * @offset:    byte offset into the buffer where this field begins
 * @sync_line: line value for the leading RESYNC, or NO_SYNC_LINE to omit it
 * @bpl:       bytes per line to transfer
 * @padding:   bytes skipped between lines in the destination buffer
 * @lines:     number of lines to transfer
 * @lpi:       lines per interrupt; 0 disables mid-field IRQ/counter marks
 * @jump:      emit a leading 3-dword JUMP placeholder (its target dword is
 *             patched later, see cx23885_buf_queue())
 *
 * Returns the advanced write cursor.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines,  unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		/* Placeholder jump; address dword is filled in at queue time */
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* Skip whole SG chunks until 'offset' lands inside one */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* Every lpi-th line raises an IRQ and bumps the counter */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			/* Middle chunks are written whole, without EOL */
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			/* Final partial chunk carries the EOL marker */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1150 
1151 int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1152 			struct scatterlist *sglist, unsigned int top_offset,
1153 			unsigned int bottom_offset, unsigned int bpl,
1154 			unsigned int padding, unsigned int lines)
1155 {
1156 	u32 instructions, fields;
1157 	__le32 *rp;
1158 
1159 	fields = 0;
1160 	if (UNSET != top_offset)
1161 		fields++;
1162 	if (UNSET != bottom_offset)
1163 		fields++;
1164 
1165 	/* estimate risc mem: worst case is one write per page border +
1166 	   one write per scan line + syncs + jump (all 2 dwords).  Padding
1167 	   can cause next bpl to start close to a page border.  First DMA
1168 	   region may be smaller than PAGE_SIZE */
1169 	/* write and jump need and extra dword */
1170 	instructions  = fields * (1 + ((bpl + padding) * lines)
1171 		/ PAGE_SIZE + lines);
1172 	instructions += 5;
1173 	risc->size = instructions * 12;
1174 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1175 	if (risc->cpu == NULL)
1176 		return -ENOMEM;
1177 
1178 	/* write risc instructions */
1179 	rp = risc->cpu;
1180 	if (UNSET != top_offset)
1181 		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1182 					bpl, padding, lines, 0, true);
1183 	if (UNSET != bottom_offset)
1184 		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1185 					bpl, padding, lines, 0, UNSET == top_offset);
1186 
1187 	/* save pointer to jmp instruction address */
1188 	risc->jmp = rp;
1189 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1190 	return 0;
1191 }
1192 
1193 int cx23885_risc_databuffer(struct pci_dev *pci,
1194 				   struct cx23885_riscmem *risc,
1195 				   struct scatterlist *sglist,
1196 				   unsigned int bpl,
1197 				   unsigned int lines, unsigned int lpi)
1198 {
1199 	u32 instructions;
1200 	__le32 *rp;
1201 
1202 	/* estimate risc mem: worst case is one write per page border +
1203 	   one write per scan line + syncs + jump (all 2 dwords).  Here
1204 	   there is no padding and no sync.  First DMA region may be smaller
1205 	   than PAGE_SIZE */
1206 	/* Jump and write need an extra dword */
1207 	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
1208 	instructions += 4;
1209 
1210 	risc->size = instructions * 12;
1211 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1212 	if (risc->cpu == NULL)
1213 		return -ENOMEM;
1214 
1215 	/* write risc instructions */
1216 	rp = risc->cpu;
1217 	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1218 				bpl, 0, lines, lpi, lpi == 0);
1219 
1220 	/* save pointer to jmp instruction address */
1221 	risc->jmp = rp;
1222 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1223 	return 0;
1224 }
1225 
1226 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1227 			struct scatterlist *sglist, unsigned int top_offset,
1228 			unsigned int bottom_offset, unsigned int bpl,
1229 			unsigned int padding, unsigned int lines)
1230 {
1231 	u32 instructions, fields;
1232 	__le32 *rp;
1233 
1234 	fields = 0;
1235 	if (UNSET != top_offset)
1236 		fields++;
1237 	if (UNSET != bottom_offset)
1238 		fields++;
1239 
1240 	/* estimate risc mem: worst case is one write per page border +
1241 	   one write per scan line + syncs + jump (all 2 dwords).  Padding
1242 	   can cause next bpl to start close to a page border.  First DMA
1243 	   region may be smaller than PAGE_SIZE */
1244 	/* write and jump need and extra dword */
1245 	instructions  = fields * (1 + ((bpl + padding) * lines)
1246 		/ PAGE_SIZE + lines);
1247 	instructions += 5;
1248 	risc->size = instructions * 12;
1249 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1250 	if (risc->cpu == NULL)
1251 		return -ENOMEM;
1252 	/* write risc instructions */
1253 	rp = risc->cpu;
1254 
1255 	/* Sync to line 6, so US CC line 21 will appear in line '12'
1256 	 * in the userland vbi payload */
1257 	if (UNSET != top_offset)
1258 		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1259 					bpl, padding, lines, 0, true);
1260 
1261 	if (UNSET != bottom_offset)
1262 		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1263 					bpl, padding, lines, 0, UNSET == top_offset);
1264 
1265 
1266 
1267 	/* save pointer to jmp instruction address */
1268 	risc->jmp = rp;
1269 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1270 	return 0;
1271 }
1272 
1273 
1274 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1275 {
1276 	struct cx23885_riscmem *risc = &buf->risc;
1277 
1278 	BUG_ON(in_interrupt());
1279 	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
1280 }
1281 
/* Dump bridge-wide and per-port TS registers at debug level 1.
 * Purely diagnostic: performs only register reads, no state changes. */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	/* Per-port registers; their addresses live in the tsport struct */
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	/* src_sel does not exist on every port */
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1333 
/*
 * Program and start TS DMA on a port: stop the engine, set up the SRAM
 * channel for the buffer's RISC program, configure the port registers and
 * pad directions, then enable interrupts and the RISC controller.
 * Returns 0 on success or -EINVAL if neither port B nor C is a DVB port.
 */
int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer   *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	/* Refuse boards where neither port B nor port C is a DVB port */
	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* Gate the A/V core clock off while reconfiguring an encoder port */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	/* Per-port configuration values come from the tsport struct */
	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	/* Re-enable the A/V core clock now that the port is configured */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}
1443 
1444 static int cx23885_stop_dma(struct cx23885_tsport *port)
1445 {
1446 	struct cx23885_dev *dev = port->dev;
1447 	u32 reg;
1448 
1449 	dprintk(1, "%s()\n", __func__);
1450 
1451 	/* Stop interrupts and DMA */
1452 	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1453 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1454 
1455 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1456 
1457 		reg = cx_read(PAD_CTRL);
1458 
1459 		/* Set TS1_OE */
1460 		reg = reg | 0x1;
1461 
1462 		/* clear TS1_SOP_OE and TS1_OE_HI */
1463 		reg = reg & ~0xa;
1464 		cx_write(PAD_CTRL, reg);
1465 		cx_write(port->reg_src_sel, 0);
1466 		cx_write(port->reg_gen_ctrl, 8);
1467 
1468 	}
1469 
1470 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1471 		cx23885_av_clk(dev, 0);
1472 
1473 	return 0;
1474 }
1475 
1476 /* ------------------------------------------------------------------ */
1477 
1478 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1479 {
1480 	struct cx23885_dev *dev = port->dev;
1481 	int size = port->ts_packet_size * port->ts_packet_count;
1482 	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1483 
1484 	dprintk(1, "%s: %p\n", __func__, buf);
1485 	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1486 		return -EINVAL;
1487 	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1488 
1489 	cx23885_risc_databuffer(dev->pci, &buf->risc,
1490 				sgt->sgl,
1491 				port->ts_packet_size, port->ts_packet_count, 0);
1492 	return 0;
1493 }
1494 
1495 /*
1496  * The risc program for each buffer works as follows: it starts with a simple
1497  * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1498  * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1499  * the initial JUMP).
1500  *
1501  * This is the risc program of the first buffer to be queued if the active list
1502  * is empty and it just keeps DMAing this buffer without generating any
1503  * interrupts.
1504  *
1505  * If a new buffer is added then the initial JUMP in the code for that buffer
1506  * will generate an interrupt which signals that the previous buffer has been
1507  * DMAed successfully and that it can be returned to userspace.
1508  *
1509  * It also sets the final jump of the previous buffer to the start of the new
1510  * buffer, thus chaining the new buffer into the DMA chain. This is a single
1511  * atomic u32 write, so there is no race condition.
1512  *
 * The end-result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
1515  */
/* Queue a prepared buffer onto the port's active DMA chain; see the
 * comment block above for the RISC chaining scheme. */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	/* Point the leading JUMP and the trailing JUMP at start + 12, so
	 * an unchained buffer loops on itself without interrupting. */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* Arm the leading JUMP to raise an IRQ (signals completion
		 * of the previous buffer), then chain this buffer in with a
		 * single atomic u32 write of the tail's jump target. */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1544 
1545 /* ----------------------------------------------------------- */
1546 
1547 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1548 {
1549 	struct cx23885_dmaqueue *q = &port->mpegq;
1550 	struct cx23885_buffer *buf;
1551 	unsigned long flags;
1552 
1553 	spin_lock_irqsave(&port->slock, flags);
1554 	while (!list_empty(&q->active)) {
1555 		buf = list_entry(q->active.next, struct cx23885_buffer,
1556 				 queue);
1557 		list_del(&buf->queue);
1558 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1559 		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1560 			buf, buf->vb.vb2_buf.index, reason,
1561 			(unsigned long)buf->risc.dma);
1562 	}
1563 	spin_unlock_irqrestore(&port->slock, flags);
1564 }
1565 
/* Stop DMA on the port and fail out all queued buffers. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1572 
1573 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1574 {
1575 	/* FIXME: port1 assumption here. */
1576 	struct cx23885_tsport *port = &dev->ts1;
1577 	int count = 0;
1578 	int handled = 0;
1579 
1580 	if (status == 0)
1581 		return handled;
1582 
1583 	count = cx_read(port->reg_gpcnt);
1584 	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
1585 		status, cx_read(port->reg_ts_int_msk), count);
1586 
1587 	if ((status & VID_B_MSK_BAD_PKT)         ||
1588 		(status & VID_B_MSK_OPC_ERR)     ||
1589 		(status & VID_B_MSK_VBI_OPC_ERR) ||
1590 		(status & VID_B_MSK_SYNC)        ||
1591 		(status & VID_B_MSK_VBI_SYNC)    ||
1592 		(status & VID_B_MSK_OF)          ||
1593 		(status & VID_B_MSK_VBI_OF)) {
1594 		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
1595 		       dev->name, status);
1596 		if (status & VID_B_MSK_BAD_PKT)
1597 			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
1598 		if (status & VID_B_MSK_OPC_ERR)
1599 			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
1600 		if (status & VID_B_MSK_VBI_OPC_ERR)
1601 			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
1602 		if (status & VID_B_MSK_SYNC)
1603 			dprintk(1, "        VID_B_MSK_SYNC\n");
1604 		if (status & VID_B_MSK_VBI_SYNC)
1605 			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
1606 		if (status & VID_B_MSK_OF)
1607 			dprintk(1, "        VID_B_MSK_OF\n");
1608 		if (status & VID_B_MSK_VBI_OF)
1609 			dprintk(1, "        VID_B_MSK_VBI_OF\n");
1610 
1611 		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1612 		cx23885_sram_channel_dump(dev,
1613 			&dev->sram_channels[port->sram_chno]);
1614 		cx23885_417_check_encoder(dev);
1615 	} else if (status & VID_B_MSK_RISCI1) {
1616 		dprintk(7, "        VID_B_MSK_RISCI1\n");
1617 		spin_lock(&port->slock);
1618 		cx23885_wakeup(port, &port->mpegq, count);
1619 		spin_unlock(&port->slock);
1620 	}
1621 	if (status) {
1622 		cx_write(port->reg_ts_int_stat, status);
1623 		handled = 1;
1624 	}
1625 
1626 	return handled;
1627 }
1628 
1629 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1630 {
1631 	struct cx23885_dev *dev = port->dev;
1632 	int handled = 0;
1633 	u32 count;
1634 
1635 	if ((status & VID_BC_MSK_OPC_ERR) ||
1636 		(status & VID_BC_MSK_BAD_PKT) ||
1637 		(status & VID_BC_MSK_SYNC) ||
1638 		(status & VID_BC_MSK_OF)) {
1639 
1640 		if (status & VID_BC_MSK_OPC_ERR)
1641 			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1642 				VID_BC_MSK_OPC_ERR);
1643 
1644 		if (status & VID_BC_MSK_BAD_PKT)
1645 			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1646 				VID_BC_MSK_BAD_PKT);
1647 
1648 		if (status & VID_BC_MSK_SYNC)
1649 			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
1650 				VID_BC_MSK_SYNC);
1651 
1652 		if (status & VID_BC_MSK_OF)
1653 			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
1654 				VID_BC_MSK_OF);
1655 
1656 		pr_err("%s: mpeg risc op code error\n", dev->name);
1657 
1658 		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1659 		cx23885_sram_channel_dump(dev,
1660 			&dev->sram_channels[port->sram_chno]);
1661 
1662 	} else if (status & VID_BC_MSK_RISCI1) {
1663 
1664 		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);
1665 
1666 		spin_lock(&port->slock);
1667 		count = cx_read(port->reg_gpcnt);
1668 		cx23885_wakeup(port, &port->mpegq, count);
1669 		spin_unlock(&port->slock);
1670 
1671 	}
1672 	if (status) {
1673 		cx_write(port->reg_ts_int_stat, status);
1674 		handled = 1;
1675 	}
1676 
1677 	return handled;
1678 }
1679 
/*
 * Top-level shared interrupt handler: snapshot all status/mask registers,
 * then dispatch to the CI, TS-port, video, audio, IR and AV-core
 * sub-handlers.  Returns IRQ_HANDLED if any sub-handler claimed work.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* NOTE(review): the early-out only tests pci/ts1/ts2 status, not
	 * vida/audint status read above — presumably VID_A/audio events
	 * always assert a PCI_INT_STAT bit; confirm against the datasheet. */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* Debug-only decode of the PCI status bits; no state changes */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CI slot interrupts (NetUP ci_type 1, Altera ci_type 2) */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* Dispatch TS-port status to the DVB or 417-encoder handler,
	 * depending on how the board configures each port */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	/* IR events are serviced by the IR subdevice */
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* AV core work is deferred to process context; mask it until the
	 * worker has run */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1836 
1837 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1838 				    unsigned int notification, void *arg)
1839 {
1840 	struct cx23885_dev *dev;
1841 
1842 	if (sd == NULL)
1843 		return;
1844 
1845 	dev = to_cx23885(sd->v4l2_dev);
1846 
1847 	switch (notification) {
1848 	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1849 		if (sd == dev->sd_ir)
1850 			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1851 		break;
1852 	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1853 		if (sd == dev->sd_ir)
1854 			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1855 		break;
1856 	}
1857 }
1858 
/* Set up the deferred-work handlers and install the subdevice
 * notification callback on the v4l2 device. */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1866 
/* True when the board routes the MPEG encoder on port B. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1871 
/* True when the board routes the MPEG encoder on port C. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1876 
/* Mask represents 32 different GPIOs; GPIOs are split into multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
1889 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1890 {
1891 	if (mask & 0x7)
1892 		cx_set(GP0_IO, mask & 0x7);
1893 
1894 	if (mask & 0x0007fff8) {
1895 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1896 			pr_err("%s: Setting GPIO on encoder ports\n",
1897 				dev->name);
1898 		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1899 	}
1900 
1901 	/* TODO: 23-19 */
1902 	if (mask & 0x00f80000)
1903 		pr_info("%s: Unsupported\n", dev->name);
1904 }
1905 
1906 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1907 {
1908 	if (mask & 0x00000007)
1909 		cx_clear(GP0_IO, mask & 0x7);
1910 
1911 	if (mask & 0x0007fff8) {
1912 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1913 			pr_err("%s: Clearing GPIO moving on encoder ports\n",
1914 				dev->name);
1915 		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1916 	}
1917 
1918 	/* TODO: 23-19 */
1919 	if (mask & 0x00f80000)
1920 		pr_info("%s: Unsupported\n", dev->name);
1921 }
1922 
1923 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1924 {
1925 	if (mask & 0x00000007)
1926 		return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1927 
1928 	if (mask & 0x0007fff8) {
1929 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1930 			pr_err("%s: Reading GPIO moving on encoder ports\n",
1931 				dev->name);
1932 		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1933 	}
1934 
1935 	/* TODO: 23-19 */
1936 	if (mask & 0x00f80000)
1937 		pr_info("%s: Unsupported\n", dev->name);
1938 
1939 	return 0;
1940 }
1941 
1942 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1943 {
1944 	if ((mask & 0x00000007) && asoutput)
1945 		cx_set(GP0_IO, (mask & 0x7) << 16);
1946 	else if ((mask & 0x00000007) && !asoutput)
1947 		cx_clear(GP0_IO, (mask & 0x7) << 16);
1948 
1949 	if (mask & 0x0007fff8) {
1950 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1951 			pr_err("%s: Enabling GPIO on encoder ports\n",
1952 				dev->name);
1953 	}
1954 
1955 	/* MC417_OEN is active low for output, write 1 for an input */
1956 	if ((mask & 0x0007fff8) && asoutput)
1957 		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1958 
1959 	else if ((mask & 0x0007fff8) && !asoutput)
1960 		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1961 
1962 	/* TODO: 23-19 */
1963 }
1964 
1965 static int cx23885_initdev(struct pci_dev *pci_dev,
1966 			   const struct pci_device_id *pci_id)
1967 {
1968 	struct cx23885_dev *dev;
1969 	struct v4l2_ctrl_handler *hdl;
1970 	int err;
1971 
1972 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1973 	if (NULL == dev)
1974 		return -ENOMEM;
1975 
1976 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1977 	if (err < 0)
1978 		goto fail_free;
1979 
1980 	hdl = &dev->ctrl_handler;
1981 	v4l2_ctrl_handler_init(hdl, 6);
1982 	if (hdl->error) {
1983 		err = hdl->error;
1984 		goto fail_ctrl;
1985 	}
1986 	dev->v4l2_dev.ctrl_handler = hdl;
1987 
1988 	/* Prepare to handle notifications from subdevices */
1989 	cx23885_v4l2_dev_notify_init(dev);
1990 
1991 	/* pci init */
1992 	dev->pci = pci_dev;
1993 	if (pci_enable_device(pci_dev)) {
1994 		err = -EIO;
1995 		goto fail_ctrl;
1996 	}
1997 
1998 	if (cx23885_dev_setup(dev) < 0) {
1999 		err = -EINVAL;
2000 		goto fail_ctrl;
2001 	}
2002 
2003 	/* print pci info */
2004 	dev->pci_rev = pci_dev->revision;
2005 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
2006 	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
2007 	       dev->name,
2008 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2009 	       dev->pci_lat,
2010 		(unsigned long long)pci_resource_start(pci_dev, 0));
2011 
2012 	pci_set_master(pci_dev);
2013 	err = pci_set_dma_mask(pci_dev, 0xffffffff);
2014 	if (err) {
2015 		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2016 		goto fail_ctrl;
2017 	}
2018 
2019 	err = request_irq(pci_dev->irq, cx23885_irq,
2020 			  IRQF_SHARED, dev->name, dev);
2021 	if (err < 0) {
2022 		pr_err("%s: can't get IRQ %d\n",
2023 		       dev->name, pci_dev->irq);
2024 		goto fail_irq;
2025 	}
2026 
2027 	switch (dev->board) {
2028 	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2029 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2030 		break;
2031 	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2032 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2033 		break;
2034 	}
2035 
2036 	/*
2037 	 * The CX2388[58] IR controller can start firing interrupts when
2038 	 * enabled, so these have to take place after the cx23885_irq() handler
2039 	 * is hooked up by the call to request_irq() above.
2040 	 */
2041 	cx23885_ir_pci_int_enable(dev);
2042 	cx23885_input_init(dev);
2043 
2044 	return 0;
2045 
2046 fail_irq:
2047 	cx23885_dev_unregister(dev);
2048 fail_ctrl:
2049 	v4l2_ctrl_handler_free(hdl);
2050 	v4l2_device_unregister(&dev->v4l2_dev);
2051 fail_free:
2052 	kfree(dev);
2053 	return err;
2054 }
2055 
/*
 * PCI remove callback: tear down a cx23885 device.
 *
 * Teardown mirrors cx23885_initdev() in reverse: input/IR users of
 * the interrupt are quiesced first, the hardware is shut down, then
 * the IRQ is released before the device state is unregistered and
 * freed. The ordering here is deliberate — do not reorder.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	/* Stop IR/input consumers before shutting the core down */
	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2076 
/* PCI IDs this driver binds to; any subsystem vendor/device is
 * accepted — board identification happens later in dev_setup. */
static const struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2095 
/* PCI driver ops; power management is not implemented (TODO below). */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2105 
/* Module entry point: announce the driver version and register
 * with the PCI core; probing happens via cx23885_initdev(). */
static int __init cx23885_init(void)
{
	pr_info("cx23885 driver version %s loaded\n",
		CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}
2112 
/* Module exit point: unregister the PCI driver, which triggers
 * cx23885_finidev() for every bound device. */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2117 
2118 module_init(cx23885_init);
2119 module_exit(cx23885_fini);
2120