1 /*
2  *  Driver for the Conexant CX23885 PCIe bridge
3  *
4  *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
16  */
17 
18 #include "cx23885.h"
19 
20 #include <linux/init.h>
21 #include <linux/list.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/kmod.h>
25 #include <linux/kernel.h>
26 #include <linux/pci.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <asm/div64.h>
31 #include <linux/firmware.h>
32 
33 #include "cimax2.h"
34 #include "altera-ci.h"
35 #include "cx23888-ir.h"
36 #include "cx23885-ir.h"
37 #include "cx23885-av.h"
38 #include "cx23885-input.h"
39 
/* Standard module metadata, visible via modinfo */
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);
44 
45 /*
46  * Some platforms have been found to require periodic resetting of the DMA
47  * engine. Ryzen and XEON platforms are known to be affected. The symptom
48  * encountered is "mpeg risc op code error". Only Ryzen platforms employ
49  * this workaround if the option equals 1. The workaround can be explicitly
50  * disabled for all platforms by setting to 0, the workaround can be forced
51  * on for any platform by setting to 2.
52  */
53 static unsigned int dma_reset_workaround = 1;
54 module_param(dma_reset_workaround, int, 0644);
55 MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
56 
/* Module option: verbosity threshold for dprintk() messages (0 = quiet). */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
60 
/* Module option: per-device board type override (UNSET = autodetect). */
static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card,  int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
64 
/*
 * Emit a KERN_DEBUG message when the "debug" module option is at least
 * "level".  The message is prefixed with the calling function's name.
 */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)
70 
/* Count of probed cx23885 devices; used to number each dev->name/dev->nr. */
static unsigned int cx23885_devcount;

/* Sentinel "line" value for RISC programs that need no sync instruction. */
#define NO_SYNC_LINE (-1U)
74 
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
77  * CX23887 Assumptions
78  * 1 line = 16 bytes of CDT
79  * cmds size = 80
80  * cdt size = 16 * linesize
81  * iqsize = 64
82  * maxlines = 6
83  *
84  * Address Space:
85  * 0x00000000 0x00008fff FIFO clusters
86  * 0x00010000 0x000104af Channel Management Data Structures
87  * 0x000104b0 0x000104ff Free
88  * 0x00010500 0x000108bf 15 channels * iqsize
89  * 0x000108c0 0x000108ff Free
90  * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
91  *                       15 channels * (iqsize + (maxlines * linesize))
92  * 0x00010ea0 0x00010xxx Free
93  */
94 
/*
 * SRAM channel layout for the CX23885 bridge.  Entries with
 * cmds_start == 0 are unused on this bridge; cx23885_sram_channel_setup()
 * only clears their DMA pointer/count registers.
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	/* NOTE(review): CH06 uses the DMA5 register set, same as CH05 —
	 * looks intentional since CH05 is unused here, but confirm against
	 * the datasheet. */
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
205 
/*
 * SRAM channel layout for the CX23887/CX23888 bridge.  Compared to the
 * CX23885 table the command/CDT addresses differ and CH02 (VBI for VID A)
 * is populated.  Entries with cmds_start == 0 are unused.
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	/* NOTE(review): CH06 shares the DMA5 register set with CH05, same
	 * as in the cx23885 table — confirm against the datasheet. */
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
316 
/*
 * Register @mask bits in the cached PCI interrupt mask without touching
 * the hardware enable register.  dev->pci_irqmask_lock serialises all
 * updates to dev->pci_irqmask and PCI_INT_MSK.
 */
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
326 
/*
 * Register @mask bits in the cached PCI interrupt mask and enable them
 * in the PCI_INT_MSK hardware register in one locked operation.
 */
void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
337 
/*
 * Enable in hardware only those bits of @mask that were previously
 * registered via cx23885_irq_add*(); unregistered bits are ignored.
 */
void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
350 
/* Enable every interrupt source registered in dev->pci_irqmask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
355 
/*
 * Disable @mask bits in the PCI_INT_MSK hardware register.  The bits
 * stay registered in dev->pci_irqmask and can be re-enabled later via
 * cx23885_irq_enable().
 */
void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
365 
/* Disable every PCI interrupt source in hardware (mask stays cached). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
370 
/*
 * Unregister @mask bits from the cached PCI interrupt mask and disable
 * them in hardware in one locked operation (inverse of
 * cx23885_irq_add_enable()).
 */
void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
381 
/* Read the current hardware interrupt mask (PCI_INT_MSK) under the lock. */
static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}
393 
394 static int cx23885_risc_decode(u32 risc)
395 {
396 	static char *instr[16] = {
397 		[RISC_SYNC    >> 28] = "sync",
398 		[RISC_WRITE   >> 28] = "write",
399 		[RISC_WRITEC  >> 28] = "writec",
400 		[RISC_READ    >> 28] = "read",
401 		[RISC_READC   >> 28] = "readc",
402 		[RISC_JUMP    >> 28] = "jump",
403 		[RISC_SKIP    >> 28] = "skip",
404 		[RISC_WRITERM >> 28] = "writerm",
405 		[RISC_WRITECM >> 28] = "writecm",
406 		[RISC_WRITECR >> 28] = "writecr",
407 	};
408 	static int incr[16] = {
409 		[RISC_WRITE   >> 28] = 3,
410 		[RISC_JUMP    >> 28] = 3,
411 		[RISC_SKIP    >> 28] = 1,
412 		[RISC_SYNC    >> 28] = 1,
413 		[RISC_WRITERM >> 28] = 3,
414 		[RISC_WRITECM >> 28] = 3,
415 		[RISC_WRITECR >> 28] = 4,
416 	};
417 	static char *bits[] = {
418 		"12",   "13",   "14",   "resync",
419 		"cnt0", "cnt1", "18",   "19",
420 		"20",   "21",   "22",   "23",
421 		"irq1", "irq2", "eol",  "sol",
422 	};
423 	int i;
424 
425 	printk(KERN_DEBUG "0x%08x [ %s", risc,
426 	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
427 	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
428 		if (risc & (1 << (i + 12)))
429 			pr_cont(" %s", bits[i]);
430 	pr_cont(" count=%d ]\n", risc & 0xfff);
431 	return incr[risc >> 28] ? incr[risc >> 28] : 1;
432 }
433 
/*
 * Complete finished buffers at the head of a transport port's DMA queue.
 *
 * @port:  transport port owning the queue
 * @q:     DMA queue whose head buffers the RISC engine has filled
 * @count: current value of the hardware general-purpose counter
 *         (16 bits wide, hence the modulo-65536 arithmetic below)
 *
 * Pops buffers from the active list, timestamps them and returns them
 * to videobuf2 as DONE until the driver's running count catches up with
 * the hardware counter.  At most five buffers are serviced per call.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;
	int count_delta;
	int max_buf_done = 5; /* service maximum five buffers */

	do {
		if (list_empty(&q->active))
			return;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, queue);

		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.sequence = q->count++;
		/* Log loudly (level 1) when the driver count disagrees with
		 * the hardware counter, quietly (level 7) when they match. */
		if (count != (q->count % 65536)) {
			dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		} else {
			dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		}
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		max_buf_done--;
		/* count register is 16 bits so apply modulo appropriately */
		count_delta = ((int)count - (int)(q->count % 65536));
	} while ((count_delta > 0) && (max_buf_done > 0));
}
463 
464 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
465 				      struct sram_channel *ch,
466 				      unsigned int bpl, u32 risc)
467 {
468 	unsigned int i, lines;
469 	u32 cdt;
470 
471 	if (ch->cmds_start == 0) {
472 		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
473 			ch->name);
474 		cx_write(ch->ptr1_reg, 0);
475 		cx_write(ch->ptr2_reg, 0);
476 		cx_write(ch->cnt2_reg, 0);
477 		cx_write(ch->cnt1_reg, 0);
478 		return 0;
479 	} else {
480 		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
481 			ch->name);
482 	}
483 
484 	bpl   = (bpl + 7) & ~7; /* alignment */
485 	cdt   = ch->cdt;
486 	lines = ch->fifo_size / bpl;
487 	if (lines > 6)
488 		lines = 6;
489 	BUG_ON(lines < 2);
490 
491 	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
492 	cx_write(8 + 4, 12);
493 	cx_write(8 + 8, 0);
494 
495 	/* write CDT */
496 	for (i = 0; i < lines; i++) {
497 		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
498 			ch->fifo_start + bpl*i);
499 		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
500 		cx_write(cdt + 16*i +  4, 0);
501 		cx_write(cdt + 16*i +  8, 0);
502 		cx_write(cdt + 16*i + 12, 0);
503 	}
504 
505 	/* write CMDS */
506 	if (ch->jumponly)
507 		cx_write(ch->cmds_start + 0, 8);
508 	else
509 		cx_write(ch->cmds_start + 0, risc);
510 	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
511 	cx_write(ch->cmds_start +  8, cdt);
512 	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
513 	cx_write(ch->cmds_start + 16, ch->ctrl_start);
514 	if (ch->jumponly)
515 		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
516 	else
517 		cx_write(ch->cmds_start + 20, 64 >> 2);
518 	for (i = 24; i < 80; i += 4)
519 		cx_write(ch->cmds_start + i, 0);
520 
521 	/* fill registers */
522 	cx_write(ch->ptr1_reg, ch->fifo_start);
523 	cx_write(ch->ptr2_reg, cdt);
524 	cx_write(ch->cnt2_reg, (lines*16) >> 3);
525 	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
526 
527 	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
528 		dev->bridge,
529 		ch->name,
530 		bpl,
531 		lines);
532 
533 	return 0;
534 }
535 
536 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
537 				      struct sram_channel *ch)
538 {
539 	static char *name[] = {
540 		"init risc lo",
541 		"init risc hi",
542 		"cdt base",
543 		"cdt size",
544 		"iq base",
545 		"iq size",
546 		"risc pc lo",
547 		"risc pc hi",
548 		"iq wr ptr",
549 		"iq rd ptr",
550 		"cdt current",
551 		"pci target lo",
552 		"pci target hi",
553 		"line / byte",
554 	};
555 	u32 risc;
556 	unsigned int i, j, n;
557 
558 	pr_warn("%s: %s - dma channel status dump\n",
559 		dev->name, ch->name);
560 	for (i = 0; i < ARRAY_SIZE(name); i++)
561 		pr_warn("%s:   cmds: %-15s: 0x%08x\n",
562 			dev->name, name[i],
563 			cx_read(ch->cmds_start + 4*i));
564 
565 	for (i = 0; i < 4; i++) {
566 		risc = cx_read(ch->cmds_start + 4 * (i + 14));
567 		pr_warn("%s:   risc%d: ", dev->name, i);
568 		cx23885_risc_decode(risc);
569 	}
570 	for (i = 0; i < (64 >> 2); i += n) {
571 		risc = cx_read(ch->ctrl_start + 4 * i);
572 		/* No consideration for bits 63-32 */
573 
574 		pr_warn("%s:   (0x%08x) iq %x: ", dev->name,
575 			ch->ctrl_start + 4 * i, i);
576 		n = cx23885_risc_decode(risc);
577 		for (j = 1; j < n; j++) {
578 			risc = cx_read(ch->ctrl_start + 4 * (i + j));
579 			pr_warn("%s:   iq %x: 0x%08x [ arg #%d ]\n",
580 				dev->name, i+j, risc, j);
581 		}
582 	}
583 
584 	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
585 		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
586 	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
587 		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
588 	pr_warn("%s:   ptr1_reg: 0x%08x\n",
589 		dev->name, cx_read(ch->ptr1_reg));
590 	pr_warn("%s:   ptr2_reg: 0x%08x\n",
591 		dev->name, cx_read(ch->ptr2_reg));
592 	pr_warn("%s:   cnt1_reg: 0x%08x\n",
593 		dev->name, cx_read(ch->cnt1_reg));
594 	pr_warn("%s:   cnt2_reg: 0x%08x\n",
595 		dev->name, cx_read(ch->cnt2_reg));
596 }
597 
598 static void cx23885_risc_disasm(struct cx23885_tsport *port,
599 				struct cx23885_riscmem *risc)
600 {
601 	struct cx23885_dev *dev = port->dev;
602 	unsigned int i, j, n;
603 
604 	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
605 	       dev->name, risc->cpu, (unsigned long)risc->dma);
606 	for (i = 0; i < (risc->size >> 2); i += n) {
607 		pr_info("%s:   %04d: ", dev->name, i);
608 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
609 		for (j = 1; j < n; j++)
610 			pr_info("%s:   %04d: 0x%08x [ arg #%d ]\n",
611 				dev->name, i + j, risc->cpu[i + j], j);
612 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
613 			break;
614 	}
615 }
616 
/*
 * Workaround for the "mpeg risc op code error" DMA problem (see the
 * dma_reset_workaround module option).  No-op unless the probe/quirk
 * code flagged this device via dev->need_dma_reset.
 *
 * If outstanding transaction requests are pending (both TC_REQ and
 * TC_REQ_SET nonzero), write the values back — which appears to
 * acknowledge/clear the pending requests (NOTE(review): confirm with
 * the bridge datasheet) — and drain the four video/VBI DMA status
 * registers with dummy reads.
 */
static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
	uint32_t reg1_val, reg2_val;

	if (!dev->need_dma_reset)
		return;

	reg1_val = cx_read(TC_REQ); /* read-only */
	reg2_val = cx_read(TC_REQ_SET);

	if (reg1_val && reg2_val) {
		cx_write(TC_REQ, reg1_val);
		cx_write(TC_REQ_SET, reg2_val);
		cx_read(VID_B_DMA);
		cx_read(VBI_B_DMA);
		cx_read(VID_C_DMA);
		cx_read(VBI_C_DMA);

		dev_info(&dev->pci->dev,
			"dma in progress detected 0x%08x 0x%08x, clearing\n",
			reg1_val, reg2_val);
	}
}
640 
/*
 * Quiesce the bridge: stop the RISC controller, all DMA engines, IR
 * and the UART, then mask every interrupt source.  Used during reset
 * and teardown.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
670 
/*
 * Bring the bridge to a known state: shut everything down, clear all
 * pending interrupt status, then reprogram every SRAM channel with its
 * default line size (720*4 for analog video, 188*4 for the transport
 * ports, 128 for unused channels) and reapply the GPIO setup.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Ack any latched interrupt status */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	msleep(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
}
710 
711 
/*
 * Apply bridge-specific hardware quirks.  Always returns 0.
 */
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	return 0;
}
727 
728 static int get_resources(struct cx23885_dev *dev)
729 {
730 	if (request_mem_region(pci_resource_start(dev->pci, 0),
731 			       pci_resource_len(dev->pci, 0),
732 			       dev->name))
733 		return 0;
734 
735 	pr_err("%s: can't get MMIO memory @ 0x%llx\n",
736 	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
737 
738 	return -EBUSY;
739 }
740 
/*
 * Initialise a transport-stream port structure (software state plus the
 * per-port register map).
 *
 * @dev:    owning bridge device
 * @port:   the tsport to initialise (dev->ts1 or dev->ts2)
 * @portno: 1 = VID_B / SRAM_CH03, 2 = VID_C / SRAM_CH06; any other
 *          value is a driver bug and hits BUG()
 *
 * Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* By default allow a single frontend attachment to this tsport,
	 * keeping the -dvb.c code clean and safe.  Board setup may have
	 * raised num_frontends already, in which case it is preserved.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
811 
812 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
813 {
814 	switch (cx_read(RDR_CFG2) & 0xff) {
815 	case 0x00:
816 		/* cx23885 */
817 		dev->hwrevision = 0xa0;
818 		break;
819 	case 0x01:
820 		/* CX23885-12Z */
821 		dev->hwrevision = 0xa1;
822 		break;
823 	case 0x02:
824 		/* CX23885-13Z/14Z */
825 		dev->hwrevision = 0xb0;
826 		break;
827 	case 0x03:
828 		if (dev->pci->device == 0x8880) {
829 			/* CX23888-21Z/22Z */
830 			dev->hwrevision = 0xc0;
831 		} else {
832 			/* CX23885-14Z */
833 			dev->hwrevision = 0xa4;
834 		}
835 		break;
836 	case 0x04:
837 		if (dev->pci->device == 0x8880) {
838 			/* CX23888-31Z */
839 			dev->hwrevision = 0xd0;
840 		} else {
841 			/* CX23885-15Z, CX23888-31Z */
842 			dev->hwrevision = 0xa5;
843 		}
844 		break;
845 	case 0x0e:
846 		/* CX23887-15Z */
847 		dev->hwrevision = 0xc0;
848 		break;
849 	case 0x0f:
850 		/* CX23887-14Z */
851 		dev->hwrevision = 0xb1;
852 		break;
853 	default:
854 		pr_err("%s() New hardware revision found 0x%x\n",
855 		       __func__, dev->hwrevision);
856 	}
857 	if (dev->hwrevision)
858 		pr_info("%s() Hardware revision = 0x%02x\n",
859 			__func__, dev->hwrevision);
860 	else
861 		pr_err("%s() Hardware revision unknown 0x%x\n",
862 		       __func__, dev->hwrevision);
863 }
864 
865 /* Find the first v4l2_subdev member of the group id in hw */
866 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
867 {
868 	struct v4l2_subdev *result = NULL;
869 	struct v4l2_subdev *sd;
870 
871 	spin_lock(&dev->v4l2_dev.lock);
872 	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
873 		if (sd->grp_id == hw) {
874 			result = sd;
875 			break;
876 		}
877 	}
878 	spin_unlock(&dev->v4l2_dev.lock);
879 	return result;
880 }
881 
882 static int cx23885_dev_setup(struct cx23885_dev *dev)
883 {
884 	int i;
885 
886 	spin_lock_init(&dev->pci_irqmask_lock);
887 	spin_lock_init(&dev->slock);
888 
889 	mutex_init(&dev->lock);
890 	mutex_init(&dev->gpio_lock);
891 
892 	atomic_inc(&dev->refcount);
893 
894 	dev->nr = cx23885_devcount++;
895 	sprintf(dev->name, "cx23885[%d]", dev->nr);
896 
897 	/* Configure the internal memory */
898 	if (dev->pci->device == 0x8880) {
899 		/* Could be 887 or 888, assume an 888 default */
900 		dev->bridge = CX23885_BRIDGE_888;
901 		/* Apply a sensible clock frequency for the PCIe bridge */
902 		dev->clk_freq = 50000000;
903 		dev->sram_channels = cx23887_sram_channels;
904 	} else
905 	if (dev->pci->device == 0x8852) {
906 		dev->bridge = CX23885_BRIDGE_885;
907 		/* Apply a sensible clock frequency for the PCIe bridge */
908 		dev->clk_freq = 28000000;
909 		dev->sram_channels = cx23885_sram_channels;
910 	} else
911 		BUG();
912 
913 	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
914 		__func__, dev->bridge);
915 
916 	/* board config */
917 	dev->board = UNSET;
918 	if (card[dev->nr] < cx23885_bcount)
919 		dev->board = card[dev->nr];
920 	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
921 		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
922 		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
923 			dev->board = cx23885_subids[i].card;
924 	if (UNSET == dev->board) {
925 		dev->board = CX23885_BOARD_UNKNOWN;
926 		cx23885_card_list(dev);
927 	}
928 
929 	if (dev->pci->device == 0x8852) {
930 		/* no DIF on cx23885, so no analog tuner support possible */
931 		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
932 			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
933 		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
934 			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
935 	}
936 
937 	/* If the user specific a clk freq override, apply it */
938 	if (cx23885_boards[dev->board].clk_freq > 0)
939 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
940 
941 	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
942 		dev->pci->subsystem_device == 0x7137) {
943 		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
944 		 * with an 888, and a 25Mhz crystal, instead of the
945 		 * usual third overtone 50Mhz. The default clock rate must
946 		 * be overridden so the cx25840 is properly configured
947 		 */
948 		dev->clk_freq = 25000000;
949 	}
950 
951 	dev->pci_bus  = dev->pci->bus->number;
952 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
953 	cx23885_irq_add(dev, 0x001f00);
954 
955 	/* External Master 1 Bus */
956 	dev->i2c_bus[0].nr = 0;
957 	dev->i2c_bus[0].dev = dev;
958 	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
959 	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
960 	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
961 	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
962 	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
963 	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
964 
965 	/* External Master 2 Bus */
966 	dev->i2c_bus[1].nr = 1;
967 	dev->i2c_bus[1].dev = dev;
968 	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
969 	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
970 	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
971 	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
972 	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
973 	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
974 
975 	/* Internal Master 3 Bus */
976 	dev->i2c_bus[2].nr = 2;
977 	dev->i2c_bus[2].dev = dev;
978 	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
979 	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
980 	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
981 	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
982 	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
983 	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
984 
985 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
986 		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
987 		cx23885_init_tsport(dev, &dev->ts1, 1);
988 
989 	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
990 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
991 		cx23885_init_tsport(dev, &dev->ts2, 2);
992 
993 	if (get_resources(dev) < 0) {
994 		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
995 		       dev->name, dev->pci->subsystem_vendor,
996 		       dev->pci->subsystem_device);
997 
998 		cx23885_devcount--;
999 		return -ENODEV;
1000 	}
1001 
1002 	/* PCIe stuff */
1003 	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
1004 			     pci_resource_len(dev->pci, 0));
1005 
1006 	dev->bmmio = (u8 __iomem *)dev->lmmio;
1007 
1008 	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
1009 		dev->name, dev->pci->subsystem_vendor,
1010 		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
1011 		dev->board, card[dev->nr] == dev->board ?
1012 		"insmod option" : "autodetected");
1013 
1014 	cx23885_pci_quirks(dev);
1015 
1016 	/* Assume some sensible defaults */
1017 	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
1018 	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
1019 	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
1020 	dev->radio_type = cx23885_boards[dev->board].radio_type;
1021 	dev->radio_addr = cx23885_boards[dev->board].radio_addr;
1022 
1023 	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
1024 		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
1025 	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
1026 		__func__, dev->radio_type, dev->radio_addr);
1027 
1028 	/* The cx23417 encoder has GPIO's that need to be initialised
1029 	 * before DVB, so that demodulators and tuners are out of
1030 	 * reset before DVB uses them.
1031 	 */
1032 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
1033 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
1034 			cx23885_mc417_init(dev);
1035 
1036 	/* init hardware */
1037 	cx23885_reset(dev);
1038 
1039 	cx23885_i2c_register(&dev->i2c_bus[0]);
1040 	cx23885_i2c_register(&dev->i2c_bus[1]);
1041 	cx23885_i2c_register(&dev->i2c_bus[2]);
1042 	cx23885_card_setup(dev);
1043 	call_all(dev, tuner, standby);
1044 	cx23885_ir_init(dev);
1045 
1046 	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
1047 		/*
1048 		 * GPIOs 9/8 are input detection bits for the breakout video
1049 		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
1050 		 * this gpios are pulled high. Make sure these GPIOs are marked
1051 		 * as inputs.
1052 		 */
1053 		cx23885_gpio_enable(dev, 0x300, 0);
1054 	}
1055 
1056 	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1057 		if (cx23885_video_register(dev) < 0) {
1058 			pr_err("%s() Failed to register analog video adapters on VID_A\n",
1059 			       __func__);
1060 		}
1061 	}
1062 
1063 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1064 		if (cx23885_boards[dev->board].num_fds_portb)
1065 			dev->ts1.num_frontends =
1066 				cx23885_boards[dev->board].num_fds_portb;
1067 		if (cx23885_dvb_register(&dev->ts1) < 0) {
1068 			pr_err("%s() Failed to register dvb adapters on VID_B\n",
1069 			       __func__);
1070 		}
1071 	} else
1072 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1073 		if (cx23885_417_register(dev) < 0) {
1074 			pr_err("%s() Failed to register 417 on VID_B\n",
1075 			       __func__);
1076 		}
1077 	}
1078 
1079 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1080 		if (cx23885_boards[dev->board].num_fds_portc)
1081 			dev->ts2.num_frontends =
1082 				cx23885_boards[dev->board].num_fds_portc;
1083 		if (cx23885_dvb_register(&dev->ts2) < 0) {
1084 			pr_err("%s() Failed to register dvb on VID_C\n",
1085 			       __func__);
1086 		}
1087 	} else
1088 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1089 		if (cx23885_417_register(dev) < 0) {
1090 			pr_err("%s() Failed to register 417 on VID_C\n",
1091 			       __func__);
1092 		}
1093 	}
1094 
1095 	cx23885_dev_checkrevision(dev);
1096 
1097 	/* disable MSI for NetUP cards, otherwise CI is not working */
1098 	if (cx23885_boards[dev->board].ci_type > 0)
1099 		cx_clear(RDR_RDRCTL1, 1 << 8);
1100 
1101 	switch (dev->board) {
1102 	case CX23885_BOARD_TEVII_S470:
1103 	case CX23885_BOARD_TEVII_S471:
1104 		cx_clear(RDR_RDRCTL1, 1 << 8);
1105 		break;
1106 	}
1107 
1108 	return 0;
1109 }
1110 
/*
 * Tear down a cx23885 device: release the PCI MMIO region and, once the
 * last reference is dropped, unregister the per-port sub-drivers in the
 * reverse order of their registration and unmap the register window.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	/* Defer the remaining teardown until the last user is gone */
	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* i2c buses down in reverse order of registration */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1140 
/*
 * Emit a RISC write program that DMAs 'lines' scan lines of 'bpl' bytes
 * each into the scatter-gather list 'sglist', starting 'offset' bytes in
 * and skipping 'padding' bytes between lines.  A line that straddles an
 * sg chunk boundary is split across multiple write instructions.  Each
 * instruction here occupies three dwords (opcode, addr bits 31-0,
 * addr bits 63-32).
 *
 * @sync_line: line to RESYNC on, or NO_SYNC_LINE to emit no sync.
 * @lpi:       if non-zero, raise IRQ1 and bump the counter every lpi lines.
 * @jump:      if true, emit a leading 3-dword JUMP placeholder; the
 *             buffer-queueing code later patches its target to chain
 *             buffers together.
 *
 * Returns a pointer just past the last instruction written.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines,  unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* advance past fully-consumed sg chunks to reach 'offset' */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* start-of-line flags; optionally irq+count every lpi lines */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			/* whole middle chunks, no EOL yet */
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			/* final partial chunk carries the EOL */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1207 
/*
 * Build an interlaced-video RISC program: one field program per supplied
 * offset (top and/or bottom; UNSET skips a field), with the bottom field
 * resyncing on line 0x200.  Returns 0, or -ENOMEM if the DMA-coherent
 * program memory cannot be allocated.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;	/* 3 dwords per instruction */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1249 
1250 int cx23885_risc_databuffer(struct pci_dev *pci,
1251 				   struct cx23885_riscmem *risc,
1252 				   struct scatterlist *sglist,
1253 				   unsigned int bpl,
1254 				   unsigned int lines, unsigned int lpi)
1255 {
1256 	u32 instructions;
1257 	__le32 *rp;
1258 
1259 	/* estimate risc mem: worst case is one write per page border +
1260 	   one write per scan line + syncs + jump (all 2 dwords).  Here
1261 	   there is no padding and no sync.  First DMA region may be smaller
1262 	   than PAGE_SIZE */
1263 	/* Jump and write need an extra dword */
1264 	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
1265 	instructions += 4;
1266 
1267 	risc->size = instructions * 12;
1268 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1269 	if (risc->cpu == NULL)
1270 		return -ENOMEM;
1271 
1272 	/* write risc instructions */
1273 	rp = risc->cpu;
1274 	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1275 				bpl, 0, lines, lpi, lpi == 0);
1276 
1277 	/* save pointer to jmp instruction address */
1278 	risc->jmp = rp;
1279 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1280 	return 0;
1281 }
1282 
/*
 * Build a RISC program for VBI capture.  Identical in structure to
 * cx23885_risc_buffer(): one field program per supplied offset (UNSET
 * skips a field), bottom field resyncing on line 0x200.  Returns 0 or
 * -ENOMEM if the DMA-coherent program memory cannot be allocated.
 */
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;	/* 3 dwords per instruction */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);



	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1329 
1330 
/* Release the DMA-coherent RISC program memory attached to a buffer. */
void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	/* freeing coherent DMA memory must not happen in interrupt context */
	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}
1338 
/*
 * Debug aid: dump the bridge-wide and per-transport-port interrupt, DMA
 * and TS-format registers for 'port' at dprintk level 1.  Reads only;
 * no hardware state is modified.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
	dprintk(1, "%s() ts_int_status(0x%08X)  0x%08x\n", __func__,
		port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
	dprintk(1, "%s() PCI_INT_STAT           0x%08X\n", __func__,
		cx_read(PCI_INT_STAT));
	dprintk(1, "%s() VID_B_INT_MSTAT        0x%08X\n", __func__,
		cx_read(VID_B_INT_MSTAT));
	dprintk(1, "%s() VID_B_INT_SSTAT        0x%08X\n", __func__,
		cx_read(VID_B_INT_SSTAT));
	dprintk(1, "%s() VID_C_INT_MSTAT        0x%08X\n", __func__,
		cx_read(VID_C_INT_MSTAT));
	dprintk(1, "%s() VID_C_INT_SSTAT        0x%08X\n", __func__,
		cx_read(VID_C_INT_SSTAT));
}
1402 
/*
 * Program and start transport-stream DMA on a port: set up the SRAM
 * channel fifo for the buffer's RISC program, configure the TS pad,
 * clock and format registers for the board's port configuration, then
 * enable TS interrupts and the RISC DMA engine.
 *
 * Returns 0 on success, or -EINVAL if neither port B nor port C is
 * configured as a DVB transport on this board.
 */
int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer   *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	/* this path only supports DVB transports on port B or C */
	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* gate the a/v core clock off while reconfiguring an encoder port */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* Sets MOE_CLK_DIS to disable MoE clock */
		/* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);

		/* ALT_GPIO_ALT_SET: GPIO[0]
		 * IR_ALT_TX_SEL: GPIO[1]
		 * GPIO1_ALT_SEL: VIP_656_DATA[0]
		 * GPIO0_ALT_SEL: VIP_656_CLK
		 */
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	/* re-enable the a/v core clock now the encoder port is configured */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	return 0;
}
1537 
1538 static int cx23885_stop_dma(struct cx23885_tsport *port)
1539 {
1540 	struct cx23885_dev *dev = port->dev;
1541 	u32 reg;
1542 	int delay = 0;
1543 	uint32_t reg1_val;
1544 	uint32_t reg2_val;
1545 
1546 	dprintk(1, "%s()\n", __func__);
1547 
1548 	/* Stop interrupts and DMA */
1549 	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1550 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1551 	/* just in case wait for any dma to complete before allowing dealloc */
1552 	mdelay(20);
1553 	for (delay = 0; delay < 100; delay++) {
1554 		reg1_val = cx_read(TC_REQ);
1555 		reg2_val = cx_read(TC_REQ_SET);
1556 		if (reg1_val == 0 || reg2_val == 0)
1557 			break;
1558 		mdelay(1);
1559 	}
1560 	dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
1561 		delay, reg1_val, reg2_val);
1562 
1563 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1564 		reg = cx_read(PAD_CTRL);
1565 
1566 		/* Set TS1_OE */
1567 		reg = reg | 0x1;
1568 
1569 		/* clear TS1_SOP_OE and TS1_OE_HI */
1570 		reg = reg & ~0xa;
1571 		cx_write(PAD_CTRL, reg);
1572 		cx_write(port->reg_src_sel, 0);
1573 		cx_write(port->reg_gen_ctrl, 8);
1574 	}
1575 
1576 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1577 		cx23885_av_clk(dev, 0);
1578 
1579 	return 0;
1580 }
1581 
1582 /* ------------------------------------------------------------------ */
1583 
1584 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1585 {
1586 	struct cx23885_dev *dev = port->dev;
1587 	int size = port->ts_packet_size * port->ts_packet_count;
1588 	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1589 
1590 	dprintk(1, "%s: %p\n", __func__, buf);
1591 	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1592 		return -EINVAL;
1593 	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1594 
1595 	cx23885_risc_databuffer(dev->pci, &buf->risc,
1596 				sgt->sgl,
1597 				port->ts_packet_size, port->ts_packet_count, 0);
1598 	return 0;
1599 }
1600 
1601 /*
1602  * The risc program for each buffer works as follows: it starts with a simple
1603  * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1604  * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1605  * the initial JUMP).
1606  *
1607  * This is the risc program of the first buffer to be queued if the active list
1608  * is empty and it just keeps DMAing this buffer without generating any
1609  * interrupts.
1610  *
1611  * If a new buffer is added then the initial JUMP in the code for that buffer
1612  * will generate an interrupt which signals that the previous buffer has been
1613  * DMAed successfully and that it can be returned to userspace.
1614  *
1615  * It also sets the final jump of the previous buffer to the start of the new
1616  * buffer, thus chaining the new buffer into the DMA chain. This is a single
1617  * atomic u32 write, so there is no race condition.
1618  *
 * The end-result of all this is that you only get an interrupt when a
 * buffer is ready, so the control flow is very easy.
1621  */
/*
 * vb2 buf_queue handler: add 'buf' to the port's active DMA chain (see
 * the block comment above for the overall RISC chaining scheme).
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	/* initial JUMP skips itself (start + 12); final JUMP loops back there */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		/* IRQ on our initial JUMP signals the previous buffer is done */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		/* single atomic u32 write chains us after the previous buffer */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1650 
1651 /* ----------------------------------------------------------- */
1652 
1653 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1654 {
1655 	struct cx23885_dmaqueue *q = &port->mpegq;
1656 	struct cx23885_buffer *buf;
1657 	unsigned long flags;
1658 
1659 	spin_lock_irqsave(&port->slock, flags);
1660 	while (!list_empty(&q->active)) {
1661 		buf = list_entry(q->active.next, struct cx23885_buffer,
1662 				 queue);
1663 		list_del(&buf->queue);
1664 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1665 		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1666 			buf, buf->vb.vb2_buf.index, reason,
1667 			(unsigned long)buf->risc.dma);
1668 	}
1669 	spin_unlock_irqrestore(&port->slock, flags);
1670 }
1671 
/* Stop DMA on the port and fail back all queued buffers to userspace. */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1678 
/*
 * Service a cx23417 encoder interrupt on VID_B.  On any error condition
 * the port's DMA is stopped and the SRAM channel dumped; on a normal
 * RISCI1 completion the finished buffer is handed back via
 * cx23885_wakeup().  Always acks whatever status bits were set.
 * Returns 1 if any status was handled, else 0.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		/* stop DMA on the port and leave diagnostics in the log */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack all pending status bits */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1734 
/*
 * Service a DVB transport-port interrupt.  On an error condition the
 * port's DMA is stopped and the SRAM channel dumped; on a RISCI1
 * completion the finished buffer is handed back via cx23885_wakeup().
 * Always acks whatever status bits were set.  Returns 1 if any status
 * was handled, else 0.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		/* stop DMA on the port and leave diagnostics in the log */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* ack all pending status bits */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1785 
/*
 * Top-level interrupt handler: read the bridge PCI status and fan out to
 * the per-subsystem handlers (TS ports B/C, analog video, audio, CI
 * slots, IR, a/v core).  Acks the handled PCI status bits at the end and
 * returns IRQ_HANDLED iff any sub-handler claimed work.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	/* fast exit when no enabled PCI interrupt is pending (shared IRQ) */
	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	if ((pci_status & pci_mask) == 0) {
		dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
			pci_status, pci_mask);
		goto out;
	}

	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	if (((pci_status & pci_mask) == 0) &&
		((ts2_status & ts2_mask) == 0) &&
		((ts1_status & ts1_mask) == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* debug decode of the individual PCI status bits */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CI slot status on boards with a CIMaX2 (type 1) or Altera (type 2) */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* a/v core work is deferred; mask it until the workqueue re-enables */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status & pci_mask);
out:
	return IRQ_RETVAL(handled);
}
1950 
1951 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1952 				    unsigned int notification, void *arg)
1953 {
1954 	struct cx23885_dev *dev;
1955 
1956 	if (sd == NULL)
1957 		return;
1958 
1959 	dev = to_cx23885(sd->v4l2_dev);
1960 
1961 	switch (notification) {
1962 	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1963 		if (sd == dev->sd_ir)
1964 			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1965 		break;
1966 	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1967 		if (sd == dev->sd_ir)
1968 			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1969 		break;
1970 	}
1971 }
1972 
1973 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1974 {
1975 	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1976 	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1977 	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1978 	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1979 }
1980 
1981 static inline int encoder_on_portb(struct cx23885_dev *dev)
1982 {
1983 	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1984 }
1985 
1986 static inline int encoder_on_portc(struct cx23885_dev *dev)
1987 {
1988 	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1989 }
1990 
/* Mask represents 32 different GPIOs; the GPIOs are split across multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO  2 thru  0 - On the cx23885 bridge
 * GPIO 18 thru  3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
2003 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
2004 {
2005 	if (mask & 0x7)
2006 		cx_set(GP0_IO, mask & 0x7);
2007 
2008 	if (mask & 0x0007fff8) {
2009 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2010 			pr_err("%s: Setting GPIO on encoder ports\n",
2011 				dev->name);
2012 		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
2013 	}
2014 
2015 	/* TODO: 23-19 */
2016 	if (mask & 0x00f80000)
2017 		pr_info("%s: Unsupported\n", dev->name);
2018 }
2019 
2020 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
2021 {
2022 	if (mask & 0x00000007)
2023 		cx_clear(GP0_IO, mask & 0x7);
2024 
2025 	if (mask & 0x0007fff8) {
2026 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2027 			pr_err("%s: Clearing GPIO moving on encoder ports\n",
2028 				dev->name);
2029 		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
2030 	}
2031 
2032 	/* TODO: 23-19 */
2033 	if (mask & 0x00f80000)
2034 		pr_info("%s: Unsupported\n", dev->name);
2035 }
2036 
2037 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
2038 {
2039 	if (mask & 0x00000007)
2040 		return (cx_read(GP0_IO) >> 8) & mask & 0x7;
2041 
2042 	if (mask & 0x0007fff8) {
2043 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2044 			pr_err("%s: Reading GPIO moving on encoder ports\n",
2045 				dev->name);
2046 		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
2047 	}
2048 
2049 	/* TODO: 23-19 */
2050 	if (mask & 0x00f80000)
2051 		pr_info("%s: Unsupported\n", dev->name);
2052 
2053 	return 0;
2054 }
2055 
2056 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
2057 {
2058 	if ((mask & 0x00000007) && asoutput)
2059 		cx_set(GP0_IO, (mask & 0x7) << 16);
2060 	else if ((mask & 0x00000007) && !asoutput)
2061 		cx_clear(GP0_IO, (mask & 0x7) << 16);
2062 
2063 	if (mask & 0x0007fff8) {
2064 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2065 			pr_err("%s: Enabling GPIO on encoder ports\n",
2066 				dev->name);
2067 	}
2068 
2069 	/* MC417_OEN is active low for output, write 1 for an input */
2070 	if ((mask & 0x0007fff8) && asoutput)
2071 		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2072 
2073 	else if ((mask & 0x0007fff8) && !asoutput)
2074 		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2075 
2076 	/* TODO: 23-19 */
2077 }
2078 
2079 static struct {
2080 	int vendor, dev;
2081 } const broken_dev_id[] = {
2082 	/* According with
2083 	 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
2084 	 * 0x1451 is PCI ID for the IOMMU found on Ryzen
2085 	 */
2086 	{ PCI_VENDOR_ID_AMD, 0x1451 },
2087 };
2088 
2089 static bool cx23885_does_need_dma_reset(void)
2090 {
2091 	int i;
2092 	struct pci_dev *pdev = NULL;
2093 
2094 	if (dma_reset_workaround == 0)
2095 		return false;
2096 	else if (dma_reset_workaround == 2)
2097 		return true;
2098 
2099 	for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
2100 		pdev = pci_get_device(broken_dev_id[i].vendor,
2101 				      broken_dev_id[i].dev, NULL);
2102 		if (pdev) {
2103 			pci_dev_put(pdev);
2104 			return true;
2105 		}
2106 	}
2107 	return false;
2108 }
2109 
2110 static int cx23885_initdev(struct pci_dev *pci_dev,
2111 			   const struct pci_device_id *pci_id)
2112 {
2113 	struct cx23885_dev *dev;
2114 	struct v4l2_ctrl_handler *hdl;
2115 	int err;
2116 
2117 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2118 	if (NULL == dev)
2119 		return -ENOMEM;
2120 
2121 	dev->need_dma_reset = cx23885_does_need_dma_reset();
2122 
2123 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2124 	if (err < 0)
2125 		goto fail_free;
2126 
2127 	hdl = &dev->ctrl_handler;
2128 	v4l2_ctrl_handler_init(hdl, 6);
2129 	if (hdl->error) {
2130 		err = hdl->error;
2131 		goto fail_ctrl;
2132 	}
2133 	dev->v4l2_dev.ctrl_handler = hdl;
2134 
2135 	/* Prepare to handle notifications from subdevices */
2136 	cx23885_v4l2_dev_notify_init(dev);
2137 
2138 	/* pci init */
2139 	dev->pci = pci_dev;
2140 	if (pci_enable_device(pci_dev)) {
2141 		err = -EIO;
2142 		goto fail_ctrl;
2143 	}
2144 
2145 	if (cx23885_dev_setup(dev) < 0) {
2146 		err = -EINVAL;
2147 		goto fail_ctrl;
2148 	}
2149 
2150 	/* print pci info */
2151 	dev->pci_rev = pci_dev->revision;
2152 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
2153 	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
2154 	       dev->name,
2155 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2156 	       dev->pci_lat,
2157 		(unsigned long long)pci_resource_start(pci_dev, 0));
2158 
2159 	pci_set_master(pci_dev);
2160 	err = pci_set_dma_mask(pci_dev, 0xffffffff);
2161 	if (err) {
2162 		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2163 		goto fail_ctrl;
2164 	}
2165 
2166 	err = request_irq(pci_dev->irq, cx23885_irq,
2167 			  IRQF_SHARED, dev->name, dev);
2168 	if (err < 0) {
2169 		pr_err("%s: can't get IRQ %d\n",
2170 		       dev->name, pci_dev->irq);
2171 		goto fail_irq;
2172 	}
2173 
2174 	switch (dev->board) {
2175 	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2176 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2177 		break;
2178 	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2179 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2180 		break;
2181 	}
2182 
2183 	/*
2184 	 * The CX2388[58] IR controller can start firing interrupts when
2185 	 * enabled, so these have to take place after the cx23885_irq() handler
2186 	 * is hooked up by the call to request_irq() above.
2187 	 */
2188 	cx23885_ir_pci_int_enable(dev);
2189 	cx23885_input_init(dev);
2190 
2191 	return 0;
2192 
2193 fail_irq:
2194 	cx23885_dev_unregister(dev);
2195 fail_ctrl:
2196 	v4l2_ctrl_handler_free(hdl);
2197 	v4l2_device_unregister(&dev->v4l2_dev);
2198 fail_free:
2199 	kfree(dev);
2200 	return err;
2201 }
2202 
/* PCI remove: tear down a bound device in the reverse order of
 * cx23885_initdev() - stop input/IR handling first, quiesce the
 * hardware, release the IRQ line, then unregister and free. */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	/* Quiesce the hardware before releasing the IRQ */
	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2223 
2224 static const struct pci_device_id cx23885_pci_tbl[] = {
2225 	{
2226 		/* CX23885 */
2227 		.vendor       = 0x14f1,
2228 		.device       = 0x8852,
2229 		.subvendor    = PCI_ANY_ID,
2230 		.subdevice    = PCI_ANY_ID,
2231 	}, {
2232 		/* CX23887 Rev 2 */
2233 		.vendor       = 0x14f1,
2234 		.device       = 0x8880,
2235 		.subvendor    = PCI_ANY_ID,
2236 		.subdevice    = PCI_ANY_ID,
2237 	}, {
2238 		/* --- end of list --- */
2239 	}
2240 };
2241 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2242 
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO: power management (suspend/resume) is not implemented */
	.suspend  = NULL,
	.resume   = NULL,
};
2252 
2253 static int __init cx23885_init(void)
2254 {
2255 	pr_info("cx23885 driver version %s loaded\n",
2256 		CX23885_VERSION);
2257 	return pci_register_driver(&cx23885_pci_driver);
2258 }
2259 
/* Module unload: detach from the PCI core; per-device teardown runs in
 * cx23885_finidev() as each bound device is removed. */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}
2264 
2265 module_init(cx23885_init);
2266 module_exit(cx23885_fini);
2267