// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Conexant CX23885 PCIe bridge
 *
 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 */

#include "cx23885.h"

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>

#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"

MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

/*
 * Some platforms have been found to require periodic resetting of the DMA
 * engine. Ryzen and Xeon platforms are known to be affected. The symptom
 * encountered is "mpeg risc op code error". Only Ryzen platforms employ
 * this workaround if the option equals 1. The workaround can be explicitly
 * disabled for all platforms by setting to 0, and can be forced on for any
 * platform by setting to 2.
 */
static unsigned int dma_reset_workaround = 1;
module_param(dma_reset_workaround, int, 0644);
MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)

static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)
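/*
 * Illustrative usage (not part of the original source): the options above
 * can be set at module load time, e.g.
 *
 *	modprobe cx23885 debug=7 dma_reset_workaround=2
 *
 * With debug >= 1, the dprintk(1, ...) traces throughout this file become
 * visible in the kernel log.
 */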
/* FIXME: these allocations will change when
 * analog arrives. To be reviewed.
 *
 * CX23887 Assumptions:
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */

static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};

static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};

static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
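/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * sub-block typically registers its bit in pci_irqmask before unmasking
 * it, and drops it again on teardown:
 *
 *	cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
 *	...
 *	cx23885_irq_remove(dev, PCI_MSK_GPIO0);
 *
 * cx23885_irq_enable() only unmasks bits previously registered via
 * cx23885_irq_add*(), which is why all of these helpers serialize on
 * pci_irqmask_lock.
 */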
static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}

static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk(KERN_DEBUG "0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			pr_cont(" %s", bits[i]);
	pr_cont(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}

static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;
	int count_delta;
	int max_buf_done = 5; /* service maximum five buffers */

	do {
		if (list_empty(&q->active))
			return;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, queue);

		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.sequence = q->count++;
		if (count != (q->count % 65536)) {
			dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		} else {
			dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		}
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		max_buf_done--;
		/* count register is 16 bits so apply modulo appropriately */
		count_delta = ((int)count - (int)(q->count % 65536));
	} while ((count_delta > 0) && (max_buf_done > 0));
}

int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}

void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	pr_warn("%s: %s - dma channel status dump\n",
		dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		pr_warn("%s:   cmds: %-15s: 0x%08x\n",
			dev->name, name[i],
			cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		pr_warn("%s:   risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		pr_warn("%s:   (0x%08x) iq %x: ", dev->name,
			ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			pr_warn("%s:   iq %x: 0x%08x [ arg #%d ]\n",
				dev->name, i+j, risc, j);
		}
	}

	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	pr_warn("%s:   ptr1_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr1_reg));
	pr_warn("%s:   ptr2_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr2_reg));
	pr_warn("%s:   cnt1_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt1_reg));
	pr_warn("%s:   cnt2_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt2_reg));
}

static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
		dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		pr_info("%s:   %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			pr_info("%s:   %04d: 0x%08x [ arg #%d ]\n",
				dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
	uint32_t reg1_val, reg2_val;

	if (!dev->need_dma_reset)
		return;

	reg1_val = cx_read(TC_REQ); /* read-only */
	reg2_val = cx_read(TC_REQ_SET);

	if (reg1_val && reg2_val) {
		cx_write(TC_REQ, reg1_val);
		cx_write(TC_REQ_SET, reg2_val);
		cx_read(VID_B_DMA);
		cx_read(VBI_B_DMA);
		cx_read(VID_C_DMA);
		cx_read(VBI_C_DMA);

		dev_info(&dev->pci->dev,
			 "dma in progress detected 0x%08x 0x%08x, clearing\n",
			 reg1_val, reg2_val);
	}
}

static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B/C activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);
}

static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	msleep(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
				   720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
				   188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
				   188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
}
709 */ 710 if (dev->bridge == CX23885_BRIDGE_885) 711 cx_clear(RDR_TLCTL0, 1 << 4); 712 713 /* clear dma in progress */ 714 cx23885_clear_bridge_error(dev); 715 return 0; 716 } 717 718 static int get_resources(struct cx23885_dev *dev) 719 { 720 if (request_mem_region(pci_resource_start(dev->pci, 0), 721 pci_resource_len(dev->pci, 0), 722 dev->name)) 723 return 0; 724 725 pr_err("%s: can't get MMIO memory @ 0x%llx\n", 726 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0)); 727 728 return -EBUSY; 729 } 730 731 static int cx23885_init_tsport(struct cx23885_dev *dev, 732 struct cx23885_tsport *port, int portno) 733 { 734 dprintk(1, "%s(portno=%d)\n", __func__, portno); 735 736 /* Transport bus init dma queue - Common settings */ 737 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */ 738 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */ 739 port->vld_misc_val = 0x0; 740 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4); 741 742 spin_lock_init(&port->slock); 743 port->dev = dev; 744 port->nr = portno; 745 746 INIT_LIST_HEAD(&port->mpegq.active); 747 mutex_init(&port->frontends.lock); 748 INIT_LIST_HEAD(&port->frontends.felist); 749 port->frontends.active_fe_id = 0; 750 751 /* This should be hardcoded allow a single frontend 752 * attachment to this tsport, keeping the -dvb.c 753 * code clean and safe. 754 */ 755 if (!port->num_frontends) 756 port->num_frontends = 1; 757 758 switch (portno) { 759 case 1: 760 port->reg_gpcnt = VID_B_GPCNT; 761 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL; 762 port->reg_dma_ctl = VID_B_DMA_CTL; 763 port->reg_lngth = VID_B_LNGTH; 764 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL; 765 port->reg_gen_ctrl = VID_B_GEN_CTL; 766 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS; 767 port->reg_sop_status = VID_B_SOP_STATUS; 768 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT; 769 port->reg_vld_misc = VID_B_VLD_MISC; 770 port->reg_ts_clk_en = VID_B_TS_CLK_EN; 771 port->reg_src_sel = VID_B_SRC_SEL; 772 port->reg_ts_int_msk = VID_B_INT_MSK; 773 port->reg_ts_int_stat = VID_B_INT_STAT; 774 port->sram_chno = SRAM_CH03; /* VID_B */ 775 port->pci_irqmask = 0x02; /* VID_B bit1 */ 776 break; 777 case 2: 778 port->reg_gpcnt = VID_C_GPCNT; 779 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL; 780 port->reg_dma_ctl = VID_C_DMA_CTL; 781 port->reg_lngth = VID_C_LNGTH; 782 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL; 783 port->reg_gen_ctrl = VID_C_GEN_CTL; 784 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS; 785 port->reg_sop_status = VID_C_SOP_STATUS; 786 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT; 787 port->reg_vld_misc = VID_C_VLD_MISC; 788 port->reg_ts_clk_en = VID_C_TS_CLK_EN; 789 port->reg_src_sel = 0; 790 port->reg_ts_int_msk = VID_C_INT_MSK; 791 port->reg_ts_int_stat = VID_C_INT_STAT; 792 port->sram_chno = SRAM_CH06; /* VID_C */ 793 port->pci_irqmask = 0x04; /* VID_C bit2 */ 794 break; 795 default: 796 BUG(); 797 } 798 799 return 0; 800 } 801 802 static void cx23885_dev_checkrevision(struct cx23885_dev *dev) 803 { 804 switch (cx_read(RDR_CFG2) & 0xff) { 805 case 0x00: 806 /* cx23885 */ 807 dev->hwrevision = 0xa0; 808 break; 809 case 0x01: 810 /* CX23885-12Z */ 811 dev->hwrevision = 0xa1; 812 break; 813 case 0x02: 814 /* CX23885-13Z/14Z */ 815 dev->hwrevision = 0xb0; 816 break; 817 case 0x03: 818 if (dev->pci->device == 0x8880) { 819 /* CX23888-21Z/22Z */ 820 dev->hwrevision = 0xc0; 821 } else { 822 /* CX23885-14Z */ 823 dev->hwrevision = 0xa4; 824 } 825 break; 826 case 0x04: 827 if (dev->pci->device == 0x8880) { 828 /* CX23888-31Z */ 829 dev->hwrevision = 0xd0; 830 } else { 
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		pr_err("%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		pr_info("%s() Hardware revision = 0x%02x\n",
			__func__, dev->hwrevision);
	else
		pr_err("%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}

/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}

static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume an 888 default */
		dev->bridge = CX23885_BRIDGE_888;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 50000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	if (dev->pci->device == 0x8852) {
		/* no DIF on cx23885, so no analog tuner support possible */
		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
	}
	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
	    dev->pci->subsystem_device == 0x7137) {
		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
		 * with an 888, and a 25MHz crystal, instead of the
		 * usual third overtone 50MHz. The default clock rate must
		 * be overridden so the cx25840 is properly configured.
		 */
		dev->clk_freq = 25000000;
	}

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus  = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);
	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, tuner, standby);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * these GPIOs are pulled high. Make sure these GPIOs are
		 * marked as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}

static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
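/*
 * Illustrative example of the program cx23885_risc_field() below emits
 * (the addresses are invented): a scan line of bpl bytes that straddles
 * one SG element boundary becomes two writes,
 *
 *	RISC_WRITE|RISC_SOL|n0, addr0, 0	- first n0 bytes
 *	RISC_WRITE|RISC_EOL|n1, addr1, 0	- remaining n1 = bpl - n0
 *
 * with SOL/EOL marking the start and end of the line, and the third dword
 * of each write holding address bits 63-32.
 */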
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;

	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
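/*
 * Worked sizing example (illustrative; assumes PAGE_SIZE == 4096): for a
 * single field with bpl = 1440, padding = 0, lines = 288, the estimate in
 * cx23885_risc_buffer() below gives
 * instructions = 1 + (1440 * 288) / 4096 + 288 = 390, plus 5 for the
 * sync/jump overhead, i.e. risc->size = 395 * 12 = 4740 bytes.
 */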
First DMA 1215 region may be smaller than PAGE_SIZE */ 1216 /* write and jump need and extra dword */ 1217 instructions = fields * (1 + ((bpl + padding) * lines) 1218 / PAGE_SIZE + lines); 1219 instructions += 5; 1220 risc->size = instructions * 12; 1221 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); 1222 if (risc->cpu == NULL) 1223 return -ENOMEM; 1224 1225 /* write risc instructions */ 1226 rp = risc->cpu; 1227 if (UNSET != top_offset) 1228 rp = cx23885_risc_field(rp, sglist, top_offset, 0, 1229 bpl, padding, lines, 0, true); 1230 if (UNSET != bottom_offset) 1231 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200, 1232 bpl, padding, lines, 0, UNSET == top_offset); 1233 1234 /* save pointer to jmp instruction address */ 1235 risc->jmp = rp; 1236 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); 1237 return 0; 1238 } 1239 1240 int cx23885_risc_databuffer(struct pci_dev *pci, 1241 struct cx23885_riscmem *risc, 1242 struct scatterlist *sglist, 1243 unsigned int bpl, 1244 unsigned int lines, unsigned int lpi) 1245 { 1246 u32 instructions; 1247 __le32 *rp; 1248 1249 /* estimate risc mem: worst case is one write per page border + 1250 one write per scan line + syncs + jump (all 2 dwords). Here 1251 there is no padding and no sync. First DMA region may be smaller 1252 than PAGE_SIZE */ 1253 /* Jump and write need an extra dword */ 1254 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines; 1255 instructions += 4; 1256 1257 risc->size = instructions * 12; 1258 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); 1259 if (risc->cpu == NULL) 1260 return -ENOMEM; 1261 1262 /* write risc instructions */ 1263 rp = risc->cpu; 1264 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, 1265 bpl, 0, lines, lpi, lpi == 0); 1266 1267 /* save pointer to jmp instruction address */ 1268 risc->jmp = rp; 1269 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); 1270 return 0; 1271 } 1272 1273 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc, 1274 struct scatterlist *sglist, unsigned int top_offset, 1275 unsigned int bottom_offset, unsigned int bpl, 1276 unsigned int padding, unsigned int lines) 1277 { 1278 u32 instructions, fields; 1279 __le32 *rp; 1280 1281 fields = 0; 1282 if (UNSET != top_offset) 1283 fields++; 1284 if (UNSET != bottom_offset) 1285 fields++; 1286 1287 /* estimate risc mem: worst case is one write per page border + 1288 one write per scan line + syncs + jump (all 2 dwords). Padding 1289 can cause next bpl to start close to a page border. 
First DMA 1290 region may be smaller than PAGE_SIZE */ 1291 /* write and jump need and extra dword */ 1292 instructions = fields * (1 + ((bpl + padding) * lines) 1293 / PAGE_SIZE + lines); 1294 instructions += 5; 1295 risc->size = instructions * 12; 1296 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); 1297 if (risc->cpu == NULL) 1298 return -ENOMEM; 1299 /* write risc instructions */ 1300 rp = risc->cpu; 1301 1302 /* Sync to line 6, so US CC line 21 will appear in line '12' 1303 * in the userland vbi payload */ 1304 if (UNSET != top_offset) 1305 rp = cx23885_risc_field(rp, sglist, top_offset, 0, 1306 bpl, padding, lines, 0, true); 1307 1308 if (UNSET != bottom_offset) 1309 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200, 1310 bpl, padding, lines, 0, UNSET == top_offset); 1311 1312 1313 1314 /* save pointer to jmp instruction address */ 1315 risc->jmp = rp; 1316 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); 1317 return 0; 1318 } 1319 1320 1321 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf) 1322 { 1323 struct cx23885_riscmem *risc = &buf->risc; 1324 1325 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma); 1326 } 1327 1328 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port) 1329 { 1330 struct cx23885_dev *dev = port->dev; 1331 1332 dprintk(1, "%s() Register Dump\n", __func__); 1333 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__, 1334 cx_read(DEV_CNTRL2)); 1335 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__, 1336 cx23885_irq_get_mask(dev)); 1337 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__, 1338 cx_read(AUDIO_INT_INT_MSK)); 1339 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__, 1340 cx_read(AUD_INT_DMA_CTL)); 1341 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__, 1342 cx_read(AUDIO_EXT_INT_MSK)); 1343 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__, 1344 cx_read(AUD_EXT_DMA_CTL)); 1345 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__, 1346 cx_read(PAD_CTRL)); 1347 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__, 1348 cx_read(ALT_PIN_OUT_SEL)); 1349 dprintk(1, "%s() GPIO2 0x%08X\n", __func__, 1350 cx_read(GPIO2)); 1351 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__, 1352 port->reg_gpcnt, cx_read(port->reg_gpcnt)); 1353 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__, 1354 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl)); 1355 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__, 1356 port->reg_dma_ctl, cx_read(port->reg_dma_ctl)); 1357 if (port->reg_src_sel) 1358 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__, 1359 port->reg_src_sel, cx_read(port->reg_src_sel)); 1360 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__, 1361 port->reg_lngth, cx_read(port->reg_lngth)); 1362 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__, 1363 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl)); 1364 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__, 1365 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl)); 1366 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__, 1367 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status)); 1368 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__, 1369 port->reg_sop_status, cx_read(port->reg_sop_status)); 1370 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__, 1371 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat)); 1372 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__, 1373 port->reg_vld_misc, cx_read(port->reg_vld_misc)); 1374 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", 
	dprintk(1, "%s() ts_clk_en(0x%08X)        0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)       0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
	dprintk(1, "%s() ts_int_status(0x%08X)    0x%08x\n", __func__,
		port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
	dprintk(1, "%s() PCI_INT_STAT             0x%08X\n", __func__,
		cx_read(PCI_INT_STAT));
	dprintk(1, "%s() VID_B_INT_MSTAT          0x%08X\n", __func__,
		cx_read(VID_B_INT_MSTAT));
	dprintk(1, "%s() VID_B_INT_SSTAT          0x%08X\n", __func__,
		cx_read(VID_B_INT_SSTAT));
	dprintk(1, "%s() VID_C_INT_MSTAT          0x%08X\n", __func__,
		cx_read(VID_C_INT_MSTAT));
	dprintk(1, "%s() VID_C_INT_SSTAT          0x%08X\n", __func__,
		cx_read(VID_C_INT_SSTAT));
}

int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
	    (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
		       __func__,
		       cx23885_boards[dev->board].portb,
		       cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);
	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* Sets MOE_CLK_DIS to disable MoE clock */
		/* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);

		/* ALT_GPIO_ALT_SET: GPIO[0]
		 * IR_ALT_TX_SEL: GPIO[1]
		 * GPIO1_ALT_SEL: VIP_656_DATA[0]
		 * GPIO0_ALT_SEL: VIP_656_CLK
		 */
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	return 0;
}

static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;
	int delay = 0;
	uint32_t reg1_val;
	uint32_t reg2_val;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
	/* just in case wait for any dma to complete before allowing dealloc */
	mdelay(20);
	for (delay = 0; delay < 100; delay++) {
		reg1_val = cx_read(TC_REQ);
		reg2_val = cx_read(TC_REQ_SET);
		if (reg1_val == 0 || reg2_val == 0)
			break;
		mdelay(1);
	}
	dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
		delay, reg1_val, reg2_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}

/* ------------------------------------------------------------------ */
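/*
 * Illustrative sizing (the packet counts are assumptions, not fixed by
 * this driver): with port->ts_packet_size = 188 and
 * port->ts_packet_count = 32, cx23885_buf_prepare() below requires a
 * plane of at least 188 * 32 = 6016 bytes.
 */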
int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12
 * (skipping the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end-result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
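/*
 * Illustrative timeline for the chaining described above (addresses are
 * invented): buffer A is queued onto an empty list and loops on itself via
 * its final JUMP to A.dma + 12. When buffer B is queued, B's initial JUMP
 * gains RISC_IRQ1 and A's jmp[1] is repointed from A.dma + 12 to B.dma, so
 * finishing A raises the interrupt and execution falls straight into B.
 */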
/* ----------------------------------------------------------- */

static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.vb2_buf.index, reason,
			(unsigned long)buf->risc.dma);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}

void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}

int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
	    (status & VID_B_MSK_OPC_ERR) ||
	    (status & VID_B_MSK_VBI_OPC_ERR) ||
	    (status & VID_B_MSK_SYNC) ||
	    (status & VID_B_MSK_VBI_SYNC) ||
	    (status & VID_B_MSK_OF) ||
	    (status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
	    (status & VID_BC_MSK_BAD_PKT) ||
	    (status & VID_BC_MSK_SYNC) ||
	    (status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	if ((pci_status & pci_mask) == 0) {
		dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
			pci_status, pci_mask);
		goto out;
	}

	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	if (((pci_status & pci_mask) == 0) &&
	    ((ts2_status & ts2_mask) == 0) &&
	    ((ts1_status & ts1_mask) == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);
dprintk(7, " (PCI_MSK_IR 0x%08x)\n", 1887 PCI_MSK_IR); 1888 } 1889 1890 if (cx23885_boards[dev->board].ci_type == 1 && 1891 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0))) 1892 handled += netup_ci_slot_status(dev, pci_status); 1893 1894 if (cx23885_boards[dev->board].ci_type == 2 && 1895 (pci_status & PCI_MSK_GPIO0)) 1896 handled += altera_ci_irq(dev); 1897 1898 if (ts1_status) { 1899 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) 1900 handled += cx23885_irq_ts(ts1, ts1_status); 1901 else 1902 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) 1903 handled += cx23885_irq_417(dev, ts1_status); 1904 } 1905 1906 if (ts2_status) { 1907 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) 1908 handled += cx23885_irq_ts(ts2, ts2_status); 1909 else 1910 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) 1911 handled += cx23885_irq_417(dev, ts2_status); 1912 } 1913 1914 if (vida_status) 1915 handled += cx23885_video_irq(dev, vida_status); 1916 1917 if (audint_status) 1918 handled += cx23885_audio_irq(dev, audint_status, audint_mask); 1919 1920 if (pci_status & PCI_MSK_IR) { 1921 subdev_handled = false; 1922 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine, 1923 pci_status, &subdev_handled); 1924 if (subdev_handled) 1925 handled++; 1926 } 1927 1928 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) { 1929 cx23885_irq_disable(dev, PCI_MSK_AV_CORE); 1930 schedule_work(&dev->cx25840_work); 1931 handled++; 1932 } 1933 1934 if (handled) 1935 cx_write(PCI_INT_STAT, pci_status & pci_mask); 1936 out: 1937 return IRQ_RETVAL(handled); 1938 } 1939 1940 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd, 1941 unsigned int notification, void *arg) 1942 { 1943 struct cx23885_dev *dev; 1944 1945 if (sd == NULL) 1946 return; 1947 1948 dev = to_cx23885(sd->v4l2_dev); 1949 1950 switch (notification) { 1951 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */ 1952 if (sd == dev->sd_ir) 1953 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg); 1954 break; 1955 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */ 1956 if (sd == dev->sd_ir) 1957 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg); 1958 break; 1959 } 1960 } 1961 1962 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev) 1963 { 1964 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler); 1965 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler); 1966 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler); 1967 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify; 1968 } 1969 1970 static inline int encoder_on_portb(struct cx23885_dev *dev) 1971 { 1972 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER; 1973 } 1974 1975 static inline int encoder_on_portc(struct cx23885_dev *dev) 1976 { 1977 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER; 1978 } 1979 1980 /* Mask represents 32 different GPIOs, GPIO's are split into multiple 1981 * registers depending on the board configuration (and whether the 1982 * 417 encoder (wi it's own GPIO's) are present. Each GPIO bit will 1983 * be pushed into the correct hardware register, regardless of the 1984 * physical location. Certain registers are shared so we sanity check 1985 * and report errors if we think we're tampering with a GPIo that might 1986 * be assigned to the encoder (and used for the host bus). 
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Setting GPIO on encoder ports\n",
			       dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Clearing GPIO on encoder ports\n",
			       dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Reading GPIO on encoder ports\n",
			       dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);

	return 0;
}

void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Enabling GPIO on encoder ports\n",
			       dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}
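
/*
 * PCI IDs of IOMMUs/bridges known to need the periodic RISC DMA engine
 * reset workaround (see the dma_reset_workaround module parameter).
 * Finding any of these devices at probe time switches the workaround
 * on when the parameter is left at its default of 1.
 */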
static struct {
	int vendor, dev;
} const broken_dev_id[] = {
	/* According to
	 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
	 * 0x1451 is the PCI ID for the IOMMU found on Ryzen
	 */
	{ PCI_VENDOR_ID_AMD, 0x1451 },
	/* According to sudo lspci -nn,
	 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
	 */
	{ PCI_VENDOR_ID_AMD, 0x1423 },
	/* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
	 */
	{ PCI_VENDOR_ID_AMD, 0x1481 },
	/* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
	 */
	{ PCI_VENDOR_ID_AMD, 0x1419 },
	/* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
	 */
	{ PCI_VENDOR_ID_ATI, 0x5a23 },
};

static bool cx23885_does_need_dma_reset(void)
{
	int i;
	struct pci_dev *pdev = NULL;

	if (dma_reset_workaround == 0)
		return false;
	else if (dma_reset_workaround == 2)
		return true;

	for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
		pdev = pci_get_device(broken_dev_id[i].vendor,
				      broken_dev_id[i].dev, NULL);
		if (pdev) {
			pci_dev_put(pdev);
			return true;
		}
	}
	return false;
}
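
/*
 * PCI probe callback: allocates the device state, registers the V4L2
 * device and control handler, enables the PCI device, maps resources
 * via cx23885_dev_setup(), sets the 32-bit DMA mask and hooks up the
 * shared interrupt handler before enabling board-specific interrupts.
 */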
2189 */ 2190 cx23885_ir_pci_int_enable(dev); 2191 cx23885_input_init(dev); 2192 2193 return 0; 2194 2195 fail_irq: 2196 cx23885_dev_unregister(dev); 2197 fail_ctrl: 2198 v4l2_ctrl_handler_free(hdl); 2199 v4l2_device_unregister(&dev->v4l2_dev); 2200 fail_free: 2201 kfree(dev); 2202 return err; 2203 } 2204 2205 static void cx23885_finidev(struct pci_dev *pci_dev) 2206 { 2207 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev); 2208 struct cx23885_dev *dev = to_cx23885(v4l2_dev); 2209 2210 cx23885_input_fini(dev); 2211 cx23885_ir_fini(dev); 2212 2213 cx23885_shutdown(dev); 2214 2215 /* unregister stuff */ 2216 free_irq(pci_dev->irq, dev); 2217 2218 pci_disable_device(pci_dev); 2219 2220 cx23885_dev_unregister(dev); 2221 v4l2_ctrl_handler_free(&dev->ctrl_handler); 2222 v4l2_device_unregister(v4l2_dev); 2223 kfree(dev); 2224 } 2225 2226 static const struct pci_device_id cx23885_pci_tbl[] = { 2227 { 2228 /* CX23885 */ 2229 .vendor = 0x14f1, 2230 .device = 0x8852, 2231 .subvendor = PCI_ANY_ID, 2232 .subdevice = PCI_ANY_ID, 2233 }, { 2234 /* CX23887 Rev 2 */ 2235 .vendor = 0x14f1, 2236 .device = 0x8880, 2237 .subvendor = PCI_ANY_ID, 2238 .subdevice = PCI_ANY_ID, 2239 }, { 2240 /* --- end of list --- */ 2241 } 2242 }; 2243 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl); 2244 2245 static struct pci_driver cx23885_pci_driver = { 2246 .name = "cx23885", 2247 .id_table = cx23885_pci_tbl, 2248 .probe = cx23885_initdev, 2249 .remove = cx23885_finidev, 2250 }; 2251 2252 static int __init cx23885_init(void) 2253 { 2254 pr_info("cx23885 driver version %s loaded\n", 2255 CX23885_VERSION); 2256 return pci_register_driver(&cx23885_pci_driver); 2257 } 2258 2259 static void __exit cx23885_fini(void) 2260 { 2261 pci_unregister_driver(&cx23885_pci_driver); 2262 } 2263 2264 module_init(cx23885_init); 2265 module_exit(cx23885_fini); 2266