1 /* 2 * Driver for the Conexant CX23885 PCIe bridge 3 * 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * 15 * GNU General Public License for more details. 16 */ 17 18 #include "cx23885.h" 19 20 #include <linux/init.h> 21 #include <linux/list.h> 22 #include <linux/module.h> 23 #include <linux/moduleparam.h> 24 #include <linux/kmod.h> 25 #include <linux/kernel.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 #include <asm/div64.h> 30 #include <linux/firmware.h> 31 32 #include "cimax2.h" 33 #include "altera-ci.h" 34 #include "cx23888-ir.h" 35 #include "cx23885-ir.h" 36 #include "cx23885-av.h" 37 #include "cx23885-input.h" 38 39 MODULE_DESCRIPTION("Driver for cx23885 based TV cards"); 40 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>"); 41 MODULE_LICENSE("GPL"); 42 MODULE_VERSION(CX23885_VERSION); 43 44 static unsigned int debug; 45 module_param(debug, int, 0644); 46 MODULE_PARM_DESC(debug, "enable debug messages"); 47 48 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET }; 49 module_param_array(card, int, NULL, 0444); 50 MODULE_PARM_DESC(card, "card type"); 51 52 #define dprintk(level, fmt, arg...)\ 53 do { if (debug >= level)\ 54 printk(KERN_DEBUG pr_fmt("%s: " fmt), \ 55 __func__, ##arg); \ 56 } while (0) 57 58 static unsigned int cx23885_devcount; 59 60 #define NO_SYNC_LINE (-1U) 61 62 /* FIXME, these allocations will change when 63 * analog arrives. The be reviewed. 
 * CX23887 Assumptions
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 * 15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */

/*
 * SRAM channel map for the CX23885 bridge.
 * Entries named "chN" with all-zero addresses are unused on this chip;
 * cx23885_sram_channel_setup() treats cmds_start == 0 as "erase channel".
 */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x10380,
		.cdt = 0x104c0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name = "ch2",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10400,
		.cdt = 0x10580,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		/* NOTE(review): TS2 C reuses the DMA5_* registers (same as
		 * ch5 above); the cx23887 table below does the same —
		 * presumably intentional, confirm against the datasheet. */
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10440,
		.cdt = 0x105e0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name = "TV Audio",
		.cmds_start = 0x10190,
		.ctrl_start = 0x10480,
		.cdt = 0x10a00,
		.fifo_start = 0x7000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};

/*
 * SRAM channel map for the CX23887/8 bridge (different ctrl/cdt layout,
 * and a dedicated VBI channel on SRAM_CH02).
 */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name = "VID A",
		.cmds_start = 0x10000,
		.ctrl_start = 0x105b0,
		.cdt = 0x107b0,
		.fifo_start = 0x40,
		.fifo_size = 0x2800,
		.ptr1_reg = DMA1_PTR1,
		.ptr2_reg = DMA1_PTR2,
		.cnt1_reg = DMA1_CNT1,
		.cnt2_reg = DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name = "VID A (VBI)",
		.cmds_start = 0x10050,
		.ctrl_start = 0x105F0,
		.cdt = 0x10810,
		.fifo_start = 0x3000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA2_PTR1,
		.ptr2_reg = DMA2_PTR2,
		.cnt1_reg = DMA2_CNT1,
		.cnt2_reg = DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name = "TS1 B",
		.cmds_start = 0x100A0,
		.ctrl_start = 0x10630,
		.cdt = 0x10870,
		.fifo_start = 0x5000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA3_PTR1,
		.ptr2_reg = DMA3_PTR2,
		.cnt1_reg = DMA3_CNT1,
		.cnt2_reg = DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name = "ch4",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA4_PTR1,
		.ptr2_reg = DMA4_PTR2,
		.cnt1_reg = DMA4_CNT1,
		.cnt2_reg = DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name = "ch5",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name = "TS2 C",
		.cmds_start = 0x10140,
		.ctrl_start = 0x10670,
		.cdt = 0x108d0,
		.fifo_start = 0x6000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA5_PTR1,
		.ptr2_reg = DMA5_PTR2,
		.cnt1_reg = DMA5_CNT1,
		.cnt2_reg = DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name = "TV Audio",
		.cmds_start = 0x10190,
		.ctrl_start = 0x106B0,
		.cdt = 0x10930,
		.fifo_start = 0x7000,
		.fifo_size = 0x1000,
		.ptr1_reg = DMA6_PTR1,
		.ptr2_reg = DMA6_PTR2,
		.cnt1_reg = DMA6_CNT1,
		.cnt2_reg = DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name = "ch8",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA7_PTR1,
		.ptr2_reg = DMA7_PTR2,
		.cnt1_reg = DMA7_CNT1,
		.cnt2_reg = DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name = "ch9",
		.cmds_start = 0x0,
		.ctrl_start = 0x0,
		.cdt = 0x0,
		.fifo_start = 0x0,
		.fifo_size = 0x0,
		.ptr1_reg = DMA8_PTR1,
		.ptr2_reg = DMA8_PTR2,
		.cnt1_reg = DMA8_CNT1,
		.cnt2_reg = DMA8_CNT2,
	},
};

/* Register @mask bits in the cached PCI irq mask (hardware untouched).
 * Serialised by pci_irqmask_lock. */
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

/* Register @mask bits in the cached mask AND enable them in PCI_INT_MSK. */
void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

/* Enable in hardware only those @mask bits previously registered via
 * cx23885_irq_add*() — unregistered bits are silently ignored. */
void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

/* Disable @mask bits in hardware; the cached mask is left untouched so
 * cx23885_irq_enable() can re-enable them later. */
void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

/* Drop @mask bits from the cached mask and disable them in hardware. */
void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

/* Read back the current hardware PCI interrupt mask. */
static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}

/*
 * Decode and print one RISC instruction (opcode in bits 31:28).
 * Returns the length of the instruction in dwords, or 1 for an
 * unknown opcode so callers always make forward progress.
 */
static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC >> 28] = "sync",
		[RISC_WRITE >> 28] = "write",
		[RISC_WRITEC >> 28] = "writec",
		[RISC_READ >> 28] = "read",
		[RISC_READC >> 28] = "readc",
		[RISC_JUMP >> 28] = "jump",
		[RISC_SKIP >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	/* instruction length in dwords, indexed by opcode */
	static int incr[16] = {
		[RISC_WRITE >> 28] = 3,
		[RISC_JUMP >> 28] = 3,
		[RISC_SKIP >> 28] = 1,
		[RISC_SYNC >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	/* names for flag bits 12..27, lowest bit first */
	static char *bits[] = {
		"12", "13", "14", "resync",
		"cnt0", "cnt1", "18", "19",
		"20", "21", "22", "23",
		"irq1", "irq2", "eol", "sol",
	};
	int i;

	printk(KERN_DEBUG "0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			pr_cont(" %s", bits[i]);
	pr_cont(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}

/*
 * Complete finished buffers on queue @q, at most five per call, by
 * comparing the hardware general-purpose counter @count (16 bit)
 * against the driver's running q->count.
 */
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;
	int count_delta;
	int max_buf_done = 5; /* service maximum five buffers */

	do {
		if (list_empty(&q->active))
			return;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, queue);

		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.sequence = q->count++;
		/* log loudly (level 1) on counter mismatch, quietly otherwise */
		if (count != (q->count % 65536)) {
			dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		} else {
			dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		}
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		max_buf_done--;
		/* count register is 16 bits so apply modulo appropriately */
		count_delta = ((int)count - (int)(q->count % 65536));
	} while ((count_delta > 0) && (max_buf_done > 0));
}

/*
 * Program one SRAM DMA channel: CDT entries, the CMDS block and the
 * DMA pointer/count registers. A channel whose cmds_start is zero is
 * erased instead (see the "chN" placeholder entries above).
 * @bpl is rounded up to an 8-byte multiple; lines are clamped to 6.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
	cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* initial jump instruction at SRAM offset 8 */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte descriptor per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* zero the remainder of the 80-byte CMDS block */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}

/* Dump the CMDS block, instruction queue and DMA pointer/count
 * registers of one SRAM channel — debugging aid only. */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	/* field names at CMDS offsets 0, 4, 8, ... */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	pr_warn("%s: %s - dma channel status dump\n",
		dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		pr_warn("%s: cmds: %-15s: 0x%08x\n",
			dev->name, name[i],
			cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		pr_warn("%s: risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	/* walk the 64-byte instruction queue, instruction by instruction */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		pr_warn("%s: (0x%08x) iq %x: ", dev->name,
			ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
				dev->name, i+j, risc, j);
		}
	}

	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	pr_warn("%s: ptr1_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr1_reg));
	pr_warn("%s: ptr2_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr2_reg));
	pr_warn("%s: cnt1_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt1_reg));
	pr_warn("%s: cnt2_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt2_reg));
}

/* Disassemble a RISC program that lives in host (coherent) memory,
 * stopping at the terminating RISC_JUMP. Debugging aid only. */
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
		dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		pr_info("%s: %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
				dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}

static void
cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
	uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
	uint32_t reg2_val = cx_read(TC_REQ_SET);

	if (reg1_val && reg2_val) {
		/* write the values back, then drain the DMA status regs */
		cx_write(TC_REQ, reg1_val);
		cx_write(TC_REQ_SET, reg2_val);
		cx_read(VID_B_DMA);
		cx_read(VBI_B_DMA);
		cx_read(VID_C_DMA);
		cx_read(VBI_C_DMA);

		dev_info(&dev->pci->dev,
			 "dma in progress detected 0x%08x 0x%08x, clearing\n",
			 reg1_val, reg2_val);
	}
}

/* Quiesce the chip: stop RISC/IR/video/audio/UART activity and mask
 * every interrupt source. */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}

/* Full bridge reset: shutdown, ack all pending interrupts, then
 * (re)program every SRAM channel with default line sizes. */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* write-1-to-clear all pending interrupt status */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	msleep(100);

	/* 720*4 = video line, 188*4 = four TS packets, 128 = placeholder */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
}


static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	return 0;
}

/* Claim BAR0; returns 0 on success, -EBUSY if the region is taken. */
static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci, 0),
			       pci_resource_len(dev->pci, 0),
			       dev->name))
		return 0;

	pr_err("%s: can't get MMIO memory @ 0x%llx\n",
	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));

	return -EBUSY;
}

/* Initialise one transport stream port (portno 1 = VID_B, 2 = VID_C):
 * common DMA settings plus the per-port register map. */
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val = 0x0;
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev
	= dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt = VID_B_GPCNT;
		port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
		port->reg_dma_ctl = VID_B_DMA_CTL;
		port->reg_lngth = VID_B_LNGTH;
		port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_B_GEN_CTL;
		port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
		port->reg_sop_status = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_B_VLD_MISC;
		port->reg_ts_clk_en = VID_B_TS_CLK_EN;
		port->reg_src_sel = VID_B_SRC_SEL;
		port->reg_ts_int_msk = VID_B_INT_MSK;
		port->reg_ts_int_stat = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt = VID_C_GPCNT;
		port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
		port->reg_dma_ctl = VID_C_DMA_CTL;
		port->reg_lngth = VID_C_LNGTH;
		port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_C_GEN_CTL;
		port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
		port->reg_sop_status = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_C_VLD_MISC;
		port->reg_ts_clk_en = VID_C_TS_CLK_EN;
		port->reg_src_sel = 0; /* VID_C has no source-select register */
		port->reg_ts_int_msk = VID_C_INT_MSK;
		port->reg_ts_int_stat = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}

/* Derive dev->hwrevision from the low byte of RDR_CFG2, distinguishing
 * 885/887/888 variants by PCI device id where the code overlaps. */
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		/* NOTE(review): dev->hwrevision is not assigned on this path,
		 * so the value printed here is whatever was set previously
		 * (likely 0) — consider printing the raw RDR_CFG2 byte. */
		pr_err("%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		pr_info("%s() Hardware revision = 0x%02x\n",
			__func__, dev->hwrevision);
	else
		pr_err("%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}

/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}

/* One-time device bring-up: locks, board detection, i2c buses, TS ports,
 * MMIO mapping, hardware reset and sub-driver registration. */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume an 888
		   default */
		dev->bridge = CX23885_BRIDGE_888;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 50000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: insmod "card" parameter wins, then subsystem id
	 * autodetection, else fall back to the unknown board */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	if (dev->pci->device == 0x8852) {
		/* no DIF on cx23885, so no analog tuner support possible */
		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
	}

	/* If the user specific a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
	    dev->pci->subsystem_device == 0x7137) {
		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
		 * with an 888, and a 25Mhz crystal, instead of the
		 * usual third overtone 50Mhz. The default clock rate must
		 * be overridden so the cx25840 is properly configured
		 */
		dev->clk_freq = 25000000;
	}

	dev->pci_bus = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
	    (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, tuner, standby);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * this gpios are pulled high. Make sure these GPIOs are marked
		 * as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}

/* Tear down one device: release BAR0, and once the last reference is
 * dropped unregister every sub-driver and unmap MMIO. */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if
	   (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* unregister i2c buses in reverse order of registration */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}

/*
 * Emit the RISC write program for one field into @rp, walking the DMA
 * scatterlist and splitting scan lines that cross chunk boundaries.
 * Every write instruction is 3 dwords (64-bit address form).
 * Returns a pointer just past the last emitted dword.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* skip whole sg chunks that the start offset jumps over */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* raise IRQ1 every @lpi lines when requested */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}

/*
 * Allocate and fill a RISC program for up to two interlaced fields
 * (top/bottom; pass UNSET to skip a field). Leaves risc->jmp pointing
 * at where the caller writes the terminating jump.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Padding
	   can cause next bpl to start close to a page border. First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need and extra dword */
	instructions = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	/* NOTE(review): pci_alloc_consistent() is the legacy PCI DMA API;
	 * newer trees use dma_alloc_coherent() — confirm the target kernel. */
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct cx23885_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords). Here
	   there is no padding and no sync.
First DMA region may be smaller 1244 than PAGE_SIZE */ 1245 /* Jump and write need an extra dword */ 1246 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines; 1247 instructions += 4; 1248 1249 risc->size = instructions * 12; 1250 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); 1251 if (risc->cpu == NULL) 1252 return -ENOMEM; 1253 1254 /* write risc instructions */ 1255 rp = risc->cpu; 1256 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, 1257 bpl, 0, lines, lpi, lpi == 0); 1258 1259 /* save pointer to jmp instruction address */ 1260 risc->jmp = rp; 1261 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); 1262 return 0; 1263 } 1264 1265 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc, 1266 struct scatterlist *sglist, unsigned int top_offset, 1267 unsigned int bottom_offset, unsigned int bpl, 1268 unsigned int padding, unsigned int lines) 1269 { 1270 u32 instructions, fields; 1271 __le32 *rp; 1272 1273 fields = 0; 1274 if (UNSET != top_offset) 1275 fields++; 1276 if (UNSET != bottom_offset) 1277 fields++; 1278 1279 /* estimate risc mem: worst case is one write per page border + 1280 one write per scan line + syncs + jump (all 2 dwords). Padding 1281 can cause next bpl to start close to a page border. 
First DMA 1282 region may be smaller than PAGE_SIZE */ 1283 /* write and jump need and extra dword */ 1284 instructions = fields * (1 + ((bpl + padding) * lines) 1285 / PAGE_SIZE + lines); 1286 instructions += 5; 1287 risc->size = instructions * 12; 1288 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma); 1289 if (risc->cpu == NULL) 1290 return -ENOMEM; 1291 /* write risc instructions */ 1292 rp = risc->cpu; 1293 1294 /* Sync to line 6, so US CC line 21 will appear in line '12' 1295 * in the userland vbi payload */ 1296 if (UNSET != top_offset) 1297 rp = cx23885_risc_field(rp, sglist, top_offset, 0, 1298 bpl, padding, lines, 0, true); 1299 1300 if (UNSET != bottom_offset) 1301 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200, 1302 bpl, padding, lines, 0, UNSET == top_offset); 1303 1304 1305 1306 /* save pointer to jmp instruction address */ 1307 risc->jmp = rp; 1308 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size); 1309 return 0; 1310 } 1311 1312 1313 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf) 1314 { 1315 struct cx23885_riscmem *risc = &buf->risc; 1316 1317 BUG_ON(in_interrupt()); 1318 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma); 1319 } 1320 1321 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port) 1322 { 1323 struct cx23885_dev *dev = port->dev; 1324 1325 dprintk(1, "%s() Register Dump\n", __func__); 1326 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__, 1327 cx_read(DEV_CNTRL2)); 1328 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__, 1329 cx23885_irq_get_mask(dev)); 1330 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__, 1331 cx_read(AUDIO_INT_INT_MSK)); 1332 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__, 1333 cx_read(AUD_INT_DMA_CTL)); 1334 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__, 1335 cx_read(AUDIO_EXT_INT_MSK)); 1336 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__, 1337 cx_read(AUD_EXT_DMA_CTL)); 1338 dprintk(1, "%s() PAD_CTRL 
0x%08X\n", __func__, 1339 cx_read(PAD_CTRL)); 1340 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__, 1341 cx_read(ALT_PIN_OUT_SEL)); 1342 dprintk(1, "%s() GPIO2 0x%08X\n", __func__, 1343 cx_read(GPIO2)); 1344 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__, 1345 port->reg_gpcnt, cx_read(port->reg_gpcnt)); 1346 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__, 1347 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl)); 1348 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__, 1349 port->reg_dma_ctl, cx_read(port->reg_dma_ctl)); 1350 if (port->reg_src_sel) 1351 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__, 1352 port->reg_src_sel, cx_read(port->reg_src_sel)); 1353 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__, 1354 port->reg_lngth, cx_read(port->reg_lngth)); 1355 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__, 1356 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl)); 1357 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__, 1358 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl)); 1359 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__, 1360 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status)); 1361 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__, 1362 port->reg_sop_status, cx_read(port->reg_sop_status)); 1363 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__, 1364 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat)); 1365 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__, 1366 port->reg_vld_misc, cx_read(port->reg_vld_misc)); 1367 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__, 1368 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en)); 1369 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__, 1370 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk)); 1371 dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__, 1372 port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat)); 1373 dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__, 1374 cx_read(PCI_INT_STAT)); 1375 dprintk(1, "%s() 
VID_B_INT_MSTAT 0x%08X\n", __func__, 1376 cx_read(VID_B_INT_MSTAT)); 1377 dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__, 1378 cx_read(VID_B_INT_SSTAT)); 1379 dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__, 1380 cx_read(VID_C_INT_MSTAT)); 1381 dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__, 1382 cx_read(VID_C_INT_SSTAT)); 1383 } 1384 1385 int cx23885_start_dma(struct cx23885_tsport *port, 1386 struct cx23885_dmaqueue *q, 1387 struct cx23885_buffer *buf) 1388 { 1389 struct cx23885_dev *dev = port->dev; 1390 u32 reg; 1391 1392 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__, 1393 dev->width, dev->height, dev->field); 1394 1395 /* clear dma in progress */ 1396 cx23885_clear_bridge_error(dev); 1397 1398 /* Stop the fifo and risc engine for this port */ 1399 cx_clear(port->reg_dma_ctl, port->dma_ctl_val); 1400 1401 /* setup fifo + format */ 1402 cx23885_sram_channel_setup(dev, 1403 &dev->sram_channels[port->sram_chno], 1404 port->ts_packet_size, buf->risc.dma); 1405 if (debug > 5) { 1406 cx23885_sram_channel_dump(dev, 1407 &dev->sram_channels[port->sram_chno]); 1408 cx23885_risc_disasm(port, &buf->risc); 1409 } 1410 1411 /* write TS length to chip */ 1412 cx_write(port->reg_lngth, port->ts_packet_size); 1413 1414 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) && 1415 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) { 1416 pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n", 1417 __func__, 1418 cx23885_boards[dev->board].portb, 1419 cx23885_boards[dev->board].portc); 1420 return -EINVAL; 1421 } 1422 1423 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) 1424 cx23885_av_clk(dev, 0); 1425 1426 udelay(100); 1427 1428 /* If the port supports SRC SELECT, configure it */ 1429 if (port->reg_src_sel) 1430 cx_write(port->reg_src_sel, port->src_sel_val); 1431 1432 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val); 1433 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val); 1434 cx_write(port->reg_vld_misc, 
port->vld_misc_val); 1435 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val); 1436 udelay(100); 1437 1438 /* NOTE: this is 2 (reserved) for portb, does it matter? */ 1439 /* reset counter to zero */ 1440 cx_write(port->reg_gpcnt_ctl, 3); 1441 q->count = 0; 1442 1443 /* Set VIDB pins to input */ 1444 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) { 1445 reg = cx_read(PAD_CTRL); 1446 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */ 1447 cx_write(PAD_CTRL, reg); 1448 } 1449 1450 /* Set VIDC pins to input */ 1451 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) { 1452 reg = cx_read(PAD_CTRL); 1453 reg &= ~0x4; /* Clear TS2_SOP_OE */ 1454 cx_write(PAD_CTRL, reg); 1455 } 1456 1457 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) { 1458 1459 reg = cx_read(PAD_CTRL); 1460 reg = reg & ~0x1; /* Clear TS1_OE */ 1461 1462 /* FIXME, bit 2 writing here is questionable */ 1463 /* set TS1_SOP_OE and TS1_OE_HI */ 1464 reg = reg | 0xa; 1465 cx_write(PAD_CTRL, reg); 1466 1467 /* Sets MOE_CLK_DIS to disable MoE clock */ 1468 /* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */ 1469 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011); 1470 1471 /* ALT_GPIO_ALT_SET: GPIO[0] 1472 * IR_ALT_TX_SEL: GPIO[1] 1473 * GPIO1_ALT_SEL: VIP_656_DATA[0] 1474 * GPIO0_ALT_SEL: VIP_656_CLK 1475 */ 1476 cx_write(ALT_PIN_OUT_SEL, 0x10100045); 1477 } 1478 1479 switch (dev->bridge) { 1480 case CX23885_BRIDGE_885: 1481 case CX23885_BRIDGE_887: 1482 case CX23885_BRIDGE_888: 1483 /* enable irqs */ 1484 dprintk(1, "%s() enabling TS int's and DMA\n", __func__); 1485 /* clear dma in progress */ 1486 cx23885_clear_bridge_error(dev); 1487 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val); 1488 cx_set(port->reg_dma_ctl, port->dma_ctl_val); 1489 1490 /* clear dma in progress */ 1491 cx23885_clear_bridge_error(dev); 1492 cx23885_irq_add(dev, port->pci_irqmask); 1493 cx23885_irq_enable_all(dev); 1494 1495 /* clear dma in progress */ 1496 cx23885_clear_bridge_error(dev); 1497 break; 
1498 default: 1499 BUG(); 1500 } 1501 1502 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */ 1503 /* clear dma in progress */ 1504 cx23885_clear_bridge_error(dev); 1505 1506 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) 1507 cx23885_av_clk(dev, 1); 1508 1509 if (debug > 4) 1510 cx23885_tsport_reg_dump(port); 1511 1512 cx23885_irq_get_mask(dev); 1513 1514 /* clear dma in progress */ 1515 cx23885_clear_bridge_error(dev); 1516 1517 return 0; 1518 } 1519 1520 static int cx23885_stop_dma(struct cx23885_tsport *port) 1521 { 1522 struct cx23885_dev *dev = port->dev; 1523 u32 reg; 1524 int delay = 0; 1525 uint32_t reg1_val; 1526 uint32_t reg2_val; 1527 1528 dprintk(1, "%s()\n", __func__); 1529 1530 /* Stop interrupts and DMA */ 1531 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val); 1532 cx_clear(port->reg_dma_ctl, port->dma_ctl_val); 1533 /* just in case wait for any dma to complete before allowing dealloc */ 1534 mdelay(20); 1535 for (delay = 0; delay < 100; delay++) { 1536 reg1_val = cx_read(TC_REQ); 1537 reg2_val = cx_read(TC_REQ_SET); 1538 if (reg1_val == 0 || reg2_val == 0) 1539 break; 1540 mdelay(1); 1541 } 1542 dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n", 1543 delay, reg1_val, reg2_val); 1544 1545 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) { 1546 reg = cx_read(PAD_CTRL); 1547 1548 /* Set TS1_OE */ 1549 reg = reg | 0x1; 1550 1551 /* clear TS1_SOP_OE and TS1_OE_HI */ 1552 reg = reg & ~0xa; 1553 cx_write(PAD_CTRL, reg); 1554 cx_write(port->reg_src_sel, 0); 1555 cx_write(port->reg_gen_ctrl, 8); 1556 } 1557 1558 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) 1559 cx23885_av_clk(dev, 0); 1560 1561 return 0; 1562 } 1563 1564 /* ------------------------------------------------------------------ */ 1565 1566 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port) 1567 { 1568 struct cx23885_dev *dev = port->dev; 1569 int size = port->ts_packet_size * port->ts_packet_count; 
1570 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0); 1571 1572 dprintk(1, "%s: %p\n", __func__, buf); 1573 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size) 1574 return -EINVAL; 1575 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); 1576 1577 cx23885_risc_databuffer(dev->pci, &buf->risc, 1578 sgt->sgl, 1579 port->ts_packet_size, port->ts_packet_count, 0); 1580 return 0; 1581 } 1582 1583 /* 1584 * The risc program for each buffer works as follows: it starts with a simple 1585 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the 1586 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping 1587 * the initial JUMP). 1588 * 1589 * This is the risc program of the first buffer to be queued if the active list 1590 * is empty and it just keeps DMAing this buffer without generating any 1591 * interrupts. 1592 * 1593 * If a new buffer is added then the initial JUMP in the code for that buffer 1594 * will generate an interrupt which signals that the previous buffer has been 1595 * DMAed successfully and that it can be returned to userspace. 1596 * 1597 * It also sets the final jump of the previous buffer to the start of the new 1598 * buffer, thus chaining the new buffer into the DMA chain. This is a single 1599 * atomic u32 write, so there is no race condition. 1600 * 1601 * The end-result of all this that you only get an interrupt when a buffer 1602 * is ready, so the control flow is very easy. 
1603 */ 1604 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf) 1605 { 1606 struct cx23885_buffer *prev; 1607 struct cx23885_dev *dev = port->dev; 1608 struct cx23885_dmaqueue *cx88q = &port->mpegq; 1609 unsigned long flags; 1610 1611 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12); 1612 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC); 1613 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12); 1614 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */ 1615 1616 spin_lock_irqsave(&dev->slock, flags); 1617 if (list_empty(&cx88q->active)) { 1618 list_add_tail(&buf->queue, &cx88q->active); 1619 dprintk(1, "[%p/%d] %s - first active\n", 1620 buf, buf->vb.vb2_buf.index, __func__); 1621 } else { 1622 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); 1623 prev = list_entry(cx88q->active.prev, struct cx23885_buffer, 1624 queue); 1625 list_add_tail(&buf->queue, &cx88q->active); 1626 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 1627 dprintk(1, "[%p/%d] %s - append to active\n", 1628 buf, buf->vb.vb2_buf.index, __func__); 1629 } 1630 spin_unlock_irqrestore(&dev->slock, flags); 1631 } 1632 1633 /* ----------------------------------------------------------- */ 1634 1635 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason) 1636 { 1637 struct cx23885_dmaqueue *q = &port->mpegq; 1638 struct cx23885_buffer *buf; 1639 unsigned long flags; 1640 1641 spin_lock_irqsave(&port->slock, flags); 1642 while (!list_empty(&q->active)) { 1643 buf = list_entry(q->active.next, struct cx23885_buffer, 1644 queue); 1645 list_del(&buf->queue); 1646 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); 1647 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n", 1648 buf, buf->vb.vb2_buf.index, reason, 1649 (unsigned long)buf->risc.dma); 1650 } 1651 spin_unlock_irqrestore(&port->slock, flags); 1652 } 1653 1654 void cx23885_cancel_buffers(struct cx23885_tsport *port) 1655 { 1656 dprintk(1, "%s()\n", __func__); 1657 cx23885_stop_dma(port); 1658 
do_cancel_buffers(port, "cancel"); 1659 } 1660 1661 int cx23885_irq_417(struct cx23885_dev *dev, u32 status) 1662 { 1663 /* FIXME: port1 assumption here. */ 1664 struct cx23885_tsport *port = &dev->ts1; 1665 int count = 0; 1666 int handled = 0; 1667 1668 if (status == 0) 1669 return handled; 1670 1671 count = cx_read(port->reg_gpcnt); 1672 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n", 1673 status, cx_read(port->reg_ts_int_msk), count); 1674 1675 if ((status & VID_B_MSK_BAD_PKT) || 1676 (status & VID_B_MSK_OPC_ERR) || 1677 (status & VID_B_MSK_VBI_OPC_ERR) || 1678 (status & VID_B_MSK_SYNC) || 1679 (status & VID_B_MSK_VBI_SYNC) || 1680 (status & VID_B_MSK_OF) || 1681 (status & VID_B_MSK_VBI_OF)) { 1682 pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n", 1683 dev->name, status); 1684 if (status & VID_B_MSK_BAD_PKT) 1685 dprintk(1, " VID_B_MSK_BAD_PKT\n"); 1686 if (status & VID_B_MSK_OPC_ERR) 1687 dprintk(1, " VID_B_MSK_OPC_ERR\n"); 1688 if (status & VID_B_MSK_VBI_OPC_ERR) 1689 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n"); 1690 if (status & VID_B_MSK_SYNC) 1691 dprintk(1, " VID_B_MSK_SYNC\n"); 1692 if (status & VID_B_MSK_VBI_SYNC) 1693 dprintk(1, " VID_B_MSK_VBI_SYNC\n"); 1694 if (status & VID_B_MSK_OF) 1695 dprintk(1, " VID_B_MSK_OF\n"); 1696 if (status & VID_B_MSK_VBI_OF) 1697 dprintk(1, " VID_B_MSK_VBI_OF\n"); 1698 1699 cx_clear(port->reg_dma_ctl, port->dma_ctl_val); 1700 cx23885_sram_channel_dump(dev, 1701 &dev->sram_channels[port->sram_chno]); 1702 cx23885_417_check_encoder(dev); 1703 } else if (status & VID_B_MSK_RISCI1) { 1704 dprintk(7, " VID_B_MSK_RISCI1\n"); 1705 spin_lock(&port->slock); 1706 cx23885_wakeup(port, &port->mpegq, count); 1707 spin_unlock(&port->slock); 1708 } 1709 if (status) { 1710 cx_write(port->reg_ts_int_stat, status); 1711 handled = 1; 1712 } 1713 1714 return handled; 1715 } 1716 1717 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status) 1718 { 1719 struct cx23885_dev *dev = port->dev; 1720 int handled = 0; 1721 u32 
count; 1722 1723 if ((status & VID_BC_MSK_OPC_ERR) || 1724 (status & VID_BC_MSK_BAD_PKT) || 1725 (status & VID_BC_MSK_SYNC) || 1726 (status & VID_BC_MSK_OF)) { 1727 1728 if (status & VID_BC_MSK_OPC_ERR) 1729 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n", 1730 VID_BC_MSK_OPC_ERR); 1731 1732 if (status & VID_BC_MSK_BAD_PKT) 1733 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n", 1734 VID_BC_MSK_BAD_PKT); 1735 1736 if (status & VID_BC_MSK_SYNC) 1737 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n", 1738 VID_BC_MSK_SYNC); 1739 1740 if (status & VID_BC_MSK_OF) 1741 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n", 1742 VID_BC_MSK_OF); 1743 1744 pr_err("%s: mpeg risc op code error\n", dev->name); 1745 1746 cx_clear(port->reg_dma_ctl, port->dma_ctl_val); 1747 cx23885_sram_channel_dump(dev, 1748 &dev->sram_channels[port->sram_chno]); 1749 1750 } else if (status & VID_BC_MSK_RISCI1) { 1751 1752 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1); 1753 1754 spin_lock(&port->slock); 1755 count = cx_read(port->reg_gpcnt); 1756 cx23885_wakeup(port, &port->mpegq, count); 1757 spin_unlock(&port->slock); 1758 1759 } 1760 if (status) { 1761 cx_write(port->reg_ts_int_stat, status); 1762 handled = 1; 1763 } 1764 1765 return handled; 1766 } 1767 1768 static irqreturn_t cx23885_irq(int irq, void *dev_id) 1769 { 1770 struct cx23885_dev *dev = dev_id; 1771 struct cx23885_tsport *ts1 = &dev->ts1; 1772 struct cx23885_tsport *ts2 = &dev->ts2; 1773 u32 pci_status, pci_mask; 1774 u32 vida_status, vida_mask; 1775 u32 audint_status, audint_mask; 1776 u32 ts1_status, ts1_mask; 1777 u32 ts2_status, ts2_mask; 1778 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0; 1779 int audint_count = 0; 1780 bool subdev_handled; 1781 1782 pci_status = cx_read(PCI_INT_STAT); 1783 pci_mask = cx23885_irq_get_mask(dev); 1784 if ((pci_status & pci_mask) == 0) { 1785 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n", 1786 pci_status, pci_mask); 1787 goto out; 1788 } 1789 1790 vida_status = cx_read(VID_A_INT_STAT); 1791 vida_mask 
= cx_read(VID_A_INT_MSK); 1792 audint_status = cx_read(AUDIO_INT_INT_STAT); 1793 audint_mask = cx_read(AUDIO_INT_INT_MSK); 1794 ts1_status = cx_read(VID_B_INT_STAT); 1795 ts1_mask = cx_read(VID_B_INT_MSK); 1796 ts2_status = cx_read(VID_C_INT_STAT); 1797 ts2_mask = cx_read(VID_C_INT_MSK); 1798 1799 if (((pci_status & pci_mask) == 0) && 1800 ((ts2_status & ts2_mask) == 0) && 1801 ((ts1_status & ts1_mask) == 0)) 1802 goto out; 1803 1804 vida_count = cx_read(VID_A_GPCNT); 1805 audint_count = cx_read(AUD_INT_A_GPCNT); 1806 ts1_count = cx_read(ts1->reg_gpcnt); 1807 ts2_count = cx_read(ts2->reg_gpcnt); 1808 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n", 1809 pci_status, pci_mask); 1810 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n", 1811 vida_status, vida_mask, vida_count); 1812 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n", 1813 audint_status, audint_mask, audint_count); 1814 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n", 1815 ts1_status, ts1_mask, ts1_count); 1816 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n", 1817 ts2_status, ts2_mask, ts2_count); 1818 1819 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR | 1820 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA | 1821 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A | 1822 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT | 1823 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 | 1824 PCI_MSK_AV_CORE | PCI_MSK_IR)) { 1825 1826 if (pci_status & PCI_MSK_RISC_RD) 1827 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n", 1828 PCI_MSK_RISC_RD); 1829 1830 if (pci_status & PCI_MSK_RISC_WR) 1831 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n", 1832 PCI_MSK_RISC_WR); 1833 1834 if (pci_status & PCI_MSK_AL_RD) 1835 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n", 1836 PCI_MSK_AL_RD); 1837 1838 if (pci_status & PCI_MSK_AL_WR) 1839 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n", 1840 PCI_MSK_AL_WR); 1841 1842 if (pci_status & PCI_MSK_APB_DMA) 1843 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n", 1844 PCI_MSK_APB_DMA); 1845 1846 if 
(pci_status & PCI_MSK_VID_C) 1847 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n", 1848 PCI_MSK_VID_C); 1849 1850 if (pci_status & PCI_MSK_VID_B) 1851 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n", 1852 PCI_MSK_VID_B); 1853 1854 if (pci_status & PCI_MSK_VID_A) 1855 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n", 1856 PCI_MSK_VID_A); 1857 1858 if (pci_status & PCI_MSK_AUD_INT) 1859 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n", 1860 PCI_MSK_AUD_INT); 1861 1862 if (pci_status & PCI_MSK_AUD_EXT) 1863 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n", 1864 PCI_MSK_AUD_EXT); 1865 1866 if (pci_status & PCI_MSK_GPIO0) 1867 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n", 1868 PCI_MSK_GPIO0); 1869 1870 if (pci_status & PCI_MSK_GPIO1) 1871 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n", 1872 PCI_MSK_GPIO1); 1873 1874 if (pci_status & PCI_MSK_AV_CORE) 1875 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n", 1876 PCI_MSK_AV_CORE); 1877 1878 if (pci_status & PCI_MSK_IR) 1879 dprintk(7, " (PCI_MSK_IR 0x%08x)\n", 1880 PCI_MSK_IR); 1881 } 1882 1883 if (cx23885_boards[dev->board].ci_type == 1 && 1884 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0))) 1885 handled += netup_ci_slot_status(dev, pci_status); 1886 1887 if (cx23885_boards[dev->board].ci_type == 2 && 1888 (pci_status & PCI_MSK_GPIO0)) 1889 handled += altera_ci_irq(dev); 1890 1891 if (ts1_status) { 1892 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) 1893 handled += cx23885_irq_ts(ts1, ts1_status); 1894 else 1895 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) 1896 handled += cx23885_irq_417(dev, ts1_status); 1897 } 1898 1899 if (ts2_status) { 1900 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) 1901 handled += cx23885_irq_ts(ts2, ts2_status); 1902 else 1903 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) 1904 handled += cx23885_irq_417(dev, ts2_status); 1905 } 1906 1907 if (vida_status) 1908 handled += cx23885_video_irq(dev, vida_status); 1909 1910 if (audint_status) 1911 handled += cx23885_audio_irq(dev, audint_status, audint_mask); 1912 
1913 if (pci_status & PCI_MSK_IR) { 1914 subdev_handled = false; 1915 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine, 1916 pci_status, &subdev_handled); 1917 if (subdev_handled) 1918 handled++; 1919 } 1920 1921 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) { 1922 cx23885_irq_disable(dev, PCI_MSK_AV_CORE); 1923 schedule_work(&dev->cx25840_work); 1924 handled++; 1925 } 1926 1927 if (handled) 1928 cx_write(PCI_INT_STAT, pci_status & pci_mask); 1929 out: 1930 return IRQ_RETVAL(handled); 1931 } 1932 1933 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd, 1934 unsigned int notification, void *arg) 1935 { 1936 struct cx23885_dev *dev; 1937 1938 if (sd == NULL) 1939 return; 1940 1941 dev = to_cx23885(sd->v4l2_dev); 1942 1943 switch (notification) { 1944 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */ 1945 if (sd == dev->sd_ir) 1946 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg); 1947 break; 1948 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */ 1949 if (sd == dev->sd_ir) 1950 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg); 1951 break; 1952 } 1953 } 1954 1955 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev) 1956 { 1957 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler); 1958 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler); 1959 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler); 1960 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify; 1961 } 1962 1963 static inline int encoder_on_portb(struct cx23885_dev *dev) 1964 { 1965 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER; 1966 } 1967 1968 static inline int encoder_on_portc(struct cx23885_dev *dev) 1969 { 1970 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER; 1971 } 1972 1973 /* Mask represents 32 different GPIOs, GPIO's are split into multiple 1974 * registers depending on the board configuration (and whether the 1975 * 417 encoder (wi it's own GPIO's) are present. 
Each GPIO bit will 1976 * be pushed into the correct hardware register, regardless of the 1977 * physical location. Certain registers are shared so we sanity check 1978 * and report errors if we think we're tampering with a GPIo that might 1979 * be assigned to the encoder (and used for the host bus). 1980 * 1981 * GPIO 2 thru 0 - On the cx23885 bridge 1982 * GPIO 18 thru 3 - On the cx23417 host bus interface 1983 * GPIO 23 thru 19 - On the cx25840 a/v core 1984 */ 1985 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask) 1986 { 1987 if (mask & 0x7) 1988 cx_set(GP0_IO, mask & 0x7); 1989 1990 if (mask & 0x0007fff8) { 1991 if (encoder_on_portb(dev) || encoder_on_portc(dev)) 1992 pr_err("%s: Setting GPIO on encoder ports\n", 1993 dev->name); 1994 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3); 1995 } 1996 1997 /* TODO: 23-19 */ 1998 if (mask & 0x00f80000) 1999 pr_info("%s: Unsupported\n", dev->name); 2000 } 2001 2002 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask) 2003 { 2004 if (mask & 0x00000007) 2005 cx_clear(GP0_IO, mask & 0x7); 2006 2007 if (mask & 0x0007fff8) { 2008 if (encoder_on_portb(dev) || encoder_on_portc(dev)) 2009 pr_err("%s: Clearing GPIO moving on encoder ports\n", 2010 dev->name); 2011 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3); 2012 } 2013 2014 /* TODO: 23-19 */ 2015 if (mask & 0x00f80000) 2016 pr_info("%s: Unsupported\n", dev->name); 2017 } 2018 2019 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask) 2020 { 2021 if (mask & 0x00000007) 2022 return (cx_read(GP0_IO) >> 8) & mask & 0x7; 2023 2024 if (mask & 0x0007fff8) { 2025 if (encoder_on_portb(dev) || encoder_on_portc(dev)) 2026 pr_err("%s: Reading GPIO moving on encoder ports\n", 2027 dev->name); 2028 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3; 2029 } 2030 2031 /* TODO: 23-19 */ 2032 if (mask & 0x00f80000) 2033 pr_info("%s: Unsupported\n", dev->name); 2034 2035 return 0; 2036 } 2037 2038 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput) 2039 { 
2040 if ((mask & 0x00000007) && asoutput) 2041 cx_set(GP0_IO, (mask & 0x7) << 16); 2042 else if ((mask & 0x00000007) && !asoutput) 2043 cx_clear(GP0_IO, (mask & 0x7) << 16); 2044 2045 if (mask & 0x0007fff8) { 2046 if (encoder_on_portb(dev) || encoder_on_portc(dev)) 2047 pr_err("%s: Enabling GPIO on encoder ports\n", 2048 dev->name); 2049 } 2050 2051 /* MC417_OEN is active low for output, write 1 for an input */ 2052 if ((mask & 0x0007fff8) && asoutput) 2053 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3); 2054 2055 else if ((mask & 0x0007fff8) && !asoutput) 2056 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3); 2057 2058 /* TODO: 23-19 */ 2059 } 2060 2061 static int cx23885_initdev(struct pci_dev *pci_dev, 2062 const struct pci_device_id *pci_id) 2063 { 2064 struct cx23885_dev *dev; 2065 struct v4l2_ctrl_handler *hdl; 2066 int err; 2067 2068 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2069 if (NULL == dev) 2070 return -ENOMEM; 2071 2072 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev); 2073 if (err < 0) 2074 goto fail_free; 2075 2076 hdl = &dev->ctrl_handler; 2077 v4l2_ctrl_handler_init(hdl, 6); 2078 if (hdl->error) { 2079 err = hdl->error; 2080 goto fail_ctrl; 2081 } 2082 dev->v4l2_dev.ctrl_handler = hdl; 2083 2084 /* Prepare to handle notifications from subdevices */ 2085 cx23885_v4l2_dev_notify_init(dev); 2086 2087 /* pci init */ 2088 dev->pci = pci_dev; 2089 if (pci_enable_device(pci_dev)) { 2090 err = -EIO; 2091 goto fail_ctrl; 2092 } 2093 2094 if (cx23885_dev_setup(dev) < 0) { 2095 err = -EINVAL; 2096 goto fail_ctrl; 2097 } 2098 2099 /* print pci info */ 2100 dev->pci_rev = pci_dev->revision; 2101 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat); 2102 pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n", 2103 dev->name, 2104 pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 2105 dev->pci_lat, 2106 (unsigned long long)pci_resource_start(pci_dev, 0)); 2107 2108 pci_set_master(pci_dev); 2109 err = pci_set_dma_mask(pci_dev, 0xffffffff); 
2110 if (err) { 2111 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 2112 goto fail_ctrl; 2113 } 2114 2115 err = request_irq(pci_dev->irq, cx23885_irq, 2116 IRQF_SHARED, dev->name, dev); 2117 if (err < 0) { 2118 pr_err("%s: can't get IRQ %d\n", 2119 dev->name, pci_dev->irq); 2120 goto fail_irq; 2121 } 2122 2123 switch (dev->board) { 2124 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI: 2125 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0); 2126 break; 2127 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: 2128 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0); 2129 break; 2130 } 2131 2132 /* 2133 * The CX2388[58] IR controller can start firing interrupts when 2134 * enabled, so these have to take place after the cx23885_irq() handler 2135 * is hooked up by the call to request_irq() above. 2136 */ 2137 cx23885_ir_pci_int_enable(dev); 2138 cx23885_input_init(dev); 2139 2140 return 0; 2141 2142 fail_irq: 2143 cx23885_dev_unregister(dev); 2144 fail_ctrl: 2145 v4l2_ctrl_handler_free(hdl); 2146 v4l2_device_unregister(&dev->v4l2_dev); 2147 fail_free: 2148 kfree(dev); 2149 return err; 2150 } 2151 2152 static void cx23885_finidev(struct pci_dev *pci_dev) 2153 { 2154 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev); 2155 struct cx23885_dev *dev = to_cx23885(v4l2_dev); 2156 2157 cx23885_input_fini(dev); 2158 cx23885_ir_fini(dev); 2159 2160 cx23885_shutdown(dev); 2161 2162 /* unregister stuff */ 2163 free_irq(pci_dev->irq, dev); 2164 2165 pci_disable_device(pci_dev); 2166 2167 cx23885_dev_unregister(dev); 2168 v4l2_ctrl_handler_free(&dev->ctrl_handler); 2169 v4l2_device_unregister(v4l2_dev); 2170 kfree(dev); 2171 } 2172 2173 static const struct pci_device_id cx23885_pci_tbl[] = { 2174 { 2175 /* CX23885 */ 2176 .vendor = 0x14f1, 2177 .device = 0x8852, 2178 .subvendor = PCI_ANY_ID, 2179 .subdevice = PCI_ANY_ID, 2180 }, { 2181 /* CX23887 Rev 2 */ 2182 .vendor = 0x14f1, 2183 .device = 0x8880, 2184 .subvendor = PCI_ANY_ID, 2185 .subdevice = PCI_ANY_ID, 2186 }, { 2187 /* --- 
end of list --- */ 2188 } 2189 }; 2190 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl); 2191 2192 static struct pci_driver cx23885_pci_driver = { 2193 .name = "cx23885", 2194 .id_table = cx23885_pci_tbl, 2195 .probe = cx23885_initdev, 2196 .remove = cx23885_finidev, 2197 /* TODO */ 2198 .suspend = NULL, 2199 .resume = NULL, 2200 }; 2201 2202 static int __init cx23885_init(void) 2203 { 2204 pr_info("cx23885 driver version %s loaded\n", 2205 CX23885_VERSION); 2206 return pci_register_driver(&cx23885_pci_driver); 2207 } 2208 2209 static void __exit cx23885_fini(void) 2210 { 2211 pci_unregister_driver(&cx23885_pci_driver); 2212 } 2213 2214 module_init(cx23885_init); 2215 module_exit(cx23885_fini); 2216