/*
 *
 * hfcpci.c     low level driver for CCD's hfc-pci based cards
 *
 * Author       Werner Cornelius (werner@isdn4linux.de)
 *              based on existing driver for CCD hfc ISA cards
 *              type approval valid for HFC-S PCI A based card
 *
 * Copyright 1999  by Werner Cornelius (werner@isdn-development.de)
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module options:
 *
 * debug:
 *	NOTE: only one debug value must be given for all cards
 *	See hfc_pci.h for debug flags.
 *
 * poll:
 *	NOTE: only one poll value must be given for all cards
 *	Give the number of samples for each fifo process.
 *	By default 128 is used. Decrease to reduce delay, increase to
 *	reduce cpu load. If unsure, don't mess with it!
 *	A value of 128 will use the controller's interrupt. Other values
 *	will use the kernel timer, because the controller will not allow
 *	lower values than 128.
 *	Also note that the value depends on the kernel timer frequency.
 *	If the kernel uses a frequency of 1000 Hz, steps of 8 samples
 *	are possible.
 *	If the kernel uses 100 Hz, steps of 80 samples are possible.
 *	If the kernel uses 300 Hz, steps of about 26 samples are possible.
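 *	(Illustrative example, not part of the original notes: with a
 *	250 Hz kernel the timer fires every 4 ms, which is 32 samples at
 *	the 8 kHz S/T sample rate, so poll is effectively adjustable in
 *	steps of 32 there.)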
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>

#include "hfc_pci.h"

static const char *hfcpci_revision = "2.0";

static int HFC_cnt;
static uint debug;
static uint poll, tics;
static struct timer_list hfc_tl;
static unsigned long hfc_jiffies;

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
module_param(poll, uint, S_IRUGO | S_IWUSR);

enum {
	HFC_CCD_2BD0,
	HFC_CCD_B000,
	HFC_CCD_B006,
	HFC_CCD_B007,
	HFC_CCD_B008,
	HFC_CCD_B009,
	HFC_CCD_B00A,
	HFC_CCD_B00B,
	HFC_CCD_B00C,
	HFC_CCD_B100,
	HFC_CCD_B700,
	HFC_CCD_B701,
	HFC_ASUS_0675,
	HFC_BERKOM_A1T,
	HFC_BERKOM_TCONCEPT,
	HFC_ANIGMA_MC145575,
	HFC_ZOLTRIX_2BD0,
	HFC_DIGI_DF_M_IOM2_E,
	HFC_DIGI_DF_M_E,
	HFC_DIGI_DF_M_IOM2_A,
	HFC_DIGI_DF_M_A,
	HFC_ABOCOM_2BD1,
	HFC_SITECOM_DC105V2,
};

struct hfcPCI_hw {
	unsigned char		cirm;
	unsigned char		ctmt;
	unsigned char		clkdel;
	unsigned char		states;
	unsigned char		conn;
	unsigned char		mst_m;
	unsigned char		int_m1;
	unsigned char		int_m2;
	unsigned char		sctrl;
	unsigned char		sctrl_r;
	unsigned char		sctrl_e;
	unsigned char		trm;
	unsigned char		fifo_en;
	unsigned char		bswapped;
	unsigned char		protocol;
	int			nt_timer;
	unsigned char __iomem	*pci_io;	/* start of PCI IO memory */
	dma_addr_t		dmahandle;
	void			*fifos;		/* FIFO memory */
	int			last_bfifo_cnt[2];
		/* marker saving last b-fifo frame count */
	struct timer_list	timer;
};

#define HFC_CFG_MASTER		1
#define HFC_CFG_SLAVE		2
#define HFC_CFG_PCM		3
#define HFC_CFG_2HFC		4
#define HFC_CFG_SLAVEHFC	5
#define HFC_CFG_NEG_F0		6
#define HFC_CFG_SW_DD_DU	7

#define FLG_HFC_TIMER_T1	16
#define FLG_HFC_TIMER_T3	17

#define NT_T1_COUNT	1120	/* number of 3.125ms interrupts (3.5s) */
#define NT_T3_COUNT	31	/* number of 3.125ms interrupts (97 ms) */
#define CLKDEL_TE	0x0e	/* CLKDEL in TE mode */
#define CLKDEL_NT	0x6c	/* CLKDEL in NT mode */


struct hfc_pci {
	u_char			subtype;
	u_char			chanlimit;
	u_char			initdone;
	u_long			cfg;
	u_int			irq;
	u_int			irqcnt;
	struct pci_dev		*pdev;
	struct hfcPCI_hw	hw;
	spinlock_t		lock;	/* card lock */
	struct dchannel		dch;
	struct bchannel		bch[2];
};

/* Interface functions */
static void
enable_hwirq(struct hfc_pci *hc)
{
	hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
	Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
}

static void
disable_hwirq(struct hfc_pci *hc)
{
	hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
	Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
}

/*
 * free hardware resources used by driver
 */
static void
release_io_hfcpci(struct hfc_pci *hc)
{
	/* disable memory mapped ports + busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
	del_timer(&hc->hw.timer);
	pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
	iounmap(hc->hw.pci_io);
}

/*
 * set mode (NT or TE)
 */
static void
hfcpci_setmode(struct hfc_pci *hc)
{
	if (hc->hw.protocol == ISDN_P_NT_S0) {
		hc->hw.clkdel = CLKDEL_NT;	/* ST-Bit delay for NT-Mode */
		hc->hw.sctrl |= SCTRL_MODE_NT;	/* NT-MODE */
		hc->hw.states = 1;		/* G1 */
	} else {
		hc->hw.clkdel = CLKDEL_TE;	/* ST-Bit delay for TE-Mode */
		hc->hw.sctrl &= ~SCTRL_MODE_NT;	/* TE-MODE */
		hc->hw.states = 2;		/* F2 */
	}
	Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
	Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
	udelay(10);
	Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
	Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
}

/*
 * function called to reset the HFC PCI chip. A complete software reset of
 * chip and fifos is done.
 */
static void
reset_hfcpci(struct hfc_pci *hc)
{
	u_char	val;
	int	cnt = 0;

	printk(KERN_DEBUG "reset_hfcpci: entered\n");
	val = Read_hfc(hc, HFCPCI_CHIP_ID);
	printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
	/* enable memory mapped ports, disable busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
	disable_hwirq(hc);
	/* enable memory ports + busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND,
	    PCI_ENA_MEMIO + PCI_ENA_MASTER);
	val = Read_hfc(hc, HFCPCI_STATUS);
	printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
	hc->hw.cirm = HFCPCI_RESET;	/* Reset On */
	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
	set_current_state(TASK_UNINTERRUPTIBLE);
	mdelay(10);			/* Timeout 10ms */
	hc->hw.cirm = 0;		/* Reset Off */
	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
	val = Read_hfc(hc, HFCPCI_STATUS);
	printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
	while (cnt < 50000) { /* max 50000 us */
		udelay(5);
		cnt += 5;
		val = Read_hfc(hc, HFCPCI_STATUS);
		if (!(val & 2))
			break;
	}
	printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);

	hc->hw.fifo_en = 0x30;	/* only D fifos enabled */

	hc->hw.bswapped = 0;	/* no exchange */
	hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
	hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
	hc->hw.sctrl = 0x40;	/* set tx_lo mode, error in datasheet ! */
	hc->hw.sctrl_r = 0;
	hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE;	/* S/T Auto awake */
	hc->hw.mst_m = 0;
	if (test_bit(HFC_CFG_MASTER, &hc->cfg))
		hc->hw.mst_m |= HFCPCI_MASTER;	/* HFC Master Mode */
	if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
		hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
	Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
	Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
	Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);

	hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
	    HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);

	/* Clear already pending ints */
	val = Read_hfc(hc, HFCPCI_INT_S1);

	/* set NT/TE mode */
	hfcpci_setmode(hc);

	Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
	Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);

	/*
	 * Init GCI/IOM2 in master mode
	 * Slots 0 and 1 are set for B-chan 1 and 2
	 * D- and monitor/CI channel are not enabled
	 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
	 * STIO2 is used as data input, B1+B2 from IOM->ST
	 * ST B-channel send disabled -> continuous 1s
	 * The IOM slots are always enabled
	 */
	if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
		/* set data flow directions: connect B1,B2: HFC to/from PCM */
		hc->hw.conn = 0x09;
	} else {
		hc->hw.conn = 0x36;	/* set data flow directions */
		if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
			Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
			Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
			Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
			Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
		} else {
			Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
			Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
			Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
			Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
		}
	}
	Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
	val = Read_hfc(hc, HFCPCI_INT_S2);
}

/*
 * Timer function called when kernel timer expires
 */
static void
hfcpci_Timer(struct hfc_pci *hc)
{
	hc->hw.timer.expires = jiffies + 75;
	/* WD RESET */
	/*
	 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
	 * add_timer(&hc->hw.timer);
	 */
}


/*
 * select a b-channel entry matching and active
 */
static struct bchannel *
Sel_BCS(struct hfc_pci *hc, int channel)
{
	if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
	    (hc->bch[0].nr & channel))
		return &hc->bch[0];
	else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
	    (hc->bch[1].nr & channel))
		return &hc->bch[1];
	else
		return NULL;
}

/*
 * clear the desired B-channel rx fifo
 */
static void
hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
{
	u_char		fifo_state;
	struct bzfifo	*bzr;

	if (fifo) {
		bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
	} else {
		bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
	}
	if (fifo_state)
		hc->hw.fifo_en ^= fifo_state;
	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
	hc->hw.last_bfifo_cnt[fifo] = 0;
	bzr->f1 = MAX_B_FRAMES;
	bzr->f2 = bzr->f1;	/* init F pointers to remain constant */
	bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
	bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
	    le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
	if (fifo_state)
		hc->hw.fifo_en |= fifo_state;
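	/* re-enable the FIFO, if it was enabled before, now that the
	 * F/Z counters have been reset */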
	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
}

/*
 * clear the desired B-channel tx fifo
 */
static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
{
	u_char		fifo_state;
	struct bzfifo	*bzt;

	if (fifo) {
		bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
	} else {
		bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
	}
	if (fifo_state)
		hc->hw.fifo_en ^= fifo_state;
	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
	if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
		printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
		    "z1(%x) z2(%x) state(%x)\n",
		    fifo, bzt->f1, bzt->f2,
		    le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
		    le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
		    fifo_state);
	bzt->f2 = MAX_B_FRAMES;
	bzt->f1 = bzt->f2;	/* init F pointers to remain constant */
	bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
	bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
	if (fifo_state)
		hc->hw.fifo_en |= fifo_state;
	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
	if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
		printk(KERN_DEBUG
		    "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
		    fifo, bzt->f1, bzt->f2,
		    le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
		    le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
}

/*
 * read a complete B-frame out of the buffer
 */
static void
hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
    u_char *bdata, int count)
{
	u_char		*ptr, *ptr1, new_f2;
	int		total, maxlen, new_z2;
	struct zt	*zp;

	if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
		printk(KERN_DEBUG "hfcpci_empty_fifo\n");
	zp = &bz->za[bz->f2];	/* point to Z-Regs */
	new_z2 = le16_to_cpu(zp->z2) + count;	/* new position in fifo */
	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
	new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
	if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
	    (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
		if (bch->debug & DEBUG_HW)
			printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
			    "invalid length %d or crc\n", count);
#ifdef ERROR_STATISTIC
		bch->err_inv++;
#endif
		bz->za[new_f2].z2 = cpu_to_le16(new_z2);
		bz->f2 = new_f2;	/* next buffer */
	} else {
		bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
		if (!bch->rx_skb) {
			printk(KERN_WARNING "HFCPCI: receive out of memory\n");
			return;
		}
		total = count;
		count -= 3;
		ptr = skb_put(bch->rx_skb, count);

		if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
			maxlen = count;		/* complete transfer */
		else
			maxlen = B_FIFO_SIZE + B_SUB_VAL -
			    le16_to_cpu(zp->z2);	/* maximum */

		ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
		    /* start of data */
		memcpy(ptr, ptr1, maxlen);	/* copy data */
		count -= maxlen;

		if (count) { /* rest remaining */
			ptr += maxlen;
			ptr1 = bdata;	/* start of buffer */
			memcpy(ptr, ptr1, count);	/* rest */
		}
		bz->za[new_f2].z2 = cpu_to_le16(new_z2);
		bz->f2 = new_f2;	/* next buffer */
		recv_Bchannel(bch, MISDN_ID_ANY);
	}
}

/*
 * D-channel receive procedure
 */
static int
receive_dmsg(struct hfc_pci *hc)
{
	struct dchannel	*dch = &hc->dch;
	int		maxlen;
	int		rcnt, total;
	int		count = 5;
	u_char		*ptr, *ptr1;
	struct dfifo	*df;
	struct zt	*zp;

	df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
	while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
		zp = &df->za[df->f2 & D_FREG_MASK];
		rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
		if (rcnt < 0)
			rcnt += D_FIFO_SIZE;
		rcnt++;
		if (dch->debug & DEBUG_HW_DCHANNEL)
			printk(KERN_DEBUG
			    "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
			    df->f1, df->f2,
			    le16_to_cpu(zp->z1),
			    le16_to_cpu(zp->z2),
			    rcnt);

		if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
		    (df->data[le16_to_cpu(zp->z1)])) {
			if (dch->debug & DEBUG_HW)
				printk(KERN_DEBUG
				    "empty_fifo hfcpci packet invalid len "
				    "%d or crc %d\n",
				    rcnt,
				    df->data[le16_to_cpu(zp->z1)]);
#ifdef ERROR_STATISTIC
			cs->err_rx++;
#endif
			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
			    (MAX_D_FRAMES + 1);	/* next buffer */
			df->za[df->f2 & D_FREG_MASK].z2 =
			    cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
			    (D_FIFO_SIZE - 1));
		} else {
			dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
			if (!dch->rx_skb) {
				printk(KERN_WARNING
				    "HFC-PCI: D receive out of memory\n");
				break;
			}
			total = rcnt;
			rcnt -= 3;
			ptr = skb_put(dch->rx_skb, rcnt);

			if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
				maxlen = rcnt;	/* complete transfer */
			else
				maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
				    /* maximum */

			ptr1 = df->data + le16_to_cpu(zp->z2);
			    /* start of data */
			memcpy(ptr, ptr1, maxlen);	/* copy data */
			rcnt -= maxlen;

			if (rcnt) { /* rest remaining */
				ptr += maxlen;
				ptr1 = df->data;	/* start of buffer */
				memcpy(ptr, ptr1, rcnt);	/* rest */
			}
			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
			    (MAX_D_FRAMES + 1);	/* next buffer */
			df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
			    le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
			recv_Dchannel(dch);
		}
	}
	return 1;
}

/*
 * check for transparent receive data and read max one 'poll' size if avail
 */
static void
hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
    struct bzfifo *txbz, u_char *bdata)
{
	__le16	*z1r, *z2r, *z1t, *z2t;
	int	new_z2, fcnt_rx, fcnt_tx, maxlen;
	u_char	*ptr, *ptr1;

	z1r = &rxbz->za[MAX_B_FRAMES].z1;	/* pointer to z reg */
	z2r = z1r + 1;
	z1t = &txbz->za[MAX_B_FRAMES].z1;
	z2t = z1t + 1;

	fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
	if (!fcnt_rx)
		return;	/* no data avail */

	if (fcnt_rx <= 0)
		fcnt_rx += B_FIFO_SIZE;	/* bytes actually buffered */
	new_z2 = le16_to_cpu(*z2r) + fcnt_rx;	/* new position in fifo */
	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */

	if (fcnt_rx > MAX_DATA_SIZE) {	/* flush, if oversized */
		*z2r = cpu_to_le16(new_z2);	/* new position */
		return;
	}

	fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
	if (fcnt_tx <= 0)
		fcnt_tx += B_FIFO_SIZE;
	    /* fcnt_tx contains available bytes in tx-fifo */
	fcnt_tx = B_FIFO_SIZE - fcnt_tx;
	    /* remaining bytes to send (bytes in tx-fifo) */

	bch->rx_skb = mI_alloc_skb(fcnt_rx, GFP_ATOMIC);
	if (bch->rx_skb) {
		ptr = skb_put(bch->rx_skb, fcnt_rx);
		if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
			maxlen = fcnt_rx;	/* complete transfer */
		else
			maxlen = B_FIFO_SIZE + B_SUB_VAL -
le16_to_cpu(*z2r); 586 /* maximum */ 587 588 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL); 589 /* start of data */ 590 memcpy(ptr, ptr1, maxlen); /* copy data */ 591 fcnt_rx -= maxlen; 592 593 if (fcnt_rx) { /* rest remaining */ 594 ptr += maxlen; 595 ptr1 = bdata; /* start of buffer */ 596 memcpy(ptr, ptr1, fcnt_rx); /* rest */ 597 } 598 recv_Bchannel(bch, fcnt_tx); /* bch, id */ 599 } else 600 printk(KERN_WARNING "HFCPCI: receive out of memory\n"); 601 602 *z2r = cpu_to_le16(new_z2); /* new position */ 603 } 604 605 /* 606 * B-channel main receive routine 607 */ 608 static void 609 main_rec_hfcpci(struct bchannel *bch) 610 { 611 struct hfc_pci *hc = bch->hw; 612 int rcnt, real_fifo; 613 int receive = 0, count = 5; 614 struct bzfifo *txbz, *rxbz; 615 u_char *bdata; 616 struct zt *zp; 617 618 if ((bch->nr & 2) && (!hc->hw.bswapped)) { 619 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2; 620 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2; 621 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2; 622 real_fifo = 1; 623 } else { 624 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1; 625 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1; 626 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1; 627 real_fifo = 0; 628 } 629 Begin: 630 count--; 631 if (rxbz->f1 != rxbz->f2) { 632 if (bch->debug & DEBUG_HW_BCHANNEL) 633 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n", 634 bch->nr, rxbz->f1, rxbz->f2); 635 zp = &rxbz->za[rxbz->f2]; 636 637 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2); 638 if (rcnt < 0) 639 rcnt += B_FIFO_SIZE; 640 rcnt++; 641 if (bch->debug & DEBUG_HW_BCHANNEL) 642 printk(KERN_DEBUG 643 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n", 644 bch->nr, le16_to_cpu(zp->z1), 645 le16_to_cpu(zp->z2), rcnt); 646 hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt); 647 rcnt = rxbz->f1 - rxbz->f2; 648 if (rcnt < 0) 649 rcnt += MAX_B_FRAMES + 1; 650 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) { 651 rcnt = 0; 652 hfcpci_clear_fifo_rx(hc, real_fifo); 653 } 654 hc->hw.last_bfifo_cnt[real_fifo] = rcnt; 655 if (rcnt > 1) 656 receive = 1; 657 else 658 receive = 0; 659 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { 660 hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata); 661 return; 662 } else 663 receive = 0; 664 if (count && receive) 665 goto Begin; 666 667 } 668 669 /* 670 * D-channel send routine 671 */ 672 static void 673 hfcpci_fill_dfifo(struct hfc_pci *hc) 674 { 675 struct dchannel *dch = &hc->dch; 676 int fcnt; 677 int count, new_z1, maxlen; 678 struct dfifo *df; 679 u_char *src, *dst, new_f1; 680 681 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO)) 682 printk(KERN_DEBUG "%s\n", __func__); 683 684 if (!dch->tx_skb) 685 return; 686 count = dch->tx_skb->len - dch->tx_idx; 687 if (count <= 0) 688 return; 689 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx; 690 691 if (dch->debug & DEBUG_HW_DFIFO) 692 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__, 693 df->f1, df->f2, 694 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1)); 695 fcnt = df->f1 - df->f2; /* frame count actually buffered */ 696 if (fcnt < 0) 697 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */ 698 if (fcnt > (MAX_D_FRAMES - 1)) { 699 if (dch->debug & DEBUG_HW_DCHANNEL) 700 printk(KERN_DEBUG 701 "hfcpci_fill_Dfifo more as 14 frames\n"); 702 #ifdef ERROR_STATISTIC 703 cs->err_tx++; 704 #endif 705 return; 706 } 707 /* now determine free bytes in FIFO buffer */ 708 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) - 709 
le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1; 710 if (maxlen <= 0) 711 maxlen += D_FIFO_SIZE; /* count now contains available bytes */ 712 713 if (dch->debug & DEBUG_HW_DCHANNEL) 714 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n", 715 count, maxlen); 716 if (count > maxlen) { 717 if (dch->debug & DEBUG_HW_DCHANNEL) 718 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n"); 719 return; 720 } 721 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) & 722 (D_FIFO_SIZE - 1); 723 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1); 724 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */ 725 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1); 726 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1); 727 /* end fifo */ 728 if (maxlen > count) 729 maxlen = count; /* limit size */ 730 memcpy(dst, src, maxlen); /* first copy */ 731 732 count -= maxlen; /* remaining bytes */ 733 if (count) { 734 dst = df->data; /* start of buffer */ 735 src += maxlen; /* new position */ 736 memcpy(dst, src, count); 737 } 738 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1); 739 /* for next buffer */ 740 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1); 741 /* new pos actual buffer */ 742 df->f1 = new_f1; /* next frame */ 743 dch->tx_idx = dch->tx_skb->len; 744 } 745 746 /* 747 * B-channel send routine 748 */ 749 static void 750 hfcpci_fill_fifo(struct bchannel *bch) 751 { 752 struct hfc_pci *hc = bch->hw; 753 int maxlen, fcnt; 754 int count, new_z1; 755 struct bzfifo *bz; 756 u_char *bdata; 757 u_char new_f1, *src, *dst; 758 __le16 *z1t, *z2t; 759 760 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) 761 printk(KERN_DEBUG "%s\n", __func__); 762 if ((!bch->tx_skb) || bch->tx_skb->len <= 0) 763 return; 764 count = bch->tx_skb->len - bch->tx_idx; 765 if ((bch->nr & 2) && (!hc->hw.bswapped)) { 766 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2; 767 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2; 768 } else { 769 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1; 770 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1; 771 } 772 773 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { 774 z1t = &bz->za[MAX_B_FRAMES].z1; 775 z2t = z1t + 1; 776 if (bch->debug & DEBUG_HW_BCHANNEL) 777 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) " 778 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count, 779 le16_to_cpu(*z1t), le16_to_cpu(*z2t)); 780 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t); 781 if (fcnt <= 0) 782 fcnt += B_FIFO_SIZE; 783 /* fcnt contains available bytes in fifo */ 784 fcnt = B_FIFO_SIZE - fcnt; 785 /* remaining bytes to send (bytes in fifo) */ 786 787 /* "fill fifo if empty" feature */ 788 if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) { 789 /* printk(KERN_DEBUG "%s: buffer empty, so we have " 790 "underrun\n", __func__); */ 791 /* fill buffer, to prevent future underrun */ 792 count = HFCPCI_FILLEMPTY; 793 new_z1 = le16_to_cpu(*z1t) + count; 794 /* new buffer Position */ 795 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) 796 new_z1 -= B_FIFO_SIZE; /* buffer wrap */ 797 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL); 798 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t); 799 /* end of fifo */ 800 if (bch->debug & DEBUG_HW_BFIFO) 801 printk(KERN_DEBUG "hfcpci_FFt fillempty " 802 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n", 803 fcnt, maxlen, new_z1, dst); 804 fcnt += count; 805 if (maxlen > count) 806 maxlen = count; /* limit size */ 807 memset(dst, 0x2a, maxlen); /* first copy */ 808 count -= maxlen; /* 
remaining bytes */ 809 if (count) { 810 dst = bdata; /* start of buffer */ 811 memset(dst, 0x2a, count); 812 } 813 *z1t = cpu_to_le16(new_z1); /* now send data */ 814 } 815 816 next_t_frame: 817 count = bch->tx_skb->len - bch->tx_idx; 818 /* maximum fill shall be poll*2 */ 819 if (count > (poll << 1) - fcnt) 820 count = (poll << 1) - fcnt; 821 if (count <= 0) 822 return; 823 /* data is suitable for fifo */ 824 new_z1 = le16_to_cpu(*z1t) + count; 825 /* new buffer Position */ 826 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) 827 new_z1 -= B_FIFO_SIZE; /* buffer wrap */ 828 src = bch->tx_skb->data + bch->tx_idx; 829 /* source pointer */ 830 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL); 831 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t); 832 /* end of fifo */ 833 if (bch->debug & DEBUG_HW_BFIFO) 834 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) " 835 "maxl(%d) nz1(%x) dst(%p)\n", 836 fcnt, maxlen, new_z1, dst); 837 fcnt += count; 838 bch->tx_idx += count; 839 if (maxlen > count) 840 maxlen = count; /* limit size */ 841 memcpy(dst, src, maxlen); /* first copy */ 842 count -= maxlen; /* remaining bytes */ 843 if (count) { 844 dst = bdata; /* start of buffer */ 845 src += maxlen; /* new position */ 846 memcpy(dst, src, count); 847 } 848 *z1t = cpu_to_le16(new_z1); /* now send data */ 849 if (bch->tx_idx < bch->tx_skb->len) 850 return; 851 /* send confirm, on trans, free on hdlc. */ 852 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) 853 confirm_Bsend(bch); 854 dev_kfree_skb(bch->tx_skb); 855 if (get_next_bframe(bch)) 856 goto next_t_frame; 857 return; 858 } 859 if (bch->debug & DEBUG_HW_BCHANNEL) 860 printk(KERN_DEBUG 861 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n", 862 __func__, bch->nr, bz->f1, bz->f2, 863 bz->za[bz->f1].z1); 864 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */ 865 if (fcnt < 0) 866 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */ 867 if (fcnt > (MAX_B_FRAMES - 1)) { 868 if (bch->debug & DEBUG_HW_BCHANNEL) 869 printk(KERN_DEBUG 870 "hfcpci_fill_Bfifo more as 14 frames\n"); 871 return; 872 } 873 /* now determine free bytes in FIFO buffer */ 874 maxlen = le16_to_cpu(bz->za[bz->f2].z2) - 875 le16_to_cpu(bz->za[bz->f1].z1) - 1; 876 if (maxlen <= 0) 877 maxlen += B_FIFO_SIZE; /* count now contains available bytes */ 878 879 if (bch->debug & DEBUG_HW_BCHANNEL) 880 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n", 881 bch->nr, count, maxlen); 882 883 if (maxlen < count) { 884 if (bch->debug & DEBUG_HW_BCHANNEL) 885 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n"); 886 return; 887 } 888 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count; 889 /* new buffer Position */ 890 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) 891 new_z1 -= B_FIFO_SIZE; /* buffer wrap */ 892 893 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES); 894 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */ 895 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL); 896 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1); 897 /* end fifo */ 898 if (maxlen > count) 899 maxlen = count; /* limit size */ 900 memcpy(dst, src, maxlen); /* first copy */ 901 902 count -= maxlen; /* remaining bytes */ 903 if (count) { 904 dst = bdata; /* start of buffer */ 905 src += maxlen; /* new position */ 906 memcpy(dst, src, count); 907 } 908 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */ 909 bz->f1 = new_f1; /* next frame */ 910 dev_kfree_skb(bch->tx_skb); 911 get_next_bframe(bch); 912 } 913 914 915 916 /* 917 * handle L1 state changes TE 918 */ 919 920 static void 921 ph_state_te(struct dchannel 
*dch) 922 { 923 if (dch->debug) 924 printk(KERN_DEBUG "%s: TE newstate %x\n", 925 __func__, dch->state); 926 switch (dch->state) { 927 case 0: 928 l1_event(dch->l1, HW_RESET_IND); 929 break; 930 case 3: 931 l1_event(dch->l1, HW_DEACT_IND); 932 break; 933 case 5: 934 case 8: 935 l1_event(dch->l1, ANYSIGNAL); 936 break; 937 case 6: 938 l1_event(dch->l1, INFO2); 939 break; 940 case 7: 941 l1_event(dch->l1, INFO4_P8); 942 break; 943 } 944 } 945 946 /* 947 * handle L1 state changes NT 948 */ 949 950 static void 951 handle_nt_timer3(struct dchannel *dch) { 952 struct hfc_pci *hc = dch->hw; 953 954 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); 955 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; 956 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 957 hc->hw.nt_timer = 0; 958 test_and_set_bit(FLG_ACTIVE, &dch->Flags); 959 if (test_bit(HFC_CFG_MASTER, &hc->cfg)) 960 hc->hw.mst_m |= HFCPCI_MASTER; 961 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 962 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, 963 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); 964 } 965 966 static void 967 ph_state_nt(struct dchannel *dch) 968 { 969 struct hfc_pci *hc = dch->hw; 970 u_char val; 971 972 if (dch->debug) 973 printk(KERN_DEBUG "%s: NT newstate %x\n", 974 __func__, dch->state); 975 switch (dch->state) { 976 case 2: 977 if (hc->hw.nt_timer < 0) { 978 hc->hw.nt_timer = 0; 979 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); 980 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); 981 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; 982 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 983 /* Clear already pending ints */ 984 val = Read_hfc(hc, HFCPCI_INT_S1); 985 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE); 986 udelay(10); 987 Write_hfc(hc, HFCPCI_STATES, 4); 988 dch->state = 4; 989 } else if (hc->hw.nt_timer == 0) { 990 hc->hw.int_m1 |= HFCPCI_INTS_TIMER; 991 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 992 hc->hw.nt_timer = NT_T1_COUNT; 993 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER; 994 hc->hw.ctmt |= HFCPCI_TIM3_125; 995 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | 996 HFCPCI_CLTIMER); 997 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); 998 test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags); 999 /* allow G2 -> G3 transition */ 1000 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); 1001 } else { 1002 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); 1003 } 1004 break; 1005 case 1: 1006 hc->hw.nt_timer = 0; 1007 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); 1008 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); 1009 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; 1010 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 1011 test_and_clear_bit(FLG_ACTIVE, &dch->Flags); 1012 hc->hw.mst_m &= ~HFCPCI_MASTER; 1013 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 1014 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); 1015 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, 1016 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); 1017 break; 1018 case 4: 1019 hc->hw.nt_timer = 0; 1020 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); 1021 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); 1022 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; 1023 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 1024 break; 1025 case 3: 1026 if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) { 1027 if (!test_and_clear_bit(FLG_L2_ACTIVATED, 1028 &dch->Flags)) { 1029 handle_nt_timer3(dch); 1030 break; 1031 } 1032 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); 1033 hc->hw.int_m1 |= HFCPCI_INTS_TIMER; 1034 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 1035 hc->hw.nt_timer = NT_T3_COUNT; 1036 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER; 1037 
hc->hw.ctmt |= HFCPCI_TIM3_125; 1038 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | 1039 HFCPCI_CLTIMER); 1040 } 1041 break; 1042 } 1043 } 1044 1045 static void 1046 ph_state(struct dchannel *dch) 1047 { 1048 struct hfc_pci *hc = dch->hw; 1049 1050 if (hc->hw.protocol == ISDN_P_NT_S0) { 1051 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) && 1052 hc->hw.nt_timer < 0) 1053 handle_nt_timer3(dch); 1054 else 1055 ph_state_nt(dch); 1056 } else 1057 ph_state_te(dch); 1058 } 1059 1060 /* 1061 * Layer 1 callback function 1062 */ 1063 static int 1064 hfc_l1callback(struct dchannel *dch, u_int cmd) 1065 { 1066 struct hfc_pci *hc = dch->hw; 1067 1068 switch (cmd) { 1069 case INFO3_P8: 1070 case INFO3_P10: 1071 if (test_bit(HFC_CFG_MASTER, &hc->cfg)) 1072 hc->hw.mst_m |= HFCPCI_MASTER; 1073 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 1074 break; 1075 case HW_RESET_REQ: 1076 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); 1077 /* HFC ST 3 */ 1078 udelay(6); 1079 Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */ 1080 if (test_bit(HFC_CFG_MASTER, &hc->cfg)) 1081 hc->hw.mst_m |= HFCPCI_MASTER; 1082 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 1083 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE | 1084 HFCPCI_DO_ACTION); 1085 l1_event(dch->l1, HW_POWERUP_IND); 1086 break; 1087 case HW_DEACT_REQ: 1088 hc->hw.mst_m &= ~HFCPCI_MASTER; 1089 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 1090 skb_queue_purge(&dch->squeue); 1091 if (dch->tx_skb) { 1092 dev_kfree_skb(dch->tx_skb); 1093 dch->tx_skb = NULL; 1094 } 1095 dch->tx_idx = 0; 1096 if (dch->rx_skb) { 1097 dev_kfree_skb(dch->rx_skb); 1098 dch->rx_skb = NULL; 1099 } 1100 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1101 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1102 del_timer(&dch->timer); 1103 break; 1104 case HW_POWERUP_REQ: 1105 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION); 1106 break; 1107 case PH_ACTIVATE_IND: 1108 test_and_set_bit(FLG_ACTIVE, &dch->Flags); 1109 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, 1110 GFP_ATOMIC); 1111 break; 1112 case PH_DEACTIVATE_IND: 1113 test_and_clear_bit(FLG_ACTIVE, &dch->Flags); 1114 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, 1115 GFP_ATOMIC); 1116 break; 1117 default: 1118 if (dch->debug & DEBUG_HW) 1119 printk(KERN_DEBUG "%s: unknown command %x\n", 1120 __func__, cmd); 1121 return -1; 1122 } 1123 return 0; 1124 } 1125 1126 /* 1127 * Interrupt handler 1128 */ 1129 static inline void 1130 tx_birq(struct bchannel *bch) 1131 { 1132 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) 1133 hfcpci_fill_fifo(bch); 1134 else { 1135 if (bch->tx_skb) 1136 dev_kfree_skb(bch->tx_skb); 1137 if (get_next_bframe(bch)) 1138 hfcpci_fill_fifo(bch); 1139 } 1140 } 1141 1142 static inline void 1143 tx_dirq(struct dchannel *dch) 1144 { 1145 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len) 1146 hfcpci_fill_dfifo(dch->hw); 1147 else { 1148 if (dch->tx_skb) 1149 dev_kfree_skb(dch->tx_skb); 1150 if (get_next_dframe(dch)) 1151 hfcpci_fill_dfifo(dch->hw); 1152 } 1153 } 1154 1155 static irqreturn_t 1156 hfcpci_int(int intno, void *dev_id) 1157 { 1158 struct hfc_pci *hc = dev_id; 1159 u_char exval; 1160 struct bchannel *bch; 1161 u_char val, stat; 1162 1163 spin_lock(&hc->lock); 1164 if (!(hc->hw.int_m2 & 0x08)) { 1165 spin_unlock(&hc->lock); 1166 return IRQ_NONE; /* not initialised */ 1167 } 1168 stat = Read_hfc(hc, HFCPCI_STATUS); 1169 if (HFCPCI_ANYINT & stat) { 1170 val = Read_hfc(hc, HFCPCI_INT_S1); 1171 if (hc->dch.debug & DEBUG_HW_DCHANNEL) 1172 printk(KERN_DEBUG 1173 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val); 
1174 } else { 1175 /* shared */ 1176 spin_unlock(&hc->lock); 1177 return IRQ_NONE; 1178 } 1179 hc->irqcnt++; 1180 1181 if (hc->dch.debug & DEBUG_HW_DCHANNEL) 1182 printk(KERN_DEBUG "HFC-PCI irq %x\n", val); 1183 val &= hc->hw.int_m1; 1184 if (val & 0x40) { /* state machine irq */ 1185 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf; 1186 if (hc->dch.debug & DEBUG_HW_DCHANNEL) 1187 printk(KERN_DEBUG "ph_state chg %d->%d\n", 1188 hc->dch.state, exval); 1189 hc->dch.state = exval; 1190 schedule_event(&hc->dch, FLG_PHCHANGE); 1191 val &= ~0x40; 1192 } 1193 if (val & 0x80) { /* timer irq */ 1194 if (hc->hw.protocol == ISDN_P_NT_S0) { 1195 if ((--hc->hw.nt_timer) < 0) 1196 schedule_event(&hc->dch, FLG_PHCHANGE); 1197 } 1198 val &= ~0x80; 1199 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER); 1200 } 1201 if (val & 0x08) { /* B1 rx */ 1202 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); 1203 if (bch) 1204 main_rec_hfcpci(bch); 1205 else if (hc->dch.debug) 1206 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n"); 1207 } 1208 if (val & 0x10) { /* B2 rx */ 1209 bch = Sel_BCS(hc, 2); 1210 if (bch) 1211 main_rec_hfcpci(bch); 1212 else if (hc->dch.debug) 1213 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n"); 1214 } 1215 if (val & 0x01) { /* B1 tx */ 1216 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); 1217 if (bch) 1218 tx_birq(bch); 1219 else if (hc->dch.debug) 1220 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n"); 1221 } 1222 if (val & 0x02) { /* B2 tx */ 1223 bch = Sel_BCS(hc, 2); 1224 if (bch) 1225 tx_birq(bch); 1226 else if (hc->dch.debug) 1227 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n"); 1228 } 1229 if (val & 0x20) /* D rx */ 1230 receive_dmsg(hc); 1231 if (val & 0x04) { /* D tx */ 1232 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags)) 1233 del_timer(&hc->dch.timer); 1234 tx_dirq(&hc->dch); 1235 } 1236 spin_unlock(&hc->lock); 1237 return IRQ_HANDLED; 1238 } 1239 1240 /* 1241 * timer callback for D-chan busy resolution. 
Currently no function 1242 */ 1243 static void 1244 hfcpci_dbusy_timer(struct hfc_pci *hc) 1245 { 1246 } 1247 1248 /* 1249 * activate/deactivate hardware for selected channels and mode 1250 */ 1251 static int 1252 mode_hfcpci(struct bchannel *bch, int bc, int protocol) 1253 { 1254 struct hfc_pci *hc = bch->hw; 1255 int fifo2; 1256 u_char rx_slot = 0, tx_slot = 0, pcm_mode; 1257 1258 if (bch->debug & DEBUG_HW_BCHANNEL) 1259 printk(KERN_DEBUG 1260 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n", 1261 bch->state, protocol, bch->nr, bc); 1262 1263 fifo2 = bc; 1264 pcm_mode = (bc>>24) & 0xff; 1265 if (pcm_mode) { /* PCM SLOT USE */ 1266 if (!test_bit(HFC_CFG_PCM, &hc->cfg)) 1267 printk(KERN_WARNING 1268 "%s: pcm channel id without HFC_CFG_PCM\n", 1269 __func__); 1270 rx_slot = (bc>>8) & 0xff; 1271 tx_slot = (bc>>16) & 0xff; 1272 bc = bc & 0xff; 1273 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE)) 1274 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n", 1275 __func__); 1276 if (hc->chanlimit > 1) { 1277 hc->hw.bswapped = 0; /* B1 and B2 normal mode */ 1278 hc->hw.sctrl_e &= ~0x80; 1279 } else { 1280 if (bc & 2) { 1281 if (protocol != ISDN_P_NONE) { 1282 hc->hw.bswapped = 1; /* B1 and B2 exchanged */ 1283 hc->hw.sctrl_e |= 0x80; 1284 } else { 1285 hc->hw.bswapped = 0; /* B1 and B2 normal mode */ 1286 hc->hw.sctrl_e &= ~0x80; 1287 } 1288 fifo2 = 1; 1289 } else { 1290 hc->hw.bswapped = 0; /* B1 and B2 normal mode */ 1291 hc->hw.sctrl_e &= ~0x80; 1292 } 1293 } 1294 switch (protocol) { 1295 case (-1): /* used for init */ 1296 bch->state = -1; 1297 bch->nr = bc; 1298 case (ISDN_P_NONE): 1299 if (bch->state == ISDN_P_NONE) 1300 return 0; 1301 if (bc & 2) { 1302 hc->hw.sctrl &= ~SCTRL_B2_ENA; 1303 hc->hw.sctrl_r &= ~SCTRL_B2_ENA; 1304 } else { 1305 hc->hw.sctrl &= ~SCTRL_B1_ENA; 1306 hc->hw.sctrl_r &= ~SCTRL_B1_ENA; 1307 } 1308 if (fifo2 & 2) { 1309 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2; 1310 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS + 1311 HFCPCI_INTS_B2REC); 1312 } else { 1313 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1; 1314 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS + 1315 HFCPCI_INTS_B1REC); 1316 } 1317 #ifdef REVERSE_BITORDER 1318 if (bch->nr & 2) 1319 hc->hw.cirm &= 0x7f; 1320 else 1321 hc->hw.cirm &= 0xbf; 1322 #endif 1323 bch->state = ISDN_P_NONE; 1324 bch->nr = bc; 1325 test_and_clear_bit(FLG_HDLC, &bch->Flags); 1326 test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags); 1327 break; 1328 case (ISDN_P_B_RAW): 1329 bch->state = protocol; 1330 bch->nr = bc; 1331 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0); 1332 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0); 1333 if (bc & 2) { 1334 hc->hw.sctrl |= SCTRL_B2_ENA; 1335 hc->hw.sctrl_r |= SCTRL_B2_ENA; 1336 #ifdef REVERSE_BITORDER 1337 hc->hw.cirm |= 0x80; 1338 #endif 1339 } else { 1340 hc->hw.sctrl |= SCTRL_B1_ENA; 1341 hc->hw.sctrl_r |= SCTRL_B1_ENA; 1342 #ifdef REVERSE_BITORDER 1343 hc->hw.cirm |= 0x40; 1344 #endif 1345 } 1346 if (fifo2 & 2) { 1347 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; 1348 if (!tics) 1349 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + 1350 HFCPCI_INTS_B2REC); 1351 hc->hw.ctmt |= 2; 1352 hc->hw.conn &= ~0x18; 1353 } else { 1354 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; 1355 if (!tics) 1356 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + 1357 HFCPCI_INTS_B1REC); 1358 hc->hw.ctmt |= 1; 1359 hc->hw.conn &= ~0x03; 1360 } 1361 test_and_set_bit(FLG_TRANSPARENT, &bch->Flags); 1362 break; 1363 case (ISDN_P_B_HDLC): 1364 bch->state = protocol; 1365 bch->nr = bc; 1366 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 
1 : 0); 1367 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0); 1368 if (bc & 2) { 1369 hc->hw.sctrl |= SCTRL_B2_ENA; 1370 hc->hw.sctrl_r |= SCTRL_B2_ENA; 1371 } else { 1372 hc->hw.sctrl |= SCTRL_B1_ENA; 1373 hc->hw.sctrl_r |= SCTRL_B1_ENA; 1374 } 1375 if (fifo2 & 2) { 1376 hc->hw.last_bfifo_cnt[1] = 0; 1377 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; 1378 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + 1379 HFCPCI_INTS_B2REC); 1380 hc->hw.ctmt &= ~2; 1381 hc->hw.conn &= ~0x18; 1382 } else { 1383 hc->hw.last_bfifo_cnt[0] = 0; 1384 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; 1385 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + 1386 HFCPCI_INTS_B1REC); 1387 hc->hw.ctmt &= ~1; 1388 hc->hw.conn &= ~0x03; 1389 } 1390 test_and_set_bit(FLG_HDLC, &bch->Flags); 1391 break; 1392 default: 1393 printk(KERN_DEBUG "prot not known %x\n", protocol); 1394 return -ENOPROTOOPT; 1395 } 1396 if (test_bit(HFC_CFG_PCM, &hc->cfg)) { 1397 if ((protocol == ISDN_P_NONE) || 1398 (protocol == -1)) { /* init case */ 1399 rx_slot = 0; 1400 tx_slot = 0; 1401 } else { 1402 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) { 1403 rx_slot |= 0xC0; 1404 tx_slot |= 0xC0; 1405 } else { 1406 rx_slot |= 0x80; 1407 tx_slot |= 0x80; 1408 } 1409 } 1410 if (bc & 2) { 1411 hc->hw.conn &= 0xc7; 1412 hc->hw.conn |= 0x08; 1413 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n", 1414 __func__, tx_slot); 1415 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n", 1416 __func__, rx_slot); 1417 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot); 1418 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot); 1419 } else { 1420 hc->hw.conn &= 0xf8; 1421 hc->hw.conn |= 0x01; 1422 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n", 1423 __func__, tx_slot); 1424 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n", 1425 __func__, rx_slot); 1426 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot); 1427 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot); 1428 } 1429 } 1430 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e); 1431 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 1432 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); 1433 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl); 1434 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r); 1435 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt); 1436 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1437 #ifdef REVERSE_BITORDER 1438 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm); 1439 #endif 1440 return 0; 1441 } 1442 1443 static int 1444 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan) 1445 { 1446 struct hfc_pci *hc = bch->hw; 1447 1448 if (bch->debug & DEBUG_HW_BCHANNEL) 1449 printk(KERN_DEBUG 1450 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n", 1451 bch->state, protocol, bch->nr, chan); 1452 if (bch->nr != chan) { 1453 printk(KERN_DEBUG 1454 "HFCPCI rxtest wrong channel parameter %x/%x\n", 1455 bch->nr, chan); 1456 return -EINVAL; 1457 } 1458 switch (protocol) { 1459 case (ISDN_P_B_RAW): 1460 bch->state = protocol; 1461 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0); 1462 if (chan & 2) { 1463 hc->hw.sctrl_r |= SCTRL_B2_ENA; 1464 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX; 1465 if (!tics) 1466 hc->hw.int_m1 |= HFCPCI_INTS_B2REC; 1467 hc->hw.ctmt |= 2; 1468 hc->hw.conn &= ~0x18; 1469 #ifdef REVERSE_BITORDER 1470 hc->hw.cirm |= 0x80; 1471 #endif 1472 } else { 1473 hc->hw.sctrl_r |= SCTRL_B1_ENA; 1474 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX; 1475 if (!tics) 1476 hc->hw.int_m1 |= HFCPCI_INTS_B1REC; 1477 hc->hw.ctmt |= 1; 1478 hc->hw.conn &= ~0x03; 1479 #ifdef REVERSE_BITORDER 1480 hc->hw.cirm |= 0x40; 1481 #endif 1482 } 1483 break; 1484 case (ISDN_P_B_HDLC): 1485 bch->state = protocol; 1486 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 
1 : 0); 1487 if (chan & 2) { 1488 hc->hw.sctrl_r |= SCTRL_B2_ENA; 1489 hc->hw.last_bfifo_cnt[1] = 0; 1490 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX; 1491 hc->hw.int_m1 |= HFCPCI_INTS_B2REC; 1492 hc->hw.ctmt &= ~2; 1493 hc->hw.conn &= ~0x18; 1494 } else { 1495 hc->hw.sctrl_r |= SCTRL_B1_ENA; 1496 hc->hw.last_bfifo_cnt[0] = 0; 1497 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX; 1498 hc->hw.int_m1 |= HFCPCI_INTS_B1REC; 1499 hc->hw.ctmt &= ~1; 1500 hc->hw.conn &= ~0x03; 1501 } 1502 break; 1503 default: 1504 printk(KERN_DEBUG "prot not known %x\n", protocol); 1505 return -ENOPROTOOPT; 1506 } 1507 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 1508 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); 1509 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r); 1510 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt); 1511 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1512 #ifdef REVERSE_BITORDER 1513 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm); 1514 #endif 1515 return 0; 1516 } 1517 1518 static void 1519 deactivate_bchannel(struct bchannel *bch) 1520 { 1521 struct hfc_pci *hc = bch->hw; 1522 u_long flags; 1523 1524 spin_lock_irqsave(&hc->lock, flags); 1525 mISDN_clear_bchannel(bch); 1526 mode_hfcpci(bch, bch->nr, ISDN_P_NONE); 1527 spin_unlock_irqrestore(&hc->lock, flags); 1528 } 1529 1530 /* 1531 * Layer 1 B-channel hardware access 1532 */ 1533 static int 1534 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) 1535 { 1536 int ret = 0; 1537 1538 switch (cq->op) { 1539 case MISDN_CTRL_GETOP: 1540 cq->op = MISDN_CTRL_FILL_EMPTY; 1541 break; 1542 case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ 1543 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); 1544 if (debug & DEBUG_HW_OPEN) 1545 printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " 1546 "off=%d)\n", __func__, bch->nr, !!cq->p1); 1547 break; 1548 default: 1549 printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); 1550 ret = -EINVAL; 1551 break; 1552 } 1553 return ret; 1554 } 1555 static int 1556 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) 1557 { 1558 struct bchannel *bch = container_of(ch, struct bchannel, ch); 1559 struct hfc_pci *hc = bch->hw; 1560 int ret = -EINVAL; 1561 u_long flags; 1562 1563 if (bch->debug & DEBUG_HW) 1564 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); 1565 switch (cmd) { 1566 case HW_TESTRX_RAW: 1567 spin_lock_irqsave(&hc->lock, flags); 1568 ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg); 1569 spin_unlock_irqrestore(&hc->lock, flags); 1570 break; 1571 case HW_TESTRX_HDLC: 1572 spin_lock_irqsave(&hc->lock, flags); 1573 ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg); 1574 spin_unlock_irqrestore(&hc->lock, flags); 1575 break; 1576 case HW_TESTRX_OFF: 1577 spin_lock_irqsave(&hc->lock, flags); 1578 mode_hfcpci(bch, bch->nr, ISDN_P_NONE); 1579 spin_unlock_irqrestore(&hc->lock, flags); 1580 ret = 0; 1581 break; 1582 case CLOSE_CHANNEL: 1583 test_and_clear_bit(FLG_OPEN, &bch->Flags); 1584 if (test_bit(FLG_ACTIVE, &bch->Flags)) 1585 deactivate_bchannel(bch); 1586 ch->protocol = ISDN_P_NONE; 1587 ch->peer = NULL; 1588 module_put(THIS_MODULE); 1589 ret = 0; 1590 break; 1591 case CONTROL_CHANNEL: 1592 ret = channel_bctrl(bch, arg); 1593 break; 1594 default: 1595 printk(KERN_WARNING "%s: unknown prim(%x)\n", 1596 __func__, cmd); 1597 } 1598 return ret; 1599 } 1600 1601 /* 1602 * Layer2 -> Layer 1 Dchannel data 1603 */ 1604 static int 1605 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) 1606 { 1607 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); 1608 struct dchannel *dch = 
container_of(dev, struct dchannel, dev); 1609 struct hfc_pci *hc = dch->hw; 1610 int ret = -EINVAL; 1611 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1612 unsigned int id; 1613 u_long flags; 1614 1615 switch (hh->prim) { 1616 case PH_DATA_REQ: 1617 spin_lock_irqsave(&hc->lock, flags); 1618 ret = dchannel_senddata(dch, skb); 1619 if (ret > 0) { /* direct TX */ 1620 id = hh->id; /* skb can be freed */ 1621 hfcpci_fill_dfifo(dch->hw); 1622 ret = 0; 1623 spin_unlock_irqrestore(&hc->lock, flags); 1624 queue_ch_frame(ch, PH_DATA_CNF, id, NULL); 1625 } else 1626 spin_unlock_irqrestore(&hc->lock, flags); 1627 return ret; 1628 case PH_ACTIVATE_REQ: 1629 spin_lock_irqsave(&hc->lock, flags); 1630 if (hc->hw.protocol == ISDN_P_NT_S0) { 1631 ret = 0; 1632 if (test_bit(HFC_CFG_MASTER, &hc->cfg)) 1633 hc->hw.mst_m |= HFCPCI_MASTER; 1634 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 1635 if (test_bit(FLG_ACTIVE, &dch->Flags)) { 1636 spin_unlock_irqrestore(&hc->lock, flags); 1637 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, 1638 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); 1639 break; 1640 } 1641 test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags); 1642 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE | 1643 HFCPCI_DO_ACTION | 1); 1644 } else 1645 ret = l1_event(dch->l1, hh->prim); 1646 spin_unlock_irqrestore(&hc->lock, flags); 1647 break; 1648 case PH_DEACTIVATE_REQ: 1649 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); 1650 spin_lock_irqsave(&hc->lock, flags); 1651 if (hc->hw.protocol == ISDN_P_NT_S0) { 1652 /* prepare deactivation */ 1653 Write_hfc(hc, HFCPCI_STATES, 0x40); 1654 skb_queue_purge(&dch->squeue); 1655 if (dch->tx_skb) { 1656 dev_kfree_skb(dch->tx_skb); 1657 dch->tx_skb = NULL; 1658 } 1659 dch->tx_idx = 0; 1660 if (dch->rx_skb) { 1661 dev_kfree_skb(dch->rx_skb); 1662 dch->rx_skb = NULL; 1663 } 1664 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1665 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1666 del_timer(&dch->timer); 1667 #ifdef FIXME 1668 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) 1669 dchannel_sched_event(&hc->dch, D_CLEARBUSY); 1670 #endif 1671 hc->hw.mst_m &= ~HFCPCI_MASTER; 1672 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 1673 ret = 0; 1674 } else { 1675 ret = l1_event(dch->l1, hh->prim); 1676 } 1677 spin_unlock_irqrestore(&hc->lock, flags); 1678 break; 1679 } 1680 if (!ret) 1681 dev_kfree_skb(skb); 1682 return ret; 1683 } 1684 1685 /* 1686 * Layer2 -> Layer 1 Bchannel data 1687 */ 1688 static int 1689 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) 1690 { 1691 struct bchannel *bch = container_of(ch, struct bchannel, ch); 1692 struct hfc_pci *hc = bch->hw; 1693 int ret = -EINVAL; 1694 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1695 unsigned int id; 1696 u_long flags; 1697 1698 switch (hh->prim) { 1699 case PH_DATA_REQ: 1700 spin_lock_irqsave(&hc->lock, flags); 1701 ret = bchannel_senddata(bch, skb); 1702 if (ret > 0) { /* direct TX */ 1703 id = hh->id; /* skb can be freed */ 1704 hfcpci_fill_fifo(bch); 1705 ret = 0; 1706 spin_unlock_irqrestore(&hc->lock, flags); 1707 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) 1708 queue_ch_frame(ch, PH_DATA_CNF, id, NULL); 1709 } else 1710 spin_unlock_irqrestore(&hc->lock, flags); 1711 return ret; 1712 case PH_ACTIVATE_REQ: 1713 spin_lock_irqsave(&hc->lock, flags); 1714 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) 1715 ret = mode_hfcpci(bch, bch->nr, ch->protocol); 1716 else 1717 ret = 0; 1718 spin_unlock_irqrestore(&hc->lock, flags); 1719 if (!ret) 1720 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, 1721 NULL, GFP_KERNEL); 1722 break; 
1723 case PH_DEACTIVATE_REQ: 1724 deactivate_bchannel(bch); 1725 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, 1726 NULL, GFP_KERNEL); 1727 ret = 0; 1728 break; 1729 } 1730 if (!ret) 1731 dev_kfree_skb(skb); 1732 return ret; 1733 } 1734 1735 /* 1736 * called for card init message 1737 */ 1738 1739 static void 1740 inithfcpci(struct hfc_pci *hc) 1741 { 1742 printk(KERN_DEBUG "inithfcpci: entered\n"); 1743 hc->dch.timer.function = (void *) hfcpci_dbusy_timer; 1744 hc->dch.timer.data = (long) &hc->dch; 1745 init_timer(&hc->dch.timer); 1746 hc->chanlimit = 2; 1747 mode_hfcpci(&hc->bch[0], 1, -1); 1748 mode_hfcpci(&hc->bch[1], 2, -1); 1749 } 1750 1751 1752 static int 1753 init_card(struct hfc_pci *hc) 1754 { 1755 int cnt = 3; 1756 u_long flags; 1757 1758 printk(KERN_DEBUG "init_card: entered\n"); 1759 1760 1761 spin_lock_irqsave(&hc->lock, flags); 1762 disable_hwirq(hc); 1763 spin_unlock_irqrestore(&hc->lock, flags); 1764 if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) { 1765 printk(KERN_WARNING 1766 "mISDN: couldn't get interrupt %d\n", hc->irq); 1767 return -EIO; 1768 } 1769 spin_lock_irqsave(&hc->lock, flags); 1770 reset_hfcpci(hc); 1771 while (cnt) { 1772 inithfcpci(hc); 1773 /* 1774 * Finally enable IRQ output 1775 * this is only allowed, if an IRQ routine is allready 1776 * established for this HFC, so don't do that earlier 1777 */ 1778 enable_hwirq(hc); 1779 spin_unlock_irqrestore(&hc->lock, flags); 1780 /* Timeout 80ms */ 1781 current->state = TASK_UNINTERRUPTIBLE; 1782 schedule_timeout((80*HZ)/1000); 1783 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", 1784 hc->irq, hc->irqcnt); 1785 /* now switch timer interrupt off */ 1786 spin_lock_irqsave(&hc->lock, flags); 1787 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; 1788 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 1789 /* reinit mode reg */ 1790 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); 1791 if (!hc->irqcnt) { 1792 printk(KERN_WARNING 1793 "HFC PCI: IRQ(%d) getting no interrupts " 1794 "during init %d\n", hc->irq, 4 - cnt); 1795 if (cnt == 1) 1796 break; 1797 else { 1798 reset_hfcpci(hc); 1799 cnt--; 1800 } 1801 } else { 1802 spin_unlock_irqrestore(&hc->lock, flags); 1803 hc->initdone = 1; 1804 return 0; 1805 } 1806 } 1807 disable_hwirq(hc); 1808 spin_unlock_irqrestore(&hc->lock, flags); 1809 free_irq(hc->irq, hc); 1810 return -EIO; 1811 } 1812 1813 static int 1814 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq) 1815 { 1816 int ret = 0; 1817 u_char slot; 1818 1819 switch (cq->op) { 1820 case MISDN_CTRL_GETOP: 1821 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | 1822 MISDN_CTRL_DISCONNECT; 1823 break; 1824 case MISDN_CTRL_LOOP: 1825 /* channel 0 disabled loop */ 1826 if (cq->channel < 0 || cq->channel > 2) { 1827 ret = -EINVAL; 1828 break; 1829 } 1830 if (cq->channel & 1) { 1831 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) 1832 slot = 0xC0; 1833 else 1834 slot = 0x80; 1835 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n", 1836 __func__, slot); 1837 Write_hfc(hc, HFCPCI_B1_SSL, slot); 1838 Write_hfc(hc, HFCPCI_B1_RSL, slot); 1839 hc->hw.conn = (hc->hw.conn & ~7) | 6; 1840 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1841 } 1842 if (cq->channel & 2) { 1843 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) 1844 slot = 0xC1; 1845 else 1846 slot = 0x81; 1847 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n", 1848 __func__, slot); 1849 Write_hfc(hc, HFCPCI_B2_SSL, slot); 1850 Write_hfc(hc, HFCPCI_B2_RSL, slot); 1851 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30; 1852 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1853 } 1854 if 
(cq->channel & 3) 1855 hc->hw.trm |= 0x80; /* enable IOM-loop */ 1856 else { 1857 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09; 1858 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1859 hc->hw.trm &= 0x7f; /* disable IOM-loop */ 1860 } 1861 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm); 1862 break; 1863 case MISDN_CTRL_CONNECT: 1864 if (cq->channel == cq->p1) { 1865 ret = -EINVAL; 1866 break; 1867 } 1868 if (cq->channel < 1 || cq->channel > 2 || 1869 cq->p1 < 1 || cq->p1 > 2) { 1870 ret = -EINVAL; 1871 break; 1872 } 1873 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) 1874 slot = 0xC0; 1875 else 1876 slot = 0x80; 1877 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n", 1878 __func__, slot); 1879 Write_hfc(hc, HFCPCI_B1_SSL, slot); 1880 Write_hfc(hc, HFCPCI_B2_RSL, slot); 1881 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) 1882 slot = 0xC1; 1883 else 1884 slot = 0x81; 1885 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n", 1886 __func__, slot); 1887 Write_hfc(hc, HFCPCI_B2_SSL, slot); 1888 Write_hfc(hc, HFCPCI_B1_RSL, slot); 1889 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36; 1890 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1891 hc->hw.trm |= 0x80; 1892 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm); 1893 break; 1894 case MISDN_CTRL_DISCONNECT: 1895 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09; 1896 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1897 hc->hw.trm &= 0x7f; /* disable IOM-loop */ 1898 break; 1899 default: 1900 printk(KERN_WARNING "%s: unknown Op %x\n", 1901 __func__, cq->op); 1902 ret = -EINVAL; 1903 break; 1904 } 1905 return ret; 1906 } 1907 1908 static int 1909 open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch, 1910 struct channel_req *rq) 1911 { 1912 int err = 0; 1913 1914 if (debug & DEBUG_HW_OPEN) 1915 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__, 1916 hc->dch.dev.id, __builtin_return_address(0)); 1917 if (rq->protocol == ISDN_P_NONE) 1918 return -EINVAL; 1919 if (rq->adr.channel == 1) { 1920 /* TODO: E-Channel */ 1921 return -EINVAL; 1922 } 1923 if (!hc->initdone) { 1924 if (rq->protocol == ISDN_P_TE_S0) { 1925 err = create_l1(&hc->dch, hfc_l1callback); 1926 if (err) 1927 return err; 1928 } 1929 hc->hw.protocol = rq->protocol; 1930 ch->protocol = rq->protocol; 1931 err = init_card(hc); 1932 if (err) 1933 return err; 1934 } else { 1935 if (rq->protocol != ch->protocol) { 1936 if (hc->hw.protocol == ISDN_P_TE_S0) 1937 l1_event(hc->dch.l1, CLOSE_CHANNEL); 1938 if (rq->protocol == ISDN_P_TE_S0) { 1939 err = create_l1(&hc->dch, hfc_l1callback); 1940 if (err) 1941 return err; 1942 } 1943 hc->hw.protocol = rq->protocol; 1944 ch->protocol = rq->protocol; 1945 hfcpci_setmode(hc); 1946 } 1947 } 1948 1949 if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) || 1950 ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) { 1951 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 1952 0, NULL, GFP_KERNEL); 1953 } 1954 rq->ch = ch; 1955 if (!try_module_get(THIS_MODULE)) 1956 printk(KERN_WARNING "%s:cannot get module\n", __func__); 1957 return 0; 1958 } 1959 1960 static int 1961 open_bchannel(struct hfc_pci *hc, struct channel_req *rq) 1962 { 1963 struct bchannel *bch; 1964 1965 if (rq->adr.channel > 2) 1966 return -EINVAL; 1967 if (rq->protocol == ISDN_P_NONE) 1968 return -EINVAL; 1969 bch = &hc->bch[rq->adr.channel - 1]; 1970 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 1971 return -EBUSY; /* b-channel can be only open once */ 1972 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); 1973 bch->ch.protocol = rq->protocol; 1974 rq->ch = &bch->ch; /* TODO: E-channel */ 1975 if 
/*
 * device control function
 */
static int
hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
	struct hfc_pci		*hc = dch->hw;
	struct channel_req	*rq;
	int			err = 0;

	if (dch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: cmd:%x %p\n",
		       __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if ((rq->protocol == ISDN_P_TE_S0) ||
		    (rq->protocol == ISDN_P_NT_S0))
			err = open_dchannel(hc, ch, rq);
		else
			err = open_bchannel(hc, rq);
		break;
	case CLOSE_CHANNEL:
		if (debug & DEBUG_HW_OPEN)
			printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
			       __func__, hc->dch.dev.id,
			       __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(hc, arg);
		break;
	default:
		if (dch->debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: unknown command %x\n",
			       __func__, cmd);
		return -EINVAL;
	}
	return err;
}

static int
setup_hw(struct hfc_pci *hc)
{
	void	*buffer;

	printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
	hc->hw.cirm = 0;
	hc->dch.state = 0;
	pci_set_master(hc->pdev);
	if (!hc->irq) {
		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
		return 1;
	}
	hc->hw.pci_io =
		(char __iomem *)(unsigned long)hc->pdev->resource[1].start;

	if (!hc->hw.pci_io) {
		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
		return 1;
	}
	/* Allocate memory for FIFOS */
	/* the memory needs to be on a 32k boundary within the first 4G */
	pci_set_dma_mask(hc->pdev, 0xFFFF8000);
	buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
	/* We silently assume the address is okay if nonzero */
	if (!buffer) {
		printk(KERN_WARNING
		       "HFC-PCI: Error allocating memory for FIFO!\n");
		return 1;
	}
	hc->hw.fifos = buffer;
	pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
	hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
	printk(KERN_INFO
	       "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
	       (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
	       (u_long) hc->hw.dmahandle, hc->irq, HZ);
	/* enable memory mapped ports, disable busmaster */
	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
	hc->hw.int_m2 = 0;
	disable_hwirq(hc);
	hc->hw.int_m1 = 0;
	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
	/* At this point the needed PCI config is done */
	/* fifos are still not enabled */
	hc->hw.timer.function = (void *) hfcpci_Timer;
	hc->hw.timer.data = (long) hc;
	init_timer(&hc->hw.timer);
	/* default PCM master */
	test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
	return 0;
}
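
/*
 * release_card() tears down in the reverse order of setup: interrupt
 * output and both B channels are switched off under the card lock, the
 * D-channel timer is stopped, a TE layer-1 is closed, the IRQ is freed
 * before the I/O mapping and FIFO DMA memory are released (see the
 * comment at release_io_hfcpci), and finally the mISDN device and the
 * channel structures are freed together with the card itself.
 */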
static void
release_card(struct hfc_pci *hc)
{
	u_long	flags;

	spin_lock_irqsave(&hc->lock, flags);
	hc->hw.int_m2 = 0; /* interrupt output off ! */
	disable_hwirq(hc);
	mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
	mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
	if (hc->dch.timer.function != NULL) {
		del_timer(&hc->dch.timer);
		hc->dch.timer.function = NULL;
	}
	spin_unlock_irqrestore(&hc->lock, flags);
	if (hc->hw.protocol == ISDN_P_TE_S0)
		l1_event(hc->dch.l1, CLOSE_CHANNEL);
	if (hc->initdone)
		free_irq(hc->irq, hc);
	release_io_hfcpci(hc); /* must release after free_irq! */
	mISDN_unregister_device(&hc->dch.dev);
	mISDN_freebchannel(&hc->bch[1]);
	mISDN_freebchannel(&hc->bch[0]);
	mISDN_freedchannel(&hc->dch);
	pci_set_drvdata(hc->pdev, NULL);
	kfree(hc);
}

static int
setup_card(struct hfc_pci *card)
{
	int		err = -EINVAL;
	u_int		i;
	char		name[MISDN_MAX_IDLEN];

	card->dch.debug = debug;
	spin_lock_init(&card->lock);
	mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
	card->dch.hw = card;
	card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
	card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
				   (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->dch.dev.D.send = hfcpci_l2l1D;
	card->dch.dev.D.ctrl = hfc_dctrl;
	card->dch.dev.nrbchan = 2;
	for (i = 0; i < 2; i++) {
		card->bch[i].nr = i + 1;
		set_channelmap(i + 1, card->dch.dev.channelmap);
		card->bch[i].debug = debug;
		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
		card->bch[i].hw = card;
		card->bch[i].ch.send = hfcpci_l2l1B;
		card->bch[i].ch.ctrl = hfc_bctrl;
		card->bch[i].ch.nr = i + 1;
		list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
	}
	err = setup_hw(card);
	if (err)
		goto error;
	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
	err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
	if (err)
		goto error;
	HFC_cnt++;
	printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
	return 0;
error:
	mISDN_freebchannel(&card->bch[1]);
	mISDN_freebchannel(&card->bch[0]);
	mISDN_freedchannel(&card->dch);
	kfree(card);
	return err;
}

/* private data in the PCI devices list */
struct _hfc_map {
	u_int	subtype;
	u_int	flag;
	char	*name;
};
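
/*
 * hfc_map[] and hfc_ids[] below are kept index-aligned: the driver_data
 * of each PCI id entry points at the hfc_map entry with the same index,
 * so hfc_probe() can recover the subtype and the human readable adapter
 * name from ent->driver_data. When adding a card, both tables (and the
 * subtype enum at the top of this file) have to be extended together.
 */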
America)"}, 2184 {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"}, 2185 {}, 2186 }; 2187 2188 static struct pci_device_id hfc_ids[] = 2189 { 2190 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, 2191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[0]}, 2192 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, 2193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[1]}, 2194 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, 2195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[2]}, 2196 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, 2197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[3]}, 2198 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, 2199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[4]}, 2200 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, 2201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[5]}, 2202 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, 2203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[6]}, 2204 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, 2205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[7]}, 2206 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, 2207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[8]}, 2208 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, 2209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[9]}, 2210 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, 2211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[10]}, 2212 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, 2213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[11]}, 2214 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, 2215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[12]}, 2216 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, 2217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[13]}, 2218 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, 2219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[14]}, 2220 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, 2221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[15]}, 2222 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, 2223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[16]}, 2224 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, 2225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[17]}, 2226 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, 2227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[18]}, 2228 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, 2229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[19]}, 2230 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, 2231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[20]}, 2232 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, 2233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[21]}, 2234 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, 2235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[22]}, 2236 {}, 2237 }; 2238 2239 static int __devinit 2240 hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2241 { 2242 int err = -ENOMEM; 2243 struct hfc_pci *card; 2244 struct _hfc_map *m = (struct _hfc_map *)ent->driver_data; 2245 2246 card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC); 2247 if (!card) { 2248 printk(KERN_ERR "No kmem for HFC card\n"); 2249 return err; 2250 } 2251 card->pdev = pdev; 2252 card->subtype = m->subtype; 2253 err = pci_enable_device(pdev); 2254 if (err) { 2255 kfree(card); 2256 return err; 2257 } 2258 2259 printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n", 2260 m->name, pci_name(pdev)); 2261 2262 card->irq = pdev->irq; 
static int __devinit
hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int		err = -ENOMEM;
	struct hfc_pci	*card;
	struct _hfc_map	*m = (struct _hfc_map *)ent->driver_data;

	card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
	if (!card) {
		printk(KERN_ERR "No kmem for HFC card\n");
		return err;
	}
	card->pdev = pdev;
	card->subtype = m->subtype;
	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
	       m->name, pci_name(pdev));

	card->irq = pdev->irq;
	pci_set_drvdata(pdev, card);
	err = setup_card(card);
	if (err)
		pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit
hfc_remove_pci(struct pci_dev *pdev)
{
	struct hfc_pci	*card = pci_get_drvdata(pdev);

	if (card)
		release_card(card);
	else
		if (debug)
			printk(KERN_DEBUG "%s: drvdata already removed\n",
			       __func__);
}


static struct pci_driver hfc_driver = {
	.name = "hfcpci",
	.probe = hfc_probe,
	.remove = __devexit_p(hfc_remove_pci),
	.id_table = hfc_ids,
};

static int
_hfcpci_softirq(struct device *dev, void *arg)
{
	struct hfc_pci	*hc = dev_get_drvdata(dev);
	struct bchannel	*bch;

	if (hc == NULL)
		return 0;

	if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
		spin_lock(&hc->lock);
		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
		if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
			main_rec_hfcpci(bch);
			tx_birq(bch);
		}
		bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
		if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
			main_rec_hfcpci(bch);
			tx_birq(bch);
		}
		spin_unlock(&hc->lock);
	}
	return 0;
}

static void
hfcpci_softirq(void *arg)
{
	(void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
				      _hfcpci_softirq);

	/* if next event would be in the past ... */
	if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
		hfc_jiffies = jiffies + 1;
	else
		hfc_jiffies += tics;
	hfc_tl.expires = hfc_jiffies;
	add_timer(&hfc_tl);
}

static int __init
HFC_init(void)
{
	int err;

	if (!poll)
		poll = HFCPCI_BTRANS_THRESHOLD;

	if (poll != HFCPCI_BTRANS_THRESHOLD) {
		/*
		 * Round the requested poll value to whole kernel ticks:
		 * one jiffy covers 8000/HZ samples, so with HZ=1000 a poll
		 * of 120 gives tics = 120 * 1000 / 8000 = 15 jiffies and is
		 * kept as 15 * 8000 / 1000 = 120 samples.
		 */
		tics = (poll * HZ) / 8000;
		if (tics < 1)
			tics = 1;
		poll = (tics * 8000) / HZ;
		if (poll > 256 || poll < 8) {
			printk(KERN_ERR "%s: Wrong poll value %d not in range "
			       "of 8..256.\n", __func__, poll);
			err = -EINVAL;
			return err;
		}
	}
	if (poll != HFCPCI_BTRANS_THRESHOLD) {
		printk(KERN_INFO "%s: Using alternative poll value of %d\n",
		       __func__, poll);
		hfc_tl.function = (void *)hfcpci_softirq;
		hfc_tl.data = 0;
		init_timer(&hfc_tl);
		hfc_tl.expires = jiffies + tics;
		hfc_jiffies = hfc_tl.expires;
		add_timer(&hfc_tl);
	} else
		tics = 0; /* indicate the use of controller's timer */

	err = pci_register_driver(&hfc_driver);
	if (err) {
		if (timer_pending(&hfc_tl))
			del_timer(&hfc_tl);
	}

	return err;
}

static void __exit
HFC_cleanup(void)
{
	if (timer_pending(&hfc_tl))
		del_timer(&hfc_tl);

	pci_unregister_driver(&hfc_driver);
}

module_init(HFC_init);
module_exit(HFC_cleanup);

MODULE_DEVICE_TABLE(pci, hfc_ids);