/*
 * NETJet mISDN driver
 *
 * Author	Karsten Keil <keil@isdn4linux.de>
 *
 * Copyright 2009  by Karsten Keil <keil@isdn4linux.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include <linux/isdn/hdlc.h>

#define NETJET_REV	"2.0"

/* Supported card variants; TJ300 and TJ320 differ in IRQ/status handling. */
enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,
};

/* One DMA ring (send or receive) of u32 words shared with the tiger chip. */
struct tiger_dma {
	size_t	size;		/* ring size in u32 words */
	u32	*start;		/* virtual address of the ring */
	int	idx;		/* current word index derived from HW pointer */
	u32	dmastart;	/* bus address of the first word */
	u32	dmairq;		/* bus address that raises the half-way IRQ */
	u32	dmaend;		/* bus address of the last word */
	u32	dmacur;		/* last DMA address read back from the chip */
};

struct tiger_hw;

/* Per B-channel state. */
struct tiger_ch {
	struct bchannel bch;
	struct tiger_hw *nj;
	int idx;		/* current TX word index into the send ring */
	int free;		/* free words in the send ring for this channel */
	int lastrx;		/* last RX start index (overrun detection) */
	u16 rxstate;
	u16 txstate;
	struct isdnhdlc_vars hsend;	/* HDLC encoder state */
	struct isdnhdlc_vars hrecv;	/* HDLC decoder state */
	u8 *hsbuf;		/* HDLC encode bounce buffer */
	u8 *hrbuf;		/* HDLC decode bounce buffer */
};

/* txstate / rxstate bits */
#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_RUN		0x0004
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

#define LOG_SIZE	64

/* Per card state. */
struct tiger_hw {
	struct list_head list;
	struct pci_dev *pdev;
	char name[MISDN_MAX_IDLEN];
	enum nj_types typ;
	int irq;
	u32 irqcnt;
	u32 base;		/* PIO base address */
	size_t base_s;		/* PIO region size */
	dma_addr_t dma;		/* bus address of the shared DMA area */
	void *dma_p;		/* virtual address of the shared DMA area */
	spinlock_t lock;	/* lock HW */
	struct isac_hw isac;
	struct tiger_dma send;
	struct tiger_dma recv;
	struct tiger_ch bc[2];
	u8 ctrlreg;		/* shadow of NJ_CTRL */
	u8 dmactrl;		/* shadow of NJ_DMACTRL */
	u8 auxd;		/* shadow of NJ_AUXDATA */
	u8 last_is0;		/* last handled IRQSTAT0 bits */
	u8 irqmask0;
	char log[LOG_SIZE];	/* scratch prefix buffer for hex dumps */
};

static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;
static int nj_cnt;

/* Propagate the module debug mask into all channels of one card. */
static void
_set_debug(struct tiger_hw *card)
{
	card->isac.dch.debug = debug;
	card->bc[0].bch.debug = debug;
	card->bc[1].bch.debug = debug;
}

/* module_param set hook: update the debug mask on every registered card. */
static int
set_debug(const char *val, struct kernel_param *kp)
{
	int ret;
	struct tiger_hw *card;

	ret = param_set_uint(val, kp);
	if (!ret) {
		read_lock(&card_lock);
		list_for_each_entry(card, &Cards, list)
			_set_debug(card);
		read_unlock(&card_lock);
	}
	return ret;
}

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");

/* Mask all interrupt sources of the card. */
static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}


/*
 * ISAC register access: the upper two bits of the register address are
 * presented on the AUX data lines, the lower four bits on the address bus
 * (shifted left by 2 into the ISAC window).
 */
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}

static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}

/* Burst read from the ISAC FIFO (register page 0). */
static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}

/* Burst write to the ISAC FIFO (register page 0). */
static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}

/*
 * Write cnt bytes of 'fill' into this channel's byte lane of the send
 * ring, starting at word index idx and wrapping at the ring end.
 * B1 uses the low byte of each u32, B2 the next byte.
 */
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		fill <<= 8;
		mask <<= 8;
	}
	mask ^= 0xffffffff;
	while (cnt--) {
		val = card->send.start[idx];
		val &= mask;
		val |= fill;
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
			idx = 0;
	}
}

/*
 * Activate/deactivate a B-channel protocol. DMA and the tiger IRQs are
 * started with the first active channel and stopped only when both
 * channels are back to ISDN_P_NONE.
 */
static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		/* fill our lane with idle pattern before shutting down */
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop dma and interrupts if both channels NULL */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	/* resync the software indexes with the current HW DMA pointers */
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
		 card->send.idx,
		 card->recv.idx);
	return 0;
}

/* Hard reset of the tiger chip and basic AUX/IRQ pin setup. */
static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40;	/* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00;	/* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}

/*
 * Allocate the shared DMA area and the per-channel HDLC bounce buffers
 * and program the send/recv ring addresses into the controller.
 * Error paths leave partial allocations in place; the caller cleans up
 * via nj_release().
 */
static int
inittiger(struct tiger_hw *card)
{
	int i;

	card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE,
					   &card->dma);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	/* the tiger chip can only address 32 bit DMA */
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	/* 0xff is the idle pattern on both byte lanes */
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);

	/* first half of the DMA area is the send (READ DMA) ring */
	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);

	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	/* second half of the DMA area is the receive (WRITE DMA) ring */
	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);

	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}

/*
 * Copy cnt words of the receive ring (starting at word index idx) for
 * one B-channel. Transparent data goes straight into the rx_skb; HDLC
 * data is collected in the hrbuf bounce buffer and decoded frame by
 * frame below.
 */
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;

	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (!bc->bch.rx_skb) {
		bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC);
		if (!bc->bch.rx_skb) {
			pr_info("%s: B%1d receive out of memory\n",
				card->name, bc->bch.nr);
			return;
		}
	}

	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) {
			pr_debug("%s: B%1d overrun %d\n", card->name,
				 bc->bch.nr, bc->bch.rx_skb->len + cnt);
			skb_trim(bc->bch.rx_skb, 0);
			return;
		}
		p = skb_put(bc->bch.rx_skb, cnt);
	} else
		p = bc->hrbuf;

	/* extract this channel's byte lane from each 32 bit ring word */
	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}
	pn = bc->hrbuf;
next_frame:
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
		else if (stat == -HDLC_CRC_ERROR)
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_FRAMING_ERROR)
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_LENGTH_ERROR)
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
	} else
		stat = cnt;

	if (stat > 0) {
		if (debug & DEBUG_HW_BFIFO) {
			snprintf(card->log, LOG_SIZE,
"B%1d-recv %s %d ", 437 bc->bch.nr, card->name, stat); 438 print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, 439 p, stat); 440 } 441 recv_Bchannel(&bc->bch, 0); 442 } 443 if (test_bit(FLG_HDLC, &bc->bch.Flags)) { 444 pn += i; 445 cnt -= i; 446 if (!bc->bch.rx_skb) { 447 bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, 448 GFP_ATOMIC); 449 if (!bc->bch.rx_skb) { 450 pr_info("%s: B%1d receive out of memory\n", 451 card->name, bc->bch.nr); 452 return; 453 } 454 } 455 if (cnt > 0) 456 goto next_frame; 457 } 458 } 459 460 static void 461 recv_tiger(struct tiger_hw *card, u8 irq_stat) 462 { 463 u32 idx; 464 int cnt = card->recv.size / 2; 465 466 /* Note receive is via the WRITE DMA channel */ 467 card->last_is0 &= ~NJ_IRQM0_WR_MASK; 468 card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK); 469 470 if (irq_stat & NJ_IRQM0_WR_END) 471 idx = cnt - 1; 472 else 473 idx = card->recv.size - 1; 474 475 if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags)) 476 read_dma(&card->bc[0], idx, cnt); 477 if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags)) 478 read_dma(&card->bc[1], idx, cnt); 479 } 480 481 /* sync with current DMA address at start or after exception */ 482 static void 483 resync(struct tiger_ch *bc, struct tiger_hw *card) 484 { 485 card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR); 486 card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2; 487 if (bc->free > card->send.size / 2) 488 bc->free = card->send.size / 2; 489 /* currently we simple sync to the next complete free area 490 * this hast the advantage that we have always maximum time to 491 * handle TX irq 492 */ 493 if (card->send.idx < ((card->send.size / 2) - 1)) 494 bc->idx = (card->recv.size / 2) - 1; 495 else 496 bc->idx = card->recv.size - 1; 497 bc->txstate = TX_RUN; 498 pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name, 499 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx); 500 } 501 502 static int bc_next_frame(struct tiger_ch *); 503 504 static void 505 fill_hdlc_flag(struct tiger_ch *bc) 
506 { 507 struct tiger_hw *card = bc->bch.hw; 508 int count, i; 509 u32 m, v; 510 u8 *p; 511 512 if (bc->free == 0) 513 return; 514 pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name, 515 __func__, bc->bch.nr, bc->free, bc->txstate, 516 bc->idx, card->send.idx); 517 if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN)) 518 resync(bc, card); 519 count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i, 520 bc->hsbuf, bc->free); 521 pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name, 522 bc->bch.nr, count); 523 bc->free -= count; 524 p = bc->hsbuf; 525 m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff; 526 for (i = 0; i < count; i++) { 527 if (bc->idx >= card->send.size) 528 bc->idx = 0; 529 v = card->send.start[bc->idx]; 530 v &= m; 531 v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8; 532 card->send.start[bc->idx++] = v; 533 } 534 if (debug & DEBUG_HW_BFIFO) { 535 snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ", 536 bc->bch.nr, card->name, count); 537 print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count); 538 } 539 } 540 541 static void 542 fill_dma(struct tiger_ch *bc) 543 { 544 struct tiger_hw *card = bc->bch.hw; 545 int count, i; 546 u32 m, v; 547 u8 *p; 548 549 if (bc->free == 0) 550 return; 551 count = bc->bch.tx_skb->len - bc->bch.tx_idx; 552 if (count <= 0) 553 return; 554 pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name, 555 __func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx, 556 bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx); 557 if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN)) 558 resync(bc, card); 559 p = bc->bch.tx_skb->data + bc->bch.tx_idx; 560 if (test_bit(FLG_HDLC, &bc->bch.Flags)) { 561 count = isdnhdlc_encode(&bc->hsend, p, count, &i, 562 bc->hsbuf, bc->free); 563 pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name, 564 bc->bch.nr, i, count); 565 bc->bch.tx_idx += i; 566 bc->free -= count; 567 p = bc->hsbuf; 568 } else { 569 if (count > bc->free) 570 count = bc->free; 571 bc->bch.tx_idx += 
		    count;
		bc->free -= count;
	}
	/* write our byte lane, preserve the other channel's lane */
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);
}


/* Start transmission of the next frame; returns 0 if nothing is pending. */
static int
bc_next_frame(struct tiger_ch *bc)
{
	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len)
		fill_dma(bc);
	else {
		if (bc->bch.tx_skb) {
			/* send confirm, on trans, free on hdlc. */
			if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
				confirm_Bsend(&bc->bch);
			dev_kfree_skb(bc->bch.tx_skb);
		}
		if (get_next_bframe(&bc->bch))
			fill_dma(bc);
		else
			return 0;
	}
	return 1;
}

/* One half of the send ring was consumed - refill it for this channel. */
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;

	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			/* no data: keep the HDLC transmitter fed with flags */
			fill_hdlc_flag(bc);
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}

/* TX interrupt dispatch for both B-channels. */
static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int i;

	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	} else {
		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
	}
	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
	}
}

/* Shared interrupt handler: dispatches ISAC and tiger DMA interrupts. */
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		/* cleared NJ_ISACIRQ bit means the ISAC is signalling */
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}

	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;
	/* set bits in sval to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */

	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */

	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* got a write dma int */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* got a read dma int */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}

/* B-channel L2->L1 message handler (data, activate, deactivate). */
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	u32 id;
	u_long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			id = hh->id; /* skb can be freed */
			fill_dma(bc);
			ret = 0;
			spin_unlock_irqrestore(&card->lock, flags);
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
		} else
			spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				    NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}

/* B-channel CONTROL_CHANNEL operations (nothing implemented yet). */
static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
	int ret = 0;
	struct tiger_hw *card = bc->bch.hw;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = 0;
		break;
	/* Nothing implemented yet */
	case MISDN_CTRL_FILL_EMPTY:
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* B-channel control entry point (close / control). */
static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
			spin_lock_irqsave(&card->lock, flags);
			mISDN_freebchannel(bch);
			test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
			test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
			mode_tiger(bc, ISDN_P_NONE);
			spin_unlock_irqrestore(&card->lock, flags);
		}
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}

/* Device-level CONTROL_CHANNEL operations (hardware test loop). */
static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
	int ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP;
		break;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3) {
			ret = -EINVAL;
			break;
		}
		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
		break;
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
858 { 859 struct bchannel *bch; 860 861 if (rq->adr.channel > 2) 862 return -EINVAL; 863 if (rq->protocol == ISDN_P_NONE) 864 return -EINVAL; 865 bch = &card->bc[rq->adr.channel - 1].bch; 866 if (test_and_set_bit(FLG_OPEN, &bch->Flags)) 867 return -EBUSY; /* b-channel can be only open once */ 868 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); 869 bch->ch.protocol = rq->protocol; 870 rq->ch = &bch->ch; 871 return 0; 872 } 873 874 /* 875 * device control function 876 */ 877 static int 878 nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg) 879 { 880 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); 881 struct dchannel *dch = container_of(dev, struct dchannel, dev); 882 struct tiger_hw *card = dch->hw; 883 struct channel_req *rq; 884 int err = 0; 885 886 pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg); 887 switch (cmd) { 888 case OPEN_CHANNEL: 889 rq = arg; 890 if (rq->protocol == ISDN_P_TE_S0) 891 err = card->isac.open(&card->isac, rq); 892 else 893 err = open_bchannel(card, rq); 894 if (err) 895 break; 896 if (!try_module_get(THIS_MODULE)) 897 pr_info("%s: cannot get module\n", card->name); 898 break; 899 case CLOSE_CHANNEL: 900 pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id, 901 __builtin_return_address(0)); 902 module_put(THIS_MODULE); 903 break; 904 case CONTROL_CHANNEL: 905 err = channel_ctrl(card, arg); 906 break; 907 default: 908 pr_debug("%s: %s unknown command %x\n", 909 card->name, __func__, cmd); 910 return -EINVAL; 911 } 912 return err; 913 } 914 915 static int 916 nj_init_card(struct tiger_hw *card) 917 { 918 u_long flags; 919 int ret; 920 921 spin_lock_irqsave(&card->lock, flags); 922 nj_disable_hwirq(card); 923 spin_unlock_irqrestore(&card->lock, flags); 924 925 card->irq = card->pdev->irq; 926 if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) { 927 pr_info("%s: couldn't get interrupt %d\n", 928 card->name, card->irq); 929 card->irq = -1; 930 return -EIO; 931 } 932 933 
spin_lock_irqsave(&card->lock, flags); 934 nj_reset(card); 935 ret = card->isac.init(&card->isac); 936 if (ret) 937 goto error; 938 ret = inittiger(card); 939 if (ret) 940 goto error; 941 mode_tiger(&card->bc[0], ISDN_P_NONE); 942 mode_tiger(&card->bc[1], ISDN_P_NONE); 943 error: 944 spin_unlock_irqrestore(&card->lock, flags); 945 return ret; 946 } 947 948 949 static void 950 nj_release(struct tiger_hw *card) 951 { 952 u_long flags; 953 int i; 954 955 if (card->base_s) { 956 spin_lock_irqsave(&card->lock, flags); 957 nj_disable_hwirq(card); 958 mode_tiger(&card->bc[0], ISDN_P_NONE); 959 mode_tiger(&card->bc[1], ISDN_P_NONE); 960 card->isac.release(&card->isac); 961 spin_unlock_irqrestore(&card->lock, flags); 962 release_region(card->base, card->base_s); 963 card->base_s = 0; 964 } 965 if (card->irq > 0) 966 free_irq(card->irq, card); 967 if (card->isac.dch.dev.dev.class) 968 mISDN_unregister_device(&card->isac.dch.dev); 969 970 for (i = 0; i < 2; i++) { 971 mISDN_freebchannel(&card->bc[i].bch); 972 kfree(card->bc[i].hsbuf); 973 kfree(card->bc[i].hrbuf); 974 } 975 if (card->dma_p) 976 pci_free_consistent(card->pdev, NJ_DMA_SIZE, 977 card->dma_p, card->dma); 978 write_lock_irqsave(&card_lock, flags); 979 list_del(&card->list); 980 write_unlock_irqrestore(&card_lock, flags); 981 pci_clear_master(card->pdev); 982 pci_disable_device(card->pdev); 983 pci_set_drvdata(card->pdev, NULL); 984 kfree(card); 985 } 986 987 988 static int 989 nj_setup(struct tiger_hw *card) 990 { 991 card->base = pci_resource_start(card->pdev, 0); 992 card->base_s = pci_resource_len(card->pdev, 0); 993 if (!request_region(card->base, card->base_s, card->name)) { 994 pr_info("%s: NETjet config port %#x-%#x already in use\n", 995 card->name, card->base, 996 (u32)(card->base + card->base_s - 1)); 997 card->base_s = 0; 998 return -EIO; 999 } 1000 ASSIGN_FUNC(nj, ISAC, card->isac); 1001 return 0; 1002 } 1003 1004 1005 static int __devinit 1006 setup_instance(struct tiger_hw *card) 1007 { 1008 int i, 
err; 1009 u_long flags; 1010 1011 snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1); 1012 write_lock_irqsave(&card_lock, flags); 1013 list_add_tail(&card->list, &Cards); 1014 write_unlock_irqrestore(&card_lock, flags); 1015 1016 _set_debug(card); 1017 card->isac.name = card->name; 1018 spin_lock_init(&card->lock); 1019 card->isac.hwlock = &card->lock; 1020 mISDNisac_init(&card->isac, card); 1021 1022 card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) | 1023 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)); 1024 card->isac.dch.dev.D.ctrl = nj_dctrl; 1025 for (i = 0; i < 2; i++) { 1026 card->bc[i].bch.nr = i + 1; 1027 set_channelmap(i + 1, card->isac.dch.dev.channelmap); 1028 mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM); 1029 card->bc[i].bch.hw = card; 1030 card->bc[i].bch.ch.send = nj_l2l1B; 1031 card->bc[i].bch.ch.ctrl = nj_bctrl; 1032 card->bc[i].bch.ch.nr = i + 1; 1033 list_add(&card->bc[i].bch.ch.list, 1034 &card->isac.dch.dev.bchannels); 1035 card->bc[i].bch.hw = card; 1036 } 1037 err = nj_setup(card); 1038 if (err) 1039 goto error; 1040 err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev, 1041 card->name); 1042 if (err) 1043 goto error; 1044 err = nj_init_card(card); 1045 if (!err) { 1046 nj_cnt++; 1047 pr_notice("Netjet %d cards installed\n", nj_cnt); 1048 return 0; 1049 } 1050 error: 1051 nj_release(card); 1052 return err; 1053 } 1054 1055 static int __devinit 1056 nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1057 { 1058 int err = -ENOMEM; 1059 int cfg; 1060 struct tiger_hw *card; 1061 1062 if (pdev->subsystem_vendor == 0x8086 && 1063 pdev->subsystem_device == 0x0003) { 1064 pr_notice("Netjet: Digium X100P/X101P not handled\n"); 1065 return -ENODEV; 1066 } 1067 1068 if (pdev->subsystem_vendor == 0x55 && 1069 pdev->subsystem_device == 0x02) { 1070 pr_notice("Netjet: Enter!Now not handled yet\n"); 1071 return -ENODEV; 1072 } 1073 1074 card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC); 1075 if 
(!card) { 1076 pr_info("No kmem for Netjet\n"); 1077 return err; 1078 } 1079 1080 card->pdev = pdev; 1081 1082 err = pci_enable_device(pdev); 1083 if (err) { 1084 kfree(card); 1085 return err; 1086 } 1087 1088 printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n", 1089 pci_name(pdev)); 1090 1091 pci_set_master(pdev); 1092 1093 /* the TJ300 and TJ320 must be detected, the IRQ handling is different 1094 * unfortunately the chips use the same device ID, but the TJ320 has 1095 * the bit20 in status PCI cfg register set 1096 */ 1097 pci_read_config_dword(pdev, 0x04, &cfg); 1098 if (cfg & 0x00100000) 1099 card->typ = NETJET_S_TJ320; 1100 else 1101 card->typ = NETJET_S_TJ300; 1102 1103 card->base = pci_resource_start(pdev, 0); 1104 card->irq = pdev->irq; 1105 pci_set_drvdata(pdev, card); 1106 err = setup_instance(card); 1107 if (err) 1108 pci_set_drvdata(pdev, NULL); 1109 1110 return err; 1111 } 1112 1113 1114 static void __devexit nj_remove(struct pci_dev *pdev) 1115 { 1116 struct tiger_hw *card = pci_get_drvdata(pdev); 1117 1118 if (card) 1119 nj_release(card); 1120 else 1121 pr_info("%s drvdata already removed\n", __func__); 1122 } 1123 1124 /* We cannot select cards with PCI_SUB... IDs, since here are cards with 1125 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject 1126 * known other cards which not work with this driver - see probe function */ 1127 static struct pci_device_id nj_pci_ids[] __devinitdata = { 1128 { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300, 1129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1130 { } 1131 }; 1132 MODULE_DEVICE_TABLE(pci, nj_pci_ids); 1133 1134 static struct pci_driver nj_driver = { 1135 .name = "netjet", 1136 .probe = nj_probe, 1137 .remove = __devexit_p(nj_remove), 1138 .id_table = nj_pci_ids, 1139 }; 1140 1141 static int __init nj_init(void) 1142 { 1143 int err; 1144 1145 pr_notice("Netjet PCI driver Rev. 
%s\n", NETJET_REV); 1146 err = pci_register_driver(&nj_driver); 1147 return err; 1148 } 1149 1150 static void __exit nj_cleanup(void) 1151 { 1152 pci_unregister_driver(&nj_driver); 1153 } 1154 1155 module_init(nj_init); 1156 module_exit(nj_cleanup); 1157