1 /* 2 A FORE Systems 200E-series driver for ATM on Linux. 3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. 4 5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). 6 7 This driver simultaneously supports PCA-200E and SBA-200E adapters 8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures. 9 10 This program is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2 of the License, or 13 (at your option) any later version. 14 15 This program is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with this program; if not, write to the Free Software 22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 */ 24 25 26 #include <linux/kernel.h> 27 #include <linux/slab.h> 28 #include <linux/init.h> 29 #include <linux/capability.h> 30 #include <linux/interrupt.h> 31 #include <linux/bitops.h> 32 #include <linux/pci.h> 33 #include <linux/module.h> 34 #include <linux/atmdev.h> 35 #include <linux/sonet.h> 36 #include <linux/atm_suni.h> 37 #include <linux/dma-mapping.h> 38 #include <linux/delay.h> 39 #include <asm/io.h> 40 #include <asm/string.h> 41 #include <asm/page.h> 42 #include <asm/irq.h> 43 #include <asm/dma.h> 44 #include <asm/byteorder.h> 45 #include <asm/uaccess.h> 46 #include <asm/atomic.h> 47 48 #ifdef CONFIG_ATM_FORE200E_SBA 49 #include <asm/idprom.h> 50 #include <asm/sbus.h> 51 #include <asm/openprom.h> 52 #include <asm/oplib.h> 53 #include <asm/pgtable.h> 54 #endif 55 56 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */ 57 #define FORE200E_USE_TASKLET 58 #endif 59 60 #if 0 /* enable 
the debugging code of the buffer supply queues */ 61 #define FORE200E_BSQ_DEBUG 62 #endif 63 64 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */ 65 #define FORE200E_52BYTE_AAL0_SDU 66 #endif 67 68 #include "fore200e.h" 69 #include "suni.h" 70 71 #define FORE200E_VERSION "0.3e" 72 73 #define FORE200E "fore200e: " 74 75 #if 0 /* override .config */ 76 #define CONFIG_ATM_FORE200E_DEBUG 1 77 #endif 78 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 79 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \ 80 printk(FORE200E format, ##args); } while (0) 81 #else 82 #define DPRINTK(level, format, args...) do {} while (0) 83 #endif 84 85 86 #define FORE200E_ALIGN(addr, alignment) \ 87 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr)) 88 89 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type)) 90 91 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 92 93 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo)) 94 95 #if 1 96 #define ASSERT(expr) if (!(expr)) { \ 97 printk(FORE200E "assertion failed! 
%s[%d]: %s\n", \
                                    __FUNCTION__, __LINE__, #expr); \
                             panic(FORE200E "%s", __FUNCTION__); \
                         }
#else
#define ASSERT(expr)     do {} while (0)
#endif


/* forward declarations; both statics must be completed later in this
   translation unit (their definitions are outside this view) */
static const struct atmdev_ops   fore200e_ops;
static const struct fore200e_bus fore200e_bus[];

/* global list of fore200e boards */
static LIST_HEAD(fore200e_boards);


MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");


/* per-[scheme][magnitude] number of rx buffers; constants come from fore200e.h */
static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_NBR, BUFFER_L1_NBR },
    { BUFFER_S2_NBR, BUFFER_L2_NBR }
};

/* per-[scheme][magnitude] size of rx buffers; constants come from fore200e.h */
static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
    { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
};


#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
/* printable names for the traffic classes, used by the debug code only */
static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
#endif


#if 0 /* currently unused */
/* map a firmware AAL identifier to the generic ATM-layer AAL identifier */
static int
fore200e_fore2atm_aal(enum fore200e_aal aal)
{
    switch(aal) {
    case FORE200E_AAL0:  return ATM_AAL0;
    case FORE200E_AAL34: return ATM_AAL34;
    case FORE200E_AAL5:  return ATM_AAL5;
    }

    return -EINVAL;
}
#endif


/* map a generic ATM-layer AAL identifier to the firmware AAL identifier.
   AAL1 and AAL2 are carried over AAL5 by this driver.
   NOTE(review): -EINVAL is silently converted to the enum return type here;
   callers must treat negative values as errors -- confirm all call sites do */
static enum fore200e_aal
fore200e_atm2fore_aal(int aal)
{
    switch(aal) {
    case ATM_AAL0:  return FORE200E_AAL0;
    case ATM_AAL34: return FORE200E_AAL34;
    case ATM_AAL1:
    case ATM_AAL2:
    case ATM_AAL5:  return FORE200E_AAL5;
    }

    return -EINVAL;
}


/* render an IRQ number as a decimal string.
   NOTE: returns a pointer to a static buffer, so this helper is not
   reentrant; it must not be used from concurrent contexts */
static char*
fore200e_irq_itoa(int irq)
{
    static char str[8];
    sprintf(str, "%d", irq);
    return str;
}


/* allocate and align a chunk of memory intended to hold the data being exchanged
   between the driver and the adapter (using streaming DVMA) */
static int 176 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction) 177 { 178 unsigned long offset = 0; 179 180 if (alignment <= sizeof(int)) 181 alignment = 0; 182 183 chunk->alloc_size = size + alignment; 184 chunk->align_size = size; 185 chunk->direction = direction; 186 187 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA); 188 if (chunk->alloc_addr == NULL) 189 return -ENOMEM; 190 191 if (alignment > 0) 192 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment); 193 194 chunk->align_addr = chunk->alloc_addr + offset; 195 196 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction); 197 198 return 0; 199 } 200 201 202 /* free a chunk of memory */ 203 204 static void 205 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 206 { 207 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction); 208 209 kfree(chunk->alloc_addr); 210 } 211 212 213 static void 214 fore200e_spin(int msecs) 215 { 216 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 217 while (time_before(jiffies, timeout)); 218 } 219 220 221 static int 222 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs) 223 { 224 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 225 int ok; 226 227 mb(); 228 do { 229 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR)) 230 break; 231 232 } while (time_before(jiffies, timeout)); 233 234 #if 1 235 if (!ok) { 236 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n", 237 *addr, val); 238 } 239 #endif 240 241 return ok; 242 } 243 244 245 static int 246 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs) 247 { 248 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 249 int ok; 250 251 do { 252 if ((ok = (fore200e->bus->read(addr) == val))) 253 break; 254 255 } while (time_before(jiffies, timeout)); 
256 257 #if 1 258 if (!ok) { 259 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n", 260 fore200e->bus->read(addr), val); 261 } 262 #endif 263 264 return ok; 265 } 266 267 268 static void 269 fore200e_free_rx_buf(struct fore200e* fore200e) 270 { 271 int scheme, magn, nbr; 272 struct buffer* buffer; 273 274 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 275 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 276 277 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) { 278 279 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) { 280 281 struct chunk* data = &buffer[ nbr ].data; 282 283 if (data->alloc_addr != NULL) 284 fore200e_chunk_free(fore200e, data); 285 } 286 } 287 } 288 } 289 } 290 291 292 static void 293 fore200e_uninit_bs_queue(struct fore200e* fore200e) 294 { 295 int scheme, magn; 296 297 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 298 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 299 300 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status; 301 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block; 302 303 if (status->alloc_addr) 304 fore200e->bus->dma_chunk_free(fore200e, status); 305 306 if (rbd_block->alloc_addr) 307 fore200e->bus->dma_chunk_free(fore200e, rbd_block); 308 } 309 } 310 } 311 312 313 static int 314 fore200e_reset(struct fore200e* fore200e, int diag) 315 { 316 int ok; 317 318 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET; 319 320 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat); 321 322 fore200e->bus->reset(fore200e); 323 324 if (diag) { 325 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000); 326 if (ok == 0) { 327 328 printk(FORE200E "device %s self-test failed\n", fore200e->name); 329 return -ENODEV; 330 } 331 332 printk(FORE200E "device %s self-test passed\n", fore200e->name); 333 334 fore200e->state = FORE200E_STATE_RESET; 335 } 336 337 return 
0; 338 } 339 340 341 static void 342 fore200e_shutdown(struct fore200e* fore200e) 343 { 344 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n", 345 fore200e->name, fore200e->phys_base, 346 fore200e_irq_itoa(fore200e->irq)); 347 348 if (fore200e->state > FORE200E_STATE_RESET) { 349 /* first, reset the board to prevent further interrupts or data transfers */ 350 fore200e_reset(fore200e, 0); 351 } 352 353 /* then, release all allocated resources */ 354 switch(fore200e->state) { 355 356 case FORE200E_STATE_COMPLETE: 357 kfree(fore200e->stats); 358 359 case FORE200E_STATE_IRQ: 360 free_irq(fore200e->irq, fore200e->atm_dev); 361 362 case FORE200E_STATE_ALLOC_BUF: 363 fore200e_free_rx_buf(fore200e); 364 365 case FORE200E_STATE_INIT_BSQ: 366 fore200e_uninit_bs_queue(fore200e); 367 368 case FORE200E_STATE_INIT_RXQ: 369 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status); 370 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); 371 372 case FORE200E_STATE_INIT_TXQ: 373 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status); 374 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd); 375 376 case FORE200E_STATE_INIT_CMDQ: 377 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status); 378 379 case FORE200E_STATE_INITIALIZE: 380 /* nothing to do for that state */ 381 382 case FORE200E_STATE_START_FW: 383 /* nothing to do for that state */ 384 385 case FORE200E_STATE_LOAD_FW: 386 /* nothing to do for that state */ 387 388 case FORE200E_STATE_RESET: 389 /* nothing to do for that state */ 390 391 case FORE200E_STATE_MAP: 392 fore200e->bus->unmap(fore200e); 393 394 case FORE200E_STATE_CONFIGURE: 395 /* nothing to do for that state */ 396 397 case FORE200E_STATE_REGISTER: 398 /* XXX shouldn't we *start* by deregistering the device? 
*/ 399 atm_dev_deregister(fore200e->atm_dev); 400 401 case FORE200E_STATE_BLANK: 402 /* nothing to do for that state */ 403 break; 404 } 405 } 406 407 408 #ifdef CONFIG_ATM_FORE200E_PCA 409 410 static u32 fore200e_pca_read(volatile u32 __iomem *addr) 411 { 412 /* on big-endian hosts, the board is configured to convert 413 the endianess of slave RAM accesses */ 414 return le32_to_cpu(readl(addr)); 415 } 416 417 418 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr) 419 { 420 /* on big-endian hosts, the board is configured to convert 421 the endianess of slave RAM accesses */ 422 writel(cpu_to_le32(val), addr); 423 } 424 425 426 static u32 427 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) 428 { 429 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction); 430 431 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n", 432 virt_addr, size, direction, dma_addr); 433 434 return dma_addr; 435 } 436 437 438 static void 439 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 440 { 441 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n", 442 dma_addr, size, direction); 443 444 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 445 } 446 447 448 static void 449 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 450 { 451 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 452 453 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 454 } 455 456 static void 457 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 458 { 459 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 460 461 
pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 462 } 463 464 465 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism 466 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */ 467 468 static int 469 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, 470 int size, int nbr, int alignment) 471 { 472 /* returned chunks are page-aligned */ 473 chunk->alloc_size = size * nbr; 474 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev, 475 chunk->alloc_size, 476 &chunk->dma_addr); 477 478 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) 479 return -ENOMEM; 480 481 chunk->align_addr = chunk->alloc_addr; 482 483 return 0; 484 } 485 486 487 /* free a DMA consistent chunk of memory */ 488 489 static void 490 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 491 { 492 pci_free_consistent((struct pci_dev*)fore200e->bus_dev, 493 chunk->alloc_size, 494 chunk->alloc_addr, 495 chunk->dma_addr); 496 } 497 498 499 static int 500 fore200e_pca_irq_check(struct fore200e* fore200e) 501 { 502 /* this is a 1 bit register */ 503 int irq_posted = readl(fore200e->regs.pca.psr); 504 505 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2) 506 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) { 507 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number); 508 } 509 #endif 510 511 return irq_posted; 512 } 513 514 515 static void 516 fore200e_pca_irq_ack(struct fore200e* fore200e) 517 { 518 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr); 519 } 520 521 522 static void 523 fore200e_pca_reset(struct fore200e* fore200e) 524 { 525 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr); 526 fore200e_spin(10); 527 writel(0, fore200e->regs.pca.hcr); 528 } 529 530 531 static int __devinit 532 fore200e_pca_map(struct fore200e* fore200e) 533 { 534 DPRINTK(2, 
"device %s being mapped in memory\n", fore200e->name); 535 536 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH); 537 538 if (fore200e->virt_base == NULL) { 539 printk(FORE200E "can't map device %s\n", fore200e->name); 540 return -EFAULT; 541 } 542 543 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 544 545 /* gain access to the PCA specific registers */ 546 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET; 547 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET; 548 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET; 549 550 fore200e->state = FORE200E_STATE_MAP; 551 return 0; 552 } 553 554 555 static void 556 fore200e_pca_unmap(struct fore200e* fore200e) 557 { 558 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name); 559 560 if (fore200e->virt_base != NULL) 561 iounmap(fore200e->virt_base); 562 } 563 564 565 static int __devinit 566 fore200e_pca_configure(struct fore200e* fore200e) 567 { 568 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; 569 u8 master_ctrl, latency; 570 571 DPRINTK(2, "device %s being configured\n", fore200e->name); 572 573 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) { 574 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n"); 575 return -EIO; 576 } 577 578 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl); 579 580 master_ctrl = master_ctrl 581 #if defined(__BIG_ENDIAN) 582 /* request the PCA board to convert the endianess of slave RAM accesses */ 583 | PCA200E_CTRL_CONVERT_ENDIAN 584 #endif 585 #if 0 586 | PCA200E_CTRL_DIS_CACHE_RD 587 | PCA200E_CTRL_DIS_WRT_INVAL 588 | PCA200E_CTRL_ENA_CONT_REQ_MODE 589 | PCA200E_CTRL_2_CACHE_WRT_INVAL 590 #endif 591 | PCA200E_CTRL_LARGE_PCI_BURSTS; 592 593 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl); 594 595 /* raise latency from 32 (default) to 192, as this seems to prevent NIC 596 lockups (under heavy rx 
loads) due to continuous 'FIFO OUT full' condition. 597 this may impact the performances of other PCI devices on the same bus, though */ 598 latency = 192; 599 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency); 600 601 fore200e->state = FORE200E_STATE_CONFIGURE; 602 return 0; 603 } 604 605 606 static int __init 607 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom) 608 { 609 struct host_cmdq* cmdq = &fore200e->host_cmdq; 610 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 611 struct prom_opcode opcode; 612 int ok; 613 u32 prom_dma; 614 615 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 616 617 opcode.opcode = OPCODE_GET_PROM; 618 opcode.pad = 0; 619 620 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE); 621 622 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr); 623 624 *entry->status = STATUS_PENDING; 625 626 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode); 627 628 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 629 630 *entry->status = STATUS_FREE; 631 632 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE); 633 634 if (ok == 0) { 635 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name); 636 return -EIO; 637 } 638 639 #if defined(__BIG_ENDIAN) 640 641 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) )) 642 643 /* MAC address is stored as little-endian */ 644 swap_here(&prom->mac_addr[0]); 645 swap_here(&prom->mac_addr[4]); 646 #endif 647 648 return 0; 649 } 650 651 652 static int 653 fore200e_pca_proc_read(struct fore200e* fore200e, char *page) 654 { 655 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; 656 657 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n", 658 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); 659 } 660 661 #endif /* CONFIG_ATM_FORE200E_PCA 
*/ 662 663 664 #ifdef CONFIG_ATM_FORE200E_SBA 665 666 static u32 667 fore200e_sba_read(volatile u32 __iomem *addr) 668 { 669 return sbus_readl(addr); 670 } 671 672 673 static void 674 fore200e_sba_write(u32 val, volatile u32 __iomem *addr) 675 { 676 sbus_writel(val, addr); 677 } 678 679 680 static u32 681 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) 682 { 683 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction); 684 685 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n", 686 virt_addr, size, direction, dma_addr); 687 688 return dma_addr; 689 } 690 691 692 static void 693 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 694 { 695 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n", 696 dma_addr, size, direction); 697 698 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 699 } 700 701 702 static void 703 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 704 { 705 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 706 707 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 708 } 709 710 static void 711 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 712 { 713 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 714 715 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 716 } 717 718 719 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism 720 (to hold descriptors, status, queues, etc.) 
shared by the driver and the adapter */ 721 722 static int 723 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, 724 int size, int nbr, int alignment) 725 { 726 chunk->alloc_size = chunk->align_size = size * nbr; 727 728 /* returned chunks are page-aligned */ 729 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev, 730 chunk->alloc_size, 731 &chunk->dma_addr); 732 733 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) 734 return -ENOMEM; 735 736 chunk->align_addr = chunk->alloc_addr; 737 738 return 0; 739 } 740 741 742 /* free a DVMA consistent chunk of memory */ 743 744 static void 745 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 746 { 747 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev, 748 chunk->alloc_size, 749 chunk->alloc_addr, 750 chunk->dma_addr); 751 } 752 753 754 static void 755 fore200e_sba_irq_enable(struct fore200e* fore200e) 756 { 757 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 758 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr); 759 } 760 761 762 static int 763 fore200e_sba_irq_check(struct fore200e* fore200e) 764 { 765 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ; 766 } 767 768 769 static void 770 fore200e_sba_irq_ack(struct fore200e* fore200e) 771 { 772 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 773 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr); 774 } 775 776 777 static void 778 fore200e_sba_reset(struct fore200e* fore200e) 779 { 780 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr); 781 fore200e_spin(10); 782 fore200e->bus->write(0, fore200e->regs.sba.hcr); 783 } 784 785 786 static int __init 787 fore200e_sba_map(struct fore200e* fore200e) 788 { 789 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev; 790 unsigned int bursts; 791 792 /* gain access to the SBA specific registers 
*/ 793 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR"); 794 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR"); 795 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR"); 796 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM"); 797 798 if (fore200e->virt_base == NULL) { 799 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name); 800 return -EFAULT; 801 } 802 803 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 804 805 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */ 806 807 /* get the supported DVMA burst sizes */ 808 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00); 809 810 if (sbus_can_dma_64bit(sbus_dev)) 811 sbus_set_sbus64(sbus_dev, bursts); 812 813 fore200e->state = FORE200E_STATE_MAP; 814 return 0; 815 } 816 817 818 static void 819 fore200e_sba_unmap(struct fore200e* fore200e) 820 { 821 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH); 822 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH); 823 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH); 824 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH); 825 } 826 827 828 static int __init 829 fore200e_sba_configure(struct fore200e* fore200e) 830 { 831 fore200e->state = FORE200E_STATE_CONFIGURE; 832 return 0; 833 } 834 835 836 static struct fore200e* __init 837 fore200e_sba_detect(const struct fore200e_bus* bus, int index) 838 { 839 struct fore200e* fore200e; 840 struct sbus_bus* sbus_bus; 841 struct sbus_dev* sbus_dev = NULL; 842 843 unsigned int count = 0; 844 845 for_each_sbus (sbus_bus) { 846 for_each_sbusdev (sbus_dev, sbus_bus) { 847 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) { 848 if (count >= index) 849 goto found; 850 count++; 851 } 852 } 853 } 854 return NULL; 855 856 found: 857 if 
(sbus_dev->num_registers != 4) { 858 printk(FORE200E "this %s device has %d instead of 4 registers\n", 859 bus->model_name, sbus_dev->num_registers); 860 return NULL; 861 } 862 863 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 864 if (fore200e == NULL) 865 return NULL; 866 867 fore200e->bus = bus; 868 fore200e->bus_dev = sbus_dev; 869 fore200e->irq = sbus_dev->irqs[ 0 ]; 870 871 fore200e->phys_base = (unsigned long)sbus_dev; 872 873 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1); 874 875 return fore200e; 876 } 877 878 879 static int __init 880 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom) 881 { 882 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev; 883 int len; 884 885 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4); 886 if (len < 0) 887 return -EBUSY; 888 889 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4); 890 if (len < 0) 891 return -EBUSY; 892 893 prom_getproperty(sbus_dev->prom_node, "serialnumber", 894 (char*)&prom->serial_number, sizeof(prom->serial_number)); 895 896 prom_getproperty(sbus_dev->prom_node, "promversion", 897 (char*)&prom->hw_revision, sizeof(prom->hw_revision)); 898 899 return 0; 900 } 901 902 903 static int 904 fore200e_sba_proc_read(struct fore200e* fore200e, char *page) 905 { 906 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev; 907 908 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name); 909 } 910 #endif /* CONFIG_ATM_FORE200E_SBA */ 911 912 913 static void 914 fore200e_tx_irq(struct fore200e* fore200e) 915 { 916 struct host_txq* txq = &fore200e->host_txq; 917 struct host_txq_entry* entry; 918 struct atm_vcc* vcc; 919 struct fore200e_vc_map* vc_map; 920 921 if (fore200e->host_txq.txing == 0) 922 return; 923 924 for (;;) { 925 926 entry = &txq->host_entry[ txq->tail ]; 927 928 if ((*entry->status & STATUS_COMPLETE) == 0) { 929 break; 930 } 931 932 
DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n", 933 entry, txq->tail, entry->vc_map, entry->skb); 934 935 /* free copy of misaligned data */ 936 kfree(entry->data); 937 938 /* remove DMA mapping */ 939 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length, 940 DMA_TO_DEVICE); 941 942 vc_map = entry->vc_map; 943 944 /* vcc closed since the time the entry was submitted for tx? */ 945 if ((vc_map->vcc == NULL) || 946 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 947 948 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n", 949 fore200e->atm_dev->number); 950 951 dev_kfree_skb_any(entry->skb); 952 } 953 else { 954 ASSERT(vc_map->vcc); 955 956 /* vcc closed then immediately re-opened? */ 957 if (vc_map->incarn != entry->incarn) { 958 959 /* when a vcc is closed, some PDUs may be still pending in the tx queue. 960 if the same vcc is immediately re-opened, those pending PDUs must 961 not be popped after the completion of their emission, as they refer 962 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc 963 would be decremented by the size of the (unrelated) skb, possibly 964 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc. 965 we thus bind the tx entry to the current incarnation of the vcc 966 when the entry is submitted for tx. When the tx later completes, 967 if the incarnation number of the tx entry does not match the one 968 of the vcc, then this implies that the vcc has been closed then re-opened. 969 we thus just drop the skb here. 
*/ 970 971 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n", 972 fore200e->atm_dev->number); 973 974 dev_kfree_skb_any(entry->skb); 975 } 976 else { 977 vcc = vc_map->vcc; 978 ASSERT(vcc); 979 980 /* notify tx completion */ 981 if (vcc->pop) { 982 vcc->pop(vcc, entry->skb); 983 } 984 else { 985 dev_kfree_skb_any(entry->skb); 986 } 987 #if 1 988 /* race fixed by the above incarnation mechanism, but... */ 989 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) { 990 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0); 991 } 992 #endif 993 /* check error condition */ 994 if (*entry->status & STATUS_ERROR) 995 atomic_inc(&vcc->stats->tx_err); 996 else 997 atomic_inc(&vcc->stats->tx); 998 } 999 } 1000 1001 *entry->status = STATUS_FREE; 1002 1003 fore200e->host_txq.txing--; 1004 1005 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX); 1006 } 1007 } 1008 1009 1010 #ifdef FORE200E_BSQ_DEBUG 1011 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn) 1012 { 1013 struct buffer* buffer; 1014 int count = 0; 1015 1016 buffer = bsq->freebuf; 1017 while (buffer) { 1018 1019 if (buffer->supplied) { 1020 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n", 1021 where, scheme, magn, buffer->index); 1022 } 1023 1024 if (buffer->magn != magn) { 1025 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n", 1026 where, scheme, magn, buffer->index, buffer->magn); 1027 } 1028 1029 if (buffer->scheme != scheme) { 1030 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n", 1031 where, scheme, magn, buffer->index, buffer->scheme); 1032 } 1033 1034 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) { 1035 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n", 1036 where, scheme, magn, buffer->index); 1037 } 1038 1039 count++; 1040 buffer = buffer->next; 1041 } 1042 1043 if (count != bsq->freebuf_count) { 1044 
printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
               where, scheme, magn, count, bsq->freebuf_count);
    }
    return 0;
}
#endif


/* replenish the receive buffer supply queues: for every scheme/magnitude
   queue, hand free host buffers back to the adapter in blocks of
   RBD_BLK_SIZE receive buffer descriptors */

static void
fore200e_supply(struct fore200e* fore200e)
{
    int  scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
        for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

            bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
            bsq_audit(1, bsq, scheme, magn);
#endif
            /* only whole blocks of RBD_BLK_SIZE buffers are supplied */
            while (bsq->freebuf_count >= RBD_BLK_SIZE) {

                DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
                        RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

                entry = &bsq->host_entry[ bsq->head ];

                for (i = 0; i < RBD_BLK_SIZE; i++) {

                    /* take the first buffer in the free buffer list */
                    buffer = bsq->freebuf;
                    if (!buffer) {
                        /* freebuf_count disagrees with the actual list length;
                           bail out rather than dereference a NULL buffer */
                        printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
                               scheme, magn, bsq->freebuf_count);
                        return;
                    }
                    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
                    if (buffer->supplied)
                        printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
                               scheme, magn, buffer->index);
                    buffer->supplied = 1;
#endif
                    /* fill the i-th descriptor: device-visible buffer address
                       plus an opaque handle used to find the buffer again at
                       rx completion time */
                    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
                    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
                }

                FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

                /* decrease accordingly the number of free rx buffers */
                bsq->freebuf_count -= RBD_BLK_SIZE;

                /* mark the entry in flight, then hand the descriptor block
                   address to the adapter */
                *entry->status = STATUS_PENDING;
                fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
            }
        }
    }
}


static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct 
rpd* rpd) 1112 { 1113 struct sk_buff* skb; 1114 struct buffer* buffer; 1115 struct fore200e_vcc* fore200e_vcc; 1116 int i, pdu_len = 0; 1117 #ifdef FORE200E_52BYTE_AAL0_SDU 1118 u32 cell_header = 0; 1119 #endif 1120 1121 ASSERT(vcc); 1122 1123 fore200e_vcc = FORE200E_VCC(vcc); 1124 ASSERT(fore200e_vcc); 1125 1126 #ifdef FORE200E_52BYTE_AAL0_SDU 1127 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) { 1128 1129 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) | 1130 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) | 1131 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) | 1132 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) | 1133 rpd->atm_header.clp; 1134 pdu_len = 4; 1135 } 1136 #endif 1137 1138 /* compute total PDU length */ 1139 for (i = 0; i < rpd->nseg; i++) 1140 pdu_len += rpd->rsd[ i ].length; 1141 1142 skb = alloc_skb(pdu_len, GFP_ATOMIC); 1143 if (skb == NULL) { 1144 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); 1145 1146 atomic_inc(&vcc->stats->rx_drop); 1147 return -ENOMEM; 1148 } 1149 1150 __net_timestamp(skb); 1151 1152 #ifdef FORE200E_52BYTE_AAL0_SDU 1153 if (cell_header) { 1154 *((u32*)skb_put(skb, 4)) = cell_header; 1155 } 1156 #endif 1157 1158 /* reassemble segments */ 1159 for (i = 0; i < rpd->nseg; i++) { 1160 1161 /* rebuild rx buffer address from rsd handle */ 1162 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); 1163 1164 /* Make device DMA transfer visible to CPU. */ 1165 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); 1166 1167 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length); 1168 1169 /* Now let the device get at it again. 
*/ 1170 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); 1171 } 1172 1173 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize); 1174 1175 if (pdu_len < fore200e_vcc->rx_min_pdu) 1176 fore200e_vcc->rx_min_pdu = pdu_len; 1177 if (pdu_len > fore200e_vcc->rx_max_pdu) 1178 fore200e_vcc->rx_max_pdu = pdu_len; 1179 fore200e_vcc->rx_pdu++; 1180 1181 /* push PDU */ 1182 if (atm_charge(vcc, skb->truesize) == 0) { 1183 1184 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n", 1185 vcc->itf, vcc->vpi, vcc->vci); 1186 1187 dev_kfree_skb_any(skb); 1188 1189 atomic_inc(&vcc->stats->rx_drop); 1190 return -ENOMEM; 1191 } 1192 1193 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1194 1195 vcc->push(vcc, skb); 1196 atomic_inc(&vcc->stats->rx); 1197 1198 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1199 1200 return 0; 1201 } 1202 1203 1204 static void 1205 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd) 1206 { 1207 struct host_bsq* bsq; 1208 struct buffer* buffer; 1209 int i; 1210 1211 for (i = 0; i < rpd->nseg; i++) { 1212 1213 /* rebuild rx buffer address from rsd handle */ 1214 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); 1215 1216 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ]; 1217 1218 #ifdef FORE200E_BSQ_DEBUG 1219 bsq_audit(2, bsq, buffer->scheme, buffer->magn); 1220 1221 if (buffer->supplied == 0) 1222 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n", 1223 buffer->scheme, buffer->magn, buffer->index); 1224 buffer->supplied = 0; 1225 #endif 1226 1227 /* re-insert the buffer into the free buffer list */ 1228 buffer->next = bsq->freebuf; 1229 bsq->freebuf = buffer; 1230 1231 /* then increment the number of free rx buffers */ 1232 bsq->freebuf_count++; 1233 } 1234 } 1235 1236 1237 static void 1238 fore200e_rx_irq(struct fore200e* fore200e) 1239 { 1240 struct host_rxq* rxq = &fore200e->host_rxq; 1241 struct host_rxq_entry* 
entry; 1242 struct atm_vcc* vcc; 1243 struct fore200e_vc_map* vc_map; 1244 1245 for (;;) { 1246 1247 entry = &rxq->host_entry[ rxq->head ]; 1248 1249 /* no more received PDUs */ 1250 if ((*entry->status & STATUS_COMPLETE) == 0) 1251 break; 1252 1253 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1254 1255 if ((vc_map->vcc == NULL) || 1256 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 1257 1258 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n", 1259 fore200e->atm_dev->number, 1260 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1261 } 1262 else { 1263 vcc = vc_map->vcc; 1264 ASSERT(vcc); 1265 1266 if ((*entry->status & STATUS_ERROR) == 0) { 1267 1268 fore200e_push_rpd(fore200e, vcc, entry->rpd); 1269 } 1270 else { 1271 DPRINTK(2, "damaged PDU on %d.%d.%d\n", 1272 fore200e->atm_dev->number, 1273 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1274 atomic_inc(&vcc->stats->rx_err); 1275 } 1276 } 1277 1278 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); 1279 1280 fore200e_collect_rpd(fore200e, entry->rpd); 1281 1282 /* rewrite the rpd address to ack the received PDU */ 1283 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr); 1284 *entry->status = STATUS_FREE; 1285 1286 fore200e_supply(fore200e); 1287 } 1288 } 1289 1290 1291 #ifndef FORE200E_USE_TASKLET 1292 static void 1293 fore200e_irq(struct fore200e* fore200e) 1294 { 1295 unsigned long flags; 1296 1297 spin_lock_irqsave(&fore200e->q_lock, flags); 1298 fore200e_rx_irq(fore200e); 1299 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1300 1301 spin_lock_irqsave(&fore200e->q_lock, flags); 1302 fore200e_tx_irq(fore200e); 1303 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1304 } 1305 #endif 1306 1307 1308 static irqreturn_t 1309 fore200e_interrupt(int irq, void* dev) 1310 { 1311 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev); 1312 1313 if (fore200e->bus->irq_check(fore200e) == 0) { 1314 1315 DPRINTK(3, 
"interrupt NOT triggered by device %d\n", fore200e->atm_dev->number); 1316 return IRQ_NONE; 1317 } 1318 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number); 1319 1320 #ifdef FORE200E_USE_TASKLET 1321 tasklet_schedule(&fore200e->tx_tasklet); 1322 tasklet_schedule(&fore200e->rx_tasklet); 1323 #else 1324 fore200e_irq(fore200e); 1325 #endif 1326 1327 fore200e->bus->irq_ack(fore200e); 1328 return IRQ_HANDLED; 1329 } 1330 1331 1332 #ifdef FORE200E_USE_TASKLET 1333 static void 1334 fore200e_tx_tasklet(unsigned long data) 1335 { 1336 struct fore200e* fore200e = (struct fore200e*) data; 1337 unsigned long flags; 1338 1339 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1340 1341 spin_lock_irqsave(&fore200e->q_lock, flags); 1342 fore200e_tx_irq(fore200e); 1343 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1344 } 1345 1346 1347 static void 1348 fore200e_rx_tasklet(unsigned long data) 1349 { 1350 struct fore200e* fore200e = (struct fore200e*) data; 1351 unsigned long flags; 1352 1353 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1354 1355 spin_lock_irqsave(&fore200e->q_lock, flags); 1356 fore200e_rx_irq((struct fore200e*) data); 1357 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1358 } 1359 #endif 1360 1361 1362 static int 1363 fore200e_select_scheme(struct atm_vcc* vcc) 1364 { 1365 /* fairly balance the VCs over (identical) buffer schemes */ 1366 int scheme = vcc->vci % 2 ? 
BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; 1367 1368 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", 1369 vcc->itf, vcc->vpi, vcc->vci, scheme); 1370 1371 return scheme; 1372 } 1373 1374 1375 static int 1376 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) 1377 { 1378 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1379 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1380 struct activate_opcode activ_opcode; 1381 struct deactivate_opcode deactiv_opcode; 1382 struct vpvc vpvc; 1383 int ok; 1384 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); 1385 1386 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1387 1388 if (activate) { 1389 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); 1390 1391 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; 1392 activ_opcode.aal = aal; 1393 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; 1394 activ_opcode.pad = 0; 1395 } 1396 else { 1397 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; 1398 deactiv_opcode.pad = 0; 1399 } 1400 1401 vpvc.vci = vcc->vci; 1402 vpvc.vpi = vcc->vpi; 1403 1404 *entry->status = STATUS_PENDING; 1405 1406 if (activate) { 1407 1408 #ifdef FORE200E_52BYTE_AAL0_SDU 1409 mtu = 48; 1410 #endif 1411 /* the MTU is not used by the cp, except in the case of AAL0 */ 1412 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); 1413 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); 1414 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); 1415 } 1416 else { 1417 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); 1418 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); 1419 } 1420 1421 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1422 1423 *entry->status = STATUS_FREE; 1424 1425 if (ok == 0) { 1426 printk(FORE200E "unable to 
%s VC %d.%d.%d\n", 1427 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); 1428 return -EIO; 1429 } 1430 1431 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 1432 activate ? "open" : "clos"); 1433 1434 return 0; 1435 } 1436 1437 1438 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */ 1439 1440 static void 1441 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) 1442 { 1443 if (qos->txtp.max_pcr < ATM_OC3_PCR) { 1444 1445 /* compute the data cells to idle cells ratio from the tx PCR */ 1446 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; 1447 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; 1448 } 1449 else { 1450 /* disable rate control */ 1451 rate->data_cells = rate->idle_cells = 0; 1452 } 1453 } 1454 1455 1456 static int 1457 fore200e_open(struct atm_vcc *vcc) 1458 { 1459 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1460 struct fore200e_vcc* fore200e_vcc; 1461 struct fore200e_vc_map* vc_map; 1462 unsigned long flags; 1463 int vci = vcc->vci; 1464 short vpi = vcc->vpi; 1465 1466 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); 1467 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); 1468 1469 spin_lock_irqsave(&fore200e->q_lock, flags); 1470 1471 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); 1472 if (vc_map->vcc) { 1473 1474 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1475 1476 printk(FORE200E "VC %d.%d.%d already in use\n", 1477 fore200e->atm_dev->number, vpi, vci); 1478 1479 return -EINVAL; 1480 } 1481 1482 vc_map->vcc = vcc; 1483 1484 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1485 1486 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); 1487 if (fore200e_vcc == NULL) { 1488 vc_map->vcc = NULL; 1489 return -ENOMEM; 1490 } 1491 1492 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1493 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", 1494 vcc->itf, vcc->vpi, vcc->vci, 
fore200e_atm2fore_aal(vcc->qos.aal), 1495 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], 1496 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, 1497 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], 1498 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); 1499 1500 /* pseudo-CBR bandwidth requested? */ 1501 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1502 1503 mutex_lock(&fore200e->rate_mtx); 1504 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) { 1505 mutex_unlock(&fore200e->rate_mtx); 1506 1507 kfree(fore200e_vcc); 1508 vc_map->vcc = NULL; 1509 return -EAGAIN; 1510 } 1511 1512 /* reserve bandwidth */ 1513 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr; 1514 mutex_unlock(&fore200e->rate_mtx); 1515 } 1516 1517 vcc->itf = vcc->dev->number; 1518 1519 set_bit(ATM_VF_PARTIAL,&vcc->flags); 1520 set_bit(ATM_VF_ADDR, &vcc->flags); 1521 1522 vcc->dev_data = fore200e_vcc; 1523 1524 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) { 1525 1526 vc_map->vcc = NULL; 1527 1528 clear_bit(ATM_VF_ADDR, &vcc->flags); 1529 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1530 1531 vcc->dev_data = NULL; 1532 1533 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1534 1535 kfree(fore200e_vcc); 1536 return -EINVAL; 1537 } 1538 1539 /* compute rate control parameters */ 1540 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1541 1542 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate); 1543 set_bit(ATM_VF_HASQOS, &vcc->flags); 1544 1545 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n", 1546 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1547 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 1548 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells); 1549 } 1550 1551 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = 
MAX_PDU_SIZE + 1; 1552 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0; 1553 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; 1554 1555 /* new incarnation of the vcc */ 1556 vc_map->incarn = ++fore200e->incarn_count; 1557 1558 /* VC unusable before this flag is set */ 1559 set_bit(ATM_VF_READY, &vcc->flags); 1560 1561 return 0; 1562 } 1563 1564 1565 static void 1566 fore200e_close(struct atm_vcc* vcc) 1567 { 1568 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1569 struct fore200e_vcc* fore200e_vcc; 1570 struct fore200e_vc_map* vc_map; 1571 unsigned long flags; 1572 1573 ASSERT(vcc); 1574 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); 1575 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); 1576 1577 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); 1578 1579 clear_bit(ATM_VF_READY, &vcc->flags); 1580 1581 fore200e_activate_vcin(fore200e, 0, vcc, 0); 1582 1583 spin_lock_irqsave(&fore200e->q_lock, flags); 1584 1585 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1586 1587 /* the vc is no longer considered as "in use" by fore200e_open() */ 1588 vc_map->vcc = NULL; 1589 1590 vcc->itf = vcc->vci = vcc->vpi = 0; 1591 1592 fore200e_vcc = FORE200E_VCC(vcc); 1593 vcc->dev_data = NULL; 1594 1595 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1596 1597 /* release reserved bandwidth, if any */ 1598 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1599 1600 mutex_lock(&fore200e->rate_mtx); 1601 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1602 mutex_unlock(&fore200e->rate_mtx); 1603 1604 clear_bit(ATM_VF_HASQOS, &vcc->flags); 1605 } 1606 1607 clear_bit(ATM_VF_ADDR, &vcc->flags); 1608 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1609 1610 ASSERT(fore200e_vcc); 1611 kfree(fore200e_vcc); 1612 } 1613 1614 1615 static int 1616 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) 1617 { 1618 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 
1619 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 1620 struct fore200e_vc_map* vc_map; 1621 struct host_txq* txq = &fore200e->host_txq; 1622 struct host_txq_entry* entry; 1623 struct tpd* tpd; 1624 struct tpd_haddr tpd_haddr; 1625 int retry = CONFIG_ATM_FORE200E_TX_RETRY; 1626 int tx_copy = 0; 1627 int tx_len = skb->len; 1628 u32* cell_header = NULL; 1629 unsigned char* skb_data; 1630 int skb_len; 1631 unsigned char* data; 1632 unsigned long flags; 1633 1634 ASSERT(vcc); 1635 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1636 ASSERT(fore200e); 1637 ASSERT(fore200e_vcc); 1638 1639 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1640 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); 1641 dev_kfree_skb_any(skb); 1642 return -EINVAL; 1643 } 1644 1645 #ifdef FORE200E_52BYTE_AAL0_SDU 1646 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { 1647 cell_header = (u32*) skb->data; 1648 skb_data = skb->data + 4; /* skip 4-byte cell header */ 1649 skb_len = tx_len = skb->len - 4; 1650 1651 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); 1652 } 1653 else 1654 #endif 1655 { 1656 skb_data = skb->data; 1657 skb_len = skb->len; 1658 } 1659 1660 if (((unsigned long)skb_data) & 0x3) { 1661 1662 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); 1663 tx_copy = 1; 1664 tx_len = skb_len; 1665 } 1666 1667 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { 1668 1669 /* this simply NUKES the PCA board */ 1670 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); 1671 tx_copy = 1; 1672 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; 1673 } 1674 1675 if (tx_copy) { 1676 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA); 1677 if (data == NULL) { 1678 if (vcc->pop) { 1679 vcc->pop(vcc, skb); 1680 } 1681 else { 1682 dev_kfree_skb_any(skb); 1683 } 1684 return -ENOMEM; 1685 } 1686 1687 memcpy(data, skb_data, skb_len); 1688 if (skb_len < tx_len) 1689 memset(data 
+ skb_len, 0x00, tx_len - skb_len); 1690 } 1691 else { 1692 data = skb_data; 1693 } 1694 1695 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1696 ASSERT(vc_map->vcc == vcc); 1697 1698 retry_here: 1699 1700 spin_lock_irqsave(&fore200e->q_lock, flags); 1701 1702 entry = &txq->host_entry[ txq->head ]; 1703 1704 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { 1705 1706 /* try to free completed tx queue entries */ 1707 fore200e_tx_irq(fore200e); 1708 1709 if (*entry->status != STATUS_FREE) { 1710 1711 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1712 1713 /* retry once again? */ 1714 if (--retry > 0) { 1715 udelay(50); 1716 goto retry_here; 1717 } 1718 1719 atomic_inc(&vcc->stats->tx_err); 1720 1721 fore200e->tx_sat++; 1722 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", 1723 fore200e->name, fore200e->cp_queues->heartbeat); 1724 if (vcc->pop) { 1725 vcc->pop(vcc, skb); 1726 } 1727 else { 1728 dev_kfree_skb_any(skb); 1729 } 1730 1731 if (tx_copy) 1732 kfree(data); 1733 1734 return -ENOBUFS; 1735 } 1736 } 1737 1738 entry->incarn = vc_map->incarn; 1739 entry->vc_map = vc_map; 1740 entry->skb = skb; 1741 entry->data = tx_copy ? data : NULL; 1742 1743 tpd = entry->tpd; 1744 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE); 1745 tpd->tsd[ 0 ].length = tx_len; 1746 1747 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); 1748 txq->txing++; 1749 1750 /* The dma_map call above implies a dma_sync so the device can use it, 1751 * thus no explicit dma_sync call is necessary here. 
1752 */ 1753 1754 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 1755 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1756 tpd->tsd[0].length, skb_len); 1757 1758 if (skb_len < fore200e_vcc->tx_min_pdu) 1759 fore200e_vcc->tx_min_pdu = skb_len; 1760 if (skb_len > fore200e_vcc->tx_max_pdu) 1761 fore200e_vcc->tx_max_pdu = skb_len; 1762 fore200e_vcc->tx_pdu++; 1763 1764 /* set tx rate control information */ 1765 tpd->rate.data_cells = fore200e_vcc->rate.data_cells; 1766 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; 1767 1768 if (cell_header) { 1769 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); 1770 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 1771 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; 1772 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; 1773 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; 1774 } 1775 else { 1776 /* set the ATM header, common to all cells conveying the PDU */ 1777 tpd->atm_header.clp = 0; 1778 tpd->atm_header.plt = 0; 1779 tpd->atm_header.vci = vcc->vci; 1780 tpd->atm_header.vpi = vcc->vpi; 1781 tpd->atm_header.gfc = 0; 1782 } 1783 1784 tpd->spec.length = tx_len; 1785 tpd->spec.nseg = 1; 1786 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); 1787 tpd->spec.intr = 1; 1788 1789 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ 1790 tpd_haddr.pad = 0; 1791 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ 1792 1793 *entry->status = STATUS_PENDING; 1794 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); 1795 1796 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1797 1798 return 0; 1799 } 1800 1801 1802 static int 1803 fore200e_getstats(struct fore200e* fore200e) 1804 { 1805 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1806 struct host_cmdq_entry* 
entry = &cmdq->host_entry[ cmdq->head ]; 1807 struct stats_opcode opcode; 1808 int ok; 1809 u32 stats_dma_addr; 1810 1811 if (fore200e->stats == NULL) { 1812 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA); 1813 if (fore200e->stats == NULL) 1814 return -ENOMEM; 1815 } 1816 1817 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats, 1818 sizeof(struct stats), DMA_FROM_DEVICE); 1819 1820 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1821 1822 opcode.opcode = OPCODE_GET_STATS; 1823 opcode.pad = 0; 1824 1825 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr); 1826 1827 *entry->status = STATUS_PENDING; 1828 1829 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode); 1830 1831 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1832 1833 *entry->status = STATUS_FREE; 1834 1835 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); 1836 1837 if (ok == 0) { 1838 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name); 1839 return -EIO; 1840 } 1841 1842 return 0; 1843 } 1844 1845 1846 static int 1847 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1848 { 1849 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1850 1851 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1852 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1853 1854 return -EINVAL; 1855 } 1856 1857 1858 static int 1859 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1860 { 1861 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1862 1863 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1864 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1865 1866 return -EINVAL; 1867 } 1868 1869 1870 #if 0 /* currently 
unused */ 1871 static int 1872 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs) 1873 { 1874 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1875 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1876 struct oc3_opcode opcode; 1877 int ok; 1878 u32 oc3_regs_dma_addr; 1879 1880 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1881 1882 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1883 1884 opcode.opcode = OPCODE_GET_OC3; 1885 opcode.reg = 0; 1886 opcode.value = 0; 1887 opcode.mask = 0; 1888 1889 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1890 1891 *entry->status = STATUS_PENDING; 1892 1893 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode); 1894 1895 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1896 1897 *entry->status = STATUS_FREE; 1898 1899 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1900 1901 if (ok == 0) { 1902 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name); 1903 return -EIO; 1904 } 1905 1906 return 0; 1907 } 1908 #endif 1909 1910 1911 static int 1912 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask) 1913 { 1914 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1915 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1916 struct oc3_opcode opcode; 1917 int ok; 1918 1919 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask); 1920 1921 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1922 1923 opcode.opcode = OPCODE_SET_OC3; 1924 opcode.reg = reg; 1925 opcode.value = value; 1926 opcode.mask = mask; 1927 1928 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1929 1930 *entry->status = STATUS_PENDING; 1931 1932 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode); 1933 1934 ok = 
fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1935 1936 *entry->status = STATUS_FREE; 1937 1938 if (ok == 0) { 1939 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name); 1940 return -EIO; 1941 } 1942 1943 return 0; 1944 } 1945 1946 1947 static int 1948 fore200e_setloop(struct fore200e* fore200e, int loop_mode) 1949 { 1950 u32 mct_value, mct_mask; 1951 int error; 1952 1953 if (!capable(CAP_NET_ADMIN)) 1954 return -EPERM; 1955 1956 switch (loop_mode) { 1957 1958 case ATM_LM_NONE: 1959 mct_value = 0; 1960 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE; 1961 break; 1962 1963 case ATM_LM_LOC_PHY: 1964 mct_value = mct_mask = SUNI_MCT_DLE; 1965 break; 1966 1967 case ATM_LM_RMT_PHY: 1968 mct_value = mct_mask = SUNI_MCT_LLE; 1969 break; 1970 1971 default: 1972 return -EINVAL; 1973 } 1974 1975 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask); 1976 if (error == 0) 1977 fore200e->loop_mode = loop_mode; 1978 1979 return error; 1980 } 1981 1982 1983 static int 1984 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg) 1985 { 1986 struct sonet_stats tmp; 1987 1988 if (fore200e_getstats(fore200e) < 0) 1989 return -EIO; 1990 1991 tmp.section_bip = cpu_to_be32(fore200e->stats->oc3.section_bip8_errors); 1992 tmp.line_bip = cpu_to_be32(fore200e->stats->oc3.line_bip24_errors); 1993 tmp.path_bip = cpu_to_be32(fore200e->stats->oc3.path_bip8_errors); 1994 tmp.line_febe = cpu_to_be32(fore200e->stats->oc3.line_febe_errors); 1995 tmp.path_febe = cpu_to_be32(fore200e->stats->oc3.path_febe_errors); 1996 tmp.corr_hcs = cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors); 1997 tmp.uncorr_hcs = cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors); 1998 tmp.tx_cells = cpu_to_be32(fore200e->stats->aal0.cells_transmitted) + 1999 cpu_to_be32(fore200e->stats->aal34.cells_transmitted) + 2000 cpu_to_be32(fore200e->stats->aal5.cells_transmitted); 2001 tmp.rx_cells = cpu_to_be32(fore200e->stats->aal0.cells_received) + 2002 
cpu_to_be32(fore200e->stats->aal34.cells_received) + 2003 cpu_to_be32(fore200e->stats->aal5.cells_received); 2004 2005 if (arg) 2006 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0; 2007 2008 return 0; 2009 } 2010 2011 2012 static int 2013 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg) 2014 { 2015 struct fore200e* fore200e = FORE200E_DEV(dev); 2016 2017 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg); 2018 2019 switch (cmd) { 2020 2021 case SONET_GETSTAT: 2022 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg); 2023 2024 case SONET_GETDIAG: 2025 return put_user(0, (int __user *)arg) ? -EFAULT : 0; 2026 2027 case ATM_SETLOOP: 2028 return fore200e_setloop(fore200e, (int)(unsigned long)arg); 2029 2030 case ATM_GETLOOP: 2031 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0; 2032 2033 case ATM_QUERYLOOP: 2034 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? 
-EFAULT : 0; 2035 } 2036 2037 return -ENOSYS; /* not implemented */ 2038 } 2039 2040 2041 static int 2042 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags) 2043 { 2044 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 2045 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 2046 2047 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 2048 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi); 2049 return -EINVAL; 2050 } 2051 2052 DPRINTK(2, "change_qos %d.%d.%d, " 2053 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 2054 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n" 2055 "available_cell_rate = %u", 2056 vcc->itf, vcc->vpi, vcc->vci, 2057 fore200e_traffic_class[ qos->txtp.traffic_class ], 2058 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu, 2059 fore200e_traffic_class[ qos->rxtp.traffic_class ], 2060 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu, 2061 flags, fore200e->available_cell_rate); 2062 2063 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) { 2064 2065 mutex_lock(&fore200e->rate_mtx); 2066 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) { 2067 mutex_unlock(&fore200e->rate_mtx); 2068 return -EAGAIN; 2069 } 2070 2071 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 2072 fore200e->available_cell_rate -= qos->txtp.max_pcr; 2073 2074 mutex_unlock(&fore200e->rate_mtx); 2075 2076 memcpy(&vcc->qos, qos, sizeof(struct atm_qos)); 2077 2078 /* update rate control parameters */ 2079 fore200e_rate_ctrl(qos, &fore200e_vcc->rate); 2080 2081 set_bit(ATM_VF_HASQOS, &vcc->flags); 2082 2083 return 0; 2084 } 2085 2086 return -EINVAL; 2087 } 2088 2089 2090 static int __devinit 2091 fore200e_irq_request(struct fore200e* fore200e) 2092 { 2093 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) { 2094 2095 printk(FORE200E "unable to reserve IRQ %s for device 
%s\n", 2096 fore200e_irq_itoa(fore200e->irq), fore200e->name); 2097 return -EBUSY; 2098 } 2099 2100 printk(FORE200E "IRQ %s reserved for device %s\n", 2101 fore200e_irq_itoa(fore200e->irq), fore200e->name); 2102 2103 #ifdef FORE200E_USE_TASKLET 2104 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e); 2105 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e); 2106 #endif 2107 2108 fore200e->state = FORE200E_STATE_IRQ; 2109 return 0; 2110 } 2111 2112 2113 static int __devinit 2114 fore200e_get_esi(struct fore200e* fore200e) 2115 { 2116 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA); 2117 int ok, i; 2118 2119 if (!prom) 2120 return -ENOMEM; 2121 2122 ok = fore200e->bus->prom_read(fore200e, prom); 2123 if (ok < 0) { 2124 kfree(prom); 2125 return -EBUSY; 2126 } 2127 2128 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n", 2129 fore200e->name, 2130 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */ 2131 prom->serial_number & 0xFFFF, 2132 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ], 2133 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]); 2134 2135 for (i = 0; i < ESI_LEN; i++) { 2136 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ]; 2137 } 2138 2139 kfree(prom); 2140 2141 return 0; 2142 } 2143 2144 2145 static int __devinit 2146 fore200e_alloc_rx_buf(struct fore200e* fore200e) 2147 { 2148 int scheme, magn, nbr, size, i; 2149 2150 struct host_bsq* bsq; 2151 struct buffer* buffer; 2152 2153 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2154 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2155 2156 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2157 2158 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ]; 2159 size = fore200e_rx_buf_size[ scheme ][ magn ]; 2160 2161 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn); 2162 2163 /* allocate the array 
of receive buffers */ 2164 buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL); 2165 2166 if (buffer == NULL) 2167 return -ENOMEM; 2168 2169 bsq->freebuf = NULL; 2170 2171 for (i = 0; i < nbr; i++) { 2172 2173 buffer[ i ].scheme = scheme; 2174 buffer[ i ].magn = magn; 2175 #ifdef FORE200E_BSQ_DEBUG 2176 buffer[ i ].index = i; 2177 buffer[ i ].supplied = 0; 2178 #endif 2179 2180 /* allocate the receive buffer body */ 2181 if (fore200e_chunk_alloc(fore200e, 2182 &buffer[ i ].data, size, fore200e->bus->buffer_alignment, 2183 DMA_FROM_DEVICE) < 0) { 2184 2185 while (i > 0) 2186 fore200e_chunk_free(fore200e, &buffer[ --i ].data); 2187 kfree(buffer); 2188 2189 return -ENOMEM; 2190 } 2191 2192 /* insert the buffer into the free buffer list */ 2193 buffer[ i ].next = bsq->freebuf; 2194 bsq->freebuf = &buffer[ i ]; 2195 } 2196 /* all the buffers are free, initially */ 2197 bsq->freebuf_count = nbr; 2198 2199 #ifdef FORE200E_BSQ_DEBUG 2200 bsq_audit(3, bsq, scheme, magn); 2201 #endif 2202 } 2203 } 2204 2205 fore200e->state = FORE200E_STATE_ALLOC_BUF; 2206 return 0; 2207 } 2208 2209 2210 static int __devinit 2211 fore200e_init_bs_queue(struct fore200e* fore200e) 2212 { 2213 int scheme, magn, i; 2214 2215 struct host_bsq* bsq; 2216 struct cp_bsq_entry __iomem * cp_entry; 2217 2218 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2219 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2220 2221 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn); 2222 2223 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2224 2225 /* allocate and align the array of status words */ 2226 if (fore200e->bus->dma_chunk_alloc(fore200e, 2227 &bsq->status, 2228 sizeof(enum status), 2229 QUEUE_SIZE_BS, 2230 fore200e->bus->status_alignment) < 0) { 2231 return -ENOMEM; 2232 } 2233 2234 /* allocate and align the array of receive buffer descriptors */ 2235 if (fore200e->bus->dma_chunk_alloc(fore200e, 2236 &bsq->rbd_block, 2237 sizeof(struct rbd_block), 2238 
					       QUEUE_SIZE_BS,
					       fore200e->bus->descr_alignment) < 0) {

		/* release the status array allocated just above */
		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
		return -ENOMEM;
	    }

	    /* get the base address of the cp resident buffer supply queue entries */
	    cp_entry = fore200e->virt_base +
		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

	    /* fill the host resident and cp resident buffer supply queue entries */
	    for (i = 0; i < QUEUE_SIZE_BS; i++) {

		bsq->host_entry[ i ].status =
		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
		bsq->host_entry[ i ].rbd_block =
		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
		bsq->host_entry[ i ].rbd_block_dma =
		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		*bsq->host_entry[ i ].status = STATUS_FREE;

		/* tell the cp where the host status word of this entry lives */
		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	    }
	}
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}


/* set up the host and cp resident receive queues */
static int __devinit
fore200e_init_rx_queue(struct fore200e* fore200e)
{
    struct host_rxq*              rxq = &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_RX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->rpd,
				       sizeof(struct rpd),
				       QUEUE_SIZE_RX,
				       fore200e->bus->descr_alignment) < 0) {

	/* release the status array allocated just above */
	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

	rxq->host_entry[ i ].status =
	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
	rxq->host_entry[ i ].rpd =
	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
	rxq->host_entry[ i ].rpd_dma =
	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*rxq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
			     &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}


/* set up the host and cp resident transmit queues */
static int __devinit
fore200e_init_tx_queue(struct fore200e* fore200e)
{
    struct host_txq*              txq = &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_TX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->tpd,
				       sizeof(struct tpd),
				       QUEUE_SIZE_TX,
				       fore200e->bus->descr_alignment) < 0) {

	/* release the status array allocated just above */
	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base +
	       fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i=0; i < QUEUE_SIZE_TX; i++) {

	txq->host_entry[ i ].status =
	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
	txq->host_entry[ i ].tpd =
	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
	txq->host_entry[ i ].tpd_dma =
	                     FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*txq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	/* although there is a one-to-one mapping of tx queue entries and tpds,
	   we do not write here the DMA (physical) base address of each tpd into
	   the related cp resident entry, because the cp relies on this write
	   operation to detect that a new pdu has been submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}


/* set up the host and cp resident command queues */
static int __devinit
fore200e_init_cmd_queue(struct fore200e* fore200e)
{
    struct host_cmdq*              cmdq = &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &cmdq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_CMD,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i=0; i < QUEUE_SIZE_CMD; i++) {

	cmdq->host_entry[ i ].status   =
	                      FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*cmdq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);
    }

    /* set the head entry of the queue */
    cmdq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_CMDQ;
    return 0;
}


/* write one buffer supply queue's parameters into the cp init block */
static void __devinit
fore200e_param_bs_queue(struct fore200e* fore200e,
			enum buffer_scheme scheme, enum buffer_magn magn,
			int queue_length, int pool_size, int supply_blksize)
{
    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];

    fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
    fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
    fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
}


/* hand the queue geometry and buffer pool parameters to the firmware,
   then issue the INITIALIZE command and poll for its completion */
static int __devinit
fore200e_initialize(struct fore200e* fore200e)
{
    struct cp_queues __iomem * cpq;
    int ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    mutex_init(&fore200e->rate_mtx);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    if (fore200e->bus->irq_enable)
	fore200e->bus->irq_enable(fore200e);

    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
	    fore200e_param_bs_queue(fore200e, scheme, magn,
				    QUEUE_SIZE_BS,
				    fore200e_rx_buf_nbr[ scheme ][ magn ],
				    RBD_BLK_SIZE);

    /* issue the initialize command */
    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
    if (ok == 0) {
	printk(FORE200E "device %s initialization failed\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}


/* send one character to the i960 monitor's soft UART */
static void __devinit
fore200e_monitor_putc(struct fore200e* fore200e, char c)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;

#if 0
    printk("%c", c);
#endif
    fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
}


/* read one character from the i960 monitor's soft UART;
   returns -1 if none arrives within ~50 ms */
static int __devinit
fore200e_monitor_getc(struct fore200e* fore200e)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
    unsigned long               timeout = jiffies + msecs_to_jiffies(50);
    int                         c;

    while (time_before(jiffies, timeout)) {

	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);

	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {

	    /* acknowledge the character so the monitor can send the next one */
	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
#if 0
	    printk("%c", c & 0xFF);
#endif
	    return c & 0xFF;
	}
    }

    return -1;
}


/* send a string to the i960 monitor, draining its pending output
   before each character and once more after the last one */
static void __devinit
fore200e_monitor_puts(struct fore200e* fore200e, char* str)
{
    while (*str) {

	/* the i960 monitor doesn't accept any new character if it has something to say */
	while (fore200e_monitor_getc(fore200e) >= 0);

	fore200e_monitor_putc(fore200e, *str++);
    }

    while (fore200e_monitor_getc(fore200e) >= 0);
}


/* ask the i960 monitor to jump to the downloaded firmware's entry
   point, then poll the boot status word until the cp reports running */
static int __devinit
fore200e_start_fw(struct fore200e* fore200e)
{
    int               ok;
    char              cmd[ 48 ];
    struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;

    DPRINTK(2, "device %s firmware being started\n", fore200e->name);

#if defined(__sparc_v9__)
    /* reported to be required by SBA cards on some sparc64 hosts */
    fore200e_spin(100);
#endif

    sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));

    fore200e_monitor_puts(fore200e, cmd);

    ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
    if (ok == 0) {
	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s firmware started\n", fore200e->name);

    fore200e->state = FORE200E_STATE_START_FW;
    return 0;
}


/* copy the little-endian firmware image word by word into the
   adapter's memory, after validating the image's magic number */
static int __devinit
fore200e_load_fw(struct fore200e* fore200e)
{
    u32* fw_data = (u32*) fore200e->bus->fw_data;
    u32  fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);

    struct fw_header* fw_header = (struct fw_header*) fw_data;

    u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);

    DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
	    fore200e->name, load_addr, fw_size);

    if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
	return -ENODEV;
    }

    for (; fw_size--; fw_data++, load_addr++)
	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);

    fore200e->state = FORE200E_STATE_LOAD_FW;
    return 0;
}


/* register the board with the Linux ATM layer */
static int __devinit
fore200e_register(struct fore200e* fore200e) 2615 { 2616 struct atm_dev* atm_dev; 2617 2618 DPRINTK(2, "device %s being registered\n", fore200e->name); 2619 2620 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1, 2621 NULL); 2622 if (atm_dev == NULL) { 2623 printk(FORE200E "unable to register device %s\n", fore200e->name); 2624 return -ENODEV; 2625 } 2626 2627 atm_dev->dev_data = fore200e; 2628 fore200e->atm_dev = atm_dev; 2629 2630 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS; 2631 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS; 2632 2633 fore200e->available_cell_rate = ATM_OC3_PCR; 2634 2635 fore200e->state = FORE200E_STATE_REGISTER; 2636 return 0; 2637 } 2638 2639 2640 static int __devinit 2641 fore200e_init(struct fore200e* fore200e) 2642 { 2643 if (fore200e_register(fore200e) < 0) 2644 return -ENODEV; 2645 2646 if (fore200e->bus->configure(fore200e) < 0) 2647 return -ENODEV; 2648 2649 if (fore200e->bus->map(fore200e) < 0) 2650 return -ENODEV; 2651 2652 if (fore200e_reset(fore200e, 1) < 0) 2653 return -ENODEV; 2654 2655 if (fore200e_load_fw(fore200e) < 0) 2656 return -ENODEV; 2657 2658 if (fore200e_start_fw(fore200e) < 0) 2659 return -ENODEV; 2660 2661 if (fore200e_initialize(fore200e) < 0) 2662 return -ENODEV; 2663 2664 if (fore200e_init_cmd_queue(fore200e) < 0) 2665 return -ENOMEM; 2666 2667 if (fore200e_init_tx_queue(fore200e) < 0) 2668 return -ENOMEM; 2669 2670 if (fore200e_init_rx_queue(fore200e) < 0) 2671 return -ENOMEM; 2672 2673 if (fore200e_init_bs_queue(fore200e) < 0) 2674 return -ENOMEM; 2675 2676 if (fore200e_alloc_rx_buf(fore200e) < 0) 2677 return -ENOMEM; 2678 2679 if (fore200e_get_esi(fore200e) < 0) 2680 return -EIO; 2681 2682 if (fore200e_irq_request(fore200e) < 0) 2683 return -EBUSY; 2684 2685 fore200e_supply(fore200e); 2686 2687 /* all done, board initialization is now complete */ 2688 fore200e->state = FORE200E_STATE_COMPLETE; 2689 return 0; 2690 } 2691 2692 2693 static int __devinit 2694 fore200e_pca_detect(struct 
pci_dev *pci_dev, const struct pci_device_id *pci_ent) 2695 { 2696 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data; 2697 struct fore200e* fore200e; 2698 int err = 0; 2699 static int index = 0; 2700 2701 if (pci_enable_device(pci_dev)) { 2702 err = -EINVAL; 2703 goto out; 2704 } 2705 2706 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2707 if (fore200e == NULL) { 2708 err = -ENOMEM; 2709 goto out_disable; 2710 } 2711 2712 fore200e->bus = bus; 2713 fore200e->bus_dev = pci_dev; 2714 fore200e->irq = pci_dev->irq; 2715 fore200e->phys_base = pci_resource_start(pci_dev, 0); 2716 2717 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1); 2718 2719 pci_set_master(pci_dev); 2720 2721 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", 2722 fore200e->bus->model_name, 2723 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2724 2725 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2726 2727 err = fore200e_init(fore200e); 2728 if (err < 0) { 2729 fore200e_shutdown(fore200e); 2730 goto out_free; 2731 } 2732 2733 ++index; 2734 pci_set_drvdata(pci_dev, fore200e); 2735 2736 out: 2737 return err; 2738 2739 out_free: 2740 kfree(fore200e); 2741 out_disable: 2742 pci_disable_device(pci_dev); 2743 goto out; 2744 } 2745 2746 2747 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev) 2748 { 2749 struct fore200e *fore200e; 2750 2751 fore200e = pci_get_drvdata(pci_dev); 2752 2753 fore200e_shutdown(fore200e); 2754 kfree(fore200e); 2755 pci_disable_device(pci_dev); 2756 } 2757 2758 2759 #ifdef CONFIG_ATM_FORE200E_PCA 2760 static struct pci_device_id fore200e_pca_tbl[] = { 2761 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, 2762 0, 0, (unsigned long) &fore200e_bus[0] }, 2763 { 0, } 2764 }; 2765 2766 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl); 2767 2768 static struct pci_driver fore200e_pca_driver = { 2769 .name = "fore_200e", 2770 .probe = fore200e_pca_detect, 2771 .remove = 
__devexit_p(fore200e_pca_remove_one),
    .id_table = fore200e_pca_tbl,
};
#endif


/* module entry point: probe non-PCI buses directly via each bus's
   detect op, then register the PCI driver; returns success if either
   path found a board */
static int __init
fore200e_module_init(void)
{
    const struct fore200e_bus* bus;
    struct       fore200e*     fore200e;
    int                        index;

    printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");

    /* for each configured bus interface */
    for (bus = fore200e_bus; bus->model_name; bus++) {

	/* detect all boards present on that bus */
	for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {

	    printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
		   fore200e->bus->model_name,
		   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));

	    sprintf(fore200e->name, "%s-%d", bus->model_name, index);

	    if (fore200e_init(fore200e) < 0) {

		fore200e_shutdown(fore200e);
		break;
	    }

	    list_add(&fore200e->entry, &fore200e_boards);
	}
    }

#ifdef CONFIG_ATM_FORE200E_PCA
    if (!pci_register_driver(&fore200e_pca_driver))
	return 0;
#endif

    if (!list_empty(&fore200e_boards))
	return 0;

    return -ENODEV;
}


/* module exit: unregister the PCI driver, then tear down the boards
   that were probed directly (non-PCI buses) */
static void __exit
fore200e_module_cleanup(void)
{
    struct fore200e *fore200e, *next;

#ifdef CONFIG_ATM_FORE200E_PCA
    pci_unregister_driver(&fore200e_pca_driver);
#endif

    list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
	fore200e_shutdown(fore200e);
	kfree(fore200e);
    }
    DPRINTK(1, "module being removed\n");
}


/* /proc/atm read callback: *pos selects which section of the report to
   render into 'page'; returns the number of bytes written, or 0 once
   every section (and every ready VCC) has been emitted */
static int
fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
{
    struct fore200e*     fore200e = FORE200E_DEV(dev);
    struct fore200e_vcc* fore200e_vcc;
    struct atm_vcc*      vcc;
    int                  i, len, left = *pos;
    unsigned long        flags;

    if (!left--) {

	if (fore200e_getstats(fore200e) < 0)
	    return -EIO;

	len = sprintf(page,"\n"
		       " device:\n"
		       " internal name:\t\t%s\n", fore200e->name);

	/* print bus-specific information */
	if (fore200e->bus->proc_read)
	    len += fore200e->bus->proc_read(fore200e, page + len);

	len += sprintf(page + len,
		" interrupt line:\t\t%s\n"
		" physical base address:\t0x%p\n"
		" virtual base address:\t0x%p\n"
		" factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
		" board serial number:\t\t%d\n\n",
		fore200e_irq_itoa(fore200e->irq),
		(void*)fore200e->phys_base,
		fore200e->virt_base,
		fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
		fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
		fore200e->esi[4] * 256 + fore200e->esi[5]);

	return len;
    }

    if (!left--)
	return sprintf(page,
		       " free small bufs, scheme 1:\t%d\n"
		       " free large bufs, scheme 1:\t%d\n"
		       " free small bufs, scheme 2:\t%d\n"
		       " free large bufs, scheme 2:\t%d\n",
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);

    if (!left--) {
	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);

	len = sprintf(page,"\n\n"
		      " cell processor:\n"
		      " heartbeat state:\t\t");

	/* a 0xDEAD upper half-word signals a firmware fatal error */
	if (hb >> 16 != 0xDEAD)
	    len += sprintf(page + len, "0x%08x\n", hb);
	else
	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);

	return len;
    }

    if (!left--) {
	static const char* media_name[] = {
	    "unshielded twisted pair",
	    "multimode optical fiber ST",
	    "multimode optical fiber SC",
	    "single-mode optical fiber ST",
	    "single-mode optical fiber SC",
	    "unknown"
	};

	static const char* oc3_mode[] = {
	    "normal operation",
	    "diagnostic loopback",
	    "line loopback",
	    "unknown"
	};

	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
	u32 oc3_index;

	/* NOTE(review): media_index is unsigned, so the "< 0" arm can
	   never be true; clamping effectively relies on "> 4" alone */
	if ((media_index < 0) || (media_index > 4))
	    media_index = 5;

	switch (fore200e->loop_mode) {
	    case ATM_LM_NONE:    oc3_index = 0;
		                 break;
	    case ATM_LM_LOC_PHY: oc3_index = 1;
		                 break;
	    case ATM_LM_RMT_PHY: oc3_index = 2;
		                 break;
	    default:             oc3_index = 3;
	}

	return sprintf(page,
		       " firmware release:\t\t%d.%d.%d\n"
		       " monitor release:\t\t%d.%d\n"
		       " media type:\t\t\t%s\n"
		       " OC-3 revision:\t\t0x%x\n"
		       " OC-3 mode:\t\t\t%s",
		       fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
		       mon960_release >> 16, mon960_release << 16 >> 16,
		       media_name[ media_index ],
		       oc3_revision,
		       oc3_mode[ oc3_index ]);
    }

    if (!left--) {
	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;

	return sprintf(page,
		       "\n\n"
		       " monitor:\n"
		       " version number:\t\t%d\n"
		       " boot status word:\t\t0x%08x\n",
		       fore200e->bus->read(&cp_monitor->mon_version),
		       fore200e->bus->read(&cp_monitor->bstat));
    }

    /* NOTE(review): the stats fields below look like big-endian values
       coming from the card; cpu_to_be32() gives the same result as
       be32_to_cpu() on every arch, but the latter would state the
       intent (device-to-host conversion) — confirm against the cp docs */
    if (!left--)
	return sprintf(page,
		       "\n"
		       " device statistics:\n"
		       " 4b5b:\n"
		       " crc_header_errors:\t\t%10u\n"
		       " framing_errors:\t\t%10u\n",
		       cpu_to_be32(fore200e->stats->phy.crc_header_errors),
		       cpu_to_be32(fore200e->stats->phy.framing_errors));

    if (!left--)
	return sprintf(page, "\n"
		       " OC-3:\n"
		       " section_bip8_errors:\t%10u\n"
		       " path_bip8_errors:\t\t%10u\n"
		       " line_bip24_errors:\t\t%10u\n"
		       " line_febe_errors:\t\t%10u\n"
		       " path_febe_errors:\t\t%10u\n"
		       " corr_hcs_errors:\t\t%10u\n"
		       " ucorr_hcs_errors:\t\t%10u\n",
		       cpu_to_be32(fore200e->stats->oc3.section_bip8_errors),
		       cpu_to_be32(fore200e->stats->oc3.path_bip8_errors),
		       cpu_to_be32(fore200e->stats->oc3.line_bip24_errors),
		       cpu_to_be32(fore200e->stats->oc3.line_febe_errors),
		       cpu_to_be32(fore200e->stats->oc3.path_febe_errors),
		       cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors),
		       cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " ATM:\t\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " vpi out of range:\t\t%10u\n"
		       " vpi no conn:\t\t%10u\n"
		       " vci out of range:\t\t%10u\n"
		       " vci no conn:\t\t%10u\n",
		       cpu_to_be32(fore200e->stats->atm.cells_transmitted),
		       cpu_to_be32(fore200e->stats->atm.cells_received),
		       cpu_to_be32(fore200e->stats->atm.vpi_bad_range),
		       cpu_to_be32(fore200e->stats->atm.vpi_no_conn),
		       cpu_to_be32(fore200e->stats->atm.vci_bad_range),
		       cpu_to_be32(fore200e->stats->atm.vci_no_conn));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL0:\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n",
		       cpu_to_be32(fore200e->stats->aal0.cells_transmitted),
		       cpu_to_be32(fore200e->stats->aal0.cells_received),
		       cpu_to_be32(fore200e->stats->aal0.cells_dropped));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL3/4:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       cpu_to_be32(fore200e->stats->aal34.cells_transmitted),
		       cpu_to_be32(fore200e->stats->aal34.cells_received),
		       cpu_to_be32(fore200e->stats->aal34.cells_dropped),
		       cpu_to_be32(fore200e->stats->aal34.cells_crc_errors),
		       cpu_to_be32(fore200e->stats->aal34.cells_protocol_errors),
		       cpu_to_be32(fore200e->stats->aal34.cspdus_transmitted),
		       cpu_to_be32(fore200e->stats->aal34.cspdus_received),
		       cpu_to_be32(fore200e->stats->aal34.cspdus_dropped),
		       cpu_to_be32(fore200e->stats->aal34.cspdus_protocol_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL5:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " congestions:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       cpu_to_be32(fore200e->stats->aal5.cells_transmitted),
		       cpu_to_be32(fore200e->stats->aal5.cells_received),
		       cpu_to_be32(fore200e->stats->aal5.cells_dropped),
		       cpu_to_be32(fore200e->stats->aal5.congestion_experienced),
		       cpu_to_be32(fore200e->stats->aal5.cspdus_transmitted),
		       cpu_to_be32(fore200e->stats->aal5.cspdus_received),
		       cpu_to_be32(fore200e->stats->aal5.cspdus_dropped),
		       cpu_to_be32(fore200e->stats->aal5.cspdus_crc_errors),
		       cpu_to_be32(fore200e->stats->aal5.cspdus_protocol_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " AUX:\t\t allocation failures\n"
		       " small b1:\t\t\t%10u\n"
		       " large b1:\t\t\t%10u\n"
		       " small b2:\t\t\t%10u\n"
		       " large b2:\t\t\t%10u\n"
		       " RX PDUs:\t\t\t%10u\n"
		       " TX PDUs:\t\t\t%10lu\n",
		       cpu_to_be32(fore200e->stats->aux.small_b1_failed),
		       cpu_to_be32(fore200e->stats->aux.large_b1_failed),
		       cpu_to_be32(fore200e->stats->aux.small_b2_failed),
		       cpu_to_be32(fore200e->stats->aux.large_b2_failed),
		       cpu_to_be32(fore200e->stats->aux.rpd_alloc_failed),
		       fore200e->tx_sat);

    if (!left--)
	return sprintf(page,"\n"
		       " receive carrier:\t\t\t%s\n",
		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");

    if (!left--) {
	return sprintf(page,"\n"
		       " VCCs:\n address VPI VCI AAL "
		       "TX PDUs TX min/max size RX PDUs RX min/max size\n");
    }

    /* remaining positions: one line per ready VCC */
    for (i = 0; i < NBR_CONNECT; i++) {

	vcc = fore200e->vc_map[i].vcc;

	if (vcc == NULL)
	    continue;

	spin_lock_irqsave(&fore200e->q_lock, flags);

	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {

	    fore200e_vcc = FORE200E_VCC(vcc);
	    ASSERT(fore200e_vcc);

	    len = sprintf(page,
			  " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
			  (u32)(unsigned long)vcc,
			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
			  fore200e_vcc->tx_pdu,
			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
			  fore200e_vcc->tx_max_pdu,
			  fore200e_vcc->rx_pdu,
			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
			  fore200e_vcc->rx_max_pdu);

	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
	    return len;
	}

	spin_unlock_irqrestore(&fore200e->q_lock, flags);
    }

    return 0;
}

module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);


static const struct atmdev_ops fore200e_ops =
{
    .open       = fore200e_open,
    .close      = fore200e_close,
    .ioctl      = fore200e_ioctl,
    .getsockopt = fore200e_getsockopt,
    .setsockopt = fore200e_setsockopt,
    .send       = fore200e_send,
    .change_qos = fore200e_change_qos,
    .proc_read  = fore200e_proc_read,
    .owner      = THIS_MODULE
};


/* firmware images, linked into the module per configured bus type */
#ifdef CONFIG_ATM_FORE200E_PCA
extern const unsigned char _fore200e_pca_fw_data[];
extern const unsigned int  _fore200e_pca_fw_size;
#endif
#ifdef CONFIG_ATM_FORE200E_SBA
extern const unsigned char _fore200e_sba_fw_data[];
extern const unsigned int  _fore200e_sba_fw_size;
#endif

static
const struct fore200e_bus fore200e_bus[] = { 3154 #ifdef CONFIG_ATM_FORE200E_PCA 3155 { "PCA-200E", "pca200e", 32, 4, 32, 3156 _fore200e_pca_fw_data, &_fore200e_pca_fw_size, 3157 fore200e_pca_read, 3158 fore200e_pca_write, 3159 fore200e_pca_dma_map, 3160 fore200e_pca_dma_unmap, 3161 fore200e_pca_dma_sync_for_cpu, 3162 fore200e_pca_dma_sync_for_device, 3163 fore200e_pca_dma_chunk_alloc, 3164 fore200e_pca_dma_chunk_free, 3165 NULL, 3166 fore200e_pca_configure, 3167 fore200e_pca_map, 3168 fore200e_pca_reset, 3169 fore200e_pca_prom_read, 3170 fore200e_pca_unmap, 3171 NULL, 3172 fore200e_pca_irq_check, 3173 fore200e_pca_irq_ack, 3174 fore200e_pca_proc_read, 3175 }, 3176 #endif 3177 #ifdef CONFIG_ATM_FORE200E_SBA 3178 { "SBA-200E", "sba200e", 32, 64, 32, 3179 _fore200e_sba_fw_data, &_fore200e_sba_fw_size, 3180 fore200e_sba_read, 3181 fore200e_sba_write, 3182 fore200e_sba_dma_map, 3183 fore200e_sba_dma_unmap, 3184 fore200e_sba_dma_sync_for_cpu, 3185 fore200e_sba_dma_sync_for_device, 3186 fore200e_sba_dma_chunk_alloc, 3187 fore200e_sba_dma_chunk_free, 3188 fore200e_sba_detect, 3189 fore200e_sba_configure, 3190 fore200e_sba_map, 3191 fore200e_sba_reset, 3192 fore200e_sba_prom_read, 3193 fore200e_sba_unmap, 3194 fore200e_sba_irq_enable, 3195 fore200e_sba_irq_check, 3196 fore200e_sba_irq_ack, 3197 fore200e_sba_proc_read, 3198 }, 3199 #endif 3200 {} 3201 }; 3202 3203 #ifdef MODULE_LICENSE 3204 MODULE_LICENSE("GPL"); 3205 #endif 3206