1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 A FORE Systems 200E-series driver for ATM on Linux. 4 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. 5 6 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). 7 8 This driver simultaneously supports PCA-200E and SBA-200E adapters 9 on i386, alpha (untested), powerpc, sparc and sparc64 architectures. 10 11 */ 12 13 14 #include <linux/kernel.h> 15 #include <linux/slab.h> 16 #include <linux/init.h> 17 #include <linux/capability.h> 18 #include <linux/interrupt.h> 19 #include <linux/bitops.h> 20 #include <linux/pci.h> 21 #include <linux/module.h> 22 #include <linux/atmdev.h> 23 #include <linux/sonet.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/delay.h> 26 #include <linux/firmware.h> 27 #include <linux/pgtable.h> 28 #include <asm/io.h> 29 #include <asm/string.h> 30 #include <asm/page.h> 31 #include <asm/irq.h> 32 #include <asm/dma.h> 33 #include <asm/byteorder.h> 34 #include <linux/uaccess.h> 35 #include <linux/atomic.h> 36 37 #ifdef CONFIG_SBUS 38 #include <linux/of.h> 39 #include <linux/of_device.h> 40 #include <asm/idprom.h> 41 #include <asm/openprom.h> 42 #include <asm/oplib.h> 43 #endif 44 45 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */ 46 #define FORE200E_USE_TASKLET 47 #endif 48 49 #if 0 /* enable the debugging code of the buffer supply queues */ 50 #define FORE200E_BSQ_DEBUG 51 #endif 52 53 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */ 54 #define FORE200E_52BYTE_AAL0_SDU 55 #endif 56 57 #include "fore200e.h" 58 #include "suni.h" 59 60 #define FORE200E_VERSION "0.3e" 61 62 #define FORE200E "fore200e: " 63 64 #if 0 /* override .config */ 65 #define CONFIG_ATM_FORE200E_DEBUG 1 66 #endif 67 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 68 #define DPRINTK(level, format, args...) 
do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \ 69 printk(FORE200E format, ##args); } while (0) 70 #else 71 #define DPRINTK(level, format, args...) do {} while (0) 72 #endif 73 74 75 #define FORE200E_ALIGN(addr, alignment) \ 76 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr)) 77 78 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type)) 79 80 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 81 82 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo)) 83 84 #if 1 85 #define ASSERT(expr) if (!(expr)) { \ 86 printk(FORE200E "assertion failed! %s[%d]: %s\n", \ 87 __func__, __LINE__, #expr); \ 88 panic(FORE200E "%s", __func__); \ 89 } 90 #else 91 #define ASSERT(expr) do {} while (0) 92 #endif 93 94 95 static const struct atmdev_ops fore200e_ops; 96 97 static LIST_HEAD(fore200e_boards); 98 99 100 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); 101 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); 102 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); 103 104 105 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 106 { BUFFER_S1_NBR, BUFFER_L1_NBR }, 107 { BUFFER_S2_NBR, BUFFER_L2_NBR } 108 }; 109 110 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 111 { BUFFER_S1_SIZE, BUFFER_L1_SIZE }, 112 { BUFFER_S2_SIZE, BUFFER_L2_SIZE } 113 }; 114 115 116 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 117 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" }; 118 #endif 119 120 121 #if 0 /* currently unused */ 122 static int 123 fore200e_fore2atm_aal(enum fore200e_aal aal) 124 { 125 switch(aal) { 126 case FORE200E_AAL0: return ATM_AAL0; 127 case FORE200E_AAL34: return ATM_AAL34; 128 case FORE200E_AAL5: return ATM_AAL5; 129 } 130 131 return -EINVAL; 132 } 133 
#endif 134 135 136 static enum fore200e_aal 137 fore200e_atm2fore_aal(int aal) 138 { 139 switch(aal) { 140 case ATM_AAL0: return FORE200E_AAL0; 141 case ATM_AAL34: return FORE200E_AAL34; 142 case ATM_AAL1: 143 case ATM_AAL2: 144 case ATM_AAL5: return FORE200E_AAL5; 145 } 146 147 return -EINVAL; 148 } 149 150 151 static char* 152 fore200e_irq_itoa(int irq) 153 { 154 static char str[8]; 155 sprintf(str, "%d", irq); 156 return str; 157 } 158 159 160 /* allocate and align a chunk of memory intended to hold the data behing exchanged 161 between the driver and the adapter (using streaming DVMA) */ 162 163 static int 164 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction) 165 { 166 unsigned long offset = 0; 167 168 if (alignment <= sizeof(int)) 169 alignment = 0; 170 171 chunk->alloc_size = size + alignment; 172 chunk->direction = direction; 173 174 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL); 175 if (chunk->alloc_addr == NULL) 176 return -ENOMEM; 177 178 if (alignment > 0) 179 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment); 180 181 chunk->align_addr = chunk->alloc_addr + offset; 182 183 chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr, 184 size, direction); 185 if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) { 186 kfree(chunk->alloc_addr); 187 return -ENOMEM; 188 } 189 return 0; 190 } 191 192 193 /* free a chunk of memory */ 194 195 static void 196 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 197 { 198 dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size, 199 chunk->direction); 200 kfree(chunk->alloc_addr); 201 } 202 203 /* 204 * Allocate a DMA consistent chunk of memory intended to act as a communication 205 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver 206 * and the adapter. 
/*
 * Allocate a DMA consistent chunk of memory intended to act as a communication
 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
 * and the adapter.
 */
static int
fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
			 int size, int nbr, int alignment)
{
	/* returned chunks are page-aligned, so the 'alignment' argument
	   needs no extra handling here (it is intentionally unused) */
	chunk->alloc_size = size * nbr;
	chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
					       &chunk->dma_addr, GFP_KERNEL);
	if (!chunk->alloc_addr)
		return -ENOMEM;
	chunk->align_addr = chunk->alloc_addr;
	return 0;
}

/*
 * Free a DMA consistent chunk of memory.
 */
static void
fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
	dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
			  chunk->dma_addr);
}

/* busy-wait for 'msecs' milliseconds; used around board resets, when
   sleeping or relying on device interrupts is not an option */
static void
fore200e_spin(int msecs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
	while (time_before(jiffies, timeout));
}

/* poll a host-memory status word (written by the adapter via DMA) until it
   equals 'val', an error bit is raised, or 'msecs' elapse.
   Returns non-zero iff the expected value was observed. */
static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
	int ok;

	mb();	/* make sure any prior writes to the command entry are visible first */
	do {
		/* bail out early if the adapter reports an error status */
		if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
			break;

	} while (time_before(jiffies, timeout));

#if 1
	if (!ok) {
		printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
		       *addr, val);
	}
#endif

	return ok;
}

/* poll a device register (through the bus-specific read op) until it equals
   'val' or 'msecs' elapse; unlike fore200e_poll() there is no error-bit
   short-circuit. Returns non-zero iff the expected value was observed. */
static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
	int ok;

	do {
		if ((ok = (fore200e->bus->read(addr) == val)))
			break;

	} while (time_before(jiffies, timeout));

#if 1
	if (!ok) {
		printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
		       fore200e->bus->read(addr), val);
	}
#endif

	return ok;
}

/* release the data chunk of every allocated rx buffer, across all buffer
   schemes and magnitudes */
static void
fore200e_free_rx_buf(struct fore200e* fore200e)
{
	int scheme, magn, nbr;
	struct buffer* buffer;

	for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
		for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

			if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {

				for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {

					struct chunk* data = &buffer[ nbr ].data;

					/* only free chunks whose allocation actually succeeded */
					if (data->alloc_addr != NULL)
						fore200e_chunk_free(fore200e, data);
				}
			}
		}
	}
}

/* tear down the buffer supply queues: free the DMA-consistent status words
   and receive buffer descriptor blocks of every scheme/magnitude pair */
static void
fore200e_uninit_bs_queue(struct fore200e* fore200e)
{
	int scheme, magn;

	for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
		for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

			struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
			struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;

			if (status->alloc_addr)
				fore200e_dma_chunk_free(fore200e, status);

			if (rbd_block->alloc_addr)
				fore200e_dma_chunk_free(fore200e, rbd_block);
		}
	}
}

/* cold-start the adapter through the bus-specific reset op; when 'diag' is
   set, wait for the on-board self-test to complete and report its outcome.
   Returns 0 on success, -ENODEV if the self-test fails. */
static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
	int ok;

	fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

	fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

	fore200e->bus->reset(fore200e);

	if (diag) {
		ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
		if (ok == 0) {

			printk(FORE200E "device %s self-test failed\n", fore200e->name);
			return -ENODEV;
		}

		printk(FORE200E "device %s self-test passed\n", fore200e->name);

		fore200e->state = FORE200E_STATE_RESET;
	}

	return 0;
}
to prevent further interrupts or data transfers */ 369 fore200e_reset(fore200e, 0); 370 } 371 372 /* then, release all allocated resources */ 373 switch(fore200e->state) { 374 375 case FORE200E_STATE_COMPLETE: 376 kfree(fore200e->stats); 377 378 fallthrough; 379 case FORE200E_STATE_IRQ: 380 free_irq(fore200e->irq, fore200e->atm_dev); 381 382 fallthrough; 383 case FORE200E_STATE_ALLOC_BUF: 384 fore200e_free_rx_buf(fore200e); 385 386 fallthrough; 387 case FORE200E_STATE_INIT_BSQ: 388 fore200e_uninit_bs_queue(fore200e); 389 390 fallthrough; 391 case FORE200E_STATE_INIT_RXQ: 392 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status); 393 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); 394 395 fallthrough; 396 case FORE200E_STATE_INIT_TXQ: 397 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status); 398 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd); 399 400 fallthrough; 401 case FORE200E_STATE_INIT_CMDQ: 402 fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status); 403 404 fallthrough; 405 case FORE200E_STATE_INITIALIZE: 406 /* nothing to do for that state */ 407 408 case FORE200E_STATE_START_FW: 409 /* nothing to do for that state */ 410 411 case FORE200E_STATE_RESET: 412 /* nothing to do for that state */ 413 414 case FORE200E_STATE_MAP: 415 fore200e->bus->unmap(fore200e); 416 417 fallthrough; 418 case FORE200E_STATE_CONFIGURE: 419 /* nothing to do for that state */ 420 421 case FORE200E_STATE_REGISTER: 422 /* XXX shouldn't we *start* by deregistering the device? 
*/ 423 atm_dev_deregister(fore200e->atm_dev); 424 425 case FORE200E_STATE_BLANK: 426 /* nothing to do for that state */ 427 break; 428 } 429 } 430 431 432 #ifdef CONFIG_PCI 433 434 static u32 fore200e_pca_read(volatile u32 __iomem *addr) 435 { 436 /* on big-endian hosts, the board is configured to convert 437 the endianess of slave RAM accesses */ 438 return le32_to_cpu(readl(addr)); 439 } 440 441 442 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr) 443 { 444 /* on big-endian hosts, the board is configured to convert 445 the endianess of slave RAM accesses */ 446 writel(cpu_to_le32(val), addr); 447 } 448 449 static int 450 fore200e_pca_irq_check(struct fore200e* fore200e) 451 { 452 /* this is a 1 bit register */ 453 int irq_posted = readl(fore200e->regs.pca.psr); 454 455 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2) 456 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) { 457 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number); 458 } 459 #endif 460 461 return irq_posted; 462 } 463 464 465 static void 466 fore200e_pca_irq_ack(struct fore200e* fore200e) 467 { 468 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr); 469 } 470 471 472 static void 473 fore200e_pca_reset(struct fore200e* fore200e) 474 { 475 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr); 476 fore200e_spin(10); 477 writel(0, fore200e->regs.pca.hcr); 478 } 479 480 481 static int fore200e_pca_map(struct fore200e* fore200e) 482 { 483 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name); 484 485 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH); 486 487 if (fore200e->virt_base == NULL) { 488 printk(FORE200E "can't map device %s\n", fore200e->name); 489 return -EFAULT; 490 } 491 492 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 493 494 /* gain access to the PCA specific registers */ 495 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET; 
/* map the board's I/O space and locate the PCA-200E specific registers.
   Returns 0 on success, -EFAULT if the ioremap fails. */
static int fore200e_pca_map(struct fore200e* fore200e)
{
	DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);

	fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);

	if (fore200e->virt_base == NULL) {
		printk(FORE200E "can't map device %s\n", fore200e->name);
		return -EFAULT;
	}

	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

	/* gain access to the PCA specific registers  */
	fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
	fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
	fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;

	fore200e->state = FORE200E_STATE_MAP;
	return 0;
}


/* undo fore200e_pca_map(): unmap the board's I/O space (if it was mapped) */
static void
fore200e_pca_unmap(struct fore200e* fore200e)
{
	DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);

	if (fore200e->virt_base != NULL)
		iounmap(fore200e->virt_base);
}


/* configure the board through PCI config space: sanity-check the IRQ,
   set up the bus-master control bits and raise the latency timer.
   Returns 0 on success, -EIO on a bogus IRQ assignment. */
static int fore200e_pca_configure(struct fore200e *fore200e)
{
	struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
	u8              master_ctrl, latency;

	DPRINTK(2, "device %s being configured\n", fore200e->name);

	if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
		printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
		return -EIO;
	}

	pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

	master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
		/* request the PCA board to convert the endianess of slave RAM accesses */
		| PCA200E_CTRL_CONVERT_ENDIAN
#endif
#if 0
		| PCA200E_CTRL_DIS_CACHE_RD
		| PCA200E_CTRL_DIS_WRT_INVAL
		| PCA200E_CTRL_ENA_CONT_REQ_MODE
		| PCA200E_CTRL_2_CACHE_WRT_INVAL
#endif
		| PCA200E_CTRL_LARGE_PCI_BURSTS;

	pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

	/* raise latency from 32 (default) to 192, as this seems to prevent NIC
	   lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
	   this may impact the performances of other PCI devices on the same bus, though */
	latency = 192;
	pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

	fore200e->state = FORE200E_STATE_CONFIGURE;
	return 0;
}


/* read the board's PROM data (MAC address, serial number, ...) by issuing
   a GET_PROM command through the command queue; the firmware DMAs the data
   into 'prom'. Returns 0 on success, -ENOMEM/-EIO on failure. */
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
	struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
	struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
	struct prom_opcode      opcode;
	int                     ok;
	u32                     prom_dma;

	FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

	opcode.opcode = OPCODE_GET_PROM;
	opcode.pad    = 0;

	prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(fore200e->dev, prom_dma))
		return -ENOMEM;

	fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

	*entry->status = STATUS_PENDING;

	/* writing the opcode last actually kicks off the command */
	fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

	ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

	*entry->status = STATUS_FREE;

	dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

	if (ok == 0) {
		printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
		return -EIO;
	}

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

	/* MAC address is stored as little-endian */
	swap_here(&prom->mac_addr[0]);
	swap_here(&prom->mac_addr[4]);
#endif

	return 0;
}


/* append the PCI location of the board to a /proc report line */
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
	struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);

	return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
		       pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}
/* bus-specific operations for PCA-200E (PCI) boards */
static const struct fore200e_bus fore200e_pci_ops = {
	.model_name       = "PCA-200E",
	.proc_name        = "pca200e",
	.descr_alignment  = 32,
	.buffer_alignment = 4,
	.status_alignment = 32,
	.read             = fore200e_pca_read,
	.write            = fore200e_pca_write,
	.configure        = fore200e_pca_configure,
	.map              = fore200e_pca_map,
	.reset            = fore200e_pca_reset,
	.prom_read        = fore200e_pca_prom_read,
	.unmap            = fore200e_pca_unmap,
	.irq_check        = fore200e_pca_irq_check,
	.irq_ack          = fore200e_pca_irq_ack,
	.proc_read        = fore200e_pca_proc_read,
};
#endif /* CONFIG_PCI */

#ifdef CONFIG_SBUS

/* read a 32-bit word from the board through the SBus */
static u32 fore200e_sba_read(volatile u32 __iomem *addr)
{
	return sbus_readl(addr);
}

/* write a 32-bit word to the board through the SBus */
static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
{
	sbus_writel(val, addr);
}

/* enable board interrupts, preserving the sticky bits of the HCR */
static void fore200e_sba_irq_enable(struct fore200e *fore200e)
{
	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}

/* return non-zero iff the board has an interrupt pending */
static int fore200e_sba_irq_check(struct fore200e *fore200e)
{
	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}

/* acknowledge a pending interrupt, preserving the sticky bits of the HCR */
static void fore200e_sba_irq_ack(struct fore200e *fore200e)
{
	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}

/* hard-reset the SBA-200E board: assert RESET, hold it for 10 ms, release */
static void fore200e_sba_reset(struct fore200e *fore200e)
{
	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
	fore200e_spin(10);
	fore200e->bus->write(0, fore200e->regs.sba.hcr);
}

/* map the board's register windows and on-board RAM, set the interrupt
   level and enable 64-bit SBus DMA when available.
   Returns 0 on success, -EFAULT if the RAM cannot be mapped. */
static int __init fore200e_sba_map(struct fore200e *fore200e)
{
	struct platform_device *op = to_platform_device(fore200e->dev);
	unsigned int bursts;

	/* gain access to the SBA specific registers  */
	/* NOTE(review): the hcr/bsr/isr mappings below are not checked for
	   failure (only virt_base is) -- confirm of_ioremap cannot fail for
	   these resources, or add checks */
	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");

	if (!fore200e->virt_base) {
		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
		return -EFAULT;
	}

	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */

	/* get the supported DVMA burst sizes */
	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);

	if (sbus_can_dma_64bit())
		sbus_set_sbus64(&op->dev, bursts);

	fore200e->state = FORE200E_STATE_MAP;
	return 0;
}

/* undo fore200e_sba_map(): release all four mappings */
static void fore200e_sba_unmap(struct fore200e *fore200e)
{
	struct platform_device *op = to_platform_device(fore200e->dev);

	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
}

/* no bus-level configuration is needed on SBus; just advance the state */
static int __init fore200e_sba_configure(struct fore200e *fore200e)
{
	fore200e->state = FORE200E_STATE_CONFIGURE;
	return 0;
}
of_getintprop_default(op->dev.of_node, 730 "serialnumber", 0); 731 prom->hw_revision = of_getintprop_default(op->dev.of_node, 732 "promversion", 0); 733 734 return 0; 735 } 736 737 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page) 738 { 739 struct platform_device *op = to_platform_device(fore200e->dev); 740 const struct linux_prom_registers *regs; 741 742 regs = of_get_property(op->dev.of_node, "reg", NULL); 743 744 return sprintf(page, " SBUS slot/device:\t\t%d/'%pOFn'\n", 745 (regs ? regs->which_io : 0), op->dev.of_node); 746 } 747 748 static const struct fore200e_bus fore200e_sbus_ops = { 749 .model_name = "SBA-200E", 750 .proc_name = "sba200e", 751 .descr_alignment = 32, 752 .buffer_alignment = 64, 753 .status_alignment = 32, 754 .read = fore200e_sba_read, 755 .write = fore200e_sba_write, 756 .configure = fore200e_sba_configure, 757 .map = fore200e_sba_map, 758 .reset = fore200e_sba_reset, 759 .prom_read = fore200e_sba_prom_read, 760 .unmap = fore200e_sba_unmap, 761 .irq_enable = fore200e_sba_irq_enable, 762 .irq_check = fore200e_sba_irq_check, 763 .irq_ack = fore200e_sba_irq_ack, 764 .proc_read = fore200e_sba_proc_read, 765 }; 766 #endif /* CONFIG_SBUS */ 767 768 static void 769 fore200e_tx_irq(struct fore200e* fore200e) 770 { 771 struct host_txq* txq = &fore200e->host_txq; 772 struct host_txq_entry* entry; 773 struct atm_vcc* vcc; 774 struct fore200e_vc_map* vc_map; 775 776 if (fore200e->host_txq.txing == 0) 777 return; 778 779 for (;;) { 780 781 entry = &txq->host_entry[ txq->tail ]; 782 783 if ((*entry->status & STATUS_COMPLETE) == 0) { 784 break; 785 } 786 787 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n", 788 entry, txq->tail, entry->vc_map, entry->skb); 789 790 /* free copy of misaligned data */ 791 kfree(entry->data); 792 793 /* remove DMA mapping */ 794 dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length, 795 DMA_TO_DEVICE); 796 797 vc_map = entry->vc_map; 798 799 
/* reap completed tx entries from the tx queue: release per-entry resources,
   pop or drop the associated skb and update the vcc statistics.
   Runs with fore200e->q_lock held (from fore200e_irq / the tx tasklet). */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
	struct host_txq*        txq = &fore200e->host_txq;
	struct host_txq_entry*  entry;
	struct atm_vcc*         vcc;
	struct fore200e_vc_map* vc_map;

	if (fore200e->host_txq.txing == 0)
		return;

	for (;;) {

		entry = &txq->host_entry[ txq->tail ];

		/* stop at the first entry the adapter has not completed yet */
		if ((*entry->status & STATUS_COMPLETE) == 0) {
			break;
		}

		DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
			entry, txq->tail, entry->vc_map, entry->skb);

		/* free copy of misaligned data */
		kfree(entry->data);

		/* remove DMA mapping */
		dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
				 DMA_TO_DEVICE);

		vc_map = entry->vc_map;

		/* vcc closed since the time the entry was submitted for tx? */
		if ((vc_map->vcc == NULL) ||
		    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

			DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
				fore200e->atm_dev->number);

			dev_kfree_skb_any(entry->skb);
		}
		else {
			ASSERT(vc_map->vcc);

			/* vcc closed then immediately re-opened? */
			if (vc_map->incarn != entry->incarn) {

				/* when a vcc is closed, some PDUs may be still pending in the tx queue.
				   if the same vcc is immediately re-opened, those pending PDUs must
				   not be popped after the completion of their emission, as they refer
				   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
				   would be decremented by the size of the (unrelated) skb, possibly
				   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
				   we thus bind the tx entry to the current incarnation of the vcc
				   when the entry is submitted for tx. When the tx later completes,
				   if the incarnation number of the tx entry does not match the one
				   of the vcc, then this implies that the vcc has been closed then re-opened.
				   we thus just drop the skb here. */

				DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
					fore200e->atm_dev->number);

				dev_kfree_skb_any(entry->skb);
			}
			else {
				vcc = vc_map->vcc;
				ASSERT(vcc);

				/* notify tx completion */
				if (vcc->pop) {
					vcc->pop(vcc, entry->skb);
				}
				else {
					dev_kfree_skb_any(entry->skb);
				}

				/* check error condition */
				if (*entry->status & STATUS_ERROR)
					atomic_inc(&vcc->stats->tx_err);
				else
					atomic_inc(&vcc->stats->tx);
			}
		}

		*entry->status = STATUS_FREE;

		fore200e->host_txq.txing--;

		FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
	}
}


#ifdef FORE200E_BSQ_DEBUG
/* debug-only sanity check of a buffer supply queue's free list: verifies
   per-buffer bookkeeping and that the list length matches freebuf_count.
   'where' is a caller-supplied tag printed with every diagnostic. */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
	struct buffer* buffer;
	int count = 0;

	buffer = bsq->freebuf;
	while (buffer) {

		if (buffer->supplied) {
			printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
			       where, scheme, magn, buffer->index);
		}

		if (buffer->magn != magn) {
			printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
			       where, scheme, magn, buffer->index, buffer->magn);
		}

		if (buffer->scheme != scheme) {
			printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
			       where, scheme, magn, buffer->index, buffer->scheme);
		}

		if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
			printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
			       where, scheme, magn, buffer->index);
		}

		count++;
		buffer = buffer->next;
	}

	if (count != bsq->freebuf_count) {
		printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
		       where, scheme, magn, count, bsq->freebuf_count);
	}
	return 0;
}
#endif
/* replenish the adapter's receive buffer pools: for every scheme/magnitude
   queue, hand over buffers from the host free list in blocks of
   RBD_BLK_SIZE descriptors. Called from the rx completion path. */
static void
fore200e_supply(struct fore200e* fore200e)
{
	int  scheme, magn, i;

	struct host_bsq*       bsq;
	struct host_bsq_entry* entry;
	struct buffer*         buffer;

	for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
		for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

			bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
			bsq_audit(1, bsq, scheme, magn);
#endif
			/* only supply full blocks of RBD_BLK_SIZE buffers at a time */
			while (bsq->freebuf_count >= RBD_BLK_SIZE) {

				DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
					RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

				entry = &bsq->host_entry[ bsq->head ];

				for (i = 0; i < RBD_BLK_SIZE; i++) {

					/* take the first buffer in the free buffer list */
					buffer = bsq->freebuf;
					if (!buffer) {
						/* should be unreachable: freebuf_count said
						   at least RBD_BLK_SIZE buffers were available */
						printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
						       scheme, magn, bsq->freebuf_count);
						return;
					}
					bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
					if (buffer->supplied)
						printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
						       scheme, magn, buffer->index);
					buffer->supplied = 1;
#endif
					/* the handle lets the rx path map the descriptor
					   back to its host-side buffer */
					entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
					entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
				}

				FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

				/* decrease accordingly the number of free rx buffers */
				bsq->freebuf_count -= RBD_BLK_SIZE;

				/* mark the entry pending before ringing the doorbell:
				   the rbd_block_haddr write hands the block to the adapter */
				*entry->status = STATUS_PENDING;
				fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
			}
		}
	}
}
/* reassemble a received PDU described by 'rpd' into a fresh skb and push it
   up to the ATM layer on 'vcc'. Returns 0 on success, -ENOMEM when the skb
   cannot be allocated or the vcc's receive budget is exhausted. */
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
	struct sk_buff*      skb;
	struct buffer*       buffer;
	struct fore200e_vcc* fore200e_vcc;
	int                  i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
	u32                  cell_header = 0;
#endif

	ASSERT(vcc);

	fore200e_vcc = FORE200E_VCC(vcc);
	ASSERT(fore200e_vcc);

#ifdef FORE200E_52BYTE_AAL0_SDU
	/* atmdump-like applications expect raw AAL0 SDUs to carry the
	   4-byte ATM cell header in front of the 48-byte payload */
	if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

		cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
			      (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
			      (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
			      (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
			       rpd->atm_header.clp;
		pdu_len = 4;
	}
#endif

	/* compute total PDU length */
	for (i = 0; i < rpd->nseg; i++)
		pdu_len += rpd->rsd[ i ].length;

	skb = alloc_skb(pdu_len, GFP_ATOMIC);
	if (skb == NULL) {
		DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

		atomic_inc(&vcc->stats->rx_drop);
		return -ENOMEM;
	}

	__net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
	/* NOTE(review): an all-zero ATM header tests false here, so such a
	   cell would be pushed without the 4-byte header (pdu_len was still
	   bumped by 4 above) -- confirm this corner case is acceptable */
	if (cell_header) {
		*((u32*)skb_put(skb, 4)) = cell_header;
	}
#endif

	/* reassemble segments */
	for (i = 0; i < rpd->nseg; i++) {

		/* rebuild rx buffer address from rsd handle */
		buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

		/* Make device DMA transfer visible to CPU.  */
		dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
					rpd->rsd[i].length, DMA_FROM_DEVICE);

		skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);

		/* Now let the device get at it again.  */
		dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
					   rpd->rsd[i].length, DMA_FROM_DEVICE);
	}

	DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

	/* per-vcc rx statistics */
	if (pdu_len < fore200e_vcc->rx_min_pdu)
		fore200e_vcc->rx_min_pdu = pdu_len;
	if (pdu_len > fore200e_vcc->rx_max_pdu)
		fore200e_vcc->rx_max_pdu = pdu_len;
	fore200e_vcc->rx_pdu++;

	/* push PDU */
	if (atm_charge(vcc, skb->truesize) == 0) {

		DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
			vcc->itf, vcc->vpi, vcc->vci);

		dev_kfree_skb_any(skb);

		atomic_inc(&vcc->stats->rx_drop);
		return -ENOMEM;
	}

	vcc->push(vcc, skb);
	atomic_inc(&vcc->stats->rx);

	return 0;
}


/* return every rx buffer referenced by 'rpd' to the free list of its
   buffer supply queue, making it eligible for resupply to the adapter */
static void
fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
{
	struct host_bsq* bsq;
	struct buffer*   buffer;
	int              i;

	for (i = 0; i < rpd->nseg; i++) {

		/* rebuild rx buffer address from rsd handle */
		buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

		bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];

#ifdef FORE200E_BSQ_DEBUG
		bsq_audit(2, bsq, buffer->scheme, buffer->magn);

		if (buffer->supplied == 0)
			printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
			       buffer->scheme, buffer->magn, buffer->index);
		buffer->supplied = 0;
#endif

		/* re-insert the buffer into the free buffer list */
		buffer->next = bsq->freebuf;
		bsq->freebuf = buffer;

		/* then increment the number of free rx buffers */
		bsq->freebuf_count++;
	}
}
rxq->head ]; 1096 1097 /* no more received PDUs */ 1098 if ((*entry->status & STATUS_COMPLETE) == 0) 1099 break; 1100 1101 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1102 1103 if ((vc_map->vcc == NULL) || 1104 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 1105 1106 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n", 1107 fore200e->atm_dev->number, 1108 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1109 } 1110 else { 1111 vcc = vc_map->vcc; 1112 ASSERT(vcc); 1113 1114 if ((*entry->status & STATUS_ERROR) == 0) { 1115 1116 fore200e_push_rpd(fore200e, vcc, entry->rpd); 1117 } 1118 else { 1119 DPRINTK(2, "damaged PDU on %d.%d.%d\n", 1120 fore200e->atm_dev->number, 1121 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1122 atomic_inc(&vcc->stats->rx_err); 1123 } 1124 } 1125 1126 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); 1127 1128 fore200e_collect_rpd(fore200e, entry->rpd); 1129 1130 /* rewrite the rpd address to ack the received PDU */ 1131 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr); 1132 *entry->status = STATUS_FREE; 1133 1134 fore200e_supply(fore200e); 1135 } 1136 } 1137 1138 1139 #ifndef FORE200E_USE_TASKLET 1140 static void 1141 fore200e_irq(struct fore200e* fore200e) 1142 { 1143 unsigned long flags; 1144 1145 spin_lock_irqsave(&fore200e->q_lock, flags); 1146 fore200e_rx_irq(fore200e); 1147 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1148 1149 spin_lock_irqsave(&fore200e->q_lock, flags); 1150 fore200e_tx_irq(fore200e); 1151 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1152 } 1153 #endif 1154 1155 1156 static irqreturn_t 1157 fore200e_interrupt(int irq, void* dev) 1158 { 1159 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev); 1160 1161 if (fore200e->bus->irq_check(fore200e) == 0) { 1162 1163 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number); 1164 return IRQ_NONE; 1165 } 1166 DPRINTK(3, "interrupt 
triggered by device %d\n", fore200e->atm_dev->number); 1167 1168 #ifdef FORE200E_USE_TASKLET 1169 tasklet_schedule(&fore200e->tx_tasklet); 1170 tasklet_schedule(&fore200e->rx_tasklet); 1171 #else 1172 fore200e_irq(fore200e); 1173 #endif 1174 1175 fore200e->bus->irq_ack(fore200e); 1176 return IRQ_HANDLED; 1177 } 1178 1179 1180 #ifdef FORE200E_USE_TASKLET 1181 static void 1182 fore200e_tx_tasklet(unsigned long data) 1183 { 1184 struct fore200e* fore200e = (struct fore200e*) data; 1185 unsigned long flags; 1186 1187 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1188 1189 spin_lock_irqsave(&fore200e->q_lock, flags); 1190 fore200e_tx_irq(fore200e); 1191 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1192 } 1193 1194 1195 static void 1196 fore200e_rx_tasklet(unsigned long data) 1197 { 1198 struct fore200e* fore200e = (struct fore200e*) data; 1199 unsigned long flags; 1200 1201 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1202 1203 spin_lock_irqsave(&fore200e->q_lock, flags); 1204 fore200e_rx_irq((struct fore200e*) data); 1205 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1206 } 1207 #endif 1208 1209 1210 static int 1211 fore200e_select_scheme(struct atm_vcc* vcc) 1212 { 1213 /* fairly balance the VCs over (identical) buffer schemes */ 1214 int scheme = vcc->vci % 2 ? 
BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; 1215 1216 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", 1217 vcc->itf, vcc->vpi, vcc->vci, scheme); 1218 1219 return scheme; 1220 } 1221 1222 1223 static int 1224 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) 1225 { 1226 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1227 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1228 struct activate_opcode activ_opcode; 1229 struct deactivate_opcode deactiv_opcode; 1230 struct vpvc vpvc; 1231 int ok; 1232 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); 1233 1234 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1235 1236 if (activate) { 1237 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); 1238 1239 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; 1240 activ_opcode.aal = aal; 1241 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; 1242 activ_opcode.pad = 0; 1243 } 1244 else { 1245 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; 1246 deactiv_opcode.pad = 0; 1247 } 1248 1249 vpvc.vci = vcc->vci; 1250 vpvc.vpi = vcc->vpi; 1251 1252 *entry->status = STATUS_PENDING; 1253 1254 if (activate) { 1255 1256 #ifdef FORE200E_52BYTE_AAL0_SDU 1257 mtu = 48; 1258 #endif 1259 /* the MTU is not used by the cp, except in the case of AAL0 */ 1260 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); 1261 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); 1262 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); 1263 } 1264 else { 1265 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); 1266 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); 1267 } 1268 1269 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1270 1271 *entry->status = STATUS_FREE; 1272 1273 if (ok == 0) { 1274 printk(FORE200E "unable to 
%s VC %d.%d.%d\n", 1275 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); 1276 return -EIO; 1277 } 1278 1279 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 1280 activate ? "open" : "clos"); 1281 1282 return 0; 1283 } 1284 1285 1286 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */ 1287 1288 static void 1289 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) 1290 { 1291 if (qos->txtp.max_pcr < ATM_OC3_PCR) { 1292 1293 /* compute the data cells to idle cells ratio from the tx PCR */ 1294 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; 1295 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; 1296 } 1297 else { 1298 /* disable rate control */ 1299 rate->data_cells = rate->idle_cells = 0; 1300 } 1301 } 1302 1303 1304 static int 1305 fore200e_open(struct atm_vcc *vcc) 1306 { 1307 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1308 struct fore200e_vcc* fore200e_vcc; 1309 struct fore200e_vc_map* vc_map; 1310 unsigned long flags; 1311 int vci = vcc->vci; 1312 short vpi = vcc->vpi; 1313 1314 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); 1315 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); 1316 1317 spin_lock_irqsave(&fore200e->q_lock, flags); 1318 1319 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); 1320 if (vc_map->vcc) { 1321 1322 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1323 1324 printk(FORE200E "VC %d.%d.%d already in use\n", 1325 fore200e->atm_dev->number, vpi, vci); 1326 1327 return -EINVAL; 1328 } 1329 1330 vc_map->vcc = vcc; 1331 1332 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1333 1334 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); 1335 if (fore200e_vcc == NULL) { 1336 vc_map->vcc = NULL; 1337 return -ENOMEM; 1338 } 1339 1340 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1341 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", 1342 vcc->itf, vcc->vpi, vcc->vci, 
fore200e_atm2fore_aal(vcc->qos.aal), 1343 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], 1344 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, 1345 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], 1346 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); 1347 1348 /* pseudo-CBR bandwidth requested? */ 1349 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1350 1351 mutex_lock(&fore200e->rate_mtx); 1352 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) { 1353 mutex_unlock(&fore200e->rate_mtx); 1354 1355 kfree(fore200e_vcc); 1356 vc_map->vcc = NULL; 1357 return -EAGAIN; 1358 } 1359 1360 /* reserve bandwidth */ 1361 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr; 1362 mutex_unlock(&fore200e->rate_mtx); 1363 } 1364 1365 vcc->itf = vcc->dev->number; 1366 1367 set_bit(ATM_VF_PARTIAL,&vcc->flags); 1368 set_bit(ATM_VF_ADDR, &vcc->flags); 1369 1370 vcc->dev_data = fore200e_vcc; 1371 1372 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) { 1373 1374 vc_map->vcc = NULL; 1375 1376 clear_bit(ATM_VF_ADDR, &vcc->flags); 1377 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1378 1379 vcc->dev_data = NULL; 1380 1381 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1382 1383 kfree(fore200e_vcc); 1384 return -EINVAL; 1385 } 1386 1387 /* compute rate control parameters */ 1388 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1389 1390 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate); 1391 set_bit(ATM_VF_HASQOS, &vcc->flags); 1392 1393 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n", 1394 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1395 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 1396 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells); 1397 } 1398 1399 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = 
MAX_PDU_SIZE + 1; 1400 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0; 1401 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; 1402 1403 /* new incarnation of the vcc */ 1404 vc_map->incarn = ++fore200e->incarn_count; 1405 1406 /* VC unusable before this flag is set */ 1407 set_bit(ATM_VF_READY, &vcc->flags); 1408 1409 return 0; 1410 } 1411 1412 1413 static void 1414 fore200e_close(struct atm_vcc* vcc) 1415 { 1416 struct fore200e_vcc* fore200e_vcc; 1417 struct fore200e* fore200e; 1418 struct fore200e_vc_map* vc_map; 1419 unsigned long flags; 1420 1421 ASSERT(vcc); 1422 fore200e = FORE200E_DEV(vcc->dev); 1423 1424 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); 1425 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); 1426 1427 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); 1428 1429 clear_bit(ATM_VF_READY, &vcc->flags); 1430 1431 fore200e_activate_vcin(fore200e, 0, vcc, 0); 1432 1433 spin_lock_irqsave(&fore200e->q_lock, flags); 1434 1435 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1436 1437 /* the vc is no longer considered as "in use" by fore200e_open() */ 1438 vc_map->vcc = NULL; 1439 1440 vcc->itf = vcc->vci = vcc->vpi = 0; 1441 1442 fore200e_vcc = FORE200E_VCC(vcc); 1443 vcc->dev_data = NULL; 1444 1445 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1446 1447 /* release reserved bandwidth, if any */ 1448 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1449 1450 mutex_lock(&fore200e->rate_mtx); 1451 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1452 mutex_unlock(&fore200e->rate_mtx); 1453 1454 clear_bit(ATM_VF_HASQOS, &vcc->flags); 1455 } 1456 1457 clear_bit(ATM_VF_ADDR, &vcc->flags); 1458 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1459 1460 ASSERT(fore200e_vcc); 1461 kfree(fore200e_vcc); 1462 } 1463 1464 1465 static int 1466 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) 1467 { 1468 struct fore200e* fore200e; 1469 
struct fore200e_vcc* fore200e_vcc; 1470 struct fore200e_vc_map* vc_map; 1471 struct host_txq* txq; 1472 struct host_txq_entry* entry; 1473 struct tpd* tpd; 1474 struct tpd_haddr tpd_haddr; 1475 int retry = CONFIG_ATM_FORE200E_TX_RETRY; 1476 int tx_copy = 0; 1477 int tx_len = skb->len; 1478 u32* cell_header = NULL; 1479 unsigned char* skb_data; 1480 int skb_len; 1481 unsigned char* data; 1482 unsigned long flags; 1483 1484 if (!vcc) 1485 return -EINVAL; 1486 1487 fore200e = FORE200E_DEV(vcc->dev); 1488 fore200e_vcc = FORE200E_VCC(vcc); 1489 1490 if (!fore200e) 1491 return -EINVAL; 1492 1493 txq = &fore200e->host_txq; 1494 if (!fore200e_vcc) 1495 return -EINVAL; 1496 1497 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1498 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); 1499 dev_kfree_skb_any(skb); 1500 return -EINVAL; 1501 } 1502 1503 #ifdef FORE200E_52BYTE_AAL0_SDU 1504 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { 1505 cell_header = (u32*) skb->data; 1506 skb_data = skb->data + 4; /* skip 4-byte cell header */ 1507 skb_len = tx_len = skb->len - 4; 1508 1509 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); 1510 } 1511 else 1512 #endif 1513 { 1514 skb_data = skb->data; 1515 skb_len = skb->len; 1516 } 1517 1518 if (((unsigned long)skb_data) & 0x3) { 1519 1520 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); 1521 tx_copy = 1; 1522 tx_len = skb_len; 1523 } 1524 1525 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { 1526 1527 /* this simply NUKES the PCA board */ 1528 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); 1529 tx_copy = 1; 1530 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; 1531 } 1532 1533 if (tx_copy) { 1534 data = kmalloc(tx_len, GFP_ATOMIC); 1535 if (data == NULL) { 1536 if (vcc->pop) { 1537 vcc->pop(vcc, skb); 1538 } 1539 else { 1540 dev_kfree_skb_any(skb); 1541 } 1542 return -ENOMEM; 1543 } 1544 1545 memcpy(data, 
skb_data, skb_len); 1546 if (skb_len < tx_len) 1547 memset(data + skb_len, 0x00, tx_len - skb_len); 1548 } 1549 else { 1550 data = skb_data; 1551 } 1552 1553 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1554 ASSERT(vc_map->vcc == vcc); 1555 1556 retry_here: 1557 1558 spin_lock_irqsave(&fore200e->q_lock, flags); 1559 1560 entry = &txq->host_entry[ txq->head ]; 1561 1562 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { 1563 1564 /* try to free completed tx queue entries */ 1565 fore200e_tx_irq(fore200e); 1566 1567 if (*entry->status != STATUS_FREE) { 1568 1569 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1570 1571 /* retry once again? */ 1572 if (--retry > 0) { 1573 udelay(50); 1574 goto retry_here; 1575 } 1576 1577 atomic_inc(&vcc->stats->tx_err); 1578 1579 fore200e->tx_sat++; 1580 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", 1581 fore200e->name, fore200e->cp_queues->heartbeat); 1582 if (vcc->pop) { 1583 vcc->pop(vcc, skb); 1584 } 1585 else { 1586 dev_kfree_skb_any(skb); 1587 } 1588 1589 if (tx_copy) 1590 kfree(data); 1591 1592 return -ENOBUFS; 1593 } 1594 } 1595 1596 entry->incarn = vc_map->incarn; 1597 entry->vc_map = vc_map; 1598 entry->skb = skb; 1599 entry->data = tx_copy ? data : NULL; 1600 1601 tpd = entry->tpd; 1602 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len, 1603 DMA_TO_DEVICE); 1604 if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) { 1605 if (tx_copy) 1606 kfree(data); 1607 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1608 return -ENOMEM; 1609 } 1610 tpd->tsd[ 0 ].length = tx_len; 1611 1612 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); 1613 txq->txing++; 1614 1615 /* The dma_map call above implies a dma_sync so the device can use it, 1616 * thus no explicit dma_sync call is necessary here. 
1617 */ 1618 1619 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 1620 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1621 tpd->tsd[0].length, skb_len); 1622 1623 if (skb_len < fore200e_vcc->tx_min_pdu) 1624 fore200e_vcc->tx_min_pdu = skb_len; 1625 if (skb_len > fore200e_vcc->tx_max_pdu) 1626 fore200e_vcc->tx_max_pdu = skb_len; 1627 fore200e_vcc->tx_pdu++; 1628 1629 /* set tx rate control information */ 1630 tpd->rate.data_cells = fore200e_vcc->rate.data_cells; 1631 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; 1632 1633 if (cell_header) { 1634 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); 1635 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 1636 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; 1637 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; 1638 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; 1639 } 1640 else { 1641 /* set the ATM header, common to all cells conveying the PDU */ 1642 tpd->atm_header.clp = 0; 1643 tpd->atm_header.plt = 0; 1644 tpd->atm_header.vci = vcc->vci; 1645 tpd->atm_header.vpi = vcc->vpi; 1646 tpd->atm_header.gfc = 0; 1647 } 1648 1649 tpd->spec.length = tx_len; 1650 tpd->spec.nseg = 1; 1651 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); 1652 tpd->spec.intr = 1; 1653 1654 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ 1655 tpd_haddr.pad = 0; 1656 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ 1657 1658 *entry->status = STATUS_PENDING; 1659 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); 1660 1661 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1662 1663 return 0; 1664 } 1665 1666 1667 static int 1668 fore200e_getstats(struct fore200e* fore200e) 1669 { 1670 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1671 struct host_cmdq_entry* 
entry = &cmdq->host_entry[ cmdq->head ]; 1672 struct stats_opcode opcode; 1673 int ok; 1674 u32 stats_dma_addr; 1675 1676 if (fore200e->stats == NULL) { 1677 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL); 1678 if (fore200e->stats == NULL) 1679 return -ENOMEM; 1680 } 1681 1682 stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats, 1683 sizeof(struct stats), DMA_FROM_DEVICE); 1684 if (dma_mapping_error(fore200e->dev, stats_dma_addr)) 1685 return -ENOMEM; 1686 1687 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1688 1689 opcode.opcode = OPCODE_GET_STATS; 1690 opcode.pad = 0; 1691 1692 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr); 1693 1694 *entry->status = STATUS_PENDING; 1695 1696 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode); 1697 1698 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1699 1700 *entry->status = STATUS_FREE; 1701 1702 dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); 1703 1704 if (ok == 0) { 1705 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name); 1706 return -EIO; 1707 } 1708 1709 return 0; 1710 } 1711 1712 #if 0 /* currently unused */ 1713 static int 1714 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs) 1715 { 1716 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1717 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1718 struct oc3_opcode opcode; 1719 int ok; 1720 u32 oc3_regs_dma_addr; 1721 1722 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1723 1724 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1725 1726 opcode.opcode = OPCODE_GET_OC3; 1727 opcode.reg = 0; 1728 opcode.value = 0; 1729 opcode.mask = 0; 1730 1731 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1732 1733 *entry->status = STATUS_PENDING; 1734 1735 
fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode); 1736 1737 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1738 1739 *entry->status = STATUS_FREE; 1740 1741 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1742 1743 if (ok == 0) { 1744 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name); 1745 return -EIO; 1746 } 1747 1748 return 0; 1749 } 1750 #endif 1751 1752 1753 static int 1754 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask) 1755 { 1756 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1757 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1758 struct oc3_opcode opcode; 1759 int ok; 1760 1761 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask); 1762 1763 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1764 1765 opcode.opcode = OPCODE_SET_OC3; 1766 opcode.reg = reg; 1767 opcode.value = value; 1768 opcode.mask = mask; 1769 1770 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1771 1772 *entry->status = STATUS_PENDING; 1773 1774 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode); 1775 1776 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1777 1778 *entry->status = STATUS_FREE; 1779 1780 if (ok == 0) { 1781 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name); 1782 return -EIO; 1783 } 1784 1785 return 0; 1786 } 1787 1788 1789 static int 1790 fore200e_setloop(struct fore200e* fore200e, int loop_mode) 1791 { 1792 u32 mct_value, mct_mask; 1793 int error; 1794 1795 if (!capable(CAP_NET_ADMIN)) 1796 return -EPERM; 1797 1798 switch (loop_mode) { 1799 1800 case ATM_LM_NONE: 1801 mct_value = 0; 1802 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE; 1803 break; 1804 1805 case ATM_LM_LOC_PHY: 1806 mct_value = mct_mask = SUNI_MCT_DLE; 1807 break; 1808 1809 case ATM_LM_RMT_PHY: 1810 
mct_value = mct_mask = SUNI_MCT_LLE; 1811 break; 1812 1813 default: 1814 return -EINVAL; 1815 } 1816 1817 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask); 1818 if (error == 0) 1819 fore200e->loop_mode = loop_mode; 1820 1821 return error; 1822 } 1823 1824 1825 static int 1826 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg) 1827 { 1828 struct sonet_stats tmp; 1829 1830 if (fore200e_getstats(fore200e) < 0) 1831 return -EIO; 1832 1833 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors); 1834 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors); 1835 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors); 1836 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors); 1837 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors); 1838 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors); 1839 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors); 1840 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) + 1841 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) + 1842 be32_to_cpu(fore200e->stats->aal5.cells_transmitted); 1843 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) + 1844 be32_to_cpu(fore200e->stats->aal34.cells_received) + 1845 be32_to_cpu(fore200e->stats->aal5.cells_received); 1846 1847 if (arg) 1848 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0; 1849 1850 return 0; 1851 } 1852 1853 1854 static int 1855 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg) 1856 { 1857 struct fore200e* fore200e = FORE200E_DEV(dev); 1858 1859 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg); 1860 1861 switch (cmd) { 1862 1863 case SONET_GETSTAT: 1864 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg); 1865 1866 case SONET_GETDIAG: 1867 return put_user(0, (int __user *)arg) ? 
-EFAULT : 0; 1868 1869 case ATM_SETLOOP: 1870 return fore200e_setloop(fore200e, (int)(unsigned long)arg); 1871 1872 case ATM_GETLOOP: 1873 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0; 1874 1875 case ATM_QUERYLOOP: 1876 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0; 1877 } 1878 1879 return -ENOSYS; /* not implemented */ 1880 } 1881 1882 1883 static int 1884 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags) 1885 { 1886 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 1887 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1888 1889 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1890 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi); 1891 return -EINVAL; 1892 } 1893 1894 DPRINTK(2, "change_qos %d.%d.%d, " 1895 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1896 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n" 1897 "available_cell_rate = %u", 1898 vcc->itf, vcc->vpi, vcc->vci, 1899 fore200e_traffic_class[ qos->txtp.traffic_class ], 1900 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu, 1901 fore200e_traffic_class[ qos->rxtp.traffic_class ], 1902 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu, 1903 flags, fore200e->available_cell_rate); 1904 1905 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) { 1906 1907 mutex_lock(&fore200e->rate_mtx); 1908 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) { 1909 mutex_unlock(&fore200e->rate_mtx); 1910 return -EAGAIN; 1911 } 1912 1913 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1914 fore200e->available_cell_rate -= qos->txtp.max_pcr; 1915 1916 mutex_unlock(&fore200e->rate_mtx); 1917 1918 memcpy(&vcc->qos, qos, sizeof(struct atm_qos)); 1919 1920 /* update rate control parameters */ 1921 fore200e_rate_ctrl(qos, &fore200e_vcc->rate); 1922 1923 set_bit(ATM_VF_HASQOS, &vcc->flags); 1924 1925 
return 0;
    }

    return -EINVAL;
}


/* hook up the (possibly shared) board IRQ and, when configured, set up the
   deferred-work tasklets */
static int fore200e_irq_request(struct fore200e *fore200e)
{
    if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {

	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
	return -EBUSY;
    }

    printk(FORE200E "IRQ %s reserved for device %s\n",
	   fore200e_irq_itoa(fore200e->irq), fore200e->name);

#ifdef FORE200E_USE_TASKLET
    tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
    tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
#endif

    fore200e->state = FORE200E_STATE_IRQ;
    return 0;
}


/* read the board PROM and extract the end system identifier (ESI / MAC);
   the ESI lives at offset 2 of the PROM mac_addr field */
static int fore200e_get_esi(struct fore200e *fore200e)
{
    struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
    int ok, i;

    if (!prom)
	return -ENOMEM;

    ok = fore200e->bus->prom_read(fore200e, prom);
    if (ok < 0) {
	kfree(prom);
	return -EBUSY;
    }

    printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
	   fore200e->name,
	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);

    for (i = 0; i < ESI_LEN; i++) {
	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
    }

    kfree(prom);

    return 0;
}


/* allocate the rx buffer pools for every (scheme, magnitude) combination;
   on failure the buffers of the current pool are rolled back, but note that
   pools completed in earlier iterations are left for the caller's teardown
   path to free -- TODO confirm against the driver's unwind code */
static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
{
    int scheme, magn, nbr, size, i;

    struct host_bsq* bsq;
    struct buffer*   buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
	    size = fore200e_rx_buf_size[ scheme ][ magn ];

	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);

	    /* allocate the array of receive buffers */
	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
					   GFP_KERNEL);

	    if (buffer == NULL)
		return -ENOMEM;

	    bsq->freebuf = NULL;

	    for (i = 0; i < nbr; i++) {

		buffer[ i ].scheme = scheme;
		buffer[ i ].magn   = magn;
#ifdef FORE200E_BSQ_DEBUG
		buffer[ i ].index    = i;
		buffer[ i ].supplied = 0;
#endif

		/* allocate the receive buffer body */
		if (fore200e_chunk_alloc(fore200e,
					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
					 DMA_FROM_DEVICE) < 0) {

		    /* roll back the bodies already allocated for this pool */
		    while (i > 0)
			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
		    kfree(buffer);

		    return -ENOMEM;
		}

		/* insert the buffer into the free buffer list */
		buffer[ i ].next = bsq->freebuf;
		bsq->freebuf = &buffer[ i ];
	    }
	    /* all the buffers are free, initially */
	    bsq->freebuf_count = nbr;

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(3, bsq, scheme, magn);
#endif
	}
    }

    fore200e->state = FORE200E_STATE_ALLOC_BUF;
    return 0;
}


/* initialize the host and cp resident buffer supply queues and link each
   host entry to its cp counterpart */
static int fore200e_init_bs_queue(struct fore200e *fore200e)
{
    int scheme, magn, i;

    struct host_bsq*     bsq;
    struct cp_bsq_entry __iomem * cp_entry;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* allocate and align the array of status words */
	    if (fore200e_dma_chunk_alloc(fore200e,
					 &bsq->status,
					 sizeof(enum status),
					 QUEUE_SIZE_BS,
					 fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	    }

	    /* allocate and align the array of receive buffer descriptors */
	    if (fore200e_dma_chunk_alloc(fore200e,
					 &bsq->rbd_block,
					 sizeof(struct rbd_block),
					 QUEUE_SIZE_BS,
					 fore200e->bus->descr_alignment) < 0) {

		fore200e_dma_chunk_free(fore200e, &bsq->status);
		return -ENOMEM;
	    }

	    /* get the base address of the cp resident buffer supply queue entries */
	    cp_entry = fore200e->virt_base +
		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

	    /* fill the host resident and cp resident buffer supply queue entries */
	    for (i = 0; i < QUEUE_SIZE_BS; i++) {

		bsq->host_entry[ i ].status =
		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
		bsq->host_entry[ i ].rbd_block =
		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
		bsq->host_entry[ i ].rbd_block_dma =
		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		*bsq->host_entry[ i ].status = STATUS_FREE;

		/* tell the cp where the status word of this entry lives */
		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	    }
	}
    }

    fore200e->state =
FORE200E_STATE_INIT_BSQ;
    return 0;
}


/* initialize the host and cp resident rx queues; each cp entry is primed
   with the DMA address of both its status word and its rpd */
static int fore200e_init_rx_queue(struct fore200e *fore200e)
{
    struct host_rxq*     rxq =  &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &rxq->status,
				 sizeof(enum status),
				 QUEUE_SIZE_RX,
				 fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &rxq->rpd,
				 sizeof(struct rpd),
				 QUEUE_SIZE_RX,
				 fore200e->bus->descr_alignment) < 0) {

	fore200e_dma_chunk_free(fore200e, &rxq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

	rxq->host_entry[ i ].status =
	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
	rxq->host_entry[ i ].rpd =
	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
	rxq->host_entry[ i ].rpd_dma =
	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*rxq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
			     &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}


/* initialize the host and cp resident tx queues; unlike the rx queue, the
   tpd addresses are deliberately NOT pre-written (see comment in the loop) */
static int fore200e_init_tx_queue(struct fore200e *fore200e)
{
    struct host_txq*     txq =  &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &txq->status,
				 sizeof(enum status),
				 QUEUE_SIZE_TX,
				 fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &txq->tpd,
				 sizeof(struct tpd),
				 QUEUE_SIZE_TX,
				 fore200e->bus->descr_alignment) < 0) {

	fore200e_dma_chunk_free(fore200e, &txq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i=0; i < QUEUE_SIZE_TX; i++) {

	txq->host_entry[ i ].status =
	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
	txq->host_entry[ i ].tpd =
	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
	txq->host_entry[ i ].tpd_dma  =
	                     FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*txq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	/* although there is a one-to-one mapping of tx queue entries and tpds,
	   we do not write here the DMA (physical) base address of each tpd into
	   the related cp resident entry, because the cp relies on this write
	   operation to detect that a new pdu has been submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}


/* initialize the host and cp resident command queue */
static int fore200e_init_cmd_queue(struct fore200e *fore200e)
{
    struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &cmdq->status,
				 sizeof(enum status),
				 QUEUE_SIZE_CMD,
				 fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i=0; i < QUEUE_SIZE_CMD; i++) {

	cmdq->host_entry[ i ].status   =
	                      FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*cmdq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);
    }

    /* set the head entry of the queue */
    cmdq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_CMDQ;
    return 0;
}


/* write one buffer supply queue specification into the cp init block */
static void fore200e_param_bs_queue(struct fore200e *fore200e,
				    enum buffer_scheme scheme,
				    enum buffer_magn magn, int queue_length,
				    int pool_size, int supply_blksize)
{
    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];

    fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
    fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
    fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
}


/* fill the cp init block (queue sizes, buffer specs) and issue the
   OPCODE_INITIALIZE command, polling for its completion */
static int fore200e_initialize(struct fore200e *fore200e)
{
    struct cp_queues __iomem * cpq;
    int               ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    mutex_init(&fore200e->rate_mtx);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    if (fore200e->bus->irq_enable)
	fore200e->bus->irq_enable(fore200e);

    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
	    fore200e_param_bs_queue(fore200e, scheme, magn,
				    QUEUE_SIZE_BS,
				    fore200e_rx_buf_nbr[ scheme ][ magn ],
				    RBD_BLK_SIZE);

    /* issue the initialize command */
    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
    if (ok == 0) {
	printk(FORE200E "device %s initialization failed\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}


/* send one character to the cp monitor's software UART */
static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;

#if 0
    printk("%c", c);
#endif
    fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
}


static int fore200e_monitor_getc(struct fore200e *fore200e)
{
    struct
cp_monitor __iomem * monitor = fore200e->cp_monitor; 2351 unsigned long timeout = jiffies + msecs_to_jiffies(50); 2352 int c; 2353 2354 while (time_before(jiffies, timeout)) { 2355 2356 c = (int) fore200e->bus->read(&monitor->soft_uart.recv); 2357 2358 if (c & FORE200E_CP_MONITOR_UART_AVAIL) { 2359 2360 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv); 2361 #if 0 2362 printk("%c", c & 0xFF); 2363 #endif 2364 return c & 0xFF; 2365 } 2366 } 2367 2368 return -1; 2369 } 2370 2371 2372 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str) 2373 { 2374 while (*str) { 2375 2376 /* the i960 monitor doesn't accept any new character if it has something to say */ 2377 while (fore200e_monitor_getc(fore200e) >= 0); 2378 2379 fore200e_monitor_putc(fore200e, *str++); 2380 } 2381 2382 while (fore200e_monitor_getc(fore200e) >= 0); 2383 } 2384 2385 #ifdef __LITTLE_ENDIAN 2386 #define FW_EXT ".bin" 2387 #else 2388 #define FW_EXT "_ecd.bin2" 2389 #endif 2390 2391 static int fore200e_load_and_start_fw(struct fore200e *fore200e) 2392 { 2393 const struct firmware *firmware; 2394 const struct fw_header *fw_header; 2395 const __le32 *fw_data; 2396 u32 fw_size; 2397 u32 __iomem *load_addr; 2398 char buf[48]; 2399 int err; 2400 2401 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT); 2402 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) { 2403 printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name); 2404 return err; 2405 } 2406 2407 fw_data = (const __le32 *)firmware->data; 2408 fw_size = firmware->size / sizeof(u32); 2409 fw_header = (const struct fw_header *)firmware->data; 2410 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset); 2411 2412 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n", 2413 fore200e->name, load_addr, fw_size); 2414 2415 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) { 2416 printk(FORE200E "corrupted %s firmware image\n", 
fore200e->bus->model_name); 2417 goto release; 2418 } 2419 2420 for (; fw_size--; fw_data++, load_addr++) 2421 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr); 2422 2423 DPRINTK(2, "device %s firmware being started\n", fore200e->name); 2424 2425 #if defined(__sparc_v9__) 2426 /* reported to be required by SBA cards on some sparc64 hosts */ 2427 fore200e_spin(100); 2428 #endif 2429 2430 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset)); 2431 fore200e_monitor_puts(fore200e, buf); 2432 2433 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) { 2434 printk(FORE200E "device %s firmware didn't start\n", fore200e->name); 2435 goto release; 2436 } 2437 2438 printk(FORE200E "device %s firmware started\n", fore200e->name); 2439 2440 fore200e->state = FORE200E_STATE_START_FW; 2441 err = 0; 2442 2443 release: 2444 release_firmware(firmware); 2445 return err; 2446 } 2447 2448 2449 static int fore200e_register(struct fore200e *fore200e, struct device *parent) 2450 { 2451 struct atm_dev* atm_dev; 2452 2453 DPRINTK(2, "device %s being registered\n", fore200e->name); 2454 2455 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops, 2456 -1, NULL); 2457 if (atm_dev == NULL) { 2458 printk(FORE200E "unable to register device %s\n", fore200e->name); 2459 return -ENODEV; 2460 } 2461 2462 atm_dev->dev_data = fore200e; 2463 fore200e->atm_dev = atm_dev; 2464 2465 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS; 2466 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS; 2467 2468 fore200e->available_cell_rate = ATM_OC3_PCR; 2469 2470 fore200e->state = FORE200E_STATE_REGISTER; 2471 return 0; 2472 } 2473 2474 2475 static int fore200e_init(struct fore200e *fore200e, struct device *parent) 2476 { 2477 if (fore200e_register(fore200e, parent) < 0) 2478 return -ENODEV; 2479 2480 if (fore200e->bus->configure(fore200e) < 0) 2481 return -ENODEV; 2482 2483 if (fore200e->bus->map(fore200e) < 0) 2484 return -ENODEV; 2485 2486 if 
(fore200e_reset(fore200e, 1) < 0) 2487 return -ENODEV; 2488 2489 if (fore200e_load_and_start_fw(fore200e) < 0) 2490 return -ENODEV; 2491 2492 if (fore200e_initialize(fore200e) < 0) 2493 return -ENODEV; 2494 2495 if (fore200e_init_cmd_queue(fore200e) < 0) 2496 return -ENOMEM; 2497 2498 if (fore200e_init_tx_queue(fore200e) < 0) 2499 return -ENOMEM; 2500 2501 if (fore200e_init_rx_queue(fore200e) < 0) 2502 return -ENOMEM; 2503 2504 if (fore200e_init_bs_queue(fore200e) < 0) 2505 return -ENOMEM; 2506 2507 if (fore200e_alloc_rx_buf(fore200e) < 0) 2508 return -ENOMEM; 2509 2510 if (fore200e_get_esi(fore200e) < 0) 2511 return -EIO; 2512 2513 if (fore200e_irq_request(fore200e) < 0) 2514 return -EBUSY; 2515 2516 fore200e_supply(fore200e); 2517 2518 /* all done, board initialization is now complete */ 2519 fore200e->state = FORE200E_STATE_COMPLETE; 2520 return 0; 2521 } 2522 2523 #ifdef CONFIG_SBUS 2524 static const struct of_device_id fore200e_sba_match[]; 2525 static int fore200e_sba_probe(struct platform_device *op) 2526 { 2527 const struct of_device_id *match; 2528 struct fore200e *fore200e; 2529 static int index = 0; 2530 int err; 2531 2532 match = of_match_device(fore200e_sba_match, &op->dev); 2533 if (!match) 2534 return -EINVAL; 2535 2536 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2537 if (!fore200e) 2538 return -ENOMEM; 2539 2540 fore200e->bus = &fore200e_sbus_ops; 2541 fore200e->dev = &op->dev; 2542 fore200e->irq = op->archdata.irqs[0]; 2543 fore200e->phys_base = op->resource[0].start; 2544 2545 sprintf(fore200e->name, "SBA-200E-%d", index); 2546 2547 err = fore200e_init(fore200e, &op->dev); 2548 if (err < 0) { 2549 fore200e_shutdown(fore200e); 2550 kfree(fore200e); 2551 return err; 2552 } 2553 2554 index++; 2555 dev_set_drvdata(&op->dev, fore200e); 2556 2557 return 0; 2558 } 2559 2560 static int fore200e_sba_remove(struct platform_device *op) 2561 { 2562 struct fore200e *fore200e = dev_get_drvdata(&op->dev); 2563 2564 fore200e_shutdown(fore200e); 2565 
kfree(fore200e); 2566 2567 return 0; 2568 } 2569 2570 static const struct of_device_id fore200e_sba_match[] = { 2571 { 2572 .name = SBA200E_PROM_NAME, 2573 }, 2574 {}, 2575 }; 2576 MODULE_DEVICE_TABLE(of, fore200e_sba_match); 2577 2578 static struct platform_driver fore200e_sba_driver = { 2579 .driver = { 2580 .name = "fore_200e", 2581 .of_match_table = fore200e_sba_match, 2582 }, 2583 .probe = fore200e_sba_probe, 2584 .remove = fore200e_sba_remove, 2585 }; 2586 #endif 2587 2588 #ifdef CONFIG_PCI 2589 static int fore200e_pca_detect(struct pci_dev *pci_dev, 2590 const struct pci_device_id *pci_ent) 2591 { 2592 struct fore200e* fore200e; 2593 int err = 0; 2594 static int index = 0; 2595 2596 if (pci_enable_device(pci_dev)) { 2597 err = -EINVAL; 2598 goto out; 2599 } 2600 2601 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) { 2602 err = -EINVAL; 2603 goto out; 2604 } 2605 2606 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2607 if (fore200e == NULL) { 2608 err = -ENOMEM; 2609 goto out_disable; 2610 } 2611 2612 fore200e->bus = &fore200e_pci_ops; 2613 fore200e->dev = &pci_dev->dev; 2614 fore200e->irq = pci_dev->irq; 2615 fore200e->phys_base = pci_resource_start(pci_dev, 0); 2616 2617 sprintf(fore200e->name, "PCA-200E-%d", index - 1); 2618 2619 pci_set_master(pci_dev); 2620 2621 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n", 2622 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2623 2624 sprintf(fore200e->name, "PCA-200E-%d", index); 2625 2626 err = fore200e_init(fore200e, &pci_dev->dev); 2627 if (err < 0) { 2628 fore200e_shutdown(fore200e); 2629 goto out_free; 2630 } 2631 2632 ++index; 2633 pci_set_drvdata(pci_dev, fore200e); 2634 2635 out: 2636 return err; 2637 2638 out_free: 2639 kfree(fore200e); 2640 out_disable: 2641 pci_disable_device(pci_dev); 2642 goto out; 2643 } 2644 2645 2646 static void fore200e_pca_remove_one(struct pci_dev *pci_dev) 2647 { 2648 struct fore200e *fore200e; 2649 2650 fore200e = 
pci_get_drvdata(pci_dev); 2651 2652 fore200e_shutdown(fore200e); 2653 kfree(fore200e); 2654 pci_disable_device(pci_dev); 2655 } 2656 2657 2658 static const struct pci_device_id fore200e_pca_tbl[] = { 2659 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID }, 2660 { 0, } 2661 }; 2662 2663 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl); 2664 2665 static struct pci_driver fore200e_pca_driver = { 2666 .name = "fore_200e", 2667 .probe = fore200e_pca_detect, 2668 .remove = fore200e_pca_remove_one, 2669 .id_table = fore200e_pca_tbl, 2670 }; 2671 #endif 2672 2673 static int __init fore200e_module_init(void) 2674 { 2675 int err = 0; 2676 2677 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); 2678 2679 #ifdef CONFIG_SBUS 2680 err = platform_driver_register(&fore200e_sba_driver); 2681 if (err) 2682 return err; 2683 #endif 2684 2685 #ifdef CONFIG_PCI 2686 err = pci_register_driver(&fore200e_pca_driver); 2687 #endif 2688 2689 #ifdef CONFIG_SBUS 2690 if (err) 2691 platform_driver_unregister(&fore200e_sba_driver); 2692 #endif 2693 2694 return err; 2695 } 2696 2697 static void __exit fore200e_module_cleanup(void) 2698 { 2699 #ifdef CONFIG_PCI 2700 pci_unregister_driver(&fore200e_pca_driver); 2701 #endif 2702 #ifdef CONFIG_SBUS 2703 platform_driver_unregister(&fore200e_sba_driver); 2704 #endif 2705 } 2706 2707 static int 2708 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page) 2709 { 2710 struct fore200e* fore200e = FORE200E_DEV(dev); 2711 struct fore200e_vcc* fore200e_vcc; 2712 struct atm_vcc* vcc; 2713 int i, len, left = *pos; 2714 unsigned long flags; 2715 2716 if (!left--) { 2717 2718 if (fore200e_getstats(fore200e) < 0) 2719 return -EIO; 2720 2721 len = sprintf(page,"\n" 2722 " device:\n" 2723 " internal name:\t\t%s\n", fore200e->name); 2724 2725 /* print bus-specific information */ 2726 if (fore200e->bus->proc_read) 2727 len += fore200e->bus->proc_read(fore200e, page + len); 2728 2729 len += sprintf(page + 
len,
		      " interrupt line:\t\t%s\n"
		      " physical base address:\t0x%p\n"
		      " virtual base address:\t0x%p\n"
		      " factory address (ESI):\t%pM\n"
		      " board serial number:\t\t%d\n\n",
		      fore200e_irq_itoa(fore200e->irq),
		      (void*)fore200e->phys_base,
		      fore200e->virt_base,
		      fore200e->esi,
		      /* serial number is kept in the last two ESI bytes */
		      fore200e->esi[4] * 256 + fore200e->esi[5]);

	return len;
    }

    /* free receive-buffer counts, one per (scheme, magnitude) pair */
    if (!left--)
	return sprintf(page,
		       " free small bufs, scheme 1:\t%d\n"
		       " free large bufs, scheme 1:\t%d\n"
		       " free small bufs, scheme 2:\t%d\n"
		       " free large bufs, scheme 2:\t%d\n",
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);

    /* cell processor heartbeat; 0xDEAD in the upper half flags a cp fatal
       error whose code is carried in the lower half */
    if (!left--) {
	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);

	len = sprintf(page,"\n\n"
		      " cell processor:\n"
		      " heartbeat state:\t\t");

	if (hb >> 16 != 0xDEAD)
	    len += sprintf(page + len, "0x%08x\n", hb);
	else
	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);

	return len;
    }

    /* firmware / monitor / OC-3 information */
    if (!left--) {
	static const char* media_name[] = {
	    "unshielded twisted pair",
	    "multimode optical fiber ST",
	    "multimode optical fiber SC",
	    "single-mode optical fiber ST",
	    "single-mode optical fiber SC",
	    "unknown"
	};

	static const char* oc3_mode[] = {
	    "normal operation",
	    "diagnostic loopback",
	    "line loopback",
	    "unknown"
	};

	u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
	u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
	u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
	u32 oc3_index;

	/* clamp unexpected media codes to the "unknown" slot */
	if (media_index > 4)
	    media_index = 5;

	switch (fore200e->loop_mode) {
	    case ATM_LM_NONE:    oc3_index = 0;
				 break;
	    case ATM_LM_LOC_PHY: oc3_index = 1;
				 break;
	    case ATM_LM_RMT_PHY: oc3_index = 2;
				 break;
	    default:             oc3_index = 3;
	}

	/* fw_release packs major.minor.patch as 16/8/8 bit fields; the
	   shift pairs below isolate each field */
	return sprintf(page,
		       " firmware release:\t\t%d.%d.%d\n"
		       " monitor release:\t\t%d.%d\n"
		       " media type:\t\t\t%s\n"
		       " OC-3 revision:\t\t0x%x\n"
		       " OC-3 mode:\t\t\t%s",
		       fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
		       mon960_release >> 16, mon960_release << 16 >> 16,
		       media_name[ media_index ],
		       oc3_revision,
		       oc3_mode[ oc3_index ]);
    }

    /* i960 monitor version and boot status word */
    if (!left--) {
	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;

	return sprintf(page,
		       "\n\n"
		       " monitor:\n"
		       " version number:\t\t%d\n"
		       " boot status word:\t\t0x%08x\n",
		       fore200e->bus->read(&cp_monitor->mon_version),
		       fore200e->bus->read(&cp_monitor->bstat));
    }

    /* device statistics: PHY (4b5b) layer; counters are big-endian in
       cp memory, hence the be32_to_cpu conversions throughout */
    if (!left--)
	return sprintf(page,
		       "\n"
		       " device statistics:\n"
		       " 4b5b:\n"
		       " crc_header_errors:\t\t%10u\n"
		       " framing_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
		       be32_to_cpu(fore200e->stats->phy.framing_errors));

    if (!left--)
	return sprintf(page, "\n"
		       " OC-3:\n"
		       " section_bip8_errors:\t%10u\n"
		       " path_bip8_errors:\t\t%10u\n"
		       " line_bip24_errors:\t\t%10u\n"
		       " line_febe_errors:\t\t%10u\n"
		       " path_febe_errors:\t\t%10u\n"
		       " corr_hcs_errors:\t\t%10u\n"
		       " ucorr_hcs_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " ATM:\t\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " vpi out of range:\t\t%10u\n"
		       " vpi no conn:\t\t%10u\n"
		       " vci out of range:\t\t%10u\n"
		       " vci no conn:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
		       be32_to_cpu(fore200e->stats->atm.cells_received),
		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL0:\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal0.cells_received),
		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL3/4:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cells_received),
		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL5:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " congestions:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cells_received),
		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));

    /* allocation-failure counters; tx_sat is a host-side counter and is
       printed directly, unlike the big-endian board counters */
    if (!left--)
	return sprintf(page,"\n"
		       " AUX:\t\t allocation failures\n"
		       " small b1:\t\t\t%10u\n"
		       " large b1:\t\t\t%10u\n"
		       " small b2:\t\t\t%10u\n"
		       " large b2:\t\t\t%10u\n"
		       " RX PDUs:\t\t\t%10u\n"
		       " TX PDUs:\t\t\t%10lu\n",
		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
		       fore200e->tx_sat);

    if (!left--)
	return sprintf(page,"\n"
		       " receive carrier:\t\t\t%s\n",
		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");

    if (!left--) {
	return sprintf(page,"\n"
		       " VCCs:\n address VPI VCI AAL "
		       "TX PDUs TX min/max size RX PDUs RX min/max size\n");
    }

    /* one output line per ready VCC; the queue lock guards the vcc
       fields while they are being read.  NOTE(review): the initial
       vc_map read and NULL check happen before the lock is taken and
       are re-checked under it. */
    for (i = 0; i < NBR_CONNECT; i++) {

	vcc = fore200e->vc_map[i].vcc;

	if (vcc == NULL)
	    continue;

	spin_lock_irqsave(&fore200e->q_lock, flags);

	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {

	    fore200e_vcc = FORE200E_VCC(vcc);
	    ASSERT(fore200e_vcc);

	    len = sprintf(page,
			  " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
			  vcc,
			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
			  fore200e_vcc->tx_pdu,
			  /* a min still at its sentinel (> 0xFFFF) means "never seen" */
			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
			  fore200e_vcc->tx_max_pdu,
			  fore200e_vcc->rx_pdu,
			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
			  fore200e_vcc->rx_max_pdu);

	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
	    return len;
	}

	spin_unlock_irqrestore(&fore200e->q_lock, flags);
    }

    return 0;
}

module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);


static const struct atmdev_ops fore200e_ops = {
    .open       = fore200e_open,
    .close      = fore200e_close,
    .ioctl      = fore200e_ioctl,
    .send       = fore200e_send,
    .change_qos = fore200e_change_qos,
    .proc_read  = fore200e_proc_read,
    .owner      = THIS_MODULE
};

MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
/* NOTE(review): FW_EXT above is selected with the kernel's
   __LITTLE_ENDIAN macro while this block tests the compiler-provided
   __LITTLE_ENDIAN__ — confirm both agree on every supported
   architecture, otherwise the advertised firmware file may differ from
   the one actually requested */
#ifdef __LITTLE_ENDIAN__
MODULE_FIRMWARE("pca200e.bin");
#else
MODULE_FIRMWARE("pca200e_ecd.bin2");
#endif
#endif /* CONFIG_PCI */
#ifdef CONFIG_SBUS
MODULE_FIRMWARE("sba200e_ecd.bin2");
#endif