xref: /openbmc/linux/drivers/atm/fore200e.c (revision b7058842)
1 /*
2   A FORE Systems 200E-series driver for ATM on Linux.
3   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
4 
5   Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
6 
7   This driver simultaneously supports PCA-200E and SBA-200E adapters
8   on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
9 
10   This program is free software; you can redistribute it and/or modify
11   it under the terms of the GNU General Public License as published by
12   the Free Software Foundation; either version 2 of the License, or
13   (at your option) any later version.
14 
15   This program is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18   GNU General Public License for more details.
19 
20   You should have received a copy of the GNU General Public License
21   along with this program; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23 */
24 
25 
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
39 #include <linux/firmware.h>
40 #include <asm/io.h>
41 #include <asm/string.h>
42 #include <asm/page.h>
43 #include <asm/irq.h>
44 #include <asm/dma.h>
45 #include <asm/byteorder.h>
46 #include <asm/uaccess.h>
47 #include <asm/atomic.h>
48 
49 #ifdef CONFIG_SBUS
50 #include <linux/of.h>
51 #include <linux/of_device.h>
52 #include <asm/idprom.h>
53 #include <asm/openprom.h>
54 #include <asm/oplib.h>
55 #include <asm/pgtable.h>
56 #endif
57 
58 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
59 #define FORE200E_USE_TASKLET
60 #endif
61 
62 #if 0 /* enable the debugging code of the buffer supply queues */
63 #define FORE200E_BSQ_DEBUG
64 #endif
65 
66 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
67 #define FORE200E_52BYTE_AAL0_SDU
68 #endif
69 
70 #include "fore200e.h"
71 #include "suni.h"
72 
73 #define FORE200E_VERSION "0.3e"
74 
75 #define FORE200E         "fore200e: "
76 
77 #if 0 /* override .config */
78 #define CONFIG_ATM_FORE200E_DEBUG 1
79 #endif
80 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
81 #define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
82                                                   printk(FORE200E format, ##args); } while (0)
83 #else
84 #define DPRINTK(level, format, args...)  do {} while (0)
85 #endif
86 
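/* Illustrative only (not part of the driver): with CONFIG_ATM_FORE200E_DEBUG
 * set to 2, a call such as
 *
 *     DPRINTK(2, "device %s being configured\n", fore200e->name);
 *
 * expands to a printk() prefixed with "fore200e: ", whereas a DPRINTK() at
 * level 3 or higher reduces to a constant-false test that the compiler can
 * discard; with the option unset, every DPRINTK() is an empty statement.
 */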
87 
88 #define FORE200E_ALIGN(addr, alignment) \
89         ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
90 
91 #define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))
92 
93 #define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
94 
95 #define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
96 
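/* Illustrative worked examples of the helper macros above (not used by the
 * driver); the numeric values are hypothetical:
 *
 *     FORE200E_ALIGN(0x1004, 32)              == 0x1c, i.e. the byte offset to
 *                                                add to reach the next 32-byte
 *                                                boundary (0 if already aligned);
 *     FORE200E_DMA_INDEX(base, struct tpd, 3) == base + 3 * sizeof(struct tpd);
 *     FORE200E_NEXT_ENTRY(i, 4)               advances a ring index with
 *                                                wrap-around: 0, 1, 2, 3, 0, ...
 */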
97 #if 1
98 #define ASSERT(expr)     if (!(expr)) { \
99 			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
100 				    __func__, __LINE__, #expr); \
101 			     panic(FORE200E "%s", __func__); \
102 			 }
103 #else
104 #define ASSERT(expr)     do {} while (0)
105 #endif
106 
107 
108 static const struct atmdev_ops   fore200e_ops;
109 static const struct fore200e_bus fore200e_bus[];
110 
111 static LIST_HEAD(fore200e_boards);
112 
113 
114 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
115 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
116 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
117 
118 
119 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
120     { BUFFER_S1_NBR, BUFFER_L1_NBR },
121     { BUFFER_S2_NBR, BUFFER_L2_NBR }
122 };
123 
124 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
125     { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
126     { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
127 };
128 
129 
130 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
131 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
132 #endif
133 
134 
135 #if 0 /* currently unused */
136 static int
137 fore200e_fore2atm_aal(enum fore200e_aal aal)
138 {
139     switch(aal) {
140     case FORE200E_AAL0:  return ATM_AAL0;
141     case FORE200E_AAL34: return ATM_AAL34;
142     case FORE200E_AAL5:  return ATM_AAL5;
143     }
144 
145     return -EINVAL;
146 }
147 #endif
148 
149 
150 static enum fore200e_aal
151 fore200e_atm2fore_aal(int aal)
152 {
153     switch(aal) {
154     case ATM_AAL0:  return FORE200E_AAL0;
155     case ATM_AAL34: return FORE200E_AAL34;
156     case ATM_AAL1:
157     case ATM_AAL2:
158     case ATM_AAL5:  return FORE200E_AAL5;
159     }
160 
161     return -EINVAL;
162 }
163 
164 
165 static char*
166 fore200e_irq_itoa(int irq)
167 {
168     static char str[8];
169     snprintf(str, sizeof(str), "%d", irq);
170     return str;
171 }
172 
173 
174 /* allocate and align a chunk of memory intended to hold the data being exchanged
175    between the driver and the adapter (using streaming DVMA) */
176 
177 static int
178 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
179 {
180     unsigned long offset = 0;
181 
182     if (alignment <= sizeof(int))
183 	alignment = 0;
184 
185     chunk->alloc_size = size + alignment;
186     chunk->align_size = size;
187     chunk->direction  = direction;
188 
189     chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
190     if (chunk->alloc_addr == NULL)
191 	return -ENOMEM;
192 
193     if (alignment > 0)
194 	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
195 
196     chunk->align_addr = chunk->alloc_addr + offset;
197 
198     chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
199 
200     return 0;
201 }
202 
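/* Illustrative layout produced by fore200e_chunk_alloc() with alignment = 32
 * (addresses are hypothetical):
 *
 *     alloc_addr = 0x...1004      (kzalloc'ed area of size + 32 bytes)
 *     offset     = 0x1c           (FORE200E_ALIGN(alloc_addr, 32))
 *     align_addr = 0x...1020      (32-byte aligned start handed to the adapter)
 *     dma_addr   = bus->dma_map(align_addr, size, direction)
 *
 * with alignment <= sizeof(int), no padding is added and align_addr equals
 * alloc_addr.
 */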
203 
204 /* free a chunk of memory */
205 
206 static void
207 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
208 {
209     fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
210 
211     kfree(chunk->alloc_addr);
212 }
213 
214 
215 static void
216 fore200e_spin(int msecs)
217 {
218     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
219     while (time_before(jiffies, timeout));
220 }
221 
222 
223 static int
224 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
225 {
226     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
227     int           ok;
228 
229     mb();
230     do {
231 	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
232 	    break;
233 
234     } while (time_before(jiffies, timeout));
235 
236 #if 1
237     if (!ok) {
238 	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
239 	       *addr, val);
240     }
241 #endif
242 
243     return ok;
244 }
245 
246 
247 static int
248 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
249 {
250     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
251     int           ok;
252 
253     do {
254 	if ((ok = (fore200e->bus->read(addr) == val)))
255 	    break;
256 
257     } while (time_before(jiffies, timeout));
258 
259 #if 1
260     if (!ok) {
261 	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
262 	       fore200e->bus->read(addr), val);
263     }
264 #endif
265 
266     return ok;
267 }
268 
269 
270 static void
271 fore200e_free_rx_buf(struct fore200e* fore200e)
272 {
273     int scheme, magn, nbr;
274     struct buffer* buffer;
275 
276     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
277 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
278 
279 	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
280 
281 		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
282 
283 		    struct chunk* data = &buffer[ nbr ].data;
284 
285 		    if (data->alloc_addr != NULL)
286 			fore200e_chunk_free(fore200e, data);
287 		}
288 	    }
289 	}
290     }
291 }
292 
293 
294 static void
295 fore200e_uninit_bs_queue(struct fore200e* fore200e)
296 {
297     int scheme, magn;
298 
299     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
300 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
301 
302 	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
303 	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
304 
305 	    if (status->alloc_addr)
306 		fore200e->bus->dma_chunk_free(fore200e, status);
307 
308 	    if (rbd_block->alloc_addr)
309 		fore200e->bus->dma_chunk_free(fore200e, rbd_block);
310 	}
311     }
312 }
313 
314 
315 static int
316 fore200e_reset(struct fore200e* fore200e, int diag)
317 {
318     int ok;
319 
320     fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
321 
322     fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
323 
324     fore200e->bus->reset(fore200e);
325 
326     if (diag) {
327 	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
328 	if (ok == 0) {
329 
330 	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
331 	    return -ENODEV;
332 	}
333 
334 	printk(FORE200E "device %s self-test passed\n", fore200e->name);
335 
336 	fore200e->state = FORE200E_STATE_RESET;
337     }
338 
339     return 0;
340 }
341 
342 
343 static void
344 fore200e_shutdown(struct fore200e* fore200e)
345 {
346     printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
347 	   fore200e->name, fore200e->phys_base,
348 	   fore200e_irq_itoa(fore200e->irq));
349 
350     if (fore200e->state > FORE200E_STATE_RESET) {
351 	/* first, reset the board to prevent further interrupts or data transfers */
352 	fore200e_reset(fore200e, 0);
353     }
354 
355     /* then, release all allocated resources; each case below intentionally falls through to the next */
356     switch(fore200e->state) {
357 
358     case FORE200E_STATE_COMPLETE:
359 	kfree(fore200e->stats);
360 
361     case FORE200E_STATE_IRQ:
362 	free_irq(fore200e->irq, fore200e->atm_dev);
363 
364     case FORE200E_STATE_ALLOC_BUF:
365 	fore200e_free_rx_buf(fore200e);
366 
367     case FORE200E_STATE_INIT_BSQ:
368 	fore200e_uninit_bs_queue(fore200e);
369 
370     case FORE200E_STATE_INIT_RXQ:
371 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
372 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
373 
374     case FORE200E_STATE_INIT_TXQ:
375 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
376 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
377 
378     case FORE200E_STATE_INIT_CMDQ:
379 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
380 
381     case FORE200E_STATE_INITIALIZE:
382 	/* nothing to do for that state */
383 
384     case FORE200E_STATE_START_FW:
385 	/* nothing to do for that state */
386 
387     case FORE200E_STATE_RESET:
388 	/* nothing to do for that state */
389 
390     case FORE200E_STATE_MAP:
391 	fore200e->bus->unmap(fore200e);
392 
393     case FORE200E_STATE_CONFIGURE:
394 	/* nothing to do for that state */
395 
396     case FORE200E_STATE_REGISTER:
397 	/* XXX shouldn't we *start* by deregistering the device? */
398 	atm_dev_deregister(fore200e->atm_dev);
399 
400     case FORE200E_STATE_BLANK:
401 	/* nothing to do for that state */
402 	break;
403     }
404 }
405 
406 
407 #ifdef CONFIG_PCI
408 
409 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
410 {
411     /* on big-endian hosts, the board is configured to convert
412        the endianness of slave RAM accesses */
413     return le32_to_cpu(readl(addr));
414 }
415 
416 
417 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
418 {
419     /* on big-endian hosts, the board is configured to convert
420        the endianness of slave RAM accesses */
421     writel(cpu_to_le32(val), addr);
422 }
423 
424 
425 static u32
426 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
427 {
428     u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
429 
430     DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d,  --> dma_addr = 0x%08x\n",
431 	    virt_addr, size, direction, dma_addr);
432 
433     return dma_addr;
434 }
435 
436 
437 static void
438 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
439 {
440     DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
441 	    dma_addr, size, direction);
442 
443     pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
444 }
445 
446 
447 static void
448 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
449 {
450     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
451 
452     pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
453 }
454 
455 static void
456 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
457 {
458     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
459 
460     pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
461 }
462 
463 
464 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
465    (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
466 
467 static int
468 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
469 			     int size, int nbr, int alignment)
470 {
471     /* returned chunks are page-aligned */
472     chunk->alloc_size = size * nbr;
473     chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
474 					     chunk->alloc_size,
475 					     &chunk->dma_addr);
476 
477     if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
478 	return -ENOMEM;
479 
480     chunk->align_addr = chunk->alloc_addr;
481 
482     return 0;
483 }
484 
485 
486 /* free a DMA consistent chunk of memory */
487 
488 static void
489 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
490 {
491     pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
492 			chunk->alloc_size,
493 			chunk->alloc_addr,
494 			chunk->dma_addr);
495 }
496 
497 
498 static int
499 fore200e_pca_irq_check(struct fore200e* fore200e)
500 {
501     /* this is a 1 bit register */
502     int irq_posted = readl(fore200e->regs.pca.psr);
503 
504 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
505     if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
506 	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
507     }
508 #endif
509 
510     return irq_posted;
511 }
512 
513 
514 static void
515 fore200e_pca_irq_ack(struct fore200e* fore200e)
516 {
517     writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
518 }
519 
520 
521 static void
522 fore200e_pca_reset(struct fore200e* fore200e)
523 {
524     writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
525     fore200e_spin(10);
526     writel(0, fore200e->regs.pca.hcr);
527 }
528 
529 
530 static int __devinit
531 fore200e_pca_map(struct fore200e* fore200e)
532 {
533     DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
534 
535     fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
536 
537     if (fore200e->virt_base == NULL) {
538 	printk(FORE200E "can't map device %s\n", fore200e->name);
539 	return -EFAULT;
540     }
541 
542     DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
543 
544     /* gain access to the PCA specific registers  */
545     fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
546     fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
547     fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
548 
549     fore200e->state = FORE200E_STATE_MAP;
550     return 0;
551 }
552 
553 
554 static void
555 fore200e_pca_unmap(struct fore200e* fore200e)
556 {
557     DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
558 
559     if (fore200e->virt_base != NULL)
560 	iounmap(fore200e->virt_base);
561 }
562 
563 
564 static int __devinit
565 fore200e_pca_configure(struct fore200e* fore200e)
566 {
567     struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
568     u8              master_ctrl, latency;
569 
570     DPRINTK(2, "device %s being configured\n", fore200e->name);
571 
572     if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
573 	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
574 	return -EIO;
575     }
576 
577     pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
578 
579     master_ctrl = master_ctrl
580 #if defined(__BIG_ENDIAN)
581 	/* request the PCA board to convert the endianness of slave RAM accesses */
582 	| PCA200E_CTRL_CONVERT_ENDIAN
583 #endif
584 #if 0
585         | PCA200E_CTRL_DIS_CACHE_RD
586         | PCA200E_CTRL_DIS_WRT_INVAL
587         | PCA200E_CTRL_ENA_CONT_REQ_MODE
588         | PCA200E_CTRL_2_CACHE_WRT_INVAL
589 #endif
590 	| PCA200E_CTRL_LARGE_PCI_BURSTS;
591 
592     pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
593 
594     /* raise the latency timer from 32 (the default) to 192, as this seems to prevent NIC
595        lockups (under heavy rx loads) due to a continuous 'FIFO OUT full' condition.
596        this may impact the performance of other PCI devices on the same bus, though */
597     latency = 192;
598     pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
599 
600     fore200e->state = FORE200E_STATE_CONFIGURE;
601     return 0;
602 }
603 
604 
605 static int __init
606 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
607 {
608     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
609     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
610     struct prom_opcode      opcode;
611     int                     ok;
612     u32                     prom_dma;
613 
614     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
615 
616     opcode.opcode = OPCODE_GET_PROM;
617     opcode.pad    = 0;
618 
619     prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
620 
621     fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
622 
623     *entry->status = STATUS_PENDING;
624 
625     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
626 
627     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
628 
629     *entry->status = STATUS_FREE;
630 
631     fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
632 
633     if (ok == 0) {
634 	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
635 	return -EIO;
636     }
637 
638 #if defined(__BIG_ENDIAN)
639 
640 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
641 
642     /* MAC address is stored as little-endian */
643     swap_here(&prom->mac_addr[0]);
644     swap_here(&prom->mac_addr[4]);
645 #endif
646 
647     return 0;
648 }
649 
650 
651 static int
652 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
653 {
654     struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
655 
656     return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
657 		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
658 }
659 
660 #endif /* CONFIG_PCI */
661 
662 
663 #ifdef CONFIG_SBUS
664 
665 static u32 fore200e_sba_read(volatile u32 __iomem *addr)
666 {
667     return sbus_readl(addr);
668 }
669 
670 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
671 {
672     sbus_writel(val, addr);
673 }
674 
675 static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
676 {
677 	struct of_device *op = fore200e->bus_dev;
678 	u32 dma_addr;
679 
680 	dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);
681 
682 	DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
683 		virt_addr, size, direction, dma_addr);
684 
685 	return dma_addr;
686 }
687 
688 static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
689 {
690 	struct of_device *op = fore200e->bus_dev;
691 
692 	DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
693 		dma_addr, size, direction);
694 
695 	dma_unmap_single(&op->dev, dma_addr, size, direction);
696 }
697 
698 static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
699 {
700 	struct of_device *op = fore200e->bus_dev;
701 
702 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
703 
704 	dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction);
705 }
706 
707 static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
708 {
709 	struct of_device *op = fore200e->bus_dev;
710 
711 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
712 
713 	dma_sync_single_for_device(&op->dev, dma_addr, size, direction);
714 }
715 
716 /* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
717  * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter.
718  */
719 static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
720 					int size, int nbr, int alignment)
721 {
722 	struct of_device *op = fore200e->bus_dev;
723 
724 	chunk->alloc_size = chunk->align_size = size * nbr;
725 
726 	/* returned chunks are page-aligned */
727 	chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size,
728 					       &chunk->dma_addr, GFP_ATOMIC);
729 
730 	if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
731 		return -ENOMEM;
732 
733 	chunk->align_addr = chunk->alloc_addr;
734 
735 	return 0;
736 }
737 
738 /* free a DVMA consistent chunk of memory */
739 static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
740 {
741 	struct of_device *op = fore200e->bus_dev;
742 
743 	dma_free_coherent(&op->dev, chunk->alloc_size,
744 			  chunk->alloc_addr, chunk->dma_addr);
745 }
746 
747 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
748 {
749 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
750 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
751 }
752 
753 static int fore200e_sba_irq_check(struct fore200e *fore200e)
754 {
755 	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
756 }
757 
758 static void fore200e_sba_irq_ack(struct fore200e *fore200e)
759 {
760 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
761 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
762 }
763 
764 static void fore200e_sba_reset(struct fore200e *fore200e)
765 {
766 	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
767 	fore200e_spin(10);
768 	fore200e->bus->write(0, fore200e->regs.sba.hcr);
769 }
770 
771 static int __init fore200e_sba_map(struct fore200e *fore200e)
772 {
773 	struct of_device *op = fore200e->bus_dev;
774 	unsigned int bursts;
775 
776 	/* gain access to the SBA specific registers  */
777 	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
778 	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
779 	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
780 	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
781 
782 	if (!fore200e->virt_base) {
783 		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
784 		return -EFAULT;
785 	}
786 
787 	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
788 
789 	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
790 
791 	/* get the supported DVMA burst sizes */
792 	bursts = of_getintprop_default(op->node->parent, "burst-sizes", 0x00);
793 
794 	if (sbus_can_dma_64bit())
795 		sbus_set_sbus64(&op->dev, bursts);
796 
797 	fore200e->state = FORE200E_STATE_MAP;
798 	return 0;
799 }
800 
801 static void fore200e_sba_unmap(struct fore200e *fore200e)
802 {
803 	struct of_device *op = fore200e->bus_dev;
804 
805 	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
806 	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
807 	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
808 	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
809 }
810 
811 static int __init fore200e_sba_configure(struct fore200e *fore200e)
812 {
813 	fore200e->state = FORE200E_STATE_CONFIGURE;
814 	return 0;
815 }
816 
817 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
818 {
819 	struct of_device *op = fore200e->bus_dev;
820 	const u8 *prop;
821 	int len;
822 
823 	prop = of_get_property(op->node, "madaddrlo2", &len);
824 	if (!prop)
825 		return -ENODEV;
826 	memcpy(&prom->mac_addr[4], prop, 4);
827 
828 	prop = of_get_property(op->node, "madaddrhi4", &len);
829 	if (!prop)
830 		return -ENODEV;
831 	memcpy(&prom->mac_addr[2], prop, 4);
832 
833 	prom->serial_number = of_getintprop_default(op->node, "serialnumber", 0);
834 	prom->hw_revision = of_getintprop_default(op->node, "promversion", 0);
835 
836 	return 0;
837 }
838 
839 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
840 {
841 	struct of_device *op = fore200e->bus_dev;
842 	const struct linux_prom_registers *regs;
843 
844 	regs = of_get_property(op->node, "reg", NULL);
845 
846 	return sprintf(page, "   SBUS slot/device:\t\t%d/'%s'\n",
847 		       (regs ? regs->which_io : 0), op->node->name);
848 }
849 #endif /* CONFIG_SBUS */
850 
851 
852 static void
853 fore200e_tx_irq(struct fore200e* fore200e)
854 {
855     struct host_txq*        txq = &fore200e->host_txq;
856     struct host_txq_entry*  entry;
857     struct atm_vcc*         vcc;
858     struct fore200e_vc_map* vc_map;
859 
860     if (fore200e->host_txq.txing == 0)
861 	return;
862 
863     for (;;) {
864 
865 	entry = &txq->host_entry[ txq->tail ];
866 
867         if ((*entry->status & STATUS_COMPLETE) == 0) {
868 	    break;
869 	}
870 
871 	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
872 		entry, txq->tail, entry->vc_map, entry->skb);
873 
874 	/* free copy of misaligned data */
875 	kfree(entry->data);
876 
877 	/* remove DMA mapping */
878 	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
879 				 DMA_TO_DEVICE);
880 
881 	vc_map = entry->vc_map;
882 
883 	/* vcc closed since the time the entry was submitted for tx? */
884 	if ((vc_map->vcc == NULL) ||
885 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
886 
887 	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
888 		    fore200e->atm_dev->number);
889 
890 	    dev_kfree_skb_any(entry->skb);
891 	}
892 	else {
893 	    ASSERT(vc_map->vcc);
894 
895 	    /* vcc closed then immediately re-opened? */
896 	    if (vc_map->incarn != entry->incarn) {
897 
898 		/* when a vcc is closed, some PDUs may still be pending in the tx queue.
899 		   if the same vcc is immediately re-opened, those pending PDUs must
900 		   not be popped once their transmission completes, as they refer
901 		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
902 		   would be decremented by the size of the (unrelated) skb, possibly
903 		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
904 		   we thus bind the tx entry to the current incarnation of the vcc
905 		   when the entry is submitted for tx. when the tx later completes,
906 		   if the incarnation number of the tx entry does not match that of
907 		   the vcc, the vcc has been closed and then re-opened in the meantime,
908 		   so we simply drop the skb here. */
909 
910 		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
911 			fore200e->atm_dev->number);
912 
913 		dev_kfree_skb_any(entry->skb);
914 	    }
915 	    else {
916 		vcc = vc_map->vcc;
917 		ASSERT(vcc);
918 
919 		/* notify tx completion */
920 		if (vcc->pop) {
921 		    vcc->pop(vcc, entry->skb);
922 		}
923 		else {
924 		    dev_kfree_skb_any(entry->skb);
925 		}
926 #if 1
927 		/* race fixed by the above incarnation mechanism, but... */
928 		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
929 		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
930 		}
931 #endif
932 		/* check error condition */
933 		if (*entry->status & STATUS_ERROR)
934 		    atomic_inc(&vcc->stats->tx_err);
935 		else
936 		    atomic_inc(&vcc->stats->tx);
937 	    }
938 	}
939 
940 	*entry->status = STATUS_FREE;
941 
942 	fore200e->host_txq.txing--;
943 
944 	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
945     }
946 }
947 
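/* For reference, a condensed sketch of the vcc "incarnation" mechanism used
 * above (the real code lives in fore200e_open(), fore200e_send() and
 * fore200e_tx_irq()):
 *
 *     open:          vc_map->incarn = ++fore200e->incarn_count;
 *     send:          entry->incarn  = vc_map->incarn;
 *     tx completion: if (vc_map->incarn != entry->incarn)
 *                        drop the skb        (prior incarnation of the vcc)
 *                    else
 *                        pop/free the skb and update the tx stats
 */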
948 
949 #ifdef FORE200E_BSQ_DEBUG
950 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
951 {
952     struct buffer* buffer;
953     int count = 0;
954 
955     buffer = bsq->freebuf;
956     while (buffer) {
957 
958 	if (buffer->supplied) {
959 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
960 		   where, scheme, magn, buffer->index);
961 	}
962 
963 	if (buffer->magn != magn) {
964 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
965 		   where, scheme, magn, buffer->index, buffer->magn);
966 	}
967 
968 	if (buffer->scheme != scheme) {
969 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
970 		   where, scheme, magn, buffer->index, buffer->scheme);
971 	}
972 
973 	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
974 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
975 		   where, scheme, magn, buffer->index);
976 	}
977 
978 	count++;
979 	buffer = buffer->next;
980     }
981 
982     if (count != bsq->freebuf_count) {
983 	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
984 	       where, scheme, magn, count, bsq->freebuf_count);
985     }
986     return 0;
987 }
988 #endif
989 
990 
991 static void
992 fore200e_supply(struct fore200e* fore200e)
993 {
994     int  scheme, magn, i;
995 
996     struct host_bsq*       bsq;
997     struct host_bsq_entry* entry;
998     struct buffer*         buffer;
999 
1000     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1001 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1002 
1003 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
1004 
1005 #ifdef FORE200E_BSQ_DEBUG
1006 	    bsq_audit(1, bsq, scheme, magn);
1007 #endif
1008 	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1009 
1010 		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1011 			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1012 
1013 		entry = &bsq->host_entry[ bsq->head ];
1014 
1015 		for (i = 0; i < RBD_BLK_SIZE; i++) {
1016 
1017 		    /* take the first buffer in the free buffer list */
1018 		    buffer = bsq->freebuf;
1019 		    if (!buffer) {
1020 			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1021 			       scheme, magn, bsq->freebuf_count);
1022 			return;
1023 		    }
1024 		    bsq->freebuf = buffer->next;
1025 
1026 #ifdef FORE200E_BSQ_DEBUG
1027 		    if (buffer->supplied)
1028 			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1029 			       scheme, magn, buffer->index);
1030 		    buffer->supplied = 1;
1031 #endif
1032 		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1033 		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
1034 		}
1035 
1036 		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1037 
1038  		/* decrease the number of free rx buffers accordingly */
1039 		bsq->freebuf_count -= RBD_BLK_SIZE;
1040 
1041 		*entry->status = STATUS_PENDING;
1042 		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1043 	    }
1044 	}
1045     }
1046 }
1047 
1048 
1049 static int
1050 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1051 {
1052     struct sk_buff*      skb;
1053     struct buffer*       buffer;
1054     struct fore200e_vcc* fore200e_vcc;
1055     int                  i, pdu_len = 0;
1056 #ifdef FORE200E_52BYTE_AAL0_SDU
1057     u32                  cell_header = 0;
1058 #endif
1059 
1060     ASSERT(vcc);
1061 
1062     fore200e_vcc = FORE200E_VCC(vcc);
1063     ASSERT(fore200e_vcc);
1064 
1065 #ifdef FORE200E_52BYTE_AAL0_SDU
1066     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1067 
1068 	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1069 	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1070                       (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1071                       (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1072                        rpd->atm_header.clp;
1073 	pdu_len = 4;
1074     }
1075 #endif
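/* For reference, the 4-byte header rebuilt above is the ATM UNI cell header
 * without the HEC byte, packed according to the ATM_HDR_*_SHIFT values used
 * in the expression:
 *
 *     | GFC (4 bits) | VPI (8 bits) | VCI (16 bits) | PTI (3 bits) | CLP (1 bit) |
 *      bit 31                                                             bit 0
 *
 * prepending it to the 48-byte cell payload yields the 52-byte AAL0 SDU
 * (ATM_AAL0_SDU) expected by atmdump-like applications.
 */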
1076 
1077     /* compute total PDU length */
1078     for (i = 0; i < rpd->nseg; i++)
1079 	pdu_len += rpd->rsd[ i ].length;
1080 
1081     skb = alloc_skb(pdu_len, GFP_ATOMIC);
1082     if (skb == NULL) {
1083 	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1084 
1085 	atomic_inc(&vcc->stats->rx_drop);
1086 	return -ENOMEM;
1087     }
1088 
1089     __net_timestamp(skb);
1090 
1091 #ifdef FORE200E_52BYTE_AAL0_SDU
1092     if (cell_header) {
1093 	*((u32*)skb_put(skb, 4)) = cell_header;
1094     }
1095 #endif
1096 
1097     /* reassemble segments */
1098     for (i = 0; i < rpd->nseg; i++) {
1099 
1100 	/* rebuild rx buffer address from rsd handle */
1101 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1102 
1103 	/* Make device DMA transfer visible to CPU.  */
1104 	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1105 
1106 	memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1107 
1108 	/* Now let the device get at it again.  */
1109 	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1110     }
1111 
1112     DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1113 
1114     if (pdu_len < fore200e_vcc->rx_min_pdu)
1115 	fore200e_vcc->rx_min_pdu = pdu_len;
1116     if (pdu_len > fore200e_vcc->rx_max_pdu)
1117 	fore200e_vcc->rx_max_pdu = pdu_len;
1118     fore200e_vcc->rx_pdu++;
1119 
1120     /* push PDU */
1121     if (atm_charge(vcc, skb->truesize) == 0) {
1122 
1123 	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1124 		vcc->itf, vcc->vpi, vcc->vci);
1125 
1126 	dev_kfree_skb_any(skb);
1127 
1128 	atomic_inc(&vcc->stats->rx_drop);
1129 	return -ENOMEM;
1130     }
1131 
1132     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1133 
1134     vcc->push(vcc, skb);
1135     atomic_inc(&vcc->stats->rx);
1136 
1137     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1138 
1139     return 0;
1140 }
1141 
1142 
1143 static void
1144 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1145 {
1146     struct host_bsq* bsq;
1147     struct buffer*   buffer;
1148     int              i;
1149 
1150     for (i = 0; i < rpd->nseg; i++) {
1151 
1152 	/* rebuild rx buffer address from rsd handle */
1153 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1154 
1155 	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1156 
1157 #ifdef FORE200E_BSQ_DEBUG
1158 	bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1159 
1160 	if (buffer->supplied == 0)
1161 	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1162 		   buffer->scheme, buffer->magn, buffer->index);
1163 	buffer->supplied = 0;
1164 #endif
1165 
1166 	/* re-insert the buffer into the free buffer list */
1167 	buffer->next = bsq->freebuf;
1168 	bsq->freebuf = buffer;
1169 
1170 	/* then increment the number of free rx buffers */
1171 	bsq->freebuf_count++;
1172     }
1173 }
1174 
1175 
1176 static void
1177 fore200e_rx_irq(struct fore200e* fore200e)
1178 {
1179     struct host_rxq*        rxq = &fore200e->host_rxq;
1180     struct host_rxq_entry*  entry;
1181     struct atm_vcc*         vcc;
1182     struct fore200e_vc_map* vc_map;
1183 
1184     for (;;) {
1185 
1186 	entry = &rxq->host_entry[ rxq->head ];
1187 
1188 	/* no more received PDUs */
1189 	if ((*entry->status & STATUS_COMPLETE) == 0)
1190 	    break;
1191 
1192 	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1193 
1194 	if ((vc_map->vcc == NULL) ||
1195 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1196 
1197 	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1198 		    fore200e->atm_dev->number,
1199 		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1200 	}
1201 	else {
1202 	    vcc = vc_map->vcc;
1203 	    ASSERT(vcc);
1204 
1205 	    if ((*entry->status & STATUS_ERROR) == 0) {
1206 
1207 		fore200e_push_rpd(fore200e, vcc, entry->rpd);
1208 	    }
1209 	    else {
1210 		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1211 			fore200e->atm_dev->number,
1212 			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1213 		atomic_inc(&vcc->stats->rx_err);
1214 	    }
1215 	}
1216 
1217 	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1218 
1219 	fore200e_collect_rpd(fore200e, entry->rpd);
1220 
1221 	/* rewrite the rpd address to ack the received PDU */
1222 	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1223 	*entry->status = STATUS_FREE;
1224 
1225 	fore200e_supply(fore200e);
1226     }
1227 }
1228 
1229 
1230 #ifndef FORE200E_USE_TASKLET
1231 static void
1232 fore200e_irq(struct fore200e* fore200e)
1233 {
1234     unsigned long flags;
1235 
1236     spin_lock_irqsave(&fore200e->q_lock, flags);
1237     fore200e_rx_irq(fore200e);
1238     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1239 
1240     spin_lock_irqsave(&fore200e->q_lock, flags);
1241     fore200e_tx_irq(fore200e);
1242     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1243 }
1244 #endif
1245 
1246 
1247 static irqreturn_t
1248 fore200e_interrupt(int irq, void* dev)
1249 {
1250     struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1251 
1252     if (fore200e->bus->irq_check(fore200e) == 0) {
1253 
1254 	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1255 	return IRQ_NONE;
1256     }
1257     DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1258 
1259 #ifdef FORE200E_USE_TASKLET
1260     tasklet_schedule(&fore200e->tx_tasklet);
1261     tasklet_schedule(&fore200e->rx_tasklet);
1262 #else
1263     fore200e_irq(fore200e);
1264 #endif
1265 
1266     fore200e->bus->irq_ack(fore200e);
1267     return IRQ_HANDLED;
1268 }
1269 
1270 
1271 #ifdef FORE200E_USE_TASKLET
1272 static void
1273 fore200e_tx_tasklet(unsigned long data)
1274 {
1275     struct fore200e* fore200e = (struct fore200e*) data;
1276     unsigned long flags;
1277 
1278     DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1279 
1280     spin_lock_irqsave(&fore200e->q_lock, flags);
1281     fore200e_tx_irq(fore200e);
1282     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1283 }
1284 
1285 
1286 static void
1287 fore200e_rx_tasklet(unsigned long data)
1288 {
1289     struct fore200e* fore200e = (struct fore200e*) data;
1290     unsigned long    flags;
1291 
1292     DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1293 
1294     spin_lock_irqsave(&fore200e->q_lock, flags);
1295     fore200e_rx_irq((struct fore200e*) data);
1296     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1297 }
1298 #endif
1299 
1300 
1301 static int
1302 fore200e_select_scheme(struct atm_vcc* vcc)
1303 {
1304     /* fairly balance the VCs over (identical) buffer schemes */
1305     int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1306 
1307     DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1308 	    vcc->itf, vcc->vpi, vcc->vci, scheme);
1309 
1310     return scheme;
1311 }
1312 
1313 
1314 static int
1315 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1316 {
1317     struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
1318     struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
1319     struct activate_opcode   activ_opcode;
1320     struct deactivate_opcode deactiv_opcode;
1321     struct vpvc              vpvc;
1322     int                      ok;
1323     enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);
1324 
1325     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1326 
1327     if (activate) {
1328 	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1329 
1330 	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1331 	activ_opcode.aal    = aal;
1332 	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1333 	activ_opcode.pad    = 0;
1334     }
1335     else {
1336 	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1337 	deactiv_opcode.pad    = 0;
1338     }
1339 
1340     vpvc.vci = vcc->vci;
1341     vpvc.vpi = vcc->vpi;
1342 
1343     *entry->status = STATUS_PENDING;
1344 
1345     if (activate) {
1346 
1347 #ifdef FORE200E_52BYTE_AAL0_SDU
1348 	mtu = 48;
1349 #endif
1350 	/* the MTU is not used by the cp, except in the case of AAL0 */
1351 	fore200e->bus->write(mtu,                        &entry->cp_entry->cmd.activate_block.mtu);
1352 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1353 	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1354     }
1355     else {
1356 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1357 	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1358     }
1359 
1360     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1361 
1362     *entry->status = STATUS_FREE;
1363 
1364     if (ok == 0) {
1365 	printk(FORE200E "unable to %s VC %d.%d.%d\n",
1366 	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1367 	return -EIO;
1368     }
1369 
1370     DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1371 	    activate ? "open" : "clos");
1372 
1373     return 0;
1374 }
1375 
1376 
1377 #define FORE200E_MAX_BACK2BACK_CELLS 255    /* XXX depends on CDVT */
1378 
1379 static void
1380 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1381 {
1382     if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1383 
1384 	/* compute the data cells to idle cells ratio from the tx PCR */
1385 	rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1386 	rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1387     }
1388     else {
1389 	/* disable rate control */
1390 	rate->data_cells = rate->idle_cells = 0;
1391     }
1392 }
1393 
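/* Illustrative example of the computation above (not part of the driver),
 * assuming ATM_OC3_PCR evaluates to 353207 cells/s and a requested
 * txtp.max_pcr of 176603 (roughly half the line rate):
 *
 *     data_cells = 176603 * 255 / 353207 = 127
 *     idle_cells = 255 - 127             = 128
 *
 * i.e. the adapter interleaves about one idle cell per data cell, throttling
 * the VC to approximately half of the OC-3 cell rate.
 */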
1394 
1395 static int
1396 fore200e_open(struct atm_vcc *vcc)
1397 {
1398     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1399     struct fore200e_vcc*    fore200e_vcc;
1400     struct fore200e_vc_map* vc_map;
1401     unsigned long	    flags;
1402     int			    vci = vcc->vci;
1403     short		    vpi = vcc->vpi;
1404 
1405     ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1406     ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1407 
1408     spin_lock_irqsave(&fore200e->q_lock, flags);
1409 
1410     vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1411     if (vc_map->vcc) {
1412 
1413 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1414 
1415 	printk(FORE200E "VC %d.%d.%d already in use\n",
1416 	       fore200e->atm_dev->number, vpi, vci);
1417 
1418 	return -EINVAL;
1419     }
1420 
1421     vc_map->vcc = vcc;
1422 
1423     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1424 
1425     fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1426     if (fore200e_vcc == NULL) {
1427 	vc_map->vcc = NULL;
1428 	return -ENOMEM;
1429     }
1430 
1431     DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1432 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1433 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1434 	    fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1435 	    vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1436 	    fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1437 	    vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1438 
1439     /* pseudo-CBR bandwidth requested? */
1440     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1441 
1442 	mutex_lock(&fore200e->rate_mtx);
1443 	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1444 	    mutex_unlock(&fore200e->rate_mtx);
1445 
1446 	    kfree(fore200e_vcc);
1447 	    vc_map->vcc = NULL;
1448 	    return -EAGAIN;
1449 	}
1450 
1451 	/* reserve bandwidth */
1452 	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1453 	mutex_unlock(&fore200e->rate_mtx);
1454     }
1455 
1456     vcc->itf = vcc->dev->number;
1457 
1458     set_bit(ATM_VF_PARTIAL,&vcc->flags);
1459     set_bit(ATM_VF_ADDR, &vcc->flags);
1460 
1461     vcc->dev_data = fore200e_vcc;
1462 
1463     if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1464 
1465 	vc_map->vcc = NULL;
1466 
1467 	clear_bit(ATM_VF_ADDR, &vcc->flags);
1468 	clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1469 
1470 	vcc->dev_data = NULL;
1471 
1472 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1473 
1474 	kfree(fore200e_vcc);
1475 	return -EINVAL;
1476     }
1477 
1478     /* compute rate control parameters */
1479     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1480 
1481 	fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1482 	set_bit(ATM_VF_HASQOS, &vcc->flags);
1483 
1484 	DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1485 		vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1486 		vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1487 		fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1488     }
1489 
1490     fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1491     fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1492     fore200e_vcc->tx_pdu     = fore200e_vcc->rx_pdu     = 0;
1493 
1494     /* new incarnation of the vcc */
1495     vc_map->incarn = ++fore200e->incarn_count;
1496 
1497     /* VC unusable before this flag is set */
1498     set_bit(ATM_VF_READY, &vcc->flags);
1499 
1500     return 0;
1501 }
1502 
1503 
1504 static void
1505 fore200e_close(struct atm_vcc* vcc)
1506 {
1507     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1508     struct fore200e_vcc*    fore200e_vcc;
1509     struct fore200e_vc_map* vc_map;
1510     unsigned long           flags;
1511 
1512     ASSERT(vcc);
1513     ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1514     ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1515 
1516     DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1517 
1518     clear_bit(ATM_VF_READY, &vcc->flags);
1519 
1520     fore200e_activate_vcin(fore200e, 0, vcc, 0);
1521 
1522     spin_lock_irqsave(&fore200e->q_lock, flags);
1523 
1524     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1525 
1526     /* the vc is no longer considered as "in use" by fore200e_open() */
1527     vc_map->vcc = NULL;
1528 
1529     vcc->itf = vcc->vci = vcc->vpi = 0;
1530 
1531     fore200e_vcc = FORE200E_VCC(vcc);
1532     vcc->dev_data = NULL;
1533 
1534     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1535 
1536     /* release reserved bandwidth, if any */
1537     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1538 
1539 	mutex_lock(&fore200e->rate_mtx);
1540 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1541 	mutex_unlock(&fore200e->rate_mtx);
1542 
1543 	clear_bit(ATM_VF_HASQOS, &vcc->flags);
1544     }
1545 
1546     clear_bit(ATM_VF_ADDR, &vcc->flags);
1547     clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1548 
1549     ASSERT(fore200e_vcc);
1550     kfree(fore200e_vcc);
1551 }
1552 
1553 
1554 static int
1555 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1556 {
1557     struct fore200e*        fore200e     = FORE200E_DEV(vcc->dev);
1558     struct fore200e_vcc*    fore200e_vcc = FORE200E_VCC(vcc);
1559     struct fore200e_vc_map* vc_map;
1560     struct host_txq*        txq          = &fore200e->host_txq;
1561     struct host_txq_entry*  entry;
1562     struct tpd*             tpd;
1563     struct tpd_haddr        tpd_haddr;
1564     int                     retry        = CONFIG_ATM_FORE200E_TX_RETRY;
1565     int                     tx_copy      = 0;
1566     int                     tx_len       = skb->len;
1567     u32*                    cell_header  = NULL;
1568     unsigned char*          skb_data;
1569     int                     skb_len;
1570     unsigned char*          data;
1571     unsigned long           flags;
1572 
1573     ASSERT(vcc);
1574     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1575     ASSERT(fore200e);
1576     ASSERT(fore200e_vcc);
1577 
1578     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1579 	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1580 	dev_kfree_skb_any(skb);
1581 	return -EINVAL;
1582     }
1583 
1584 #ifdef FORE200E_52BYTE_AAL0_SDU
1585     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1586 	cell_header = (u32*) skb->data;
1587 	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
1588 	skb_len     = tx_len = skb->len  - 4;
1589 
1590 	DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1591     }
1592     else
1593 #endif
1594     {
1595 	skb_data = skb->data;
1596 	skb_len  = skb->len;
1597     }
1598 
1599     if (((unsigned long)skb_data) & 0x3) {
1600 
1601 	DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1602 	tx_copy = 1;
1603 	tx_len  = skb_len;
1604     }
1605 
1606     if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1607 
1608         /* this simply NUKES the PCA board */
1609 	DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1610 	tx_copy = 1;
1611 	tx_len  = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1612     }
1613 
1614     if (tx_copy) {
1615 	data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1616 	if (data == NULL) {
1617 	    if (vcc->pop) {
1618 		vcc->pop(vcc, skb);
1619 	    }
1620 	    else {
1621 		dev_kfree_skb_any(skb);
1622 	    }
1623 	    return -ENOMEM;
1624 	}
1625 
1626 	memcpy(data, skb_data, skb_len);
1627 	if (skb_len < tx_len)
1628 	    memset(data + skb_len, 0x00, tx_len - skb_len);
1629     }
1630     else {
1631 	data = skb_data;
1632     }
1633 
1634     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1635     ASSERT(vc_map->vcc == vcc);
1636 
1637   retry_here:
1638 
1639     spin_lock_irqsave(&fore200e->q_lock, flags);
1640 
1641     entry = &txq->host_entry[ txq->head ];
1642 
1643     if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1644 
1645 	/* try to free completed tx queue entries */
1646 	fore200e_tx_irq(fore200e);
1647 
1648 	if (*entry->status != STATUS_FREE) {
1649 
1650 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1651 
1652 	    /* retry once again? */
1653 	    if (--retry > 0) {
1654 		udelay(50);
1655 		goto retry_here;
1656 	    }
1657 
1658 	    atomic_inc(&vcc->stats->tx_err);
1659 
1660 	    fore200e->tx_sat++;
1661 	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1662 		    fore200e->name, fore200e->cp_queues->heartbeat);
1663 	    if (vcc->pop) {
1664 		vcc->pop(vcc, skb);
1665 	    }
1666 	    else {
1667 		dev_kfree_skb_any(skb);
1668 	    }
1669 
1670 	    if (tx_copy)
1671 		kfree(data);
1672 
1673 	    return -ENOBUFS;
1674 	}
1675     }
1676 
1677     entry->incarn = vc_map->incarn;
1678     entry->vc_map = vc_map;
1679     entry->skb    = skb;
1680     entry->data   = tx_copy ? data : NULL;
1681 
1682     tpd = entry->tpd;
1683     tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1684     tpd->tsd[ 0 ].length = tx_len;
1685 
1686     FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1687     txq->txing++;
1688 
1689     /* The dma_map call above implies a dma_sync so the device can use it,
1690      * thus no explicit dma_sync call is necessary here.
1691      */
1692 
1693     DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1694 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1695 	    tpd->tsd[0].length, skb_len);
1696 
1697     if (skb_len < fore200e_vcc->tx_min_pdu)
1698 	fore200e_vcc->tx_min_pdu = skb_len;
1699     if (skb_len > fore200e_vcc->tx_max_pdu)
1700 	fore200e_vcc->tx_max_pdu = skb_len;
1701     fore200e_vcc->tx_pdu++;
1702 
1703     /* set tx rate control information */
1704     tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1705     tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1706 
1707     if (cell_header) {
1708 	tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1709 	tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1710 	tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1711 	tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1712 	tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1713     }
1714     else {
1715 	/* set the ATM header, common to all cells conveying the PDU */
1716 	tpd->atm_header.clp = 0;
1717 	tpd->atm_header.plt = 0;
1718 	tpd->atm_header.vci = vcc->vci;
1719 	tpd->atm_header.vpi = vcc->vpi;
1720 	tpd->atm_header.gfc = 0;
1721     }
1722 
1723     tpd->spec.length = tx_len;
1724     tpd->spec.nseg   = 1;
1725     tpd->spec.aal    = fore200e_atm2fore_aal(vcc->qos.aal);
1726     tpd->spec.intr   = 1;
1727 
1728     tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);  /* size is expressed in 32 byte blocks */
1729     tpd_haddr.pad   = 0;
1730     tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;          /* shift the address, as we are in a bitfield */
1731 
1732     *entry->status = STATUS_PENDING;
1733     fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1734 
1735     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1736 
1737     return 0;
1738 }
1739 
1740 
1741 static int
1742 fore200e_getstats(struct fore200e* fore200e)
1743 {
1744     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1745     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1746     struct stats_opcode     opcode;
1747     int                     ok;
1748     u32                     stats_dma_addr;
1749 
1750     if (fore200e->stats == NULL) {
1751 	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1752 	if (fore200e->stats == NULL)
1753 	    return -ENOMEM;
1754     }
1755 
1756     stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1757 					    sizeof(struct stats), DMA_FROM_DEVICE);
1758 
1759     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1760 
1761     opcode.opcode = OPCODE_GET_STATS;
1762     opcode.pad    = 0;
1763 
1764     fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1765 
1766     *entry->status = STATUS_PENDING;
1767 
1768     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1769 
1770     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1771 
1772     *entry->status = STATUS_FREE;
1773 
1774     fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1775 
1776     if (ok == 0) {
1777 	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1778 	return -EIO;
1779     }
1780 
1781     return 0;
1782 }
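
/* The command handshake used above, and again by fore200e_set_oc3() below,
 * boils down to: take the host_cmdq entry at the current head, advance the
 * head, write any parameter DMA address into the cp resident entry, mark
 * the host status word STATUS_PENDING, then write the opcode word (the
 * write the cp actually reacts to), and finally poll the status word until
 * the cp flips it to STATUS_COMPLETE or the poll times out.
 */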
1783 
1784 
1785 static int
1786 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1787 {
1788     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1789 
1790     DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1791 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1792 
1793     return -EINVAL;
1794 }
1795 
1796 
1797 static int
1798 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1799 {
1800     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1801 
1802     DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1803 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1804 
1805     return -EINVAL;
1806 }
1807 
1808 
1809 #if 0 /* currently unused */
1810 static int
1811 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1812 {
1813     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1814     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1815     struct oc3_opcode       opcode;
1816     int                     ok;
1817     u32                     oc3_regs_dma_addr;
1818 
1819     oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1820 
1821     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1822 
1823     opcode.opcode = OPCODE_GET_OC3;
1824     opcode.reg    = 0;
1825     opcode.value  = 0;
1826     opcode.mask   = 0;
1827 
1828     fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1829 
1830     *entry->status = STATUS_PENDING;
1831 
1832     fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1833 
1834     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1835 
1836     *entry->status = STATUS_FREE;
1837 
1838     fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1839 
1840     if (ok == 0) {
1841 	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1842 	return -EIO;
1843     }
1844 
1845     return 0;
1846 }
1847 #endif
1848 
1849 
1850 static int
1851 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1852 {
1853     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1854     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1855     struct oc3_opcode       opcode;
1856     int                     ok;
1857 
1858     DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1859 
1860     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1861 
1862     opcode.opcode = OPCODE_SET_OC3;
1863     opcode.reg    = reg;
1864     opcode.value  = value;
1865     opcode.mask   = mask;
1866 
1867     fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1868 
1869     *entry->status = STATUS_PENDING;
1870 
1871     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1872 
1873     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1874 
1875     *entry->status = STATUS_FREE;
1876 
1877     if (ok == 0) {
1878 	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1879 	return -EIO;
1880     }
1881 
1882     return 0;
1883 }
1884 
1885 
1886 static int
1887 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1888 {
1889     u32 mct_value, mct_mask;
1890     int error;
1891 
1892     if (!capable(CAP_NET_ADMIN))
1893 	return -EPERM;
1894 
1895     switch (loop_mode) {
1896 
1897     case ATM_LM_NONE:
1898 	mct_value = 0;
1899 	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
1900 	break;
1901 
1902     case ATM_LM_LOC_PHY:
1903 	mct_value = mct_mask = SUNI_MCT_DLE;
1904 	break;
1905 
1906     case ATM_LM_RMT_PHY:
1907 	mct_value = mct_mask = SUNI_MCT_LLE;
1908 	break;
1909 
1910     default:
1911 	return -EINVAL;
1912     }
1913 
1914     error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1915     if (error == 0)
1916 	fore200e->loop_mode = loop_mode;
1917 
1918     return error;
1919 }
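
/* As the bit names in suni.h suggest, ATM_LM_LOC_PHY selects the SUNI
 * diagnostic loopback (SUNI_MCT_DLE, transmit looped back to receive
 * inside the PHY) while ATM_LM_RMT_PHY selects the line loopback
 * (SUNI_MCT_LLE, the received line signal looped back toward the network);
 * both bits live in the SUNI master control register selected by SUNI_MCT.
 */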
1920 
1921 
1922 static int
1923 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1924 {
1925     struct sonet_stats tmp;
1926 
1927     if (fore200e_getstats(fore200e) < 0)
1928 	return -EIO;
1929 
1930     tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1931     tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1932     tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1933     tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1934     tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1935     tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1936     tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1937     tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
1938 	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1939 	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1940     tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)     +
1941 	              be32_to_cpu(fore200e->stats->aal34.cells_received)    +
1942 	              be32_to_cpu(fore200e->stats->aal5.cells_received);
1943 
1944     if (arg)
1945 	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1946 
1947     return 0;
1948 }
1949 
1950 
1951 static int
1952 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1953 {
1954     struct fore200e* fore200e = FORE200E_DEV(dev);
1955 
1956     DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1957 
1958     switch (cmd) {
1959 
1960     case SONET_GETSTAT:
1961 	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1962 
1963     case SONET_GETDIAG:
1964 	return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1965 
1966     case ATM_SETLOOP:
1967 	return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1968 
1969     case ATM_GETLOOP:
1970 	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1971 
1972     case ATM_QUERYLOOP:
1973 	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1974     }
1975 
1976     return -ENOSYS; /* not implemented */
1977 }
1978 
1979 
1980 static int
1981 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1982 {
1983     struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1984     struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);
1985 
1986     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1987 	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1988 	return -EINVAL;
1989     }
1990 
1991     DPRINTK(2, "change_qos %d.%d.%d, "
1992 	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1993 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1994 	    "available_cell_rate = %u\n",
1995 	    vcc->itf, vcc->vpi, vcc->vci,
1996 	    fore200e_traffic_class[ qos->txtp.traffic_class ],
1997 	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1998 	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
1999 	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2000 	    flags, fore200e->available_cell_rate);
2001 
2002     if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2003 
2004 	mutex_lock(&fore200e->rate_mtx);
2005 	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2006 	    mutex_unlock(&fore200e->rate_mtx);
2007 	    return -EAGAIN;
2008 	}
2009 
2010 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2011 	fore200e->available_cell_rate -= qos->txtp.max_pcr;
2012 
2013 	mutex_unlock(&fore200e->rate_mtx);
2014 
2015 	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2016 
2017 	/* update rate control parameters */
2018 	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2019 
2020 	set_bit(ATM_VF_HASQOS, &vcc->flags);
2021 
2022 	return 0;
2023     }
2024 
2025     return -EINVAL;
2026 }
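
/* Worked example (illustrative figures): starting from ATM_OC3_PCR, about
 * 353207 cells/s, a CBR VC already holding a 10000 cells/s reservation
 * leaves available_cell_rate at roughly 343207.  Raising its max_pcr to
 * 50000 first checks 343207 + 10000 >= 50000, then returns the old
 * reservation and takes the new one, leaving about 303207 cells/s for
 * other CBR connections.
 */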
2027 
2028 
2029 static int __devinit
2030 fore200e_irq_request(struct fore200e* fore200e)
2031 {
2032     if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2033 
2034 	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2035 	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
2036 	return -EBUSY;
2037     }
2038 
2039     printk(FORE200E "IRQ %s reserved for device %s\n",
2040 	   fore200e_irq_itoa(fore200e->irq), fore200e->name);
2041 
2042 #ifdef FORE200E_USE_TASKLET
2043     tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2044     tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2045 #endif
2046 
2047     fore200e->state = FORE200E_STATE_IRQ;
2048     return 0;
2049 }
2050 
2051 
2052 static int __devinit
2053 fore200e_get_esi(struct fore200e* fore200e)
2054 {
2055     struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2056     int ok, i;
2057 
2058     if (!prom)
2059 	return -ENOMEM;
2060 
2061     ok = fore200e->bus->prom_read(fore200e, prom);
2062     if (ok < 0) {
2063 	kfree(prom);
2064 	return -EBUSY;
2065     }
2066 
2067     printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2068 	   fore200e->name,
2069 	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
2070 	   prom->serial_number & 0xFFFF,
2071 	   prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2072 	   prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2073 
2074     for (i = 0; i < ESI_LEN; i++) {
2075 	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2076     }
2077 
2078     kfree(prom);
2079 
2080     return 0;
2081 }
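
/* The '@' offset above simply maps the low byte of hw_revision to a letter
 * for display: a value of 1 prints as 'A', 2 as 'B', and so on.
 */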
2082 
2083 
2084 static int __devinit
2085 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2086 {
2087     int scheme, magn, nbr, size, i;
2088 
2089     struct host_bsq* bsq;
2090     struct buffer*   buffer;
2091 
2092     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2093 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2094 
2095 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2096 
2097 	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
2098 	    size = fore200e_rx_buf_size[ scheme ][ magn ];
2099 
2100 	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2101 
2102 	    /* allocate the array of receive buffers */
2103 	    buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2104 
2105 	    if (buffer == NULL)
2106 		return -ENOMEM;
2107 
2108 	    bsq->freebuf = NULL;
2109 
2110 	    for (i = 0; i < nbr; i++) {
2111 
2112 		buffer[ i ].scheme = scheme;
2113 		buffer[ i ].magn   = magn;
2114 #ifdef FORE200E_BSQ_DEBUG
2115 		buffer[ i ].index  = i;
2116 		buffer[ i ].supplied = 0;
2117 #endif
2118 
2119 		/* allocate the receive buffer body */
2120 		if (fore200e_chunk_alloc(fore200e,
2121 					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2122 					 DMA_FROM_DEVICE) < 0) {
2123 
2124 		    while (i > 0)
2125 			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2126 		    kfree(buffer);
2127 
2128 		    return -ENOMEM;
2129 		}
2130 
2131 		/* insert the buffer into the free buffer list */
2132 		buffer[ i ].next = bsq->freebuf;
2133 		bsq->freebuf = &buffer[ i ];
2134 	    }
2135 	    /* all the buffers are free, initially */
2136 	    bsq->freebuf_count = nbr;
2137 
2138 #ifdef FORE200E_BSQ_DEBUG
2139 	    bsq_audit(3, bsq, scheme, magn);
2140 #endif
2141 	}
2142     }
2143 
2144     fore200e->state = FORE200E_STATE_ALLOC_BUF;
2145     return 0;
2146 }
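
/* Illustrative sketch, not part of the driver: the buffers allocated above
 * are threaded onto a per-queue LIFO free list, so a consumer (such as the
 * buffer supply path) could pop one as follows, assuming the caller holds
 * whatever serialization the rest of the driver uses around the queue.
 * fore200e_freebuf_pop() is a hypothetical helper name.
 */
#if 0
static struct buffer*
fore200e_freebuf_pop(struct host_bsq* bsq)
{
    struct buffer* buffer = bsq->freebuf;

    if (buffer) {
	bsq->freebuf = buffer->next;
	bsq->freebuf_count--;
    }
    return buffer;
}
#endif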
2147 
2148 
2149 static int __devinit
2150 fore200e_init_bs_queue(struct fore200e* fore200e)
2151 {
2152     int scheme, magn, i;
2153 
2154     struct host_bsq*     bsq;
2155     struct cp_bsq_entry __iomem * cp_entry;
2156 
2157     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2158 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2159 
2160 	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2161 
2162 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2163 
2164 	    /* allocate and align the array of status words */
2165 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2166 					       &bsq->status,
2167 					       sizeof(enum status),
2168 					       QUEUE_SIZE_BS,
2169 					       fore200e->bus->status_alignment) < 0) {
2170 		return -ENOMEM;
2171 	    }
2172 
2173 	    /* allocate and align the array of receive buffer descriptors */
2174 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2175 					       &bsq->rbd_block,
2176 					       sizeof(struct rbd_block),
2177 					       QUEUE_SIZE_BS,
2178 					       fore200e->bus->descr_alignment) < 0) {
2179 
2180 		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2181 		return -ENOMEM;
2182 	    }
2183 
2184 	    /* get the base address of the cp resident buffer supply queue entries */
2185 	    cp_entry = fore200e->virt_base +
2186 		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2187 
2188 	    /* fill the host resident and cp resident buffer supply queue entries */
2189 	    for (i = 0; i < QUEUE_SIZE_BS; i++) {
2190 
2191 		bsq->host_entry[ i ].status =
2192 		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2193 	        bsq->host_entry[ i ].rbd_block =
2194 		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2195 		bsq->host_entry[ i ].rbd_block_dma =
2196 		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2197 		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2198 
2199 		*bsq->host_entry[ i ].status = STATUS_FREE;
2200 
2201 		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2202 				     &cp_entry[ i ].status_haddr);
2203 	    }
2204 	}
2205     }
2206 
2207     fore200e->state = FORE200E_STATE_INIT_BSQ;
2208     return 0;
2209 }
2210 
2211 
2212 static int __devinit
2213 fore200e_init_rx_queue(struct fore200e* fore200e)
2214 {
2215     struct host_rxq*     rxq =  &fore200e->host_rxq;
2216     struct cp_rxq_entry __iomem * cp_entry;
2217     int i;
2218 
2219     DPRINTK(2, "receive queue is being initialized\n");
2220 
2221     /* allocate and align the array of status words */
2222     if (fore200e->bus->dma_chunk_alloc(fore200e,
2223 				       &rxq->status,
2224 				       sizeof(enum status),
2225 				       QUEUE_SIZE_RX,
2226 				       fore200e->bus->status_alignment) < 0) {
2227 	return -ENOMEM;
2228     }
2229 
2230     /* allocate and align the array of receive PDU descriptors */
2231     if (fore200e->bus->dma_chunk_alloc(fore200e,
2232 				       &rxq->rpd,
2233 				       sizeof(struct rpd),
2234 				       QUEUE_SIZE_RX,
2235 				       fore200e->bus->descr_alignment) < 0) {
2236 
2237 	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2238 	return -ENOMEM;
2239     }
2240 
2241     /* get the base address of the cp resident rx queue entries */
2242     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2243 
2244     /* fill the host resident and cp resident rx entries */
2245     for (i=0; i < QUEUE_SIZE_RX; i++) {
2246 
2247 	rxq->host_entry[ i ].status =
2248 	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2249 	rxq->host_entry[ i ].rpd =
2250 	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2251 	rxq->host_entry[ i ].rpd_dma =
2252 	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2253 	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2254 
2255 	*rxq->host_entry[ i ].status = STATUS_FREE;
2256 
2257 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2258 			     &cp_entry[ i ].status_haddr);
2259 
2260 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2261 			     &cp_entry[ i ].rpd_haddr);
2262     }
2263 
2264     /* set the head entry of the queue */
2265     rxq->head = 0;
2266 
2267     fore200e->state = FORE200E_STATE_INIT_RXQ;
2268     return 0;
2269 }
2270 
2271 
2272 static int __devinit
2273 fore200e_init_tx_queue(struct fore200e* fore200e)
2274 {
2275     struct host_txq*     txq =  &fore200e->host_txq;
2276     struct cp_txq_entry __iomem * cp_entry;
2277     int i;
2278 
2279     DPRINTK(2, "transmit queue is being initialized\n");
2280 
2281     /* allocate and align the array of status words */
2282     if (fore200e->bus->dma_chunk_alloc(fore200e,
2283 				       &txq->status,
2284 				       sizeof(enum status),
2285 				       QUEUE_SIZE_TX,
2286 				       fore200e->bus->status_alignment) < 0) {
2287 	return -ENOMEM;
2288     }
2289 
2290     /* allocate and align the array of transmit PDU descriptors */
2291     if (fore200e->bus->dma_chunk_alloc(fore200e,
2292 				       &txq->tpd,
2293 				       sizeof(struct tpd),
2294 				       QUEUE_SIZE_TX,
2295 				       fore200e->bus->descr_alignment) < 0) {
2296 
2297 	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2298 	return -ENOMEM;
2299     }
2300 
2301     /* get the base address of the cp resident tx queue entries */
2302     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2303 
2304     /* fill the host resident and cp resident tx entries */
2305     for (i=0; i < QUEUE_SIZE_TX; i++) {
2306 
2307 	txq->host_entry[ i ].status =
2308 	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
2309 	txq->host_entry[ i ].tpd =
2310 	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2311 	txq->host_entry[ i ].tpd_dma  =
2312                              FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2313 	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2314 
2315 	*txq->host_entry[ i ].status = STATUS_FREE;
2316 
2317 	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2318 			     &cp_entry[ i ].status_haddr);
2319 
2320         /* although there is a one-to-one mapping of tx queue entries and tpds,
2321 	   we do not write the DMA (physical) base address of each tpd into the
2322 	   related cp resident entry here, because the cp relies on that write
2323 	   operation to detect that a new pdu has been submitted for tx */
2324     }
2325 
2326     /* set the head and tail entries of the queue */
2327     txq->head = 0;
2328     txq->tail = 0;
2329 
2330     fore200e->state = FORE200E_STATE_INIT_TXQ;
2331     return 0;
2332 }
2333 
2334 
2335 static int __devinit
2336 fore200e_init_cmd_queue(struct fore200e* fore200e)
2337 {
2338     struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
2339     struct cp_cmdq_entry __iomem * cp_entry;
2340     int i;
2341 
2342     DPRINTK(2, "command queue is being initialized\n");
2343 
2344     /* allocate and align the array of status words */
2345     if (fore200e->bus->dma_chunk_alloc(fore200e,
2346 				       &cmdq->status,
2347 				       sizeof(enum status),
2348 				       QUEUE_SIZE_CMD,
2349 				       fore200e->bus->status_alignment) < 0) {
2350 	return -ENOMEM;
2351     }
2352 
2353     /* get the base address of the cp resident cmd queue entries */
2354     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2355 
2356     /* fill the host resident and cp resident cmd entries */
2357     for (i=0; i < QUEUE_SIZE_CMD; i++) {
2358 
2359 	cmdq->host_entry[ i ].status   =
2360                               FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2361 	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2362 
2363 	*cmdq->host_entry[ i ].status = STATUS_FREE;
2364 
2365 	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2366                              &cp_entry[ i ].status_haddr);
2367     }
2368 
2369     /* set the head entry of the queue */
2370     cmdq->head = 0;
2371 
2372     fore200e->state = FORE200E_STATE_INIT_CMDQ;
2373     return 0;
2374 }
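
/* The four *_init_*_queue routines above share one layout: each queue
 * entry owns a host resident status word in DMA-able memory, and the bus
 * address of that word is handed to the cp through the status_haddr field
 * of the matching cp resident entry, so completion can be signalled by the
 * cp writing into host memory instead of the host having to poll board
 * memory.
 */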
2375 
2376 
2377 static void __devinit
2378 fore200e_param_bs_queue(struct fore200e* fore200e,
2379 			enum buffer_scheme scheme, enum buffer_magn magn,
2380 			int queue_length, int pool_size, int supply_blksize)
2381 {
2382     struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2383 
2384     fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
2385     fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2386     fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
2387     fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
2388 }
2389 
2390 
2391 static int __devinit
2392 fore200e_initialize(struct fore200e* fore200e)
2393 {
2394     struct cp_queues __iomem * cpq;
2395     int               ok, scheme, magn;
2396 
2397     DPRINTK(2, "device %s being initialized\n", fore200e->name);
2398 
2399     mutex_init(&fore200e->rate_mtx);
2400     spin_lock_init(&fore200e->q_lock);
2401 
2402     cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2403 
2404     /* enable cp to host interrupts */
2405     fore200e->bus->write(1, &cpq->imask);
2406 
2407     if (fore200e->bus->irq_enable)
2408 	fore200e->bus->irq_enable(fore200e);
2409 
2410     fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2411 
2412     fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2413     fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
2414     fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);
2415 
2416     fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
2417     fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);
2418 
2419     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2420 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2421 	    fore200e_param_bs_queue(fore200e, scheme, magn,
2422 				    QUEUE_SIZE_BS,
2423 				    fore200e_rx_buf_nbr[ scheme ][ magn ],
2424 				    RBD_BLK_SIZE);
2425 
2426     /* issue the initialize command */
2427     fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
2428     fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2429 
2430     ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2431     if (ok == 0) {
2432 	printk(FORE200E "device %s initialization failed\n", fore200e->name);
2433 	return -ENODEV;
2434     }
2435 
2436     printk(FORE200E "device %s initialized\n", fore200e->name);
2437 
2438     fore200e->state = FORE200E_STATE_INITIALIZE;
2439     return 0;
2440 }
2441 
2442 
2443 static void __devinit
2444 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2445 {
2446     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2447 
2448 #if 0
2449     printk("%c", c);
2450 #endif
2451     fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2452 }
2453 
2454 
2455 static int __devinit
2456 fore200e_monitor_getc(struct fore200e* fore200e)
2457 {
2458     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2459     unsigned long      timeout = jiffies + msecs_to_jiffies(50);
2460     int                c;
2461 
2462     while (time_before(jiffies, timeout)) {
2463 
2464 	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2465 
2466 	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2467 
2468 	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2469 #if 0
2470 	    printk("%c", c & 0xFF);
2471 #endif
2472 	    return c & 0xFF;
2473 	}
2474     }
2475 
2476     return -1;
2477 }
2478 
2479 
2480 static void __devinit
2481 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2482 {
2483     while (*str) {
2484 
2485 	/* the i960 monitor doesn't accept any new character if it has something to say */
2486 	while (fore200e_monitor_getc(fore200e) >= 0);
2487 
2488 	fore200e_monitor_putc(fore200e, *str++);
2489     }
2490 
2491     while (fore200e_monitor_getc(fore200e) >= 0);
2492 }
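
/* Illustrative sketch, not part of the driver: the soft UART getc above
 * can equally be used to collect whatever the i960 monitor prints back;
 * fore200e_monitor_drain() is a hypothetical helper name.
 */
#if 0
static void
fore200e_monitor_drain(struct fore200e* fore200e, char* buf, int size)
{
    int i = 0, c;

    if (size <= 0)
	return;

    while (i < size - 1 && (c = fore200e_monitor_getc(fore200e)) >= 0)
	buf[ i++ ] = c;

    buf[ i ] = '\0';
}
#endif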
2493 
2494 #ifdef __LITTLE_ENDIAN
2495 #define FW_EXT ".bin"
2496 #else
2497 #define FW_EXT "_ecd.bin2"
2498 #endif
2499 
2500 static int __devinit
2501 fore200e_load_and_start_fw(struct fore200e* fore200e)
2502 {
2503     const struct firmware *firmware;
2504     struct device *device;
2505     struct fw_header *fw_header;
2506     const __le32 *fw_data;
2507     u32 fw_size;
2508     u32 __iomem *load_addr;
2509     char buf[48];
2510     int err = -ENODEV;
2511 
2512     if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
2513 	device = &((struct pci_dev *) fore200e->bus_dev)->dev;
2514 #ifdef CONFIG_SBUS
2515     else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
2516 	device = &((struct of_device *) fore200e->bus_dev)->dev;
2517 #endif
2518     else
2519 	return err;
2520 
2521     sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2522     if ((err = request_firmware(&firmware, buf, device)) < 0) {
2523 	printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2524 	return err;
2525     }
2526 
2527     fw_data = (__le32 *) firmware->data;
2528     fw_size = firmware->size / sizeof(u32);
2529     fw_header = (struct fw_header *) firmware->data;
2530     load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2531 
2532     DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2533 	    fore200e->name, load_addr, fw_size);
2534 
2535     if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2536 	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2537 	goto release;
2538     }
2539 
2540     for (; fw_size--; fw_data++, load_addr++)
2541 	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2542 
2543     DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2544 
2545 #if defined(__sparc_v9__)
2546     /* reported to be required by SBA cards on some sparc64 hosts */
2547     fore200e_spin(100);
2548 #endif
2549 
2550     sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2551     fore200e_monitor_puts(fore200e, buf);
2552 
2553     if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2554 	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2555 	goto release;
2556     }
2557 
2558     printk(FORE200E "device %s firmware started\n", fore200e->name);
2559 
2560     fore200e->state = FORE200E_STATE_START_FW;
2561     err = 0;
2562 
2563 release:
2564     release_firmware(firmware);
2565     return err;
2566 }
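
/* In short: the image begins with a small header whose magic is checked
 * above, whose load_offset tells where in board memory the 32 bit
 * little-endian payload words get copied one by one through bus->write,
 * and whose start_offset is the address handed to the i960 monitor via
 * the "go" command that actually starts the cp firmware.
 */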
2567 
2568 
2569 static int __devinit
2570 fore200e_register(struct fore200e* fore200e)
2571 {
2572     struct atm_dev* atm_dev;
2573 
2574     DPRINTK(2, "device %s being registered\n", fore200e->name);
2575 
2576     atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2577       NULL);
2578     if (atm_dev == NULL) {
2579 	printk(FORE200E "unable to register device %s\n", fore200e->name);
2580 	return -ENODEV;
2581     }
2582 
2583     atm_dev->dev_data = fore200e;
2584     fore200e->atm_dev = atm_dev;
2585 
2586     atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2587     atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2588 
2589     fore200e->available_cell_rate = ATM_OC3_PCR;
2590 
2591     fore200e->state = FORE200E_STATE_REGISTER;
2592     return 0;
2593 }
2594 
2595 
2596 static int __devinit
2597 fore200e_init(struct fore200e* fore200e)
2598 {
2599     if (fore200e_register(fore200e) < 0)
2600 	return -ENODEV;
2601 
2602     if (fore200e->bus->configure(fore200e) < 0)
2603 	return -ENODEV;
2604 
2605     if (fore200e->bus->map(fore200e) < 0)
2606 	return -ENODEV;
2607 
2608     if (fore200e_reset(fore200e, 1) < 0)
2609 	return -ENODEV;
2610 
2611     if (fore200e_load_and_start_fw(fore200e) < 0)
2612 	return -ENODEV;
2613 
2614     if (fore200e_initialize(fore200e) < 0)
2615 	return -ENODEV;
2616 
2617     if (fore200e_init_cmd_queue(fore200e) < 0)
2618 	return -ENOMEM;
2619 
2620     if (fore200e_init_tx_queue(fore200e) < 0)
2621 	return -ENOMEM;
2622 
2623     if (fore200e_init_rx_queue(fore200e) < 0)
2624 	return -ENOMEM;
2625 
2626     if (fore200e_init_bs_queue(fore200e) < 0)
2627 	return -ENOMEM;
2628 
2629     if (fore200e_alloc_rx_buf(fore200e) < 0)
2630 	return -ENOMEM;
2631 
2632     if (fore200e_get_esi(fore200e) < 0)
2633 	return -EIO;
2634 
2635     if (fore200e_irq_request(fore200e) < 0)
2636 	return -EBUSY;
2637 
2638     fore200e_supply(fore200e);
2639 
2640     /* all done, board initialization is now complete */
2641     fore200e->state = FORE200E_STATE_COMPLETE;
2642     return 0;
2643 }
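
/* Each step above records its progress in fore200e->state, so that
 * fore200e_shutdown(), called by the probe routines below whenever one of
 * these steps fails, knows how far initialization got and only unwinds
 * what was actually set up.
 */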
2644 
2645 #ifdef CONFIG_SBUS
2646 static int __devinit fore200e_sba_probe(struct of_device *op,
2647 					const struct of_device_id *match)
2648 {
2649 	const struct fore200e_bus *bus = match->data;
2650 	struct fore200e *fore200e;
2651 	static int index = 0;
2652 	int err;
2653 
2654 	fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2655 	if (!fore200e)
2656 		return -ENOMEM;
2657 
2658 	fore200e->bus = bus;
2659 	fore200e->bus_dev = op;
2660 	fore200e->irq = op->irqs[0];
2661 	fore200e->phys_base = op->resource[0].start;
2662 
2663 	sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2664 
2665 	err = fore200e_init(fore200e);
2666 	if (err < 0) {
2667 		fore200e_shutdown(fore200e);
2668 		kfree(fore200e);
2669 		return err;
2670 	}
2671 
2672 	index++;
2673 	dev_set_drvdata(&op->dev, fore200e);
2674 
2675 	return 0;
2676 }
2677 
2678 static int __devexit fore200e_sba_remove(struct of_device *op)
2679 {
2680 	struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2681 
2682 	fore200e_shutdown(fore200e);
2683 	kfree(fore200e);
2684 
2685 	return 0;
2686 }
2687 
2688 static const struct of_device_id fore200e_sba_match[] = {
2689 	{
2690 		.name = SBA200E_PROM_NAME,
2691 		.data = (void *) &fore200e_bus[1],
2692 	},
2693 	{},
2694 };
2695 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2696 
2697 static struct of_platform_driver fore200e_sba_driver = {
2698 	.name		= "fore_200e",
2699 	.match_table	= fore200e_sba_match,
2700 	.probe		= fore200e_sba_probe,
2701 	.remove		= __devexit_p(fore200e_sba_remove),
2702 };
2703 #endif
2704 
2705 #ifdef CONFIG_PCI
2706 static int __devinit
2707 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2708 {
2709     const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2710     struct fore200e* fore200e;
2711     int err = 0;
2712     static int index = 0;
2713 
2714     if (pci_enable_device(pci_dev)) {
2715 	err = -EINVAL;
2716 	goto out;
2717     }
2718 
2719     fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2720     if (fore200e == NULL) {
2721 	err = -ENOMEM;
2722 	goto out_disable;
2723     }
2724 
2725     fore200e->bus       = bus;
2726     fore200e->bus_dev   = pci_dev;
2727     fore200e->irq       = pci_dev->irq;
2728     fore200e->phys_base = pci_resource_start(pci_dev, 0);
2729 
2731 
2732     pci_set_master(pci_dev);
2733 
2734     printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2735 	   fore200e->bus->model_name,
2736 	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2737 
2738     sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2739 
2740     err = fore200e_init(fore200e);
2741     if (err < 0) {
2742 	fore200e_shutdown(fore200e);
2743 	goto out_free;
2744     }
2745 
2746     ++index;
2747     pci_set_drvdata(pci_dev, fore200e);
2748 
2749 out:
2750     return err;
2751 
2752 out_free:
2753     kfree(fore200e);
2754 out_disable:
2755     pci_disable_device(pci_dev);
2756     goto out;
2757 }
2758 
2759 
2760 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2761 {
2762     struct fore200e *fore200e;
2763 
2764     fore200e = pci_get_drvdata(pci_dev);
2765 
2766     fore200e_shutdown(fore200e);
2767     kfree(fore200e);
2768     pci_disable_device(pci_dev);
2769 }
2770 
2771 
2772 static struct pci_device_id fore200e_pca_tbl[] = {
2773     { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2774       0, 0, (unsigned long) &fore200e_bus[0] },
2775     { 0, }
2776 };
2777 
2778 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2779 
2780 static struct pci_driver fore200e_pca_driver = {
2781     .name =     "fore_200e",
2782     .probe =    fore200e_pca_detect,
2783     .remove =   __devexit_p(fore200e_pca_remove_one),
2784     .id_table = fore200e_pca_tbl,
2785 };
2786 #endif
2787 
2788 static int __init fore200e_module_init(void)
2789 {
2790 	int err;
2791 
2792 	printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2793 
2794 #ifdef CONFIG_SBUS
2795 	err = of_register_driver(&fore200e_sba_driver, &of_bus_type);
2796 	if (err)
2797 		return err;
2798 #endif
2799 
2800 #ifdef CONFIG_PCI
2801 	err = pci_register_driver(&fore200e_pca_driver);
2802 #endif
2803 
2804 #ifdef CONFIG_SBUS
2805 	if (err)
2806 		of_unregister_driver(&fore200e_sba_driver);
2807 #endif
2808 
2809 	return err;
2810 }
2811 
2812 static void __exit fore200e_module_cleanup(void)
2813 {
2814 #ifdef CONFIG_PCI
2815 	pci_unregister_driver(&fore200e_pca_driver);
2816 #endif
2817 #ifdef CONFIG_SBUS
2818 	of_unregister_driver(&fore200e_sba_driver);
2819 #endif
2820 }
2821 
2822 static int
2823 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2824 {
2825     struct fore200e*     fore200e  = FORE200E_DEV(dev);
2826     struct fore200e_vcc* fore200e_vcc;
2827     struct atm_vcc*      vcc;
2828     int                  i, len, left = *pos;
2829     unsigned long        flags;
2830 
2831     if (!left--) {
2832 
2833 	if (fore200e_getstats(fore200e) < 0)
2834 	    return -EIO;
2835 
2836 	len = sprintf(page,"\n"
2837 		       " device:\n"
2838 		       "   internal name:\t\t%s\n", fore200e->name);
2839 
2840 	/* print bus-specific information */
2841 	if (fore200e->bus->proc_read)
2842 	    len += fore200e->bus->proc_read(fore200e, page + len);
2843 
2844 	len += sprintf(page + len,
2845 		"   interrupt line:\t\t%s\n"
2846 		"   physical base address:\t0x%p\n"
2847 		"   virtual base address:\t0x%p\n"
2848 		"   factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2849 		"   board serial number:\t\t%d\n\n",
2850 		fore200e_irq_itoa(fore200e->irq),
2851 		(void*)fore200e->phys_base,
2852 		fore200e->virt_base,
2853 		fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2854 		fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2855 		fore200e->esi[4] * 256 + fore200e->esi[5]);
2856 
2857 	return len;
2858     }
2859 
2860     if (!left--)
2861 	return sprintf(page,
2862 		       "   free small bufs, scheme 1:\t%d\n"
2863 		       "   free large bufs, scheme 1:\t%d\n"
2864 		       "   free small bufs, scheme 2:\t%d\n"
2865 		       "   free large bufs, scheme 2:\t%d\n",
2866 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2867 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2868 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2869 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2870 
2871     if (!left--) {
2872 	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2873 
2874 	len = sprintf(page,"\n\n"
2875 		      " cell processor:\n"
2876 		      "   heartbeat state:\t\t");
2877 
2878 	if (hb >> 16 != 0xDEAD)
2879 	    len += sprintf(page + len, "0x%08x\n", hb);
2880 	else
2881 	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2882 
2883 	return len;
2884     }
2885 
2886     if (!left--) {
2887 	static const char* media_name[] = {
2888 	    "unshielded twisted pair",
2889 	    "multimode optical fiber ST",
2890 	    "multimode optical fiber SC",
2891 	    "single-mode optical fiber ST",
2892 	    "single-mode optical fiber SC",
2893 	    "unknown"
2894 	};
2895 
2896 	static const char* oc3_mode[] = {
2897 	    "normal operation",
2898 	    "diagnostic loopback",
2899 	    "line loopback",
2900 	    "unknown"
2901 	};
2902 
2903 	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2904 	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2905 	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2906 	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2907 	u32 oc3_index;
2908 
2909 	if (media_index > 4)
2910 	    media_index = 5;
2911 
2912 	switch (fore200e->loop_mode) {
2913 	    case ATM_LM_NONE:    oc3_index = 0;
2914 		                 break;
2915 	    case ATM_LM_LOC_PHY: oc3_index = 1;
2916 		                 break;
2917 	    case ATM_LM_RMT_PHY: oc3_index = 2;
2918 		                 break;
2919 	    default:             oc3_index = 3;
2920 	}
2921 
2922 	return sprintf(page,
2923 		       "   firmware release:\t\t%d.%d.%d\n"
2924 		       "   monitor release:\t\t%d.%d\n"
2925 		       "   media type:\t\t\t%s\n"
2926 		       "   OC-3 revision:\t\t0x%x\n"
2927                        "   OC-3 mode:\t\t\t%s",
2928 		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
2929 		       mon960_release >> 16, mon960_release << 16 >> 16,
2930 		       media_name[ media_index ],
2931 		       oc3_revision,
2932 		       oc3_mode[ oc3_index ]);
2933     }
2934 
2935     if (!left--) {
2936 	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2937 
2938 	return sprintf(page,
2939 		       "\n\n"
2940 		       " monitor:\n"
2941 		       "   version number:\t\t%d\n"
2942 		       "   boot status word:\t\t0x%08x\n",
2943 		       fore200e->bus->read(&cp_monitor->mon_version),
2944 		       fore200e->bus->read(&cp_monitor->bstat));
2945     }
2946 
2947     if (!left--)
2948 	return sprintf(page,
2949 		       "\n"
2950 		       " device statistics:\n"
2951 		       "  4b5b:\n"
2952 		       "     crc_header_errors:\t\t%10u\n"
2953 		       "     framing_errors:\t\t%10u\n",
2954 		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2955 		       be32_to_cpu(fore200e->stats->phy.framing_errors));
2956 
2957     if (!left--)
2958 	return sprintf(page, "\n"
2959 		       "  OC-3:\n"
2960 		       "     section_bip8_errors:\t%10u\n"
2961 		       "     path_bip8_errors:\t\t%10u\n"
2962 		       "     line_bip24_errors:\t\t%10u\n"
2963 		       "     line_febe_errors:\t\t%10u\n"
2964 		       "     path_febe_errors:\t\t%10u\n"
2965 		       "     corr_hcs_errors:\t\t%10u\n"
2966 		       "     ucorr_hcs_errors:\t\t%10u\n",
2967 		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2968 		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2969 		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2970 		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2971 		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2972 		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2973 		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2974 
2975     if (!left--)
2976 	return sprintf(page,"\n"
2977 		       "   ATM:\t\t\t\t     cells\n"
2978 		       "     TX:\t\t\t%10u\n"
2979 		       "     RX:\t\t\t%10u\n"
2980 		       "     vpi out of range:\t\t%10u\n"
2981 		       "     vpi no conn:\t\t%10u\n"
2982 		       "     vci out of range:\t\t%10u\n"
2983 		       "     vci no conn:\t\t%10u\n",
2984 		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2985 		       be32_to_cpu(fore200e->stats->atm.cells_received),
2986 		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2987 		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2988 		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2989 		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2990 
2991     if (!left--)
2992 	return sprintf(page,"\n"
2993 		       "   AAL0:\t\t\t     cells\n"
2994 		       "     TX:\t\t\t%10u\n"
2995 		       "     RX:\t\t\t%10u\n"
2996 		       "     dropped:\t\t\t%10u\n",
2997 		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2998 		       be32_to_cpu(fore200e->stats->aal0.cells_received),
2999 		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));
3000 
3001     if (!left--)
3002 	return sprintf(page,"\n"
3003 		       "   AAL3/4:\n"
3004 		       "     SAR sublayer:\t\t     cells\n"
3005 		       "       TX:\t\t\t%10u\n"
3006 		       "       RX:\t\t\t%10u\n"
3007 		       "       dropped:\t\t\t%10u\n"
3008 		       "       CRC errors:\t\t%10u\n"
3009 		       "       protocol errors:\t\t%10u\n\n"
3010 		       "     CS  sublayer:\t\t      PDUs\n"
3011 		       "       TX:\t\t\t%10u\n"
3012 		       "       RX:\t\t\t%10u\n"
3013 		       "       dropped:\t\t\t%10u\n"
3014 		       "       protocol errors:\t\t%10u\n",
3015 		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
3016 		       be32_to_cpu(fore200e->stats->aal34.cells_received),
3017 		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
3018 		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
3019 		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
3020 		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
3021 		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
3022 		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
3023 		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
3024 
3025     if (!left--)
3026 	return sprintf(page,"\n"
3027 		       "   AAL5:\n"
3028 		       "     SAR sublayer:\t\t     cells\n"
3029 		       "       TX:\t\t\t%10u\n"
3030 		       "       RX:\t\t\t%10u\n"
3031 		       "       dropped:\t\t\t%10u\n"
3032 		       "       congestions:\t\t%10u\n\n"
3033 		       "     CS  sublayer:\t\t      PDUs\n"
3034 		       "       TX:\t\t\t%10u\n"
3035 		       "       RX:\t\t\t%10u\n"
3036 		       "       dropped:\t\t\t%10u\n"
3037 		       "       CRC errors:\t\t%10u\n"
3038 		       "       protocol errors:\t\t%10u\n",
3039 		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
3040 		       be32_to_cpu(fore200e->stats->aal5.cells_received),
3041 		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
3042 		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
3043 		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
3044 		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
3045 		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
3046 		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
3047 		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
3048 
3049     if (!left--)
3050 	return sprintf(page,"\n"
3051 		       "   AUX:\t\t       allocation failures\n"
3052 		       "     small b1:\t\t\t%10u\n"
3053 		       "     large b1:\t\t\t%10u\n"
3054 		       "     small b2:\t\t\t%10u\n"
3055 		       "     large b2:\t\t\t%10u\n"
3056 		       "     RX PDUs:\t\t\t%10u\n"
3057 		       "     TX PDUs:\t\t\t%10lu\n",
3058 		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
3059 		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
3060 		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
3061 		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
3062 		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
3063 		       fore200e->tx_sat);
3064 
3065     if (!left--)
3066 	return sprintf(page,"\n"
3067 		       " receive carrier:\t\t\t%s\n",
3068 		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3069 
3070     if (!left--) {
3071         return sprintf(page,"\n"
3072 		       " VCCs:\n  address   VPI VCI   AAL "
3073 		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
3074     }
3075 
3076     for (i = 0; i < NBR_CONNECT; i++) {
3077 
3078 	vcc = fore200e->vc_map[i].vcc;
3079 
3080 	if (vcc == NULL)
3081 	    continue;
3082 
3083 	spin_lock_irqsave(&fore200e->q_lock, flags);
3084 
3085 	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3086 
3087 	    fore200e_vcc = FORE200E_VCC(vcc);
3088 	    ASSERT(fore200e_vcc);
3089 
3090 	    len = sprintf(page,
3091 			  "  %08x  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
3092 			  (u32)(unsigned long)vcc,
3093 			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3094 			  fore200e_vcc->tx_pdu,
3095 			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3096 			  fore200e_vcc->tx_max_pdu,
3097 			  fore200e_vcc->rx_pdu,
3098 			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3099 			  fore200e_vcc->rx_max_pdu);
3100 
3101 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
3102 	    return len;
3103 	}
3104 
3105 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
3106     }
3107 
3108     return 0;
3109 }
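
/* The text built above is exposed through the ATM proc interface,
 * typically as /proc/net/atm/<proc_name>:<itf> (e.g. pca200e:0).  Each
 * call renders a single block selected by *pos, which is why every block
 * is guarded by the left-- countdown.
 */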
3110 
3111 module_init(fore200e_module_init);
3112 module_exit(fore200e_module_cleanup);
3113 
3114 
3115 static const struct atmdev_ops fore200e_ops =
3116 {
3117 	.open       = fore200e_open,
3118 	.close      = fore200e_close,
3119 	.ioctl      = fore200e_ioctl,
3120 	.getsockopt = fore200e_getsockopt,
3121 	.setsockopt = fore200e_setsockopt,
3122 	.send       = fore200e_send,
3123 	.change_qos = fore200e_change_qos,
3124 	.proc_read  = fore200e_proc_read,
3125 	.owner      = THIS_MODULE
3126 };
3127 
3128 
3129 static const struct fore200e_bus fore200e_bus[] = {
3130 #ifdef CONFIG_PCI
3131     { "PCA-200E", "pca200e", 32, 4, 32,
3132       fore200e_pca_read,
3133       fore200e_pca_write,
3134       fore200e_pca_dma_map,
3135       fore200e_pca_dma_unmap,
3136       fore200e_pca_dma_sync_for_cpu,
3137       fore200e_pca_dma_sync_for_device,
3138       fore200e_pca_dma_chunk_alloc,
3139       fore200e_pca_dma_chunk_free,
3140       fore200e_pca_configure,
3141       fore200e_pca_map,
3142       fore200e_pca_reset,
3143       fore200e_pca_prom_read,
3144       fore200e_pca_unmap,
3145       NULL,
3146       fore200e_pca_irq_check,
3147       fore200e_pca_irq_ack,
3148       fore200e_pca_proc_read,
3149     },
3150 #endif
3151 #ifdef CONFIG_SBUS
3152     { "SBA-200E", "sba200e", 32, 64, 32,
3153       fore200e_sba_read,
3154       fore200e_sba_write,
3155       fore200e_sba_dma_map,
3156       fore200e_sba_dma_unmap,
3157       fore200e_sba_dma_sync_for_cpu,
3158       fore200e_sba_dma_sync_for_device,
3159       fore200e_sba_dma_chunk_alloc,
3160       fore200e_sba_dma_chunk_free,
3161       fore200e_sba_configure,
3162       fore200e_sba_map,
3163       fore200e_sba_reset,
3164       fore200e_sba_prom_read,
3165       fore200e_sba_unmap,
3166       fore200e_sba_irq_enable,
3167       fore200e_sba_irq_check,
3168       fore200e_sba_irq_ack,
3169       fore200e_sba_proc_read,
3170     },
3171 #endif
3172     {}
3173 };
3174 
3175 MODULE_LICENSE("GPL");
3176 #ifdef CONFIG_PCI
3177 #ifdef __LITTLE_ENDIAN
3178 MODULE_FIRMWARE("pca200e.bin");
3179 #else
3180 MODULE_FIRMWARE("pca200e_ecd.bin2");
3181 #endif
3182 #endif /* CONFIG_PCI */
3183 #ifdef CONFIG_SBUS
3184 MODULE_FIRMWARE("sba200e_ecd.bin2");
3185 #endif
3186