xref: /openbmc/linux/drivers/atm/fore200e.c (revision 1fa6ac37)
1 /*
2   A FORE Systems 200E-series driver for ATM on Linux.
3   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
4 
5   Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
6 
7   This driver simultaneously supports PCA-200E and SBA-200E adapters
8   on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
9 
10   This program is free software; you can redistribute it and/or modify
11   it under the terms of the GNU General Public License as published by
12   the Free Software Foundation; either version 2 of the License, or
13   (at your option) any later version.
14 
15   This program is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18   GNU General Public License for more details.
19 
20   You should have received a copy of the GNU General Public License
21   along with this program; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23 */
24 
25 
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
39 #include <linux/firmware.h>
40 #include <asm/io.h>
41 #include <asm/string.h>
42 #include <asm/page.h>
43 #include <asm/irq.h>
44 #include <asm/dma.h>
45 #include <asm/byteorder.h>
46 #include <asm/uaccess.h>
47 #include <asm/atomic.h>
48 
49 #ifdef CONFIG_SBUS
50 #include <linux/of.h>
51 #include <linux/of_device.h>
52 #include <asm/idprom.h>
53 #include <asm/openprom.h>
54 #include <asm/oplib.h>
55 #include <asm/pgtable.h>
56 #endif
57 
58 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
59 #define FORE200E_USE_TASKLET
60 #endif
61 
62 #if 0 /* enable the debugging code of the buffer supply queues */
63 #define FORE200E_BSQ_DEBUG
64 #endif
65 
66 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
67 #define FORE200E_52BYTE_AAL0_SDU
68 #endif
69 
70 #include "fore200e.h"
71 #include "suni.h"
72 
73 #define FORE200E_VERSION "0.3e"
74 
75 #define FORE200E         "fore200e: "
76 
77 #if 0 /* override .config */
78 #define CONFIG_ATM_FORE200E_DEBUG 1
79 #endif
80 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
81 #define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
82                                                   printk(FORE200E format, ##args); } while (0)
83 #else
84 #define DPRINTK(level, format, args...)  do {} while (0)
85 #endif
86 
87 
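/* return the byte offset to add to 'addr' so that it becomes aligned on 'alignment' bytes */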
88 #define FORE200E_ALIGN(addr, alignment) \
89         ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
90 
91 #define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))
92 
93 #define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
94 
95 #define FORE200E_NEXT_ENTRY(index, modulo)         ((index) = ((index) + 1) % (modulo))
96 
97 #if 1
98 #define ASSERT(expr)     if (!(expr)) { \
99 			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
100 				    __func__, __LINE__, #expr); \
101 			     panic(FORE200E "%s", __func__); \
102 			 }
103 #else
104 #define ASSERT(expr)     do {} while (0)
105 #endif
106 
107 
108 static const struct atmdev_ops   fore200e_ops;
109 static const struct fore200e_bus fore200e_bus[];
110 
111 static LIST_HEAD(fore200e_boards);
112 
113 
114 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
115 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
116 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
117 
118 
119 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
120     { BUFFER_S1_NBR, BUFFER_L1_NBR },
121     { BUFFER_S2_NBR, BUFFER_L2_NBR }
122 };
123 
124 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
125     { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
126     { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
127 };
128 
129 
130 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
131 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
132 #endif
133 
134 
135 #if 0 /* currently unused */
136 static int
137 fore200e_fore2atm_aal(enum fore200e_aal aal)
138 {
139     switch(aal) {
140     case FORE200E_AAL0:  return ATM_AAL0;
141     case FORE200E_AAL34: return ATM_AAL34;
142     case FORE200E_AAL5:  return ATM_AAL5;
143     }
144 
145     return -EINVAL;
146 }
147 #endif
148 
149 
150 static enum fore200e_aal
151 fore200e_atm2fore_aal(int aal)
152 {
153     switch(aal) {
154     case ATM_AAL0:  return FORE200E_AAL0;
155     case ATM_AAL34: return FORE200E_AAL34;
156     case ATM_AAL1:
157     case ATM_AAL2:
158     case ATM_AAL5:  return FORE200E_AAL5;
159     }
160 
161     return -EINVAL;
162 }
163 
164 
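/* format an IRQ number as a printable string (shared static buffer, not re-entrant) */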
165 static char*
166 fore200e_irq_itoa(int irq)
167 {
168     static char str[8];
169     sprintf(str, "%d", irq);
170     return str;
171 }
172 
173 
174 /* allocate and align a chunk of memory intended to hold the data being exchanged
175    between the driver and the adapter (using streaming DVMA) */
176 
177 static int
178 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
179 {
180     unsigned long offset = 0;
181 
182     if (alignment <= sizeof(int))
183 	alignment = 0;
184 
185     chunk->alloc_size = size + alignment;
186     chunk->align_size = size;
187     chunk->direction  = direction;
188 
189     chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
190     if (chunk->alloc_addr == NULL)
191 	return -ENOMEM;
192 
193     if (alignment > 0)
194 	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
195 
196     chunk->align_addr = chunk->alloc_addr + offset;
197 
198     chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
199 
200     return 0;
201 }
202 
203 
204 /* free a chunk of memory */
205 
206 static void
207 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
208 {
209     fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
210 
211     kfree(chunk->alloc_addr);
212 }
213 
214 
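/* busy-wait for the requested number of milliseconds without sleeping */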
215 static void
216 fore200e_spin(int msecs)
217 {
218     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
219     while (time_before(jiffies, timeout));
220 }
221 
222 
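/* poll a host-resident status word until it equals 'val', an error bit is set, or the timeout expires */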
223 static int
224 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
225 {
226     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
227     int           ok;
228 
229     mb();
230     do {
231 	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
232 	    break;
233 
234     } while (time_before(jiffies, timeout));
235 
236 #if 1
237     if (!ok) {
238 	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
239 	       *addr, val);
240     }
241 #endif
242 
243     return ok;
244 }
245 
246 
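/* like fore200e_poll(), but reads a device register through the bus-specific accessor */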
247 static int
248 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
249 {
250     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
251     int           ok;
252 
253     do {
254 	if ((ok = (fore200e->bus->read(addr) == val)))
255 	    break;
256 
257     } while (time_before(jiffies, timeout));
258 
259 #if 1
260     if (!ok) {
261 	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
262 	       fore200e->bus->read(addr), val);
263     }
264 #endif
265 
266     return ok;
267 }
268 
269 
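/* release the data chunks of every rx buffer, across all buffer schemes and magnitudes */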
270 static void
271 fore200e_free_rx_buf(struct fore200e* fore200e)
272 {
273     int scheme, magn, nbr;
274     struct buffer* buffer;
275 
276     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
277 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
278 
279 	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
280 
281 		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
282 
283 		    struct chunk* data = &buffer[ nbr ].data;
284 
285 		    if (data->alloc_addr != NULL)
286 			fore200e_chunk_free(fore200e, data);
287 		}
288 	    }
289 	}
290     }
291 }
292 
293 
294 static void
295 fore200e_uninit_bs_queue(struct fore200e* fore200e)
296 {
297     int scheme, magn;
298 
299     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
300 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
301 
302 	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
303 	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
304 
305 	    if (status->alloc_addr)
306 		fore200e->bus->dma_chunk_free(fore200e, status);
307 
308 	    if (rbd_block->alloc_addr)
309 		fore200e->bus->dma_chunk_free(fore200e, rbd_block);
310 	}
311     }
312 }
313 
314 
315 static int
316 fore200e_reset(struct fore200e* fore200e, int diag)
317 {
318     int ok;
319 
320     fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
321 
322     fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
323 
324     fore200e->bus->reset(fore200e);
325 
326     if (diag) {
327 	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
328 	if (ok == 0) {
329 
330 	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
331 	    return -ENODEV;
332 	}
333 
334 	printk(FORE200E "device %s self-test passed\n", fore200e->name);
335 
336 	fore200e->state = FORE200E_STATE_RESET;
337     }
338 
339     return 0;
340 }
341 
342 
343 static void
344 fore200e_shutdown(struct fore200e* fore200e)
345 {
346     printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
347 	   fore200e->name, fore200e->phys_base,
348 	   fore200e_irq_itoa(fore200e->irq));
349 
350     if (fore200e->state > FORE200E_STATE_RESET) {
351 	/* first, reset the board to prevent further interrupts or data transfers */
352 	fore200e_reset(fore200e, 0);
353     }
354 
355     /* then, release all allocated resources */
356     switch(fore200e->state) {
357 
358     case FORE200E_STATE_COMPLETE:
359 	kfree(fore200e->stats);
360 
361     case FORE200E_STATE_IRQ:
362 	free_irq(fore200e->irq, fore200e->atm_dev);
363 
364     case FORE200E_STATE_ALLOC_BUF:
365 	fore200e_free_rx_buf(fore200e);
366 
367     case FORE200E_STATE_INIT_BSQ:
368 	fore200e_uninit_bs_queue(fore200e);
369 
370     case FORE200E_STATE_INIT_RXQ:
371 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
372 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
373 
374     case FORE200E_STATE_INIT_TXQ:
375 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
376 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
377 
378     case FORE200E_STATE_INIT_CMDQ:
379 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
380 
381     case FORE200E_STATE_INITIALIZE:
382 	/* nothing to do for that state */
383 
384     case FORE200E_STATE_START_FW:
385 	/* nothing to do for that state */
386 
387     case FORE200E_STATE_RESET:
388 	/* nothing to do for that state */
389 
390     case FORE200E_STATE_MAP:
391 	fore200e->bus->unmap(fore200e);
392 
393     case FORE200E_STATE_CONFIGURE:
394 	/* nothing to do for that state */
395 
396     case FORE200E_STATE_REGISTER:
397 	/* XXX shouldn't we *start* by deregistering the device? */
398 	atm_dev_deregister(fore200e->atm_dev);
399 
400     case FORE200E_STATE_BLANK:
401 	/* nothing to do for that state */
402 	break;
403     }
404 }
405 
406 
407 #ifdef CONFIG_PCI
408 
409 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
410 {
411     /* on big-endian hosts, the board is configured to convert
412        the endianness of slave RAM accesses  */
413     return le32_to_cpu(readl(addr));
414 }
415 
416 
417 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
418 {
419     /* on big-endian hosts, the board is configured to convert
420        the endianness of slave RAM accesses  */
421     writel(cpu_to_le32(val), addr);
422 }
423 
424 
425 static u32
426 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
427 {
428     u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
429 
430     DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d,  --> dma_addr = 0x%08x\n",
431 	    virt_addr, size, direction, dma_addr);
432 
433     return dma_addr;
434 }
435 
436 
437 static void
438 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
439 {
440     DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
441 	    dma_addr, size, direction);
442 
443     pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
444 }
445 
446 
447 static void
448 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
449 {
450     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
451 
452     pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
453 }
454 
455 static void
456 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
457 {
458     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
459 
460     pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
461 }
462 
463 
464 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
465    (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
466 
467 static int
468 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
469 			     int size, int nbr, int alignment)
470 {
471     /* returned chunks are page-aligned */
472     chunk->alloc_size = size * nbr;
473     chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
474 					     chunk->alloc_size,
475 					     &chunk->dma_addr);
476 
477     if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
478 	return -ENOMEM;
479 
480     chunk->align_addr = chunk->alloc_addr;
481 
482     return 0;
483 }
484 
485 
486 /* free a DMA consistent chunk of memory */
487 
488 static void
489 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
490 {
491     pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
492 			chunk->alloc_size,
493 			chunk->alloc_addr,
494 			chunk->dma_addr);
495 }
496 
497 
498 static int
499 fore200e_pca_irq_check(struct fore200e* fore200e)
500 {
501     /* this is a 1-bit register */
502     int irq_posted = readl(fore200e->regs.pca.psr);
503 
504 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
505     if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
506 	DPRINTK(2, "FIFO OUT full, device %d\n", fore200e->atm_dev->number);
507     }
508 #endif
509 
510     return irq_posted;
511 }
512 
513 
514 static void
515 fore200e_pca_irq_ack(struct fore200e* fore200e)
516 {
517     writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
518 }
519 
520 
521 static void
522 fore200e_pca_reset(struct fore200e* fore200e)
523 {
524     writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
525     fore200e_spin(10);
526     writel(0, fore200e->regs.pca.hcr);
527 }
528 
529 
530 static int __devinit
531 fore200e_pca_map(struct fore200e* fore200e)
532 {
533     DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
534 
535     fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
536 
537     if (fore200e->virt_base == NULL) {
538 	printk(FORE200E "can't map device %s\n", fore200e->name);
539 	return -EFAULT;
540     }
541 
542     DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
543 
544     /* gain access to the PCA specific registers  */
545     fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
546     fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
547     fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
548 
549     fore200e->state = FORE200E_STATE_MAP;
550     return 0;
551 }
552 
553 
554 static void
555 fore200e_pca_unmap(struct fore200e* fore200e)
556 {
557     DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
558 
559     if (fore200e->virt_base != NULL)
560 	iounmap(fore200e->virt_base);
561 }
562 
563 
564 static int __devinit
565 fore200e_pca_configure(struct fore200e* fore200e)
566 {
567     struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
568     u8              master_ctrl, latency;
569 
570     DPRINTK(2, "device %s being configured\n", fore200e->name);
571 
572     if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
573 	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
574 	return -EIO;
575     }
576 
577     pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
578 
579     master_ctrl = master_ctrl
580 #if defined(__BIG_ENDIAN)
581 	/* request the PCA board to convert the endianness of slave RAM accesses */
582 	| PCA200E_CTRL_CONVERT_ENDIAN
583 #endif
584 #if 0
585         | PCA200E_CTRL_DIS_CACHE_RD
586         | PCA200E_CTRL_DIS_WRT_INVAL
587         | PCA200E_CTRL_ENA_CONT_REQ_MODE
588         | PCA200E_CTRL_2_CACHE_WRT_INVAL
589 #endif
590 	| PCA200E_CTRL_LARGE_PCI_BURSTS;
591 
592     pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
593 
594     /* raise latency from 32 (default) to 192, as this seems to prevent NIC
595        lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
596        this may impact the performance of other PCI devices on the same bus, though */
597     latency = 192;
598     pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
599 
600     fore200e->state = FORE200E_STATE_CONFIGURE;
601     return 0;
602 }
603 
604 
605 static int __init
606 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
607 {
608     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
609     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
610     struct prom_opcode      opcode;
611     int                     ok;
612     u32                     prom_dma;
613 
614     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
615 
616     opcode.opcode = OPCODE_GET_PROM;
617     opcode.pad    = 0;
618 
619     prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
620 
621     fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
622 
623     *entry->status = STATUS_PENDING;
624 
625     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
626 
627     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
628 
629     *entry->status = STATUS_FREE;
630 
631     fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
632 
633     if (ok == 0) {
634 	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
635 	return -EIO;
636     }
637 
638 #if defined(__BIG_ENDIAN)
639 
640 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
641 
642     /* MAC address is stored as little-endian */
643     swap_here(&prom->mac_addr[0]);
644     swap_here(&prom->mac_addr[4]);
645 #endif
646 
647     return 0;
648 }
649 
650 
651 static int
652 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
653 {
654     struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
655 
656     return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
657 		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
658 }
659 
660 #endif /* CONFIG_PCI */
661 
662 
663 #ifdef CONFIG_SBUS
664 
665 static u32 fore200e_sba_read(volatile u32 __iomem *addr)
666 {
667     return sbus_readl(addr);
668 }
669 
670 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
671 {
672     sbus_writel(val, addr);
673 }
674 
675 static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
676 {
677 	struct of_device *op = fore200e->bus_dev;
678 	u32 dma_addr;
679 
680 	dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);
681 
682 	DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
683 		virt_addr, size, direction, dma_addr);
684 
685 	return dma_addr;
686 }
687 
688 static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
689 {
690 	struct of_device *op = fore200e->bus_dev;
691 
692 	DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
693 		dma_addr, size, direction);
694 
695 	dma_unmap_single(&op->dev, dma_addr, size, direction);
696 }
697 
698 static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
699 {
700 	struct of_device *op = fore200e->bus_dev;
701 
702 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
703 
704 	dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction);
705 }
706 
707 static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
708 {
709 	struct of_device *op = fore200e->bus_dev;
710 
711 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
712 
713 	dma_sync_single_for_device(&op->dev, dma_addr, size, direction);
714 }
715 
716 /* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
717  * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter.
718  */
719 static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
720 					int size, int nbr, int alignment)
721 {
722 	struct of_device *op = fore200e->bus_dev;
723 
724 	chunk->alloc_size = chunk->align_size = size * nbr;
725 
726 	/* returned chunks are page-aligned */
727 	chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size,
728 					       &chunk->dma_addr, GFP_ATOMIC);
729 
730 	if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
731 		return -ENOMEM;
732 
733 	chunk->align_addr = chunk->alloc_addr;
734 
735 	return 0;
736 }
737 
738 /* free a DVMA consistent chunk of memory */
739 static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
740 {
741 	struct of_device *op = fore200e->bus_dev;
742 
743 	dma_free_coherent(&op->dev, chunk->alloc_size,
744 			  chunk->alloc_addr, chunk->dma_addr);
745 }
746 
747 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
748 {
749 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
750 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
751 }
752 
753 static int fore200e_sba_irq_check(struct fore200e *fore200e)
754 {
755 	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
756 }
757 
758 static void fore200e_sba_irq_ack(struct fore200e *fore200e)
759 {
760 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
761 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
762 }
763 
764 static void fore200e_sba_reset(struct fore200e *fore200e)
765 {
766 	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
767 	fore200e_spin(10);
768 	fore200e->bus->write(0, fore200e->regs.sba.hcr);
769 }
770 
771 static int __init fore200e_sba_map(struct fore200e *fore200e)
772 {
773 	struct of_device *op = fore200e->bus_dev;
774 	unsigned int bursts;
775 
776 	/* gain access to the SBA specific registers  */
777 	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
778 	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
779 	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
780 	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
781 
782 	if (!fore200e->virt_base) {
783 		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
784 		return -EFAULT;
785 	}
786 
787 	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
788 
789 	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
790 
791 	/* get the supported DVMA burst sizes */
792 	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
793 
794 	if (sbus_can_dma_64bit())
795 		sbus_set_sbus64(&op->dev, bursts);
796 
797 	fore200e->state = FORE200E_STATE_MAP;
798 	return 0;
799 }
800 
801 static void fore200e_sba_unmap(struct fore200e *fore200e)
802 {
803 	struct of_device *op = fore200e->bus_dev;
804 
805 	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
806 	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
807 	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
808 	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
809 }
810 
811 static int __init fore200e_sba_configure(struct fore200e *fore200e)
812 {
813 	fore200e->state = FORE200E_STATE_CONFIGURE;
814 	return 0;
815 }
816 
817 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
818 {
819 	struct of_device *op = fore200e->bus_dev;
820 	const u8 *prop;
821 	int len;
822 
823 	prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
824 	if (!prop)
825 		return -ENODEV;
826 	memcpy(&prom->mac_addr[4], prop, 4);
827 
828 	prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
829 	if (!prop)
830 		return -ENODEV;
831 	memcpy(&prom->mac_addr[2], prop, 4);
832 
833 	prom->serial_number = of_getintprop_default(op->dev.of_node,
834 						    "serialnumber", 0);
835 	prom->hw_revision = of_getintprop_default(op->dev.of_node,
836 						  "promversion", 0);
837 
838 	return 0;
839 }
840 
841 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
842 {
843 	struct of_device *op = fore200e->bus_dev;
844 	const struct linux_prom_registers *regs;
845 
846 	regs = of_get_property(op->dev.of_node, "reg", NULL);
847 
848 	return sprintf(page, "   SBUS slot/device:\t\t%d/'%s'\n",
849 		       (regs ? regs->which_io : 0), op->dev.of_node->name);
850 }
851 #endif /* CONFIG_SBUS */
852 
853 
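/* reap completed tx queue entries: free bounce copies, unmap DMA, pop or drop the skb and update the tx stats */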
854 static void
855 fore200e_tx_irq(struct fore200e* fore200e)
856 {
857     struct host_txq*        txq = &fore200e->host_txq;
858     struct host_txq_entry*  entry;
859     struct atm_vcc*         vcc;
860     struct fore200e_vc_map* vc_map;
861 
862     if (fore200e->host_txq.txing == 0)
863 	return;
864 
865     for (;;) {
866 
867 	entry = &txq->host_entry[ txq->tail ];
868 
869         if ((*entry->status & STATUS_COMPLETE) == 0) {
870 	    break;
871 	}
872 
873 	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
874 		entry, txq->tail, entry->vc_map, entry->skb);
875 
876 	/* free copy of misaligned data */
877 	kfree(entry->data);
878 
879 	/* remove DMA mapping */
880 	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
881 				 DMA_TO_DEVICE);
882 
883 	vc_map = entry->vc_map;
884 
885 	/* vcc closed since the time the entry was submitted for tx? */
886 	if ((vc_map->vcc == NULL) ||
887 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
888 
889 	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
890 		    fore200e->atm_dev->number);
891 
892 	    dev_kfree_skb_any(entry->skb);
893 	}
894 	else {
895 	    ASSERT(vc_map->vcc);
896 
897 	    /* vcc closed then immediately re-opened? */
898 	    if (vc_map->incarn != entry->incarn) {
899 
900 		/* when a vcc is closed, some PDUs may still be pending in the tx queue.
901 		   if the same vcc is immediately re-opened, those pending PDUs must
902 		   not be popped once their transmission completes, as they refer
903 		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
904 		   would be decremented by the size of the (unrelated) skb, possibly
905 		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
906 		   we thus bind each tx entry to the current incarnation of the vcc
907 		   when the entry is submitted for tx. when the tx later completes,
908 		   if the incarnation number of the tx entry does not match that of
909 		   the vcc, the vcc has been closed and re-opened in the meantime;
910 		   we thus just drop the skb here. */
911 
912 		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
913 			fore200e->atm_dev->number);
914 
915 		dev_kfree_skb_any(entry->skb);
916 	    }
917 	    else {
918 		vcc = vc_map->vcc;
919 		ASSERT(vcc);
920 
921 		/* notify tx completion */
922 		if (vcc->pop) {
923 		    vcc->pop(vcc, entry->skb);
924 		}
925 		else {
926 		    dev_kfree_skb_any(entry->skb);
927 		}
928 #if 1
929 		/* race fixed by the above incarnation mechanism, but... */
930 		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
931 		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
932 		}
933 #endif
934 		/* check error condition */
935 		if (*entry->status & STATUS_ERROR)
936 		    atomic_inc(&vcc->stats->tx_err);
937 		else
938 		    atomic_inc(&vcc->stats->tx);
939 	    }
940 	}
941 
942 	*entry->status = STATUS_FREE;
943 
944 	fore200e->host_txq.txing--;
945 
946 	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
947     }
948 }
949 
950 
951 #ifdef FORE200E_BSQ_DEBUG
952 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
953 {
954     struct buffer* buffer;
955     int count = 0;
956 
957     buffer = bsq->freebuf;
958     while (buffer) {
959 
960 	if (buffer->supplied) {
961 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
962 		   where, scheme, magn, buffer->index);
963 	}
964 
965 	if (buffer->magn != magn) {
966 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
967 		   where, scheme, magn, buffer->index, buffer->magn);
968 	}
969 
970 	if (buffer->scheme != scheme) {
971 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
972 		   where, scheme, magn, buffer->index, buffer->scheme);
973 	}
974 
975 	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
976 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
977 		   where, scheme, magn, buffer->index);
978 	}
979 
980 	count++;
981 	buffer = buffer->next;
982     }
983 
984     if (count != bsq->freebuf_count) {
985 	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
986 	       where, scheme, magn, count, bsq->freebuf_count);
987     }
988     return 0;
989 }
990 #endif
991 
992 
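/* replenish the buffer supply queues: hand blocks of RBD_BLK_SIZE free rx buffers back to the adapter */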
993 static void
994 fore200e_supply(struct fore200e* fore200e)
995 {
996     int  scheme, magn, i;
997 
998     struct host_bsq*       bsq;
999     struct host_bsq_entry* entry;
1000     struct buffer*         buffer;
1001 
1002     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1003 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1004 
1005 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
1006 
1007 #ifdef FORE200E_BSQ_DEBUG
1008 	    bsq_audit(1, bsq, scheme, magn);
1009 #endif
1010 	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1011 
1012 		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1013 			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1014 
1015 		entry = &bsq->host_entry[ bsq->head ];
1016 
1017 		for (i = 0; i < RBD_BLK_SIZE; i++) {
1018 
1019 		    /* take the first buffer in the free buffer list */
1020 		    buffer = bsq->freebuf;
1021 		    if (!buffer) {
1022 			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1023 			       scheme, magn, bsq->freebuf_count);
1024 			return;
1025 		    }
1026 		    bsq->freebuf = buffer->next;
1027 
1028 #ifdef FORE200E_BSQ_DEBUG
1029 		    if (buffer->supplied)
1030 			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1031 			       scheme, magn, buffer->index);
1032 		    buffer->supplied = 1;
1033 #endif
1034 		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1035 		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
1036 		}
1037 
1038 		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1039 
1040 		/* decrease the number of free rx buffers accordingly */
1041 		bsq->freebuf_count -= RBD_BLK_SIZE;
1042 
1043 		*entry->status = STATUS_PENDING;
1044 		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1045 	    }
1046 	}
1047     }
1048 }
1049 
1050 
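/* reassemble the segments of a received PDU into an skb and push it up to the ATM layer */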
1051 static int
1052 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1053 {
1054     struct sk_buff*      skb;
1055     struct buffer*       buffer;
1056     struct fore200e_vcc* fore200e_vcc;
1057     int                  i, pdu_len = 0;
1058 #ifdef FORE200E_52BYTE_AAL0_SDU
1059     u32                  cell_header = 0;
1060 #endif
1061 
1062     ASSERT(vcc);
1063 
1064     fore200e_vcc = FORE200E_VCC(vcc);
1065     ASSERT(fore200e_vcc);
1066 
1067 #ifdef FORE200E_52BYTE_AAL0_SDU
1068     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1069 
1070 	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1071 	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1072                       (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1073                       (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1074                        rpd->atm_header.clp;
1075 	pdu_len = 4;
1076     }
1077 #endif
1078 
1079     /* compute total PDU length */
1080     for (i = 0; i < rpd->nseg; i++)
1081 	pdu_len += rpd->rsd[ i ].length;
1082 
1083     skb = alloc_skb(pdu_len, GFP_ATOMIC);
1084     if (skb == NULL) {
1085 	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1086 
1087 	atomic_inc(&vcc->stats->rx_drop);
1088 	return -ENOMEM;
1089     }
1090 
1091     __net_timestamp(skb);
1092 
1093 #ifdef FORE200E_52BYTE_AAL0_SDU
1094     if (cell_header) {
1095 	*((u32*)skb_put(skb, 4)) = cell_header;
1096     }
1097 #endif
1098 
1099     /* reassemble segments */
1100     for (i = 0; i < rpd->nseg; i++) {
1101 
1102 	/* rebuild rx buffer address from rsd handle */
1103 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1104 
1105 	/* Make device DMA transfer visible to CPU.  */
1106 	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1107 
1108 	memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1109 
1110 	/* Now let the device get at it again.  */
1111 	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1112     }
1113 
1114     DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1115 
1116     if (pdu_len < fore200e_vcc->rx_min_pdu)
1117 	fore200e_vcc->rx_min_pdu = pdu_len;
1118     if (pdu_len > fore200e_vcc->rx_max_pdu)
1119 	fore200e_vcc->rx_max_pdu = pdu_len;
1120     fore200e_vcc->rx_pdu++;
1121 
1122     /* push PDU */
1123     if (atm_charge(vcc, skb->truesize) == 0) {
1124 
1125 	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1126 		vcc->itf, vcc->vpi, vcc->vci);
1127 
1128 	dev_kfree_skb_any(skb);
1129 
1130 	atomic_inc(&vcc->stats->rx_drop);
1131 	return -ENOMEM;
1132     }
1133 
1134     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1135 
1136     vcc->push(vcc, skb);
1137     atomic_inc(&vcc->stats->rx);
1138 
1139     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1140 
1141     return 0;
1142 }
1143 
1144 
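/* return the rx buffers referenced by a receive PDU descriptor to their free buffer lists */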
1145 static void
1146 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1147 {
1148     struct host_bsq* bsq;
1149     struct buffer*   buffer;
1150     int              i;
1151 
1152     for (i = 0; i < rpd->nseg; i++) {
1153 
1154 	/* rebuild rx buffer address from rsd handle */
1155 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1156 
1157 	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1158 
1159 #ifdef FORE200E_BSQ_DEBUG
1160 	bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1161 
1162 	if (buffer->supplied == 0)
1163 	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1164 		   buffer->scheme, buffer->magn, buffer->index);
1165 	buffer->supplied = 0;
1166 #endif
1167 
1168 	/* re-insert the buffer into the free buffer list */
1169 	buffer->next = bsq->freebuf;
1170 	bsq->freebuf = buffer;
1171 
1172 	/* then increment the number of free rx buffers */
1173 	bsq->freebuf_count++;
1174     }
1175 }
1176 
1177 
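/* handle rx completions: deliver each received PDU, recycle its buffers and resupply the adapter */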
1178 static void
1179 fore200e_rx_irq(struct fore200e* fore200e)
1180 {
1181     struct host_rxq*        rxq = &fore200e->host_rxq;
1182     struct host_rxq_entry*  entry;
1183     struct atm_vcc*         vcc;
1184     struct fore200e_vc_map* vc_map;
1185 
1186     for (;;) {
1187 
1188 	entry = &rxq->host_entry[ rxq->head ];
1189 
1190 	/* no more received PDUs */
1191 	if ((*entry->status & STATUS_COMPLETE) == 0)
1192 	    break;
1193 
1194 	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1195 
1196 	if ((vc_map->vcc == NULL) ||
1197 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1198 
1199 	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1200 		    fore200e->atm_dev->number,
1201 		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1202 	}
1203 	else {
1204 	    vcc = vc_map->vcc;
1205 	    ASSERT(vcc);
1206 
1207 	    if ((*entry->status & STATUS_ERROR) == 0) {
1208 
1209 		fore200e_push_rpd(fore200e, vcc, entry->rpd);
1210 	    }
1211 	    else {
1212 		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1213 			fore200e->atm_dev->number,
1214 			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1215 		atomic_inc(&vcc->stats->rx_err);
1216 	    }
1217 	}
1218 
1219 	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1220 
1221 	fore200e_collect_rpd(fore200e, entry->rpd);
1222 
1223 	/* rewrite the rpd address to ack the received PDU */
1224 	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1225 	*entry->status = STATUS_FREE;
1226 
1227 	fore200e_supply(fore200e);
1228     }
1229 }
1230 
1231 
1232 #ifndef FORE200E_USE_TASKLET
1233 static void
1234 fore200e_irq(struct fore200e* fore200e)
1235 {
1236     unsigned long flags;
1237 
1238     spin_lock_irqsave(&fore200e->q_lock, flags);
1239     fore200e_rx_irq(fore200e);
1240     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1241 
1242     spin_lock_irqsave(&fore200e->q_lock, flags);
1243     fore200e_tx_irq(fore200e);
1244     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1245 }
1246 #endif
1247 
1248 
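/* top-level interrupt handler: check that the interrupt is ours, then process it inline or defer to tasklets */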
1249 static irqreturn_t
1250 fore200e_interrupt(int irq, void* dev)
1251 {
1252     struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1253 
1254     if (fore200e->bus->irq_check(fore200e) == 0) {
1255 
1256 	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1257 	return IRQ_NONE;
1258     }
1259     DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1260 
1261 #ifdef FORE200E_USE_TASKLET
1262     tasklet_schedule(&fore200e->tx_tasklet);
1263     tasklet_schedule(&fore200e->rx_tasklet);
1264 #else
1265     fore200e_irq(fore200e);
1266 #endif
1267 
1268     fore200e->bus->irq_ack(fore200e);
1269     return IRQ_HANDLED;
1270 }
1271 
1272 
1273 #ifdef FORE200E_USE_TASKLET
1274 static void
1275 fore200e_tx_tasklet(unsigned long data)
1276 {
1277     struct fore200e* fore200e = (struct fore200e*) data;
1278     unsigned long flags;
1279 
1280     DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1281 
1282     spin_lock_irqsave(&fore200e->q_lock, flags);
1283     fore200e_tx_irq(fore200e);
1284     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1285 }
1286 
1287 
1288 static void
1289 fore200e_rx_tasklet(unsigned long data)
1290 {
1291     struct fore200e* fore200e = (struct fore200e*) data;
1292     unsigned long    flags;
1293 
1294     DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1295 
1296     spin_lock_irqsave(&fore200e->q_lock, flags);
1297     fore200e_rx_irq((struct fore200e*) data);
1298     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1299 }
1300 #endif
1301 
1302 
1303 static int
1304 fore200e_select_scheme(struct atm_vcc* vcc)
1305 {
1306     /* fairly balance the VCs over (identical) buffer schemes */
1307     int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1308 
1309     DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1310 	    vcc->itf, vcc->vpi, vcc->vci, scheme);
1311 
1312     return scheme;
1313 }
1314 
1315 
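/* issue an activate or deactivate VC command to the adapter and wait for its completion */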
1316 static int
1317 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1318 {
1319     struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
1320     struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
1321     struct activate_opcode   activ_opcode;
1322     struct deactivate_opcode deactiv_opcode;
1323     struct vpvc              vpvc;
1324     int                      ok;
1325     enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);
1326 
1327     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1328 
1329     if (activate) {
1330 	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1331 
1332 	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1333 	activ_opcode.aal    = aal;
1334 	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1335 	activ_opcode.pad    = 0;
1336     }
1337     else {
1338 	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1339 	deactiv_opcode.pad    = 0;
1340     }
1341 
1342     vpvc.vci = vcc->vci;
1343     vpvc.vpi = vcc->vpi;
1344 
1345     *entry->status = STATUS_PENDING;
1346 
1347     if (activate) {
1348 
1349 #ifdef FORE200E_52BYTE_AAL0_SDU
1350 	mtu = 48;
1351 #endif
1352 	/* the MTU is not used by the cp, except in the case of AAL0 */
1353 	fore200e->bus->write(mtu,                        &entry->cp_entry->cmd.activate_block.mtu);
1354 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1355 	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1356     }
1357     else {
1358 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1359 	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1360     }
1361 
1362     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1363 
1364     *entry->status = STATUS_FREE;
1365 
1366     if (ok == 0) {
1367 	printk(FORE200E "unable to %s VC %d.%d.%d\n",
1368 	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1369 	return -EIO;
1370     }
1371 
1372     DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1373 	    activate ? "open" : "clos");
1374 
1375     return 0;
1376 }
1377 
1378 
1379 #define FORE200E_MAX_BACK2BACK_CELLS 255    /* XXX depends on CDVT */
1380 
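/* derive the data cells to idle cells ratio used for pseudo-CBR rate control from the requested tx PCR */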
1381 static void
1382 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1383 {
1384     if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1385 
1386 	/* compute the data cells to idle cells ratio from the tx PCR */
1387 	rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1388 	rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1389     }
1390     else {
1391 	/* disable rate control */
1392 	rate->data_cells = rate->idle_cells = 0;
1393     }
1394 }
1395 
1396 
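/* ATM device operation: open a VC, reserving pseudo-CBR bandwidth and activating the VC on the adapter */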
1397 static int
1398 fore200e_open(struct atm_vcc *vcc)
1399 {
1400     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1401     struct fore200e_vcc*    fore200e_vcc;
1402     struct fore200e_vc_map* vc_map;
1403     unsigned long	    flags;
1404     int			    vci = vcc->vci;
1405     short		    vpi = vcc->vpi;
1406 
1407     ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1408     ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1409 
1410     spin_lock_irqsave(&fore200e->q_lock, flags);
1411 
1412     vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1413     if (vc_map->vcc) {
1414 
1415 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1416 
1417 	printk(FORE200E "VC %d.%d.%d already in use\n",
1418 	       fore200e->atm_dev->number, vpi, vci);
1419 
1420 	return -EINVAL;
1421     }
1422 
1423     vc_map->vcc = vcc;
1424 
1425     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1426 
1427     fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1428     if (fore200e_vcc == NULL) {
1429 	vc_map->vcc = NULL;
1430 	return -ENOMEM;
1431     }
1432 
1433     DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1434 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1435 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1436 	    fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1437 	    vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1438 	    fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1439 	    vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1440 
1441     /* pseudo-CBR bandwidth requested? */
1442     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1443 
1444 	mutex_lock(&fore200e->rate_mtx);
1445 	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1446 	    mutex_unlock(&fore200e->rate_mtx);
1447 
1448 	    kfree(fore200e_vcc);
1449 	    vc_map->vcc = NULL;
1450 	    return -EAGAIN;
1451 	}
1452 
1453 	/* reserve bandwidth */
1454 	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1455 	mutex_unlock(&fore200e->rate_mtx);
1456     }
1457 
1458     vcc->itf = vcc->dev->number;
1459 
1460     set_bit(ATM_VF_PARTIAL,&vcc->flags);
1461     set_bit(ATM_VF_ADDR, &vcc->flags);
1462 
1463     vcc->dev_data = fore200e_vcc;
1464 
1465     if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1466 
1467 	vc_map->vcc = NULL;
1468 
1469 	clear_bit(ATM_VF_ADDR, &vcc->flags);
1470 	clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1471 
1472 	vcc->dev_data = NULL;
1473 
1474 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1475 
1476 	kfree(fore200e_vcc);
1477 	return -EINVAL;
1478     }
1479 
1480     /* compute rate control parameters */
1481     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1482 
1483 	fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1484 	set_bit(ATM_VF_HASQOS, &vcc->flags);
1485 
1486 	DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1487 		vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1488 		vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1489 		fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1490     }
1491 
1492     fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1493     fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1494     fore200e_vcc->tx_pdu     = fore200e_vcc->rx_pdu     = 0;
1495 
1496     /* new incarnation of the vcc */
1497     vc_map->incarn = ++fore200e->incarn_count;
1498 
1499     /* VC unusable before this flag is set */
1500     set_bit(ATM_VF_READY, &vcc->flags);
1501 
1502     return 0;
1503 }
1504 
1505 
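/* ATM device operation: close a VC, deactivate it on the adapter and release any reserved bandwidth */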
1506 static void
1507 fore200e_close(struct atm_vcc* vcc)
1508 {
1509     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1510     struct fore200e_vcc*    fore200e_vcc;
1511     struct fore200e_vc_map* vc_map;
1512     unsigned long           flags;
1513 
1514     ASSERT(vcc);
1515     ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1516     ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1517 
1518     DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1519 
1520     clear_bit(ATM_VF_READY, &vcc->flags);
1521 
1522     fore200e_activate_vcin(fore200e, 0, vcc, 0);
1523 
1524     spin_lock_irqsave(&fore200e->q_lock, flags);
1525 
1526     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1527 
1528     /* the vc is no longer considered as "in use" by fore200e_open() */
1529     vc_map->vcc = NULL;
1530 
1531     vcc->itf = vcc->vci = vcc->vpi = 0;
1532 
1533     fore200e_vcc = FORE200E_VCC(vcc);
1534     vcc->dev_data = NULL;
1535 
1536     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1537 
1538     /* release reserved bandwidth, if any */
1539     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1540 
1541 	mutex_lock(&fore200e->rate_mtx);
1542 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1543 	mutex_unlock(&fore200e->rate_mtx);
1544 
1545 	clear_bit(ATM_VF_HASQOS, &vcc->flags);
1546     }
1547 
1548     clear_bit(ATM_VF_ADDR, &vcc->flags);
1549     clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1550 
1551     ASSERT(fore200e_vcc);
1552     kfree(fore200e_vcc);
1553 }
1554 
1555 
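/* ATM device operation: queue a PDU for transmission, bouncing misaligned or padded AAL0 data through a copy */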
1556 static int
1557 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1558 {
1559     struct fore200e*        fore200e     = FORE200E_DEV(vcc->dev);
1560     struct fore200e_vcc*    fore200e_vcc = FORE200E_VCC(vcc);
1561     struct fore200e_vc_map* vc_map;
1562     struct host_txq*        txq          = &fore200e->host_txq;
1563     struct host_txq_entry*  entry;
1564     struct tpd*             tpd;
1565     struct tpd_haddr        tpd_haddr;
1566     int                     retry        = CONFIG_ATM_FORE200E_TX_RETRY;
1567     int                     tx_copy      = 0;
1568     int                     tx_len       = skb->len;
1569     u32*                    cell_header  = NULL;
1570     unsigned char*          skb_data;
1571     int                     skb_len;
1572     unsigned char*          data;
1573     unsigned long           flags;
1574 
1575     ASSERT(vcc);
1576     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1577     ASSERT(fore200e);
1578     ASSERT(fore200e_vcc);
1579 
1580     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1581 	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1582 	dev_kfree_skb_any(skb);
1583 	return -EINVAL;
1584     }
1585 
1586 #ifdef FORE200E_52BYTE_AAL0_SDU
1587     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1588 	cell_header = (u32*) skb->data;
1589 	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
1590 	skb_len     = tx_len = skb->len  - 4;
1591 
1592 	DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1593     }
1594     else
1595 #endif
1596     {
1597 	skb_data = skb->data;
1598 	skb_len  = skb->len;
1599     }
1600 
1601     if (((unsigned long)skb_data) & 0x3) {
1602 
1603 	DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1604 	tx_copy = 1;
1605 	tx_len  = skb_len;
1606     }
1607 
1608     if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1609 
1610         /* this simply NUKES the PCA board */
1611 	DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1612 	tx_copy = 1;
1613 	tx_len  = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1614     }
1615 
1616     if (tx_copy) {
1617 	data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1618 	if (data == NULL) {
1619 	    if (vcc->pop) {
1620 		vcc->pop(vcc, skb);
1621 	    }
1622 	    else {
1623 		dev_kfree_skb_any(skb);
1624 	    }
1625 	    return -ENOMEM;
1626 	}
1627 
1628 	memcpy(data, skb_data, skb_len);
1629 	if (skb_len < tx_len)
1630 	    memset(data + skb_len, 0x00, tx_len - skb_len);
1631     }
1632     else {
1633 	data = skb_data;
1634     }
1635 
1636     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1637     ASSERT(vc_map->vcc == vcc);
1638 
1639   retry_here:
1640 
1641     spin_lock_irqsave(&fore200e->q_lock, flags);
1642 
1643     entry = &txq->host_entry[ txq->head ];
1644 
1645     if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1646 
1647 	/* try to free completed tx queue entries */
1648 	fore200e_tx_irq(fore200e);
1649 
1650 	if (*entry->status != STATUS_FREE) {
1651 
1652 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1653 
1654 	    /* retry once again? */
1655 	    if (--retry > 0) {
1656 		udelay(50);
1657 		goto retry_here;
1658 	    }
1659 
1660 	    atomic_inc(&vcc->stats->tx_err);
1661 
1662 	    fore200e->tx_sat++;
1663 	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1664 		    fore200e->name, fore200e->cp_queues->heartbeat);
1665 	    if (vcc->pop) {
1666 		vcc->pop(vcc, skb);
1667 	    }
1668 	    else {
1669 		dev_kfree_skb_any(skb);
1670 	    }
1671 
1672 	    if (tx_copy)
1673 		kfree(data);
1674 
1675 	    return -ENOBUFS;
1676 	}
1677     }
1678 
1679     entry->incarn = vc_map->incarn;
1680     entry->vc_map = vc_map;
1681     entry->skb    = skb;
1682     entry->data   = tx_copy ? data : NULL;
1683 
1684     tpd = entry->tpd;
1685     tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1686     tpd->tsd[ 0 ].length = tx_len;
1687 
1688     FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1689     txq->txing++;
1690 
1691     /* The dma_map call above implies a dma_sync so the device can use it,
1692      * thus no explicit dma_sync call is necessary here.
1693      */
1694 
1695     DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1696 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1697 	    tpd->tsd[0].length, skb_len);
1698 
1699     if (skb_len < fore200e_vcc->tx_min_pdu)
1700 	fore200e_vcc->tx_min_pdu = skb_len;
1701     if (skb_len > fore200e_vcc->tx_max_pdu)
1702 	fore200e_vcc->tx_max_pdu = skb_len;
1703     fore200e_vcc->tx_pdu++;
1704 
1705     /* set tx rate control information */
1706     tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1707     tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1708 
1709     if (cell_header) {
1710 	tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1711 	tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1712 	tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1713 	tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1714 	tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1715     }
1716     else {
1717 	/* set the ATM header, common to all cells conveying the PDU */
1718 	tpd->atm_header.clp = 0;
1719 	tpd->atm_header.plt = 0;
1720 	tpd->atm_header.vci = vcc->vci;
1721 	tpd->atm_header.vpi = vcc->vpi;
1722 	tpd->atm_header.gfc = 0;
1723     }
1724 
1725     tpd->spec.length = tx_len;
1726     tpd->spec.nseg   = 1;
1727     tpd->spec.aal    = fore200e_atm2fore_aal(vcc->qos.aal);
1728     tpd->spec.intr   = 1;
1729 
1730     tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);  /* size is expressed in 32 byte blocks */
1731     tpd_haddr.pad   = 0;
1732     tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;          /* shift the address, as we are in a bitfield */
1733 
1734     *entry->status = STATUS_PENDING;
1735     fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1736 
1737     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1738 
1739     return 0;
1740 }
1741 
1742 
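/* fetch the adapter statistics block into host memory using a GET_STATS command */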
1743 static int
1744 fore200e_getstats(struct fore200e* fore200e)
1745 {
1746     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1747     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1748     struct stats_opcode     opcode;
1749     int                     ok;
1750     u32                     stats_dma_addr;
1751 
1752     if (fore200e->stats == NULL) {
1753 	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1754 	if (fore200e->stats == NULL)
1755 	    return -ENOMEM;
1756     }
1757 
1758     stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1759 					    sizeof(struct stats), DMA_FROM_DEVICE);
1760 
1761     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1762 
1763     opcode.opcode = OPCODE_GET_STATS;
1764     opcode.pad    = 0;
1765 
1766     fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1767 
1768     *entry->status = STATUS_PENDING;
1769 
1770     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1771 
1772     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1773 
1774     *entry->status = STATUS_FREE;
1775 
1776     fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1777 
1778     if (ok == 0) {
1779 	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1780 	return -EIO;
1781     }
1782 
1783     return 0;
1784 }
1785 
1786 
1787 static int
1788 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1789 {
1790     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1791 
1792     DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1793 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1794 
1795     return -EINVAL;
1796 }
1797 
1798 
1799 static int
1800 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1801 {
1802     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1803 
1804     DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1805 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1806 
1807     return -EINVAL;
1808 }
1809 
1810 
1811 #if 0 /* currently unused */
1812 static int
1813 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1814 {
1815     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1816     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1817     struct oc3_opcode       opcode;
1818     int                     ok;
1819     u32                     oc3_regs_dma_addr;
1820 
1821     oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1822 
1823     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1824 
1825     opcode.opcode = OPCODE_GET_OC3;
1826     opcode.reg    = 0;
1827     opcode.value  = 0;
1828     opcode.mask   = 0;
1829 
1830     fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1831 
1832     *entry->status = STATUS_PENDING;
1833 
1834     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1835 
1836     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1837 
1838     *entry->status = STATUS_FREE;
1839 
1840     fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1841 
1842     if (ok == 0) {
1843 	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1844 	return -EIO;
1845     }
1846 
1847     return 0;
1848 }
1849 #endif
1850 
1851 
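/* post an OPCODE_SET_OC3 command asking the cp firmware to update the given
   SUNI (OC-3) register with 'value' under 'mask' */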
1852 static int
1853 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1854 {
1855     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1856     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1857     struct oc3_opcode       opcode;
1858     int                     ok;
1859 
1860     DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1861 
1862     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1863 
1864     opcode.opcode = OPCODE_SET_OC3;
1865     opcode.reg    = reg;
1866     opcode.value  = value;
1867     opcode.mask   = mask;
1868 
1869     fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1870 
1871     *entry->status = STATUS_PENDING;
1872 
1873     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1874 
1875     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1876 
1877     *entry->status = STATUS_FREE;
1878 
1879     if (ok == 0) {
1880 	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1881 	return -EIO;
1882     }
1883 
1884     return 0;
1885 }
1886 
1887 
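/* translate the generic ATM loopback modes into the SUNI master control
   register diagnostic (DLE) and line (LLE) loopback bits */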
1888 static int
1889 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1890 {
1891     u32 mct_value, mct_mask;
1892     int error;
1893 
1894     if (!capable(CAP_NET_ADMIN))
1895 	return -EPERM;
1896 
1897     switch (loop_mode) {
1898 
1899     case ATM_LM_NONE:
1900 	mct_value = 0;
1901 	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
1902 	break;
1903 
1904     case ATM_LM_LOC_PHY:
1905 	mct_value = mct_mask = SUNI_MCT_DLE;
1906 	break;
1907 
1908     case ATM_LM_RMT_PHY:
1909 	mct_value = mct_mask = SUNI_MCT_LLE;
1910 	break;
1911 
1912     default:
1913 	return -EINVAL;
1914     }
1915 
1916     error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1917     if (error == 0)
1918 	fore200e->loop_mode = loop_mode;
1919 
1920     return error;
1921 }
1922 
1923 
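/* refresh the on-board statistics and convert the big-endian OC-3 and
   per-AAL cell counters into a struct sonet_stats for the SONET_GETSTAT ioctl */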
1924 static int
1925 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1926 {
1927     struct sonet_stats tmp;
1928 
1929     if (fore200e_getstats(fore200e) < 0)
1930 	return -EIO;
1931 
1932     tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1933     tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1934     tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1935     tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1936     tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1937     tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1938     tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1939     tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
1940 	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1941 	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1942     tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)     +
1943 	              be32_to_cpu(fore200e->stats->aal34.cells_received)    +
1944 	              be32_to_cpu(fore200e->stats->aal5.cells_received);
1945 
1946     if (arg)
1947 	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1948 
1949     return 0;
1950 }
1951 
1952 
1953 static int
1954 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1955 {
1956     struct fore200e* fore200e = FORE200E_DEV(dev);
1957 
1958     DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1959 
1960     switch (cmd) {
1961 
1962     case SONET_GETSTAT:
1963 	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1964 
1965     case SONET_GETDIAG:
1966 	return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1967 
1968     case ATM_SETLOOP:
1969 	return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1970 
1971     case ATM_GETLOOP:
1972 	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1973 
1974     case ATM_QUERYLOOP:
1975 	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1976     }
1977 
1978     return -ENOSYS; /* not implemented */
1979 }
1980 
1981 
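/* QoS changes are only honoured for the tx side of CBR connections: the new
   peak cell rate is reserved against the board's available cell rate, under
   rate_mtx, before the updated parameters are committed to the vcc */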
1982 static int
1983 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1984 {
1985     struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1986     struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);
1987 
1988     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1989 	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1990 	return -EINVAL;
1991     }
1992 
1993     DPRINTK(2, "change_qos %d.%d.%d, "
1994 	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1995 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1996 	    "available_cell_rate = %u\n",
1997 	    vcc->itf, vcc->vpi, vcc->vci,
1998 	    fore200e_traffic_class[ qos->txtp.traffic_class ],
1999 	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2000 	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
2001 	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2002 	    flags, fore200e->available_cell_rate);
2003 
2004     if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2005 
2006 	mutex_lock(&fore200e->rate_mtx);
2007 	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2008 	    mutex_unlock(&fore200e->rate_mtx);
2009 	    return -EAGAIN;
2010 	}
2011 
2012 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2013 	fore200e->available_cell_rate -= qos->txtp.max_pcr;
2014 
2015 	mutex_unlock(&fore200e->rate_mtx);
2016 
2017 	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2018 
2019 	/* update rate control parameters */
2020 	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2021 
2022 	set_bit(ATM_VF_HASQOS, &vcc->flags);
2023 
2024 	return 0;
2025     }
2026 
2027     return -EINVAL;
2028 }
2029 
2030 
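/* reserve the (shared) board interrupt and, when FORE200E_USE_TASKLET is
   defined, set up the tx and rx tasklets that perform the deferred work */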
2031 static int __devinit
2032 fore200e_irq_request(struct fore200e* fore200e)
2033 {
2034     if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2035 
2036 	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2037 	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
2038 	return -EBUSY;
2039     }
2040 
2041     printk(FORE200E "IRQ %s reserved for device %s\n",
2042 	   fore200e_irq_itoa(fore200e->irq), fore200e->name);
2043 
2044 #ifdef FORE200E_USE_TASKLET
2045     tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2046     tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2047 #endif
2048 
2049     fore200e->state = FORE200E_STATE_IRQ;
2050     return 0;
2051 }
2052 
2053 
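/* read the serial PROM and copy the factory-assigned ESI into both the
   driver private data and the ATM device structure */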
2054 static int __devinit
2055 fore200e_get_esi(struct fore200e* fore200e)
2056 {
2057     struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2058     int ok, i;
2059 
2060     if (!prom)
2061 	return -ENOMEM;
2062 
2063     ok = fore200e->bus->prom_read(fore200e, prom);
2064     if (ok < 0) {
2065 	kfree(prom);
2066 	return -EBUSY;
2067     }
2068 
2069     printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
2070 	   fore200e->name,
2071 	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
2072 	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
2073 
2074     for (i = 0; i < ESI_LEN; i++) {
2075 	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2076     }
2077 
2078     kfree(prom);
2079 
2080     return 0;
2081 }
2082 
2083 
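/* allocate the host receive buffers: for each (scheme, magnitude) pair, an
   array of buffer descriptors plus one properly aligned data chunk per
   buffer, all chained onto the free buffer list of the supply queue */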
2084 static int __devinit
2085 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2086 {
2087     int scheme, magn, nbr, size, i;
2088 
2089     struct host_bsq* bsq;
2090     struct buffer*   buffer;
2091 
2092     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2093 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2094 
2095 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2096 
2097 	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
2098 	    size = fore200e_rx_buf_size[ scheme ][ magn ];
2099 
2100 	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2101 
2102 	    /* allocate the array of receive buffers */
2103 	    buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2104 
2105 	    if (buffer == NULL)
2106 		return -ENOMEM;
2107 
2108 	    bsq->freebuf = NULL;
2109 
2110 	    for (i = 0; i < nbr; i++) {
2111 
2112 		buffer[ i ].scheme = scheme;
2113 		buffer[ i ].magn   = magn;
2114 #ifdef FORE200E_BSQ_DEBUG
2115 		buffer[ i ].index  = i;
2116 		buffer[ i ].supplied = 0;
2117 #endif
2118 
2119 		/* allocate the receive buffer body */
2120 		if (fore200e_chunk_alloc(fore200e,
2121 					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2122 					 DMA_FROM_DEVICE) < 0) {
2123 
2124 		    while (i > 0)
2125 			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2126 		    kfree(buffer);
2127 
2128 		    return -ENOMEM;
2129 		}
2130 
2131 		/* insert the buffer into the free buffer list */
2132 		buffer[ i ].next = bsq->freebuf;
2133 		bsq->freebuf = &buffer[ i ];
2134 	    }
2135 	    /* all the buffers are free, initially */
2136 	    bsq->freebuf_count = nbr;
2137 
2138 #ifdef FORE200E_BSQ_DEBUG
2139 	    bsq_audit(3, bsq, scheme, magn);
2140 #endif
2141 	}
2142     }
2143 
2144     fore200e->state = FORE200E_STATE_ALLOC_BUF;
2145     return 0;
2146 }
2147 
2148 
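/* initialize the buffer supply queues: allocate aligned status words and
   receive buffer descriptor blocks, bind each host entry to its cp-resident
   counterpart and publish the status word DMA addresses to the cp */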
2149 static int __devinit
2150 fore200e_init_bs_queue(struct fore200e* fore200e)
2151 {
2152     int scheme, magn, i;
2153 
2154     struct host_bsq*     bsq;
2155     struct cp_bsq_entry __iomem * cp_entry;
2156 
2157     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2158 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2159 
2160 	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2161 
2162 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2163 
2164 	    /* allocate and align the array of status words */
2165 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2166 					       &bsq->status,
2167 					       sizeof(enum status),
2168 					       QUEUE_SIZE_BS,
2169 					       fore200e->bus->status_alignment) < 0) {
2170 		return -ENOMEM;
2171 	    }
2172 
2173 	    /* allocate and align the array of receive buffer descriptors */
2174 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2175 					       &bsq->rbd_block,
2176 					       sizeof(struct rbd_block),
2177 					       QUEUE_SIZE_BS,
2178 					       fore200e->bus->descr_alignment) < 0) {
2179 
2180 		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2181 		return -ENOMEM;
2182 	    }
2183 
2184 	    /* get the base address of the cp resident buffer supply queue entries */
2185 	    cp_entry = fore200e->virt_base +
2186 		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2187 
2188 	    /* fill the host resident and cp resident buffer supply queue entries */
2189 	    for (i = 0; i < QUEUE_SIZE_BS; i++) {
2190 
2191 		bsq->host_entry[ i ].status =
2192 		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2193 	        bsq->host_entry[ i ].rbd_block =
2194 		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2195 		bsq->host_entry[ i ].rbd_block_dma =
2196 		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2197 		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2198 
2199 		*bsq->host_entry[ i ].status = STATUS_FREE;
2200 
2201 		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2202 				     &cp_entry[ i ].status_haddr);
2203 	    }
2204 	}
2205     }
2206 
2207     fore200e->state = FORE200E_STATE_INIT_BSQ;
2208     return 0;
2209 }
2210 
2211 
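/* initialize the receive queue: allocate aligned status words and receive
   PDU descriptors (rpd) and hand their DMA addresses to the cp-resident
   rx queue entries */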
2212 static int __devinit
2213 fore200e_init_rx_queue(struct fore200e* fore200e)
2214 {
2215     struct host_rxq*     rxq =  &fore200e->host_rxq;
2216     struct cp_rxq_entry __iomem * cp_entry;
2217     int i;
2218 
2219     DPRINTK(2, "receive queue is being initialized\n");
2220 
2221     /* allocate and align the array of status words */
2222     if (fore200e->bus->dma_chunk_alloc(fore200e,
2223 				       &rxq->status,
2224 				       sizeof(enum status),
2225 				       QUEUE_SIZE_RX,
2226 				       fore200e->bus->status_alignment) < 0) {
2227 	return -ENOMEM;
2228     }
2229 
2230     /* allocate and align the array of receive PDU descriptors */
2231     if (fore200e->bus->dma_chunk_alloc(fore200e,
2232 				       &rxq->rpd,
2233 				       sizeof(struct rpd),
2234 				       QUEUE_SIZE_RX,
2235 				       fore200e->bus->descr_alignment) < 0) {
2236 
2237 	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2238 	return -ENOMEM;
2239     }
2240 
2241     /* get the base address of the cp resident rx queue entries */
2242     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2243 
2244     /* fill the host resident and cp resident rx entries */
2245     for (i=0; i < QUEUE_SIZE_RX; i++) {
2246 
2247 	rxq->host_entry[ i ].status =
2248 	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2249 	rxq->host_entry[ i ].rpd =
2250 	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2251 	rxq->host_entry[ i ].rpd_dma =
2252 	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2253 	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2254 
2255 	*rxq->host_entry[ i ].status = STATUS_FREE;
2256 
2257 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2258 			     &cp_entry[ i ].status_haddr);
2259 
2260 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2261 			     &cp_entry[ i ].rpd_haddr);
2262     }
2263 
2264     /* set the head entry of the queue */
2265     rxq->head = 0;
2266 
2267     fore200e->state = FORE200E_STATE_INIT_RXQ;
2268     return 0;
2269 }
2270 
2271 
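/* initialize the transmit queue: allocate aligned status words and transmit
   PDU descriptors (tpd); only the status word addresses are published here,
   the tpd addresses are written at send time (see the comment in the loop) */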
2272 static int __devinit
2273 fore200e_init_tx_queue(struct fore200e* fore200e)
2274 {
2275     struct host_txq*     txq =  &fore200e->host_txq;
2276     struct cp_txq_entry __iomem * cp_entry;
2277     int i;
2278 
2279     DPRINTK(2, "transmit queue is being initialized\n");
2280 
2281     /* allocate and align the array of status words */
2282     if (fore200e->bus->dma_chunk_alloc(fore200e,
2283 				       &txq->status,
2284 				       sizeof(enum status),
2285 				       QUEUE_SIZE_TX,
2286 				       fore200e->bus->status_alignment) < 0) {
2287 	return -ENOMEM;
2288     }
2289 
2290     /* allocate and align the array of transmit PDU descriptors */
2291     if (fore200e->bus->dma_chunk_alloc(fore200e,
2292 				       &txq->tpd,
2293 				       sizeof(struct tpd),
2294 				       QUEUE_SIZE_TX,
2295 				       fore200e->bus->descr_alignment) < 0) {
2296 
2297 	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2298 	return -ENOMEM;
2299     }
2300 
2301     /* get the base address of the cp resident tx queue entries */
2302     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2303 
2304     /* fill the host resident and cp resident tx entries */
2305     for (i=0; i < QUEUE_SIZE_TX; i++) {
2306 
2307 	txq->host_entry[ i ].status =
2308 	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
2309 	txq->host_entry[ i ].tpd =
2310 	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2311 	txq->host_entry[ i ].tpd_dma  =
2312                              FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2313 	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2314 
2315 	*txq->host_entry[ i ].status = STATUS_FREE;
2316 
2317 	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2318 			     &cp_entry[ i ].status_haddr);
2319 
2320         /* although there is a one-to-one mapping of tx queue entries and tpds,
2321 	   we do not write the DMA (physical) base address of each tpd into the
2322 	   related cp resident entry here, because the cp relies on this very write
2323 	   operation to detect that a new pdu has been submitted for tx */
2324     }
2325 
2326     /* set the head and tail entries of the queue */
2327     txq->head = 0;
2328     txq->tail = 0;
2329 
2330     fore200e->state = FORE200E_STATE_INIT_TXQ;
2331     return 0;
2332 }
2333 
2334 
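/* initialize the command queue: only aligned status words are needed on the
   host side, their DMA addresses being written into the cp-resident entries */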
2335 static int __devinit
2336 fore200e_init_cmd_queue(struct fore200e* fore200e)
2337 {
2338     struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
2339     struct cp_cmdq_entry __iomem * cp_entry;
2340     int i;
2341 
2342     DPRINTK(2, "command queue is being initialized\n");
2343 
2344     /* allocate and align the array of status words */
2345     if (fore200e->bus->dma_chunk_alloc(fore200e,
2346 				       &cmdq->status,
2347 				       sizeof(enum status),
2348 				       QUEUE_SIZE_CMD,
2349 				       fore200e->bus->status_alignment) < 0) {
2350 	return -ENOMEM;
2351     }
2352 
2353     /* get the base address of the cp resident cmd queue entries */
2354     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2355 
2356     /* fill the host resident and cp resident cmd entries */
2357     for (i=0; i < QUEUE_SIZE_CMD; i++) {
2358 
2359 	cmdq->host_entry[ i ].status   =
2360                               FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2361 	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2362 
2363 	*cmdq->host_entry[ i ].status = STATUS_FREE;
2364 
2365 	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2366                              &cp_entry[ i ].status_haddr);
2367     }
2368 
2369     /* set the head entry of the queue */
2370     cmdq->head = 0;
2371 
2372     fore200e->state = FORE200E_STATE_INIT_CMDQ;
2373     return 0;
2374 }
2375 
2376 
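/* write the parameters of one buffer supply queue (queue length, buffer size,
   pool size and supply block size) into the cp initialization block */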
2377 static void __devinit
2378 fore200e_param_bs_queue(struct fore200e* fore200e,
2379 			enum buffer_scheme scheme, enum buffer_magn magn,
2380 			int queue_length, int pool_size, int supply_blksize)
2381 {
2382     struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2383 
2384     fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
2385     fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2386     fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
2387     fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
2388 }
2389 
2390 
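/* final handshake with the firmware: enable cp to host interrupts, advertise
   the queue sizes and buffer scheme parameters, then issue OPCODE_INITIALIZE
   and poll until the cp reports completion */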
2391 static int __devinit
2392 fore200e_initialize(struct fore200e* fore200e)
2393 {
2394     struct cp_queues __iomem * cpq;
2395     int               ok, scheme, magn;
2396 
2397     DPRINTK(2, "device %s being initialized\n", fore200e->name);
2398 
2399     mutex_init(&fore200e->rate_mtx);
2400     spin_lock_init(&fore200e->q_lock);
2401 
2402     cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2403 
2404     /* enable cp to host interrupts */
2405     fore200e->bus->write(1, &cpq->imask);
2406 
2407     if (fore200e->bus->irq_enable)
2408 	fore200e->bus->irq_enable(fore200e);
2409 
2410     fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2411 
2412     fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2413     fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
2414     fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);
2415 
2416     fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
2417     fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);
2418 
2419     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2420 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2421 	    fore200e_param_bs_queue(fore200e, scheme, magn,
2422 				    QUEUE_SIZE_BS,
2423 				    fore200e_rx_buf_nbr[ scheme ][ magn ],
2424 				    RBD_BLK_SIZE);
2425 
2426     /* issue the initialize command */
2427     fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
2428     fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2429 
2430     ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2431     if (ok == 0) {
2432 	printk(FORE200E "device %s initialization failed\n", fore200e->name);
2433 	return -ENODEV;
2434     }
2435 
2436     printk(FORE200E "device %s initialized\n", fore200e->name);
2437 
2438     fore200e->state = FORE200E_STATE_INITIALIZE;
2439     return 0;
2440 }
2441 
2442 
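/* minimal polled i/o over the i960 monitor soft UART, only used to send the
   'go' command that starts the downloaded firmware */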
2443 static void __devinit
2444 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2445 {
2446     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2447 
2448 #if 0
2449     printk("%c", c);
2450 #endif
2451     fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2452 }
2453 
2454 
2455 static int __devinit
2456 fore200e_monitor_getc(struct fore200e* fore200e)
2457 {
2458     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2459     unsigned long      timeout = jiffies + msecs_to_jiffies(50);
2460     int                c;
2461 
2462     while (time_before(jiffies, timeout)) {
2463 
2464 	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2465 
2466 	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2467 
2468 	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2469 #if 0
2470 	    printk("%c", c & 0xFF);
2471 #endif
2472 	    return c & 0xFF;
2473 	}
2474     }
2475 
2476     return -1;
2477 }
2478 
2479 
2480 static void __devinit
2481 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2482 {
2483     while (*str) {
2484 
2485 	/* the i960 monitor doesn't accept any new character if it has something to say */
2486 	while (fore200e_monitor_getc(fore200e) >= 0);
2487 
2488 	fore200e_monitor_putc(fore200e, *str++);
2489     }
2490 
2491     while (fore200e_monitor_getc(fore200e) >= 0);
2492 }
2493 
2494 #ifdef __LITTLE_ENDIAN
2495 #define FW_EXT ".bin"
2496 #else
2497 #define FW_EXT "_ecd.bin2"
2498 #endif
2499 
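/* download and start the firmware: request the image named after the bus
   proc name plus FW_EXT, copy it word by word to the load offset found in
   its header, then start it through the i960 monitor and wait for the
   BSTAT_CP_RUNNING boot status */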
2500 static int __devinit
2501 fore200e_load_and_start_fw(struct fore200e* fore200e)
2502 {
2503     const struct firmware *firmware;
2504     struct device *device;
2505     struct fw_header *fw_header;
2506     const __le32 *fw_data;
2507     u32 fw_size;
2508     u32 __iomem *load_addr;
2509     char buf[48];
2510     int err = -ENODEV;
2511 
2512     if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
2513 	device = &((struct pci_dev *) fore200e->bus_dev)->dev;
2514 #ifdef CONFIG_SBUS
2515     else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
2516 	device = &((struct of_device *) fore200e->bus_dev)->dev;
2517 #endif
2518     else
2519 	return err;
2520 
2521     sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2522     if ((err = request_firmware(&firmware, buf, device)) < 0) {
2523 	printk(FORE200E "problem loading firmware image %s\n", buf);
2524 	return err;
2525     }
2526 
2527     fw_data = (__le32 *) firmware->data;
2528     fw_size = firmware->size / sizeof(u32);
2529     fw_header = (struct fw_header *) firmware->data;
2530     load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2531 
2532     DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2533 	    fore200e->name, load_addr, fw_size);
2534 
2535     if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2536 	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2537 	goto release;
2538     }
2539 
2540     for (; fw_size--; fw_data++, load_addr++)
2541 	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2542 
2543     DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2544 
2545 #if defined(__sparc_v9__)
2546     /* reported to be required by SBA cards on some sparc64 hosts */
2547     fore200e_spin(100);
2548 #endif
2549 
2550     sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2551     fore200e_monitor_puts(fore200e, buf);
2552 
2553     if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2554 	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2555 	goto release;
2556     }
2557 
2558     printk(FORE200E "device %s firmware started\n", fore200e->name);
2559 
2560     fore200e->state = FORE200E_STATE_START_FW;
2561     err = 0;
2562 
2563 release:
2564     release_firmware(firmware);
2565     return err;
2566 }
2567 
2568 
2569 static int __devinit
2570 fore200e_register(struct fore200e* fore200e)
2571 {
2572     struct atm_dev* atm_dev;
2573 
2574     DPRINTK(2, "device %s being registered\n", fore200e->name);
2575 
2576     atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2577       NULL);
2578     if (atm_dev == NULL) {
2579 	printk(FORE200E "unable to register device %s\n", fore200e->name);
2580 	return -ENODEV;
2581     }
2582 
2583     atm_dev->dev_data = fore200e;
2584     fore200e->atm_dev = atm_dev;
2585 
2586     atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2587     atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2588 
2589     fore200e->available_cell_rate = ATM_OC3_PCR;
2590 
2591     fore200e->state = FORE200E_STATE_REGISTER;
2592     return 0;
2593 }
2594 
2595 
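/* common bring-up sequence: register the ATM device, configure and map the
   bus resources, reset the board, load and start the firmware, set up the
   command/tx/rx/buffer-supply queues, read the ESI, hook up the interrupt
   and finally supply the receive buffers to the cp */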
2596 static int __devinit
2597 fore200e_init(struct fore200e* fore200e)
2598 {
2599     if (fore200e_register(fore200e) < 0)
2600 	return -ENODEV;
2601 
2602     if (fore200e->bus->configure(fore200e) < 0)
2603 	return -ENODEV;
2604 
2605     if (fore200e->bus->map(fore200e) < 0)
2606 	return -ENODEV;
2607 
2608     if (fore200e_reset(fore200e, 1) < 0)
2609 	return -ENODEV;
2610 
2611     if (fore200e_load_and_start_fw(fore200e) < 0)
2612 	return -ENODEV;
2613 
2614     if (fore200e_initialize(fore200e) < 0)
2615 	return -ENODEV;
2616 
2617     if (fore200e_init_cmd_queue(fore200e) < 0)
2618 	return -ENOMEM;
2619 
2620     if (fore200e_init_tx_queue(fore200e) < 0)
2621 	return -ENOMEM;
2622 
2623     if (fore200e_init_rx_queue(fore200e) < 0)
2624 	return -ENOMEM;
2625 
2626     if (fore200e_init_bs_queue(fore200e) < 0)
2627 	return -ENOMEM;
2628 
2629     if (fore200e_alloc_rx_buf(fore200e) < 0)
2630 	return -ENOMEM;
2631 
2632     if (fore200e_get_esi(fore200e) < 0)
2633 	return -EIO;
2634 
2635     if (fore200e_irq_request(fore200e) < 0)
2636 	return -EBUSY;
2637 
2638     fore200e_supply(fore200e);
2639 
2640     /* all done, board initialization is now complete */
2641     fore200e->state = FORE200E_STATE_COMPLETE;
2642     return 0;
2643 }
2644 
2645 #ifdef CONFIG_SBUS
2646 static int __devinit fore200e_sba_probe(struct of_device *op,
2647 					const struct of_device_id *match)
2648 {
2649 	const struct fore200e_bus *bus = match->data;
2650 	struct fore200e *fore200e;
2651 	static int index = 0;
2652 	int err;
2653 
2654 	fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2655 	if (!fore200e)
2656 		return -ENOMEM;
2657 
2658 	fore200e->bus = bus;
2659 	fore200e->bus_dev = op;
2660 	fore200e->irq = op->irqs[0];
2661 	fore200e->phys_base = op->resource[0].start;
2662 
2663 	sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2664 
2665 	err = fore200e_init(fore200e);
2666 	if (err < 0) {
2667 		fore200e_shutdown(fore200e);
2668 		kfree(fore200e);
2669 		return err;
2670 	}
2671 
2672 	index++;
2673 	dev_set_drvdata(&op->dev, fore200e);
2674 
2675 	return 0;
2676 }
2677 
2678 static int __devexit fore200e_sba_remove(struct of_device *op)
2679 {
2680 	struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2681 
2682 	fore200e_shutdown(fore200e);
2683 	kfree(fore200e);
2684 
2685 	return 0;
2686 }
2687 
2688 static const struct of_device_id fore200e_sba_match[] = {
2689 	{
2690 		.name = SBA200E_PROM_NAME,
2691 		.data = (void *) &fore200e_bus[1],
2692 	},
2693 	{},
2694 };
2695 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2696 
2697 static struct of_platform_driver fore200e_sba_driver = {
2698 	.driver = {
2699 		.name = "fore_200e",
2700 		.owner = THIS_MODULE,
2701 		.of_match_table = fore200e_sba_match,
2702 	},
2703 	.probe		= fore200e_sba_probe,
2704 	.remove		= __devexit_p(fore200e_sba_remove),
2705 };
2706 #endif
2707 
2708 #ifdef CONFIG_PCI
2709 static int __devinit
2710 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2711 {
2712     const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2713     struct fore200e* fore200e;
2714     int err = 0;
2715     static int index = 0;
2716 
2717     if (pci_enable_device(pci_dev)) {
2718 	err = -EINVAL;
2719 	goto out;
2720     }
2721 
2722     fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2723     if (fore200e == NULL) {
2724 	err = -ENOMEM;
2725 	goto out_disable;
2726     }
2727 
2728     fore200e->bus       = bus;
2729     fore200e->bus_dev   = pci_dev;
2730     fore200e->irq       = pci_dev->irq;
2731     fore200e->phys_base = pci_resource_start(pci_dev, 0);
2734 
2735     pci_set_master(pci_dev);
2736 
2737     printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2738 	   fore200e->bus->model_name,
2739 	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2740 
2741     sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2742 
2743     err = fore200e_init(fore200e);
2744     if (err < 0) {
2745 	fore200e_shutdown(fore200e);
2746 	goto out_free;
2747     }
2748 
2749     ++index;
2750     pci_set_drvdata(pci_dev, fore200e);
2751 
2752 out:
2753     return err;
2754 
2755 out_free:
2756     kfree(fore200e);
2757 out_disable:
2758     pci_disable_device(pci_dev);
2759     goto out;
2760 }
2761 
2762 
2763 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2764 {
2765     struct fore200e *fore200e;
2766 
2767     fore200e = pci_get_drvdata(pci_dev);
2768 
2769     fore200e_shutdown(fore200e);
2770     kfree(fore200e);
2771     pci_disable_device(pci_dev);
2772 }
2773 
2774 
2775 static struct pci_device_id fore200e_pca_tbl[] = {
2776     { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2777       0, 0, (unsigned long) &fore200e_bus[0] },
2778     { 0, }
2779 };
2780 
2781 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2782 
2783 static struct pci_driver fore200e_pca_driver = {
2784     .name =     "fore_200e",
2785     .probe =    fore200e_pca_detect,
2786     .remove =   __devexit_p(fore200e_pca_remove_one),
2787     .id_table = fore200e_pca_tbl,
2788 };
2789 #endif
2790 
2791 static int __init fore200e_module_init(void)
2792 {
2793 	int err;
2794 
2795 	printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2796 
2797 #ifdef CONFIG_SBUS
2798 	err = of_register_driver(&fore200e_sba_driver, &of_bus_type);
2799 	if (err)
2800 		return err;
2801 #endif
2802 
2803 #ifdef CONFIG_PCI
2804 	err = pci_register_driver(&fore200e_pca_driver);
2805 #endif
2806 
2807 #ifdef CONFIG_SBUS
2808 	if (err)
2809 		of_unregister_driver(&fore200e_sba_driver);
2810 #endif
2811 
2812 	return err;
2813 }
2814 
2815 static void __exit fore200e_module_cleanup(void)
2816 {
2817 #ifdef CONFIG_PCI
2818 	pci_unregister_driver(&fore200e_pca_driver);
2819 #endif
2820 #ifdef CONFIG_SBUS
2821 	of_unregister_driver(&fore200e_sba_driver);
2822 #endif
2823 }
2824 
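/* /proc read handler: each call returns one slice of the report (device and
   bus info, buffer counts, cp state, media type, per-AAL statistics, then
   one line per ready vcc), selected by the *pos offset */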
2825 static int
2826 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2827 {
2828     struct fore200e*     fore200e  = FORE200E_DEV(dev);
2829     struct fore200e_vcc* fore200e_vcc;
2830     struct atm_vcc*      vcc;
2831     int                  i, len, left = *pos;
2832     unsigned long        flags;
2833 
2834     if (!left--) {
2835 
2836 	if (fore200e_getstats(fore200e) < 0)
2837 	    return -EIO;
2838 
2839 	len = sprintf(page,"\n"
2840 		       " device:\n"
2841 		       "   internal name:\t\t%s\n", fore200e->name);
2842 
2843 	/* print bus-specific information */
2844 	if (fore200e->bus->proc_read)
2845 	    len += fore200e->bus->proc_read(fore200e, page + len);
2846 
2847 	len += sprintf(page + len,
2848 		"   interrupt line:\t\t%s\n"
2849 		"   physical base address:\t0x%p\n"
2850 		"   virtual base address:\t0x%p\n"
2851 		"   factory address (ESI):\t%pM\n"
2852 		"   board serial number:\t\t%d\n\n",
2853 		fore200e_irq_itoa(fore200e->irq),
2854 		(void*)fore200e->phys_base,
2855 		fore200e->virt_base,
2856 		fore200e->esi,
2857 		fore200e->esi[4] * 256 + fore200e->esi[5]);
2858 
2859 	return len;
2860     }
2861 
2862     if (!left--)
2863 	return sprintf(page,
2864 		       "   free small bufs, scheme 1:\t%d\n"
2865 		       "   free large bufs, scheme 1:\t%d\n"
2866 		       "   free small bufs, scheme 2:\t%d\n"
2867 		       "   free large bufs, scheme 2:\t%d\n",
2868 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2869 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2870 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2871 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2872 
2873     if (!left--) {
2874 	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2875 
2876 	len = sprintf(page,"\n\n"
2877 		      " cell processor:\n"
2878 		      "   heartbeat state:\t\t");
2879 
2880 	if (hb >> 16 != 0xDEAD)
2881 	    len += sprintf(page + len, "0x%08x\n", hb);
2882 	else
2883 	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2884 
2885 	return len;
2886     }
2887 
2888     if (!left--) {
2889 	static const char* media_name[] = {
2890 	    "unshielded twisted pair",
2891 	    "multimode optical fiber ST",
2892 	    "multimode optical fiber SC",
2893 	    "single-mode optical fiber ST",
2894 	    "single-mode optical fiber SC",
2895 	    "unknown"
2896 	};
2897 
2898 	static const char* oc3_mode[] = {
2899 	    "normal operation",
2900 	    "diagnostic loopback",
2901 	    "line loopback",
2902 	    "unknown"
2903 	};
2904 
2905 	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2906 	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2907 	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2908 	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2909 	u32 oc3_index;
2910 
2911 	if (media_index > 4)
2912 		media_index = 5;
2913 
2914 	switch (fore200e->loop_mode) {
2915 	    case ATM_LM_NONE:    oc3_index = 0;
2916 		                 break;
2917 	    case ATM_LM_LOC_PHY: oc3_index = 1;
2918 		                 break;
2919 	    case ATM_LM_RMT_PHY: oc3_index = 2;
2920 		                 break;
2921 	    default:             oc3_index = 3;
2922 	}
2923 
2924 	return sprintf(page,
2925 		       "   firmware release:\t\t%d.%d.%d\n"
2926 		       "   monitor release:\t\t%d.%d\n"
2927 		       "   media type:\t\t\t%s\n"
2928 		       "   OC-3 revision:\t\t0x%x\n"
2929                        "   OC-3 mode:\t\t\t%s",
2930 		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
2931 		       mon960_release >> 16, mon960_release << 16 >> 16,
2932 		       media_name[ media_index ],
2933 		       oc3_revision,
2934 		       oc3_mode[ oc3_index ]);
2935     }
2936 
2937     if (!left--) {
2938 	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2939 
2940 	return sprintf(page,
2941 		       "\n\n"
2942 		       " monitor:\n"
2943 		       "   version number:\t\t%d\n"
2944 		       "   boot status word:\t\t0x%08x\n",
2945 		       fore200e->bus->read(&cp_monitor->mon_version),
2946 		       fore200e->bus->read(&cp_monitor->bstat));
2947     }
2948 
2949     if (!left--)
2950 	return sprintf(page,
2951 		       "\n"
2952 		       " device statistics:\n"
2953 		       "  4b5b:\n"
2954 		       "     crc_header_errors:\t\t%10u\n"
2955 		       "     framing_errors:\t\t%10u\n",
2956 		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2957 		       be32_to_cpu(fore200e->stats->phy.framing_errors));
2958 
2959     if (!left--)
2960 	return sprintf(page, "\n"
2961 		       "  OC-3:\n"
2962 		       "     section_bip8_errors:\t%10u\n"
2963 		       "     path_bip8_errors:\t\t%10u\n"
2964 		       "     line_bip24_errors:\t\t%10u\n"
2965 		       "     line_febe_errors:\t\t%10u\n"
2966 		       "     path_febe_errors:\t\t%10u\n"
2967 		       "     corr_hcs_errors:\t\t%10u\n"
2968 		       "     ucorr_hcs_errors:\t\t%10u\n",
2969 		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2970 		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2971 		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2972 		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2973 		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2974 		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2975 		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2976 
2977     if (!left--)
2978 	return sprintf(page,"\n"
2979 		       "   ATM:\t\t\t\t     cells\n"
2980 		       "     TX:\t\t\t%10u\n"
2981 		       "     RX:\t\t\t%10u\n"
2982 		       "     vpi out of range:\t\t%10u\n"
2983 		       "     vpi no conn:\t\t%10u\n"
2984 		       "     vci out of range:\t\t%10u\n"
2985 		       "     vci no conn:\t\t%10u\n",
2986 		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2987 		       be32_to_cpu(fore200e->stats->atm.cells_received),
2988 		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2989 		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2990 		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2991 		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2992 
2993     if (!left--)
2994 	return sprintf(page,"\n"
2995 		       "   AAL0:\t\t\t     cells\n"
2996 		       "     TX:\t\t\t%10u\n"
2997 		       "     RX:\t\t\t%10u\n"
2998 		       "     dropped:\t\t\t%10u\n",
2999 		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
3000 		       be32_to_cpu(fore200e->stats->aal0.cells_received),
3001 		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));
3002 
3003     if (!left--)
3004 	return sprintf(page,"\n"
3005 		       "   AAL3/4:\n"
3006 		       "     SAR sublayer:\t\t     cells\n"
3007 		       "       TX:\t\t\t%10u\n"
3008 		       "       RX:\t\t\t%10u\n"
3009 		       "       dropped:\t\t\t%10u\n"
3010 		       "       CRC errors:\t\t%10u\n"
3011 		       "       protocol errors:\t\t%10u\n\n"
3012 		       "     CS  sublayer:\t\t      PDUs\n"
3013 		       "       TX:\t\t\t%10u\n"
3014 		       "       RX:\t\t\t%10u\n"
3015 		       "       dropped:\t\t\t%10u\n"
3016 		       "       protocol errors:\t\t%10u\n",
3017 		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
3018 		       be32_to_cpu(fore200e->stats->aal34.cells_received),
3019 		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
3020 		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
3021 		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
3022 		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
3023 		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
3024 		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
3025 		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
3026 
3027     if (!left--)
3028 	return sprintf(page,"\n"
3029 		       "   AAL5:\n"
3030 		       "     SAR sublayer:\t\t     cells\n"
3031 		       "       TX:\t\t\t%10u\n"
3032 		       "       RX:\t\t\t%10u\n"
3033 		       "       dropped:\t\t\t%10u\n"
3034 		       "       congestions:\t\t%10u\n\n"
3035 		       "     CS  sublayer:\t\t      PDUs\n"
3036 		       "       TX:\t\t\t%10u\n"
3037 		       "       RX:\t\t\t%10u\n"
3038 		       "       dropped:\t\t\t%10u\n"
3039 		       "       CRC errors:\t\t%10u\n"
3040 		       "       protocol errors:\t\t%10u\n",
3041 		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
3042 		       be32_to_cpu(fore200e->stats->aal5.cells_received),
3043 		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
3044 		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
3045 		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
3046 		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
3047 		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
3048 		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
3049 		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
3050 
3051     if (!left--)
3052 	return sprintf(page,"\n"
3053 		       "   AUX:\t\t       allocation failures\n"
3054 		       "     small b1:\t\t\t%10u\n"
3055 		       "     large b1:\t\t\t%10u\n"
3056 		       "     small b2:\t\t\t%10u\n"
3057 		       "     large b2:\t\t\t%10u\n"
3058 		       "     RX PDUs:\t\t\t%10u\n"
3059 		       "     TX PDUs:\t\t\t%10lu\n",
3060 		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
3061 		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
3062 		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
3063 		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
3064 		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
3065 		       fore200e->tx_sat);
3066 
3067     if (!left--)
3068 	return sprintf(page,"\n"
3069 		       " receive carrier:\t\t\t%s\n",
3070 		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3071 
3072     if (!left--) {
3073         return sprintf(page,"\n"
3074 		       " VCCs:\n  address   VPI VCI   AAL "
3075 		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
3076     }
3077 
3078     for (i = 0; i < NBR_CONNECT; i++) {
3079 
3080 	vcc = fore200e->vc_map[i].vcc;
3081 
3082 	if (vcc == NULL)
3083 	    continue;
3084 
3085 	spin_lock_irqsave(&fore200e->q_lock, flags);
3086 
3087 	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3088 
3089 	    fore200e_vcc = FORE200E_VCC(vcc);
3090 	    ASSERT(fore200e_vcc);
3091 
3092 	    len = sprintf(page,
3093 			  "  %08x  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
3094 			  (u32)(unsigned long)vcc,
3095 			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3096 			  fore200e_vcc->tx_pdu,
3097 			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3098 			  fore200e_vcc->tx_max_pdu,
3099 			  fore200e_vcc->rx_pdu,
3100 			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3101 			  fore200e_vcc->rx_max_pdu);
3102 
3103 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
3104 	    return len;
3105 	}
3106 
3107 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
3108     }
3109 
3110     return 0;
3111 }
3112 
3113 module_init(fore200e_module_init);
3114 module_exit(fore200e_module_cleanup);
3115 
3116 
3117 static const struct atmdev_ops fore200e_ops =
3118 {
3119 	.open       = fore200e_open,
3120 	.close      = fore200e_close,
3121 	.ioctl      = fore200e_ioctl,
3122 	.getsockopt = fore200e_getsockopt,
3123 	.setsockopt = fore200e_setsockopt,
3124 	.send       = fore200e_send,
3125 	.change_qos = fore200e_change_qos,
3126 	.proc_read  = fore200e_proc_read,
3127 	.owner      = THIS_MODULE
3128 };
3129 
3130 
3131 static const struct fore200e_bus fore200e_bus[] = {
3132 #ifdef CONFIG_PCI
3133     { "PCA-200E", "pca200e", 32, 4, 32,
3134       fore200e_pca_read,
3135       fore200e_pca_write,
3136       fore200e_pca_dma_map,
3137       fore200e_pca_dma_unmap,
3138       fore200e_pca_dma_sync_for_cpu,
3139       fore200e_pca_dma_sync_for_device,
3140       fore200e_pca_dma_chunk_alloc,
3141       fore200e_pca_dma_chunk_free,
3142       fore200e_pca_configure,
3143       fore200e_pca_map,
3144       fore200e_pca_reset,
3145       fore200e_pca_prom_read,
3146       fore200e_pca_unmap,
3147       NULL,
3148       fore200e_pca_irq_check,
3149       fore200e_pca_irq_ack,
3150       fore200e_pca_proc_read,
3151     },
3152 #endif
3153 #ifdef CONFIG_SBUS
3154     { "SBA-200E", "sba200e", 32, 64, 32,
3155       fore200e_sba_read,
3156       fore200e_sba_write,
3157       fore200e_sba_dma_map,
3158       fore200e_sba_dma_unmap,
3159       fore200e_sba_dma_sync_for_cpu,
3160       fore200e_sba_dma_sync_for_device,
3161       fore200e_sba_dma_chunk_alloc,
3162       fore200e_sba_dma_chunk_free,
3163       fore200e_sba_configure,
3164       fore200e_sba_map,
3165       fore200e_sba_reset,
3166       fore200e_sba_prom_read,
3167       fore200e_sba_unmap,
3168       fore200e_sba_irq_enable,
3169       fore200e_sba_irq_check,
3170       fore200e_sba_irq_ack,
3171       fore200e_sba_proc_read,
3172     },
3173 #endif
3174     {}
3175 };
3176 
3177 MODULE_LICENSE("GPL");
3178 #ifdef CONFIG_PCI
3179 #ifdef __LITTLE_ENDIAN
3180 MODULE_FIRMWARE("pca200e.bin");
3181 #else
3182 MODULE_FIRMWARE("pca200e_ecd.bin2");
3183 #endif
3184 #endif /* CONFIG_PCI */
3185 #ifdef CONFIG_SBUS
3186 MODULE_FIRMWARE("sba200e_ecd.bin2");
3187 #endif
3188