xref: /openbmc/linux/drivers/atm/fore200e.c (revision aff9d262)
1 /*
2   A FORE Systems 200E-series driver for ATM on Linux.
3   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
4 
5   Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
6 
7   This driver simultaneously supports PCA-200E and SBA-200E adapters
8   on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
9 
10   This program is free software; you can redistribute it and/or modify
11   it under the terms of the GNU General Public License as published by
12   the Free Software Foundation; either version 2 of the License, or
13   (at your option) any later version.
14 
15   This program is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18   GNU General Public License for more details.
19 
20   You should have received a copy of the GNU General Public License
21   along with this program; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23 */
24 
25 
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
39 #include <linux/firmware.h>
40 #include <asm/io.h>
41 #include <asm/string.h>
42 #include <asm/page.h>
43 #include <asm/irq.h>
44 #include <asm/dma.h>
45 #include <asm/byteorder.h>
46 #include <linux/uaccess.h>
47 #include <linux/atomic.h>
48 
49 #ifdef CONFIG_SBUS
50 #include <linux/of.h>
51 #include <linux/of_device.h>
52 #include <asm/idprom.h>
53 #include <asm/openprom.h>
54 #include <asm/oplib.h>
55 #include <asm/pgtable.h>
56 #endif
57 
58 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
59 #define FORE200E_USE_TASKLET
60 #endif
61 
62 #if 0 /* enable the debugging code of the buffer supply queues */
63 #define FORE200E_BSQ_DEBUG
64 #endif
65 
66 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
67 #define FORE200E_52BYTE_AAL0_SDU
68 #endif
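/* note: a 52-byte AAL0 SDU is the 4-byte ATM cell header (HEC stripped)
   followed by the 48-byte cell payload, which is the format atmdump expects */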
69 
70 #include "fore200e.h"
71 #include "suni.h"
72 
73 #define FORE200E_VERSION "0.3e"
74 
75 #define FORE200E         "fore200e: "
76 
77 #if 0 /* override .config */
78 #define CONFIG_ATM_FORE200E_DEBUG 1
79 #endif
80 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
81 #define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
82                                                   printk(FORE200E format, ##args); } while (0)
83 #else
84 #define DPRINTK(level, format, args...)  do {} while (0)
85 #endif
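/* usage sketch: DPRINTK(2, "device %s being mapped in memory\n", fore200e->name)
   prints only when CONFIG_ATM_FORE200E_DEBUG >= 2; with debugging disabled,
   DPRINTK() expands to nothing */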
86 
87 
88 #define FORE200E_ALIGN(addr, alignment) \
89         ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
90 
91 #define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))
92 
93 #define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
94 
95 #define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
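/* illustrative examples: FORE200E_ALIGN(0x1003, 8) yields 5, i.e. the padding
   needed to reach the next 8-byte boundary (not the aligned address itself);
   FORE200E_NEXT_ENTRY(i, 32) advances i modulo 32, wrapping 31 back to 0 */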
96 
97 #if 1
98 #define ASSERT(expr)     if (!(expr)) { \
99 			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
100 				    __func__, __LINE__, #expr); \
101 			     panic(FORE200E "%s", __func__); \
102 			 }
103 #else
104 #define ASSERT(expr)     do {} while (0)
105 #endif
106 
107 
108 static const struct atmdev_ops   fore200e_ops;
109 
110 static LIST_HEAD(fore200e_boards);
111 
112 
113 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
114 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
115 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
116 
117 
118 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
119     { BUFFER_S1_NBR, BUFFER_L1_NBR },
120     { BUFFER_S2_NBR, BUFFER_L2_NBR }
121 };
122 
123 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
124     { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
125     { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
126 };
127 
128 
129 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
130 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
131 #endif
132 
133 
134 #if 0 /* currently unused */
135 static int
136 fore200e_fore2atm_aal(enum fore200e_aal aal)
137 {
138     switch(aal) {
139     case FORE200E_AAL0:  return ATM_AAL0;
140     case FORE200E_AAL34: return ATM_AAL34;
141     case FORE200E_AAL5:  return ATM_AAL5;
142     }
143 
144     return -EINVAL;
145 }
146 #endif
147 
148 
149 static enum fore200e_aal
150 fore200e_atm2fore_aal(int aal)
151 {
152     switch(aal) {
153     case ATM_AAL0:  return FORE200E_AAL0;
154     case ATM_AAL34: return FORE200E_AAL34;
155     case ATM_AAL1:
156     case ATM_AAL2:
157     case ATM_AAL5:  return FORE200E_AAL5;
158     }
159 
160     return -EINVAL;
161 }
162 
163 
164 static char*
165 fore200e_irq_itoa(int irq)
166 {
167     static char str[8];
168     sprintf(str, "%d", irq);
169     return str;
170 }
171 
172 
173 /* allocate and align a chunk of memory intended to hold the data being exchanged
174    between the driver and the adapter (using streaming DVMA) */
175 
176 static int
177 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
178 {
179     unsigned long offset = 0;
180 
181     if (alignment <= sizeof(int))
182 	alignment = 0;
183 
184     chunk->alloc_size = size + alignment;
185     chunk->align_size = size;
186     chunk->direction  = direction;
187 
188     chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
189     if (chunk->alloc_addr == NULL)
190 	return -ENOMEM;
191 
192     if (alignment > 0)
193 	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
194 
195     chunk->align_addr = chunk->alloc_addr + offset;
196 
197     chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
198 
199     return 0;
200 }
201 
202 
203 /* free a chunk of memory */
204 
205 static void
206 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
207 {
208     fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
209 
210     kfree(chunk->alloc_addr);
211 }
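/* illustrative sketch (never compiled): the typical lifetime of a streaming
   chunk, assuming a caller needing a 64-byte buffer aligned on 32 bytes */
#if 0
static int
fore200e_chunk_example(struct fore200e* fore200e)
{
    struct chunk chunk;

    if (fore200e_chunk_alloc(fore200e, &chunk, 64, 32, DMA_TO_DEVICE) < 0)
	return -ENOMEM;

    /* fill chunk.align_addr, then hand chunk.dma_addr over to the adapter */

    fore200e_chunk_free(fore200e, &chunk);
    return 0;
}
#endif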
212 
213 
214 static void
215 fore200e_spin(int msecs)
216 {
217     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
218     while (time_before(jiffies, timeout));
219 }
220 
221 
222 static int
223 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
224 {
225     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
226     int           ok;
227 
228     mb();
229     do {
230 	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
231 	    break;
232 
233     } while (time_before(jiffies, timeout));
234 
235 #if 1
236     if (!ok) {
237 	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
238 	       *addr, val);
239     }
240 #endif
241 
242     return ok;
243 }
244 
245 
246 static int
247 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
248 {
249     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
250     int           ok;
251 
252     do {
253 	if ((ok = (fore200e->bus->read(addr) == val)))
254 	    break;
255 
256     } while (time_before(jiffies, timeout));
257 
258 #if 1
259     if (!ok) {
260 	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
261 	       fore200e->bus->read(addr), val);
262     }
263 #endif
264 
265     return ok;
266 }
267 
268 
269 static void
270 fore200e_free_rx_buf(struct fore200e* fore200e)
271 {
272     int scheme, magn, nbr;
273     struct buffer* buffer;
274 
275     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
276 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
277 
278 	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
279 
280 		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
281 
282 		    struct chunk* data = &buffer[ nbr ].data;
283 
284 		    if (data->alloc_addr != NULL)
285 			fore200e_chunk_free(fore200e, data);
286 		}
287 	    }
288 	}
289     }
290 }
291 
292 
293 static void
294 fore200e_uninit_bs_queue(struct fore200e* fore200e)
295 {
296     int scheme, magn;
297 
298     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
299 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
300 
301 	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
302 	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
303 
304 	    if (status->alloc_addr)
305 		fore200e->bus->dma_chunk_free(fore200e, status);
306 
307 	    if (rbd_block->alloc_addr)
308 		fore200e->bus->dma_chunk_free(fore200e, rbd_block);
309 	}
310     }
311 }
312 
313 
314 static int
315 fore200e_reset(struct fore200e* fore200e, int diag)
316 {
317     int ok;
318 
319     fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
320 
321     fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
322 
323     fore200e->bus->reset(fore200e);
324 
325     if (diag) {
326 	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
327 	if (ok == 0) {
328 
329 	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
330 	    return -ENODEV;
331 	}
332 
333 	printk(FORE200E "device %s self-test passed\n", fore200e->name);
334 
335 	fore200e->state = FORE200E_STATE_RESET;
336     }
337 
338     return 0;
339 }
340 
341 
342 static void
343 fore200e_shutdown(struct fore200e* fore200e)
344 {
345     printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
346 	   fore200e->name, fore200e->phys_base,
347 	   fore200e_irq_itoa(fore200e->irq));
348 
349     if (fore200e->state > FORE200E_STATE_RESET) {
350 	/* first, reset the board to prevent further interrupts or data transfers */
351 	fore200e_reset(fore200e, 0);
352     }
353 
354     /* then, release all allocated resources */
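    /* the cases below mirror the setup sequence and intentionally fall through,
       so teardown happens in reverse order of initialization */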
355     switch(fore200e->state) {
356 
357     case FORE200E_STATE_COMPLETE:
358 	kfree(fore200e->stats);
359 
360 	/* fall through */
361     case FORE200E_STATE_IRQ:
362 	free_irq(fore200e->irq, fore200e->atm_dev);
363 
364 	/* fall through */
365     case FORE200E_STATE_ALLOC_BUF:
366 	fore200e_free_rx_buf(fore200e);
367 
368 	/* fall through */
369     case FORE200E_STATE_INIT_BSQ:
370 	fore200e_uninit_bs_queue(fore200e);
371 
372 	/* fall through */
373     case FORE200E_STATE_INIT_RXQ:
374 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
375 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
376 
377 	/* fall through */
378     case FORE200E_STATE_INIT_TXQ:
379 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
380 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
381 
382 	/* fall through */
383     case FORE200E_STATE_INIT_CMDQ:
384 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
385 
386 	/* fall through */
387     case FORE200E_STATE_INITIALIZE:
388 	/* nothing to do for that state */
389 
390     case FORE200E_STATE_START_FW:
391 	/* nothing to do for that state */
392 
393     case FORE200E_STATE_RESET:
394 	/* nothing to do for that state */
395 
396     case FORE200E_STATE_MAP:
397 	fore200e->bus->unmap(fore200e);
398 
399 	/* fall through */
400     case FORE200E_STATE_CONFIGURE:
401 	/* nothing to do for that state */
402 
403     case FORE200E_STATE_REGISTER:
404 	/* XXX shouldn't we *start* by deregistering the device? */
405 	atm_dev_deregister(fore200e->atm_dev);
406 
407     case FORE200E_STATE_BLANK:
408 	/* nothing to do for that state */
409 	break;
410     }
411 }
412 
413 
414 #ifdef CONFIG_PCI
415 
416 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
417 {
418     /* on big-endian hosts, the board is configured to convert
419        the endianness of slave RAM accesses */
420     return le32_to_cpu(readl(addr));
421 }
422 
423 
424 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
425 {
426     /* on big-endian hosts, the board is configured to convert
427        the endianness of slave RAM accesses */
428     writel(cpu_to_le32(val), addr);
429 }
430 
431 
432 static u32
433 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
434 {
435     u32 dma_addr = dma_map_single(fore200e->dev, virt_addr, size, direction);
436 
437     DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d,  --> dma_addr = 0x%08x\n",
438 	    virt_addr, size, direction, dma_addr);
439 
440     return dma_addr;
441 }
442 
443 
444 static void
445 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
446 {
447     DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
448 	    dma_addr, size, direction);
449 
450     dma_unmap_single(fore200e->dev, dma_addr, size, direction);
451 }
452 
453 
454 static void
455 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
456 {
457     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
458 
459     dma_sync_single_for_cpu(fore200e->dev, dma_addr, size, direction);
460 }
461 
462 static void
463 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
464 {
465     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
466 
467     dma_sync_single_for_device(fore200e->dev, dma_addr, size, direction);
468 }
469 
470 
471 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
472    (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
473 
474 static int
475 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
476 			     int size, int nbr, int alignment)
477 {
478     /* returned chunks are page-aligned */
479     chunk->alloc_size = size * nbr;
480     chunk->alloc_addr = dma_alloc_coherent(fore200e->dev,
481 					   chunk->alloc_size,
482 					   &chunk->dma_addr,
483 					   GFP_KERNEL);
484 
485     if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
486 	return -ENOMEM;
487 
488     chunk->align_addr = chunk->alloc_addr;
489 
490     return 0;
491 }
492 
493 
494 /* free a DMA consistent chunk of memory */
495 
496 static void
497 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
498 {
499     dma_free_coherent(fore200e->dev,
500 			chunk->alloc_size,
501 			chunk->alloc_addr,
502 			chunk->dma_addr);
503 }
504 
505 
506 static int
507 fore200e_pca_irq_check(struct fore200e* fore200e)
508 {
509     /* this is a 1-bit register */
510     int irq_posted = readl(fore200e->regs.pca.psr);
511 
512 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
513     if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
514 	DPRINTK(2, "FIFO OUT full, device %d\n", fore200e->atm_dev->number);
515     }
516 #endif
517 
518     return irq_posted;
519 }
520 
521 
522 static void
523 fore200e_pca_irq_ack(struct fore200e* fore200e)
524 {
525     writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
526 }
527 
528 
529 static void
530 fore200e_pca_reset(struct fore200e* fore200e)
531 {
532     writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
533     fore200e_spin(10);
534     writel(0, fore200e->regs.pca.hcr);
535 }
536 
537 
538 static int fore200e_pca_map(struct fore200e* fore200e)
539 {
540     DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
541 
542     fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
543 
544     if (fore200e->virt_base == NULL) {
545 	printk(FORE200E "can't map device %s\n", fore200e->name);
546 	return -EFAULT;
547     }
548 
549     DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
550 
551     /* gain access to the PCA specific registers  */
552     fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
553     fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
554     fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
555 
556     fore200e->state = FORE200E_STATE_MAP;
557     return 0;
558 }
559 
560 
561 static void
562 fore200e_pca_unmap(struct fore200e* fore200e)
563 {
564     DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
565 
566     if (fore200e->virt_base != NULL)
567 	iounmap(fore200e->virt_base);
568 }
569 
570 
571 static int fore200e_pca_configure(struct fore200e *fore200e)
572 {
573     struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
574     u8              master_ctrl, latency;
575 
576     DPRINTK(2, "device %s being configured\n", fore200e->name);
577 
578     if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
579 	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
580 	return -EIO;
581     }
582 
583     pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
584 
585     master_ctrl = master_ctrl
586 #if defined(__BIG_ENDIAN)
587 	/* request the PCA board to convert the endianness of slave RAM accesses */
588 	| PCA200E_CTRL_CONVERT_ENDIAN
589 #endif
590 #if 0
591         | PCA200E_CTRL_DIS_CACHE_RD
592         | PCA200E_CTRL_DIS_WRT_INVAL
593         | PCA200E_CTRL_ENA_CONT_REQ_MODE
594         | PCA200E_CTRL_2_CACHE_WRT_INVAL
595 #endif
596 	| PCA200E_CTRL_LARGE_PCI_BURSTS;
597 
598     pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
599 
600     /* raise latency from 32 (default) to 192, as this seems to prevent NIC
601        lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
602        this may impact the performance of other PCI devices on the same bus, though */
603     latency = 192;
604     pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
605 
606     fore200e->state = FORE200E_STATE_CONFIGURE;
607     return 0;
608 }
609 
610 
611 static int __init
612 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
613 {
614     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
615     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
616     struct prom_opcode      opcode;
617     int                     ok;
618     u32                     prom_dma;
619 
620     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
621 
622     opcode.opcode = OPCODE_GET_PROM;
623     opcode.pad    = 0;
624 
625     prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
626 
627     fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
628 
629     *entry->status = STATUS_PENDING;
630 
631     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
632 
633     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
634 
635     *entry->status = STATUS_FREE;
636 
637     fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
638 
639     if (ok == 0) {
640 	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
641 	return -EIO;
642     }
643 
644 #if defined(__BIG_ENDIAN)
645 
646 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
647 
648     /* MAC address is stored as little-endian */
649     swap_here(&prom->mac_addr[0]);
650     swap_here(&prom->mac_addr[4]);
651 #endif
652 
653     return 0;
654 }
655 
656 
657 static int
658 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
659 {
660     struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
661 
662     return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
663 		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
664 }
665 
666 static const struct fore200e_bus fore200e_pci_ops = {
667 	.model_name		= "PCA-200E",
668 	.proc_name		= "pca200e",
669 	.descr_alignment	= 32,
670 	.buffer_alignment	= 4,
671 	.status_alignment	= 32,
672 	.read			= fore200e_pca_read,
673 	.write			= fore200e_pca_write,
674 	.dma_map		= fore200e_pca_dma_map,
675 	.dma_unmap		= fore200e_pca_dma_unmap,
676 	.dma_sync_for_cpu	= fore200e_pca_dma_sync_for_cpu,
677 	.dma_sync_for_device	= fore200e_pca_dma_sync_for_device,
678 	.dma_chunk_alloc	= fore200e_pca_dma_chunk_alloc,
679 	.dma_chunk_free		= fore200e_pca_dma_chunk_free,
680 	.configure		= fore200e_pca_configure,
681 	.map			= fore200e_pca_map,
682 	.reset			= fore200e_pca_reset,
683 	.prom_read		= fore200e_pca_prom_read,
684 	.unmap			= fore200e_pca_unmap,
685 	.irq_check		= fore200e_pca_irq_check,
686 	.irq_ack		= fore200e_pca_irq_ack,
687 	.proc_read		= fore200e_pca_proc_read,
688 };
689 #endif /* CONFIG_PCI */
690 
691 #ifdef CONFIG_SBUS
692 
693 static u32 fore200e_sba_read(volatile u32 __iomem *addr)
694 {
695     return sbus_readl(addr);
696 }
697 
698 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
699 {
700     sbus_writel(val, addr);
701 }
702 
703 static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
704 {
705 	u32 dma_addr;
706 
707 	dma_addr = dma_map_single(fore200e->dev, virt_addr, size, direction);
708 
709 	DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
710 		virt_addr, size, direction, dma_addr);
711 
712 	return dma_addr;
713 }
714 
715 static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
716 {
717 	DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
718 		dma_addr, size, direction);
719 
720 	dma_unmap_single(fore200e->dev, dma_addr, size, direction);
721 }
722 
723 static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
724 {
725 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
726 
727 	dma_sync_single_for_cpu(fore200e->dev, dma_addr, size, direction);
728 }
729 
730 static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
731 {
732 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
733 
734 	dma_sync_single_for_device(fore200e->dev, dma_addr, size, direction);
735 }
736 
737 /* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
738  * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter.
739  */
740 static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
741 					int size, int nbr, int alignment)
742 {
743 	chunk->alloc_size = chunk->align_size = size * nbr;
744 
745 	/* returned chunks are page-aligned */
746 	chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
747 					       &chunk->dma_addr, GFP_ATOMIC);
748 
749 	if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
750 		return -ENOMEM;
751 
752 	chunk->align_addr = chunk->alloc_addr;
753 
754 	return 0;
755 }
756 
757 /* free a DVMA consistent chunk of memory */
758 static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
759 {
760 	dma_free_coherent(fore200e->dev, chunk->alloc_size,
761 			  chunk->alloc_addr, chunk->dma_addr);
762 }
763 
764 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
765 {
766 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
767 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
768 }
769 
770 static int fore200e_sba_irq_check(struct fore200e *fore200e)
771 {
772 	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
773 }
774 
775 static void fore200e_sba_irq_ack(struct fore200e *fore200e)
776 {
777 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
778 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
779 }
780 
781 static void fore200e_sba_reset(struct fore200e *fore200e)
782 {
783 	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
784 	fore200e_spin(10);
785 	fore200e->bus->write(0, fore200e->regs.sba.hcr);
786 }
787 
788 static int __init fore200e_sba_map(struct fore200e *fore200e)
789 {
790 	struct platform_device *op = to_platform_device(fore200e->dev);
791 	unsigned int bursts;
792 
793 	/* gain access to the SBA specific registers  */
794 	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
795 	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
796 	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
797 	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
798 
799 	if (!fore200e->virt_base) {
800 		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
801 		return -EFAULT;
802 	}
803 
804 	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
805 
806 	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
807 
808 	/* get the supported DVMA burst sizes */
809 	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
810 
811 	if (sbus_can_dma_64bit())
812 		sbus_set_sbus64(&op->dev, bursts);
813 
814 	fore200e->state = FORE200E_STATE_MAP;
815 	return 0;
816 }
817 
818 static void fore200e_sba_unmap(struct fore200e *fore200e)
819 {
820 	struct platform_device *op = to_platform_device(fore200e->dev);
821 
822 	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
823 	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
824 	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
825 	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
826 }
827 
828 static int __init fore200e_sba_configure(struct fore200e *fore200e)
829 {
830 	fore200e->state = FORE200E_STATE_CONFIGURE;
831 	return 0;
832 }
833 
834 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
835 {
836 	struct platform_device *op = to_platform_device(fore200e->dev);
837 	const u8 *prop;
838 	int len;
839 
840 	prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
841 	if (!prop)
842 		return -ENODEV;
843 	memcpy(&prom->mac_addr[4], prop, 4);
844 
845 	prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
846 	if (!prop)
847 		return -ENODEV;
848 	memcpy(&prom->mac_addr[2], prop, 4);
849 
850 	prom->serial_number = of_getintprop_default(op->dev.of_node,
851 						    "serialnumber", 0);
852 	prom->hw_revision = of_getintprop_default(op->dev.of_node,
853 						  "promversion", 0);
854 
855 	return 0;
856 }
857 
858 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
859 {
860 	struct platform_device *op = to_platform_device(fore200e->dev);
861 	const struct linux_prom_registers *regs;
862 
863 	regs = of_get_property(op->dev.of_node, "reg", NULL);
864 
865 	return sprintf(page, "   SBUS slot/device:\t\t%d/'%s'\n",
866 		       (regs ? regs->which_io : 0), op->dev.of_node->name);
867 }
868 
869 static const struct fore200e_bus fore200e_sbus_ops = {
870 	.model_name		= "SBA-200E",
871 	.proc_name		= "sba200e",
872 	.descr_alignment	= 32,
873 	.buffer_alignment	= 64,
874 	.status_alignment	= 32,
875 	.read			= fore200e_sba_read,
876 	.write			= fore200e_sba_write,
877 	.dma_map		= fore200e_sba_dma_map,
878 	.dma_unmap		= fore200e_sba_dma_unmap,
879 	.dma_sync_for_cpu	= fore200e_sba_dma_sync_for_cpu,
880 	.dma_sync_for_device	= fore200e_sba_dma_sync_for_device,
881 	.dma_chunk_alloc	= fore200e_sba_dma_chunk_alloc,
882 	.dma_chunk_free		= fore200e_sba_dma_chunk_free,
883 	.configure		= fore200e_sba_configure,
884 	.map			= fore200e_sba_map,
885 	.reset			= fore200e_sba_reset,
886 	.prom_read		= fore200e_sba_prom_read,
887 	.unmap			= fore200e_sba_unmap,
888 	.irq_enable		= fore200e_sba_irq_enable,
889 	.irq_check		= fore200e_sba_irq_check,
890 	.irq_ack		= fore200e_sba_irq_ack,
891 	.proc_read		= fore200e_sba_proc_read,
892 };
893 #endif /* CONFIG_SBUS */
894 
895 static void
896 fore200e_tx_irq(struct fore200e* fore200e)
897 {
898     struct host_txq*        txq = &fore200e->host_txq;
899     struct host_txq_entry*  entry;
900     struct atm_vcc*         vcc;
901     struct fore200e_vc_map* vc_map;
902 
903     if (fore200e->host_txq.txing == 0)
904 	return;
905 
906     for (;;) {
907 
908 	entry = &txq->host_entry[ txq->tail ];
909 
910         if ((*entry->status & STATUS_COMPLETE) == 0) {
911 	    break;
912 	}
913 
914 	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
915 		entry, txq->tail, entry->vc_map, entry->skb);
916 
917 	/* free copy of misaligned data */
918 	kfree(entry->data);
919 
920 	/* remove DMA mapping */
921 	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
922 				 DMA_TO_DEVICE);
923 
924 	vc_map = entry->vc_map;
925 
926 	/* vcc closed since the time the entry was submitted for tx? */
927 	if ((vc_map->vcc == NULL) ||
928 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
929 
930 	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
931 		    fore200e->atm_dev->number);
932 
933 	    dev_kfree_skb_any(entry->skb);
934 	}
935 	else {
936 	    ASSERT(vc_map->vcc);
937 
938 	    /* vcc closed then immediately re-opened? */
939 	    if (vc_map->incarn != entry->incarn) {
940 
941 		/* when a vcc is closed, some PDUs may still be pending in the tx queue.
942 		   if the same vcc is immediately re-opened, those pending PDUs must
943 		   not be popped after the completion of their emission, as they refer
944 		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
945 		   would be decremented by the size of the (unrelated) skb, possibly
946 		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
947 		   we thus bind the tx entry to the current incarnation of the vcc
948 		   when the entry is submitted for tx. when the tx later completes,
949 		   if the incarnation number of the tx entry does not match that
950 		   of the vcc, the vcc has been closed and then re-opened, so we
951 		   simply drop the skb here. */
952 
953 		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
954 			fore200e->atm_dev->number);
955 
956 		dev_kfree_skb_any(entry->skb);
957 	    }
958 	    else {
959 		vcc = vc_map->vcc;
960 		ASSERT(vcc);
961 
962 		/* notify tx completion */
963 		if (vcc->pop) {
964 		    vcc->pop(vcc, entry->skb);
965 		}
966 		else {
967 		    dev_kfree_skb_any(entry->skb);
968 		}
969 
970 		/* check error condition */
971 		if (*entry->status & STATUS_ERROR)
972 		    atomic_inc(&vcc->stats->tx_err);
973 		else
974 		    atomic_inc(&vcc->stats->tx);
975 	    }
976 	}
977 
978 	*entry->status = STATUS_FREE;
979 
980 	fore200e->host_txq.txing--;
981 
982 	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
983     }
984 }
985 
986 
987 #ifdef FORE200E_BSQ_DEBUG
988 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
989 {
990     struct buffer* buffer;
991     int count = 0;
992 
993     buffer = bsq->freebuf;
994     while (buffer) {
995 
996 	if (buffer->supplied) {
997 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
998 		   where, scheme, magn, buffer->index);
999 	}
1000 
1001 	if (buffer->magn != magn) {
1002 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1003 		   where, scheme, magn, buffer->index, buffer->magn);
1004 	}
1005 
1006 	if (buffer->scheme != scheme) {
1007 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1008 		   where, scheme, magn, buffer->index, buffer->scheme);
1009 	}
1010 
1011 	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1012 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1013 		   where, scheme, magn, buffer->index);
1014 	}
1015 
1016 	count++;
1017 	buffer = buffer->next;
1018     }
1019 
1020     if (count != bsq->freebuf_count) {
1021 	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1022 	       where, scheme, magn, count, bsq->freebuf_count);
1023     }
1024     return 0;
1025 }
1026 #endif
1027 
1028 
1029 static void
1030 fore200e_supply(struct fore200e* fore200e)
1031 {
1032     int  scheme, magn, i;
1033 
1034     struct host_bsq*       bsq;
1035     struct host_bsq_entry* entry;
1036     struct buffer*         buffer;
1037 
1038     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1039 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1040 
1041 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
1042 
1043 #ifdef FORE200E_BSQ_DEBUG
1044 	    bsq_audit(1, bsq, scheme, magn);
1045 #endif
1046 	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1047 
1048 		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1049 			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1050 
1051 		entry = &bsq->host_entry[ bsq->head ];
1052 
1053 		for (i = 0; i < RBD_BLK_SIZE; i++) {
1054 
1055 		    /* take the first buffer in the free buffer list */
1056 		    buffer = bsq->freebuf;
1057 		    if (!buffer) {
1058 			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1059 			       scheme, magn, bsq->freebuf_count);
1060 			return;
1061 		    }
1062 		    bsq->freebuf = buffer->next;
1063 
1064 #ifdef FORE200E_BSQ_DEBUG
1065 		    if (buffer->supplied)
1066 			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1067 			       scheme, magn, buffer->index);
1068 		    buffer->supplied = 1;
1069 #endif
1070 		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1071 		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
1072 		}
1073 
1074 		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1075 
1076 		/* decrease the number of free rx buffers accordingly */
1077 		bsq->freebuf_count -= RBD_BLK_SIZE;
1078 
1079 		*entry->status = STATUS_PENDING;
1080 		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1081 	    }
1082 	}
1083     }
1084 }
1085 
1086 
1087 static int
1088 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1089 {
1090     struct sk_buff*      skb;
1091     struct buffer*       buffer;
1092     struct fore200e_vcc* fore200e_vcc;
1093     int                  i, pdu_len = 0;
1094 #ifdef FORE200E_52BYTE_AAL0_SDU
1095     u32                  cell_header = 0;
1096 #endif
1097 
1098     ASSERT(vcc);
1099 
1100     fore200e_vcc = FORE200E_VCC(vcc);
1101     ASSERT(fore200e_vcc);
1102 
1103 #ifdef FORE200E_52BYTE_AAL0_SDU
1104     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1105 
1106 	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1107 	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1108                       (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1109                       (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1110                        rpd->atm_header.clp;
1111 	pdu_len = 4;
1112     }
1113 #endif
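    /* the cell_header word rebuilt above follows the standard UNI cell layout:
       GFC in bits 31-28, VPI in 27-20, VCI in 19-4, PTI in 3-1, CLP in bit 0 */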
1114 
1115     /* compute total PDU length */
1116     for (i = 0; i < rpd->nseg; i++)
1117 	pdu_len += rpd->rsd[ i ].length;
1118 
1119     skb = alloc_skb(pdu_len, GFP_ATOMIC);
1120     if (skb == NULL) {
1121 	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1122 
1123 	atomic_inc(&vcc->stats->rx_drop);
1124 	return -ENOMEM;
1125     }
1126 
1127     __net_timestamp(skb);
1128 
1129 #ifdef FORE200E_52BYTE_AAL0_SDU
1130     if (cell_header) {
1131 	*((u32*)skb_put(skb, 4)) = cell_header;
1132     }
1133 #endif
1134 
1135     /* reassemble segments */
1136     for (i = 0; i < rpd->nseg; i++) {
1137 
1138 	/* rebuild rx buffer address from rsd handle */
1139 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1140 
1141 	/* Make device DMA transfer visible to CPU.  */
1142 	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1143 
1144 	skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
1145 
1146 	/* Now let the device get at it again.  */
1147 	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1148     }
1149 
1150     DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1151 
1152     if (pdu_len < fore200e_vcc->rx_min_pdu)
1153 	fore200e_vcc->rx_min_pdu = pdu_len;
1154     if (pdu_len > fore200e_vcc->rx_max_pdu)
1155 	fore200e_vcc->rx_max_pdu = pdu_len;
1156     fore200e_vcc->rx_pdu++;
1157 
1158     /* push PDU */
1159     if (atm_charge(vcc, skb->truesize) == 0) {
1160 
1161 	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1162 		vcc->itf, vcc->vpi, vcc->vci);
1163 
1164 	dev_kfree_skb_any(skb);
1165 
1166 	atomic_inc(&vcc->stats->rx_drop);
1167 	return -ENOMEM;
1168     }
1169 
1170     vcc->push(vcc, skb);
1171     atomic_inc(&vcc->stats->rx);
1172 
1173     return 0;
1174 }
1175 
1176 
1177 static void
1178 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1179 {
1180     struct host_bsq* bsq;
1181     struct buffer*   buffer;
1182     int              i;
1183 
1184     for (i = 0; i < rpd->nseg; i++) {
1185 
1186 	/* rebuild rx buffer address from rsd handle */
1187 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1188 
1189 	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1190 
1191 #ifdef FORE200E_BSQ_DEBUG
1192 	bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1193 
1194 	if (buffer->supplied == 0)
1195 	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1196 		   buffer->scheme, buffer->magn, buffer->index);
1197 	buffer->supplied = 0;
1198 #endif
1199 
1200 	/* re-insert the buffer into the free buffer list */
1201 	buffer->next = bsq->freebuf;
1202 	bsq->freebuf = buffer;
1203 
1204 	/* then increment the number of free rx buffers */
1205 	bsq->freebuf_count++;
1206     }
1207 }
1208 
1209 
1210 static void
1211 fore200e_rx_irq(struct fore200e* fore200e)
1212 {
1213     struct host_rxq*        rxq = &fore200e->host_rxq;
1214     struct host_rxq_entry*  entry;
1215     struct atm_vcc*         vcc;
1216     struct fore200e_vc_map* vc_map;
1217 
1218     for (;;) {
1219 
1220 	entry = &rxq->host_entry[ rxq->head ];
1221 
1222 	/* no more received PDUs */
1223 	if ((*entry->status & STATUS_COMPLETE) == 0)
1224 	    break;
1225 
1226 	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1227 
1228 	if ((vc_map->vcc == NULL) ||
1229 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1230 
1231 	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1232 		    fore200e->atm_dev->number,
1233 		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1234 	}
1235 	else {
1236 	    vcc = vc_map->vcc;
1237 	    ASSERT(vcc);
1238 
1239 	    if ((*entry->status & STATUS_ERROR) == 0) {
1240 
1241 		fore200e_push_rpd(fore200e, vcc, entry->rpd);
1242 	    }
1243 	    else {
1244 		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1245 			fore200e->atm_dev->number,
1246 			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1247 		atomic_inc(&vcc->stats->rx_err);
1248 	    }
1249 	}
1250 
1251 	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1252 
1253 	fore200e_collect_rpd(fore200e, entry->rpd);
1254 
1255 	/* rewrite the rpd address to ack the received PDU */
1256 	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1257 	*entry->status = STATUS_FREE;
1258 
1259 	fore200e_supply(fore200e);
1260     }
1261 }
1262 
1263 
1264 #ifndef FORE200E_USE_TASKLET
1265 static void
1266 fore200e_irq(struct fore200e* fore200e)
1267 {
1268     unsigned long flags;
1269 
1270     spin_lock_irqsave(&fore200e->q_lock, flags);
1271     fore200e_rx_irq(fore200e);
1272     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1273 
1274     spin_lock_irqsave(&fore200e->q_lock, flags);
1275     fore200e_tx_irq(fore200e);
1276     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1277 }
1278 #endif
1279 
1280 
1281 static irqreturn_t
1282 fore200e_interrupt(int irq, void* dev)
1283 {
1284     struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1285 
1286     if (fore200e->bus->irq_check(fore200e) == 0) {
1287 
1288 	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1289 	return IRQ_NONE;
1290     }
1291     DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1292 
1293 #ifdef FORE200E_USE_TASKLET
1294     tasklet_schedule(&fore200e->tx_tasklet);
1295     tasklet_schedule(&fore200e->rx_tasklet);
1296 #else
1297     fore200e_irq(fore200e);
1298 #endif
1299 
1300     fore200e->bus->irq_ack(fore200e);
1301     return IRQ_HANDLED;
1302 }
1303 
1304 
1305 #ifdef FORE200E_USE_TASKLET
1306 static void
1307 fore200e_tx_tasklet(unsigned long data)
1308 {
1309     struct fore200e* fore200e = (struct fore200e*) data;
1310     unsigned long flags;
1311 
1312     DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1313 
1314     spin_lock_irqsave(&fore200e->q_lock, flags);
1315     fore200e_tx_irq(fore200e);
1316     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1317 }
1318 
1319 
1320 static void
1321 fore200e_rx_tasklet(unsigned long data)
1322 {
1323     struct fore200e* fore200e = (struct fore200e*) data;
1324     unsigned long    flags;
1325 
1326     DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1327 
1328     spin_lock_irqsave(&fore200e->q_lock, flags);
1329     fore200e_rx_irq((struct fore200e*) data);
1330     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1331 }
1332 #endif
1333 
1334 
1335 static int
1336 fore200e_select_scheme(struct atm_vcc* vcc)
1337 {
1338     /* fairly balance the VCs over (identical) buffer schemes */
1339     int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1340 
1341     DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1342 	    vcc->itf, vcc->vpi, vcc->vci, scheme);
1343 
1344     return scheme;
1345 }
1346 
1347 
1348 static int
1349 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1350 {
1351     struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
1352     struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
1353     struct activate_opcode   activ_opcode;
1354     struct deactivate_opcode deactiv_opcode;
1355     struct vpvc              vpvc;
1356     int                      ok;
1357     enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);
1358 
1359     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1360 
1361     if (activate) {
1362 	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1363 
1364 	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1365 	activ_opcode.aal    = aal;
1366 	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1367 	activ_opcode.pad    = 0;
1368     }
1369     else {
1370 	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1371 	deactiv_opcode.pad    = 0;
1372     }
1373 
1374     vpvc.vci = vcc->vci;
1375     vpvc.vpi = vcc->vpi;
1376 
1377     *entry->status = STATUS_PENDING;
1378 
1379     if (activate) {
1380 
1381 #ifdef FORE200E_52BYTE_AAL0_SDU
1382 	mtu = 48;
1383 #endif
1384 	/* the MTU is not used by the cp, except in the case of AAL0 */
1385 	fore200e->bus->write(mtu,                        &entry->cp_entry->cmd.activate_block.mtu);
1386 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1387 	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1388     }
1389     else {
1390 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1391 	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1392     }
1393 
1394     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1395 
1396     *entry->status = STATUS_FREE;
1397 
1398     if (ok == 0) {
1399 	printk(FORE200E "unable to %s VC %d.%d.%d\n",
1400 	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1401 	return -EIO;
1402     }
1403 
1404     DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1405 	    activate ? "open" : "clos");
1406 
1407     return 0;
1408 }
1409 
1410 
1411 #define FORE200E_MAX_BACK2BACK_CELLS 255    /* XXX depends on CDVT */
1412 
1413 static void
1414 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1415 {
1416     if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1417 
1418 	/* compute the data cells to idle cells ratio from the tx PCR */
1419 	rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1420 	rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1421     }
1422     else {
1423 	/* disable rate control */
1424 	rate->data_cells = rate->idle_cells = 0;
1425     }
1426 }
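/* illustrative example: for a requested tx PCR of ATM_OC3_PCR / 2, roughly half
   of each 255-cell window carries data: data_cells = 127, idle_cells = 128 */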
1427 
1428 
1429 static int
1430 fore200e_open(struct atm_vcc *vcc)
1431 {
1432     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1433     struct fore200e_vcc*    fore200e_vcc;
1434     struct fore200e_vc_map* vc_map;
1435     unsigned long	    flags;
1436     int			    vci = vcc->vci;
1437     short		    vpi = vcc->vpi;
1438 
1439     ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1440     ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1441 
1442     spin_lock_irqsave(&fore200e->q_lock, flags);
1443 
1444     vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1445     if (vc_map->vcc) {
1446 
1447 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1448 
1449 	printk(FORE200E "VC %d.%d.%d already in use\n",
1450 	       fore200e->atm_dev->number, vpi, vci);
1451 
1452 	return -EINVAL;
1453     }
1454 
1455     vc_map->vcc = vcc;
1456 
1457     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1458 
1459     fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1460     if (fore200e_vcc == NULL) {
1461 	vc_map->vcc = NULL;
1462 	return -ENOMEM;
1463     }
1464 
1465     DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1466 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1467 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1468 	    fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1469 	    vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1470 	    fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1471 	    vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1472 
1473     /* pseudo-CBR bandwidth requested? */
1474     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1475 
1476 	mutex_lock(&fore200e->rate_mtx);
1477 	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1478 	    mutex_unlock(&fore200e->rate_mtx);
1479 
1480 	    kfree(fore200e_vcc);
1481 	    vc_map->vcc = NULL;
1482 	    return -EAGAIN;
1483 	}
1484 
1485 	/* reserve bandwidth */
1486 	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1487 	mutex_unlock(&fore200e->rate_mtx);
1488     }
1489 
1490     vcc->itf = vcc->dev->number;
1491 
1492     set_bit(ATM_VF_PARTIAL,&vcc->flags);
1493     set_bit(ATM_VF_ADDR, &vcc->flags);
1494 
1495     vcc->dev_data = fore200e_vcc;
1496 
1497     if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1498 
1499 	vc_map->vcc = NULL;
1500 
1501 	clear_bit(ATM_VF_ADDR, &vcc->flags);
1502 	clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1503 
1504 	vcc->dev_data = NULL;
1505 
1506 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1507 
1508 	kfree(fore200e_vcc);
1509 	return -EINVAL;
1510     }
1511 
1512     /* compute rate control parameters */
1513     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1514 
1515 	fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1516 	set_bit(ATM_VF_HASQOS, &vcc->flags);
1517 
1518 	DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1519 		vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1520 		vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1521 		fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1522     }
1523 
1524     fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1525     fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1526     fore200e_vcc->tx_pdu     = fore200e_vcc->rx_pdu     = 0;
1527 
1528     /* new incarnation of the vcc */
1529     vc_map->incarn = ++fore200e->incarn_count;
1530 
1531     /* VC unusable before this flag is set */
1532     set_bit(ATM_VF_READY, &vcc->flags);
1533 
1534     return 0;
1535 }
1536 
1537 
1538 static void
1539 fore200e_close(struct atm_vcc* vcc)
1540 {
1541     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1542     struct fore200e_vcc*    fore200e_vcc;
1543     struct fore200e_vc_map* vc_map;
1544     unsigned long           flags;
1545 
1546     ASSERT(vcc);
1547     ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1548     ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1549 
1550     DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1551 
1552     clear_bit(ATM_VF_READY, &vcc->flags);
1553 
1554     fore200e_activate_vcin(fore200e, 0, vcc, 0);
1555 
1556     spin_lock_irqsave(&fore200e->q_lock, flags);
1557 
1558     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1559 
1560     /* the vc is no longer considered as "in use" by fore200e_open() */
1561     vc_map->vcc = NULL;
1562 
1563     vcc->itf = vcc->vci = vcc->vpi = 0;
1564 
1565     fore200e_vcc = FORE200E_VCC(vcc);
1566     vcc->dev_data = NULL;
1567 
1568     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1569 
1570     /* release reserved bandwidth, if any */
1571     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1572 
1573 	mutex_lock(&fore200e->rate_mtx);
1574 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1575 	mutex_unlock(&fore200e->rate_mtx);
1576 
1577 	clear_bit(ATM_VF_HASQOS, &vcc->flags);
1578     }
1579 
1580     clear_bit(ATM_VF_ADDR, &vcc->flags);
1581     clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1582 
1583     ASSERT(fore200e_vcc);
1584     kfree(fore200e_vcc);
1585 }
1586 
1587 
1588 static int
1589 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1590 {
1591     struct fore200e*        fore200e     = FORE200E_DEV(vcc->dev);
1592     struct fore200e_vcc*    fore200e_vcc = FORE200E_VCC(vcc);
1593     struct fore200e_vc_map* vc_map;
1594     struct host_txq*        txq          = &fore200e->host_txq;
1595     struct host_txq_entry*  entry;
1596     struct tpd*             tpd;
1597     struct tpd_haddr        tpd_haddr;
1598     int                     retry        = CONFIG_ATM_FORE200E_TX_RETRY;
1599     int                     tx_copy      = 0;
1600     int                     tx_len       = skb->len;
1601     u32*                    cell_header  = NULL;
1602     unsigned char*          skb_data;
1603     int                     skb_len;
1604     unsigned char*          data;
1605     unsigned long           flags;
1606 
1607     ASSERT(vcc);
1608     ASSERT(fore200e);
1609     ASSERT(fore200e_vcc);
1610 
1611     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1612 	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1613 	dev_kfree_skb_any(skb);
1614 	return -EINVAL;
1615     }
1616 
1617 #ifdef FORE200E_52BYTE_AAL0_SDU
1618     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1619 	cell_header = (u32*) skb->data;
1620 	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
1621 	skb_len     = tx_len = skb->len  - 4;
1622 
1623 	DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1624     }
1625     else
1626 #endif
1627     {
1628 	skb_data = skb->data;
1629 	skb_len  = skb->len;
1630     }
1631 
1632     if (((unsigned long)skb_data) & 0x3) {
1633 
1634 	DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1635 	tx_copy = 1;
1636 	tx_len  = skb_len;
1637     }
1638 
1639     if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1640 
1641         /* this simply NUKES the PCA board */
1642 	DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1643 	tx_copy = 1;
1644 	tx_len  = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1645     }
1646 
1647     if (tx_copy) {
1648 	data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1649 	if (data == NULL) {
1650 	    if (vcc->pop) {
1651 		vcc->pop(vcc, skb);
1652 	    }
1653 	    else {
1654 		dev_kfree_skb_any(skb);
1655 	    }
1656 	    return -ENOMEM;
1657 	}
1658 
1659 	memcpy(data, skb_data, skb_len);
1660 	if (skb_len < tx_len)
1661 	    memset(data + skb_len, 0x00, tx_len - skb_len);
1662     }
1663     else {
1664 	data = skb_data;
1665     }
1666 
1667     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1668     ASSERT(vc_map->vcc == vcc);
1669 
1670   retry_here:
1671 
1672     spin_lock_irqsave(&fore200e->q_lock, flags);
1673 
1674     entry = &txq->host_entry[ txq->head ];
1675 
1676     if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1677 
1678 	/* try to free completed tx queue entries */
1679 	fore200e_tx_irq(fore200e);
1680 
1681 	if (*entry->status != STATUS_FREE) {
1682 
1683 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1684 
1685 	    /* retry once again? */
1686 	    if (--retry > 0) {
1687 		udelay(50);
1688 		goto retry_here;
1689 	    }
1690 
1691 	    atomic_inc(&vcc->stats->tx_err);
1692 
1693 	    fore200e->tx_sat++;
1694 	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1695 		    fore200e->name, fore200e->cp_queues->heartbeat);
1696 	    if (vcc->pop) {
1697 		vcc->pop(vcc, skb);
1698 	    }
1699 	    else {
1700 		dev_kfree_skb_any(skb);
1701 	    }
1702 
1703 	    if (tx_copy)
1704 		kfree(data);
1705 
1706 	    return -ENOBUFS;
1707 	}
1708     }
1709 
1710     entry->incarn = vc_map->incarn;
1711     entry->vc_map = vc_map;
1712     entry->skb    = skb;
1713     entry->data   = tx_copy ? data : NULL;
1714 
1715     tpd = entry->tpd;
1716     tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1717     tpd->tsd[ 0 ].length = tx_len;
1718 
1719     FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1720     txq->txing++;
1721 
1722     /* The dma_map call above implies a dma_sync so the device can use it,
1723      * thus no explicit dma_sync call is necessary here.
1724      */
1725 
1726     DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1727 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1728 	    tpd->tsd[0].length, skb_len);
1729 
1730     if (skb_len < fore200e_vcc->tx_min_pdu)
1731 	fore200e_vcc->tx_min_pdu = skb_len;
1732     if (skb_len > fore200e_vcc->tx_max_pdu)
1733 	fore200e_vcc->tx_max_pdu = skb_len;
1734     fore200e_vcc->tx_pdu++;
1735 
1736     /* set tx rate control information */
1737     tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1738     tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1739 
1740     if (cell_header) {
1741 	tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1742 	tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1743 	tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1744 	tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1745 	tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1746     }
1747     else {
1748 	/* set the ATM header, common to all cells conveying the PDU */
1749 	tpd->atm_header.clp = 0;
1750 	tpd->atm_header.plt = 0;
1751 	tpd->atm_header.vci = vcc->vci;
1752 	tpd->atm_header.vpi = vcc->vpi;
1753 	tpd->atm_header.gfc = 0;
1754     }
1755 
1756     tpd->spec.length = tx_len;
1757     tpd->spec.nseg   = 1;
1758     tpd->spec.aal    = fore200e_atm2fore_aal(vcc->qos.aal);
1759     tpd->spec.intr   = 1;
1760 
1761     tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);  /* size is expressed in 32 byte blocks */
1762     tpd_haddr.pad   = 0;
1763     tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;          /* shift the address, as we are in a bitfield */
1764 
1765     *entry->status = STATUS_PENDING;
1766     fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1767 
1768     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1769 
1770     return 0;
1771 }
1772 
1773 
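/*
 * Command queue helpers.  fore200e_getstats() and the OC-3 helpers below all
 * follow the same host/cp handshake on the command queue:
 *
 *     entry = &cmdq->host_entry[cmdq->head];      /. claim the current slot ./
 *     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
 *     /. write the DMA address of the result buffer (if any) to the cp entry ./
 *     *entry->status = STATUS_PENDING;
 *     /. write the opcode word last: this is what the cp firmware watches ./
 *     fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, timeout);
 *     *entry->status = STATUS_FREE;
 *
 * The firmware side of this exchange is not visible in this file; the
 * description above is inferred from the host-side code only.
 */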
1774 static int
1775 fore200e_getstats(struct fore200e* fore200e)
1776 {
1777     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1778     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1779     struct stats_opcode     opcode;
1780     int                     ok;
1781     u32                     stats_dma_addr;
1782 
1783     if (fore200e->stats == NULL) {
1784 	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1785 	if (fore200e->stats == NULL)
1786 	    return -ENOMEM;
1787     }
1788 
1789     stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1790 					    sizeof(struct stats), DMA_FROM_DEVICE);
1791 
1792     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1793 
1794     opcode.opcode = OPCODE_GET_STATS;
1795     opcode.pad    = 0;
1796 
1797     fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1798 
1799     *entry->status = STATUS_PENDING;
1800 
1801     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1802 
1803     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1804 
1805     *entry->status = STATUS_FREE;
1806 
1807     fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1808 
1809     if (ok == 0) {
1810 	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1811 	return -EIO;
1812     }
1813 
1814     return 0;
1815 }
1816 
1817 
1818 static int
1819 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1820 {
1821     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1822 
1823     DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1824 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1825 
1826     return -EINVAL;
1827 }
1828 
1829 
1830 static int
1831 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1832 {
1833     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1834 
1835     DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1836 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1837 
1838     return -EINVAL;
1839 }
1840 
1841 
1842 #if 0 /* currently unused */
1843 static int
1844 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1845 {
1846     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1847     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1848     struct oc3_opcode       opcode;
1849     int                     ok;
1850     u32                     oc3_regs_dma_addr;
1851 
1852     oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1853 
1854     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1855 
1856     opcode.opcode = OPCODE_GET_OC3;
1857     opcode.reg    = 0;
1858     opcode.value  = 0;
1859     opcode.mask   = 0;
1860 
1861     fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1862 
1863     *entry->status = STATUS_PENDING;
1864 
1865     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1866 
1867     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1868 
1869     *entry->status = STATUS_FREE;
1870 
1871     fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1872 
1873     if (ok == 0) {
1874 	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1875 	return -EIO;
1876     }
1877 
1878     return 0;
1879 }
1880 #endif
1881 
1882 
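/*
 * Write an OC-3 (SUNI) register through the cp: the (reg, value, mask)
 * triplet is carried by the OPCODE_SET_OC3 command; regs_haddr is set to 0
 * because no result buffer is returned for this command.
 */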
1883 static int
1884 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1885 {
1886     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1887     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1888     struct oc3_opcode       opcode;
1889     int                     ok;
1890 
1891     DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1892 
1893     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1894 
1895     opcode.opcode = OPCODE_SET_OC3;
1896     opcode.reg    = reg;
1897     opcode.value  = value;
1898     opcode.mask   = mask;
1899 
1900     fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1901 
1902     *entry->status = STATUS_PENDING;
1903 
1904     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1905 
1906     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1907 
1908     *entry->status = STATUS_FREE;
1909 
1910     if (ok == 0) {
1911 	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1912 	return -EIO;
1913     }
1914 
1915     return 0;
1916 }
1917 
1918 
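/*
 * ATM_SETLOOP handler: translate the generic ATM loopback mode into the
 * SUNI master control register bits (SUNI_MCT_DLE = diagnostic/local
 * loopback, SUNI_MCT_LLE = line/remote loopback) and program the PHY
 * through fore200e_set_oc3().  Requires CAP_NET_ADMIN.
 */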
1919 static int
1920 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1921 {
1922     u32 mct_value, mct_mask;
1923     int error;
1924 
1925     if (!capable(CAP_NET_ADMIN))
1926 	return -EPERM;
1927 
1928     switch (loop_mode) {
1929 
1930     case ATM_LM_NONE:
1931 	mct_value = 0;
1932 	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
1933 	break;
1934 
1935     case ATM_LM_LOC_PHY:
1936 	mct_value = mct_mask = SUNI_MCT_DLE;
1937 	break;
1938 
1939     case ATM_LM_RMT_PHY:
1940 	mct_value = mct_mask = SUNI_MCT_LLE;
1941 	break;
1942 
1943     default:
1944 	return -EINVAL;
1945     }
1946 
1947     error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1948     if (error == 0)
1949 	fore200e->loop_mode = loop_mode;
1950 
1951     return error;
1952 }
1953 
1954 
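/*
 * SONET_GETSTAT handler: refresh the on-board statistics and repack the
 * big-endian OC-3 error counters into a struct sonet_stats.  The cell
 * counters are the sums of the per-AAL (AAL0, AAL3/4, AAL5) counters.
 */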
1955 static int
1956 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1957 {
1958     struct sonet_stats tmp;
1959 
1960     if (fore200e_getstats(fore200e) < 0)
1961 	return -EIO;
1962 
1963     tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1964     tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1965     tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1966     tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1967     tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1968     tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1969     tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1970     tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
1971 	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1972 	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1973     tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)     +
1974 	              be32_to_cpu(fore200e->stats->aal34.cells_received)    +
1975 	              be32_to_cpu(fore200e->stats->aal5.cells_received);
1976 
1977     if (arg)
1978 	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1979 
1980     return 0;
1981 }
1982 
1983 
1984 static int
1985 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1986 {
1987     struct fore200e* fore200e = FORE200E_DEV(dev);
1988 
1989     DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1990 
1991     switch (cmd) {
1992 
1993     case SONET_GETSTAT:
1994 	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1995 
1996     case SONET_GETDIAG:
1997 	return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1998 
1999     case ATM_SETLOOP:
2000 	return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2001 
2002     case ATM_GETLOOP:
2003 	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2004 
2005     case ATM_QUERYLOOP:
2006 	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2007     }
2008 
2009     return -ENOSYS; /* not implemented */
2010 }
2011 
2012 
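/*
 * QoS renegotiation is only supported for the CBR traffic class in the
 * transmit direction.  The previously reserved peak cell rate is given back
 * and the new one is claimed from available_cell_rate under rate_mtx;
 * -EAGAIN is returned if the link cannot accommodate the request.
 */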
2013 static int
2014 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2015 {
2016     struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2017     struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);
2018 
2019     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2020 	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2021 	return -EINVAL;
2022     }
2023 
2024     DPRINTK(2, "change_qos %d.%d.%d, "
2025 	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2026 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2027 	    "available_cell_rate = %u\n",
2028 	    vcc->itf, vcc->vpi, vcc->vci,
2029 	    fore200e_traffic_class[ qos->txtp.traffic_class ],
2030 	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2031 	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
2032 	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2033 	    flags, fore200e->available_cell_rate);
2034 
2035     if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2036 
2037 	mutex_lock(&fore200e->rate_mtx);
2038 	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2039 	    mutex_unlock(&fore200e->rate_mtx);
2040 	    return -EAGAIN;
2041 	}
2042 
2043 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2044 	fore200e->available_cell_rate -= qos->txtp.max_pcr;
2045 
2046 	mutex_unlock(&fore200e->rate_mtx);
2047 
2048 	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2049 
2050 	/* update rate control parameters */
2051 	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2052 
2053 	set_bit(ATM_VF_HASQOS, &vcc->flags);
2054 
2055 	return 0;
2056     }
2057 
2058     return -EINVAL;
2059 }
2060 
2061 
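/*
 * Request the board interrupt (IRQF_SHARED, so the line may be shared with
 * other devices).  When FORE200E_USE_TASKLET is defined, tx and rx
 * completion work is deferred to the two tasklets initialized here.
 */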
2062 static int fore200e_irq_request(struct fore200e *fore200e)
2063 {
2064     if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2065 
2066 	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2067 	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
2068 	return -EBUSY;
2069     }
2070 
2071     printk(FORE200E "IRQ %s reserved for device %s\n",
2072 	   fore200e_irq_itoa(fore200e->irq), fore200e->name);
2073 
2074 #ifdef FORE200E_USE_TASKLET
2075     tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2076     tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2077 #endif
2078 
2079     fore200e->state = FORE200E_STATE_IRQ;
2080     return 0;
2081 }
2082 
2083 
2084 static int fore200e_get_esi(struct fore200e *fore200e)
2085 {
2086     struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2087     int ok, i;
2088 
2089     if (!prom)
2090 	return -ENOMEM;
2091 
2092     ok = fore200e->bus->prom_read(fore200e, prom);
2093     if (ok < 0) {
2094 	kfree(prom);
2095 	return -EBUSY;
2096     }
2097 
2098     printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
2099 	   fore200e->name,
2100 	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
2101 	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
2102 
2103     for (i = 0; i < ESI_LEN; i++) {
2104 	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2105     }
2106 
2107     kfree(prom);
2108 
2109     return 0;
2110 }
2111 
2112 
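/*
 * Allocate the host receive buffers.  Buffers are organized per
 * (scheme, magnitude) pair; the number and size of buffers for each pair
 * come from fore200e_rx_buf_nbr[] and fore200e_rx_buf_size[].  Each buffer
 * body is a DMA-able chunk, and all buffers start out chained on the
 * per-queue free list.
 */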
2113 static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
2114 {
2115     int scheme, magn, nbr, size, i;
2116 
2117     struct host_bsq* bsq;
2118     struct buffer*   buffer;
2119 
2120     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2121 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2122 
2123 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2124 
2125 	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
2126 	    size = fore200e_rx_buf_size[ scheme ][ magn ];
2127 
2128 	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2129 
2130 	    /* allocate the array of receive buffers */
2131 	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
2132                                            GFP_KERNEL);
2133 
2134 	    if (buffer == NULL)
2135 		return -ENOMEM;
2136 
2137 	    bsq->freebuf = NULL;
2138 
2139 	    for (i = 0; i < nbr; i++) {
2140 
2141 		buffer[ i ].scheme = scheme;
2142 		buffer[ i ].magn   = magn;
2143 #ifdef FORE200E_BSQ_DEBUG
2144 		buffer[ i ].index  = i;
2145 		buffer[ i ].supplied = 0;
2146 #endif
2147 
2148 		/* allocate the receive buffer body */
2149 		if (fore200e_chunk_alloc(fore200e,
2150 					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2151 					 DMA_FROM_DEVICE) < 0) {
2152 
2153 		    while (i > 0)
2154 			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2155 		    kfree(buffer);
2156 
2157 		    return -ENOMEM;
2158 		}
2159 
2160 		/* insert the buffer into the free buffer list */
2161 		buffer[ i ].next = bsq->freebuf;
2162 		bsq->freebuf = &buffer[ i ];
2163 	    }
2164 	    /* all the buffers are free, initially */
2165 	    bsq->freebuf_count = nbr;
2166 
2167 #ifdef FORE200E_BSQ_DEBUG
2168 	    bsq_audit(3, bsq, scheme, magn);
2169 #endif
2170 	}
2171     }
2172 
2173     fore200e->state = FORE200E_STATE_ALLOC_BUF;
2174     return 0;
2175 }
2176 
2177 
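/*
 * Initialize the buffer supply queues: for every (scheme, magnitude) pair,
 * allocate the aligned arrays of status words and receive buffer descriptor
 * blocks, then bind each host-resident entry to its cp-resident counterpart
 * found through cp_queues->cp_bsq[][].
 */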
2178 static int fore200e_init_bs_queue(struct fore200e *fore200e)
2179 {
2180     int scheme, magn, i;
2181 
2182     struct host_bsq*     bsq;
2183     struct cp_bsq_entry __iomem * cp_entry;
2184 
2185     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2186 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2187 
2188 	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2189 
2190 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2191 
2192 	    /* allocate and align the array of status words */
2193 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2194 					       &bsq->status,
2195 					       sizeof(enum status),
2196 					       QUEUE_SIZE_BS,
2197 					       fore200e->bus->status_alignment) < 0) {
2198 		return -ENOMEM;
2199 	    }
2200 
2201 	    /* allocate and align the array of receive buffer descriptors */
2202 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2203 					       &bsq->rbd_block,
2204 					       sizeof(struct rbd_block),
2205 					       QUEUE_SIZE_BS,
2206 					       fore200e->bus->descr_alignment) < 0) {
2207 
2208 		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2209 		return -ENOMEM;
2210 	    }
2211 
2212 	    /* get the base address of the cp resident buffer supply queue entries */
2213 	    cp_entry = fore200e->virt_base +
2214 		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2215 
2216 	    /* fill the host resident and cp resident buffer supply queue entries */
2217 	    for (i = 0; i < QUEUE_SIZE_BS; i++) {
2218 
2219 		bsq->host_entry[ i ].status =
2220 		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2221 	        bsq->host_entry[ i ].rbd_block =
2222 		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2223 		bsq->host_entry[ i ].rbd_block_dma =
2224 		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2225 		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2226 
2227 		*bsq->host_entry[ i ].status = STATUS_FREE;
2228 
2229 		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2230 				     &cp_entry[ i ].status_haddr);
2231 	    }
2232 	}
2233     }
2234 
2235     fore200e->state = FORE200E_STATE_INIT_BSQ;
2236     return 0;
2237 }
2238 
2239 
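/*
 * Initialize the receive queue: same pattern as the buffer supply queues,
 * with one status word and one receive PDU descriptor (rpd) per entry, and
 * both DMA addresses advertised to the cp-resident rx queue entries.
 */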
2240 static int fore200e_init_rx_queue(struct fore200e *fore200e)
2241 {
2242     struct host_rxq*     rxq =  &fore200e->host_rxq;
2243     struct cp_rxq_entry __iomem * cp_entry;
2244     int i;
2245 
2246     DPRINTK(2, "receive queue is being initialized\n");
2247 
2248     /* allocate and align the array of status words */
2249     if (fore200e->bus->dma_chunk_alloc(fore200e,
2250 				       &rxq->status,
2251 				       sizeof(enum status),
2252 				       QUEUE_SIZE_RX,
2253 				       fore200e->bus->status_alignment) < 0) {
2254 	return -ENOMEM;
2255     }
2256 
2257     /* allocate and align the array of receive PDU descriptors */
2258     if (fore200e->bus->dma_chunk_alloc(fore200e,
2259 				       &rxq->rpd,
2260 				       sizeof(struct rpd),
2261 				       QUEUE_SIZE_RX,
2262 				       fore200e->bus->descr_alignment) < 0) {
2263 
2264 	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2265 	return -ENOMEM;
2266     }
2267 
2268     /* get the base address of the cp resident rx queue entries */
2269     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2270 
2271     /* fill the host resident and cp resident rx entries */
2272     for (i=0; i < QUEUE_SIZE_RX; i++) {
2273 
2274 	rxq->host_entry[ i ].status =
2275 	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2276 	rxq->host_entry[ i ].rpd =
2277 	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2278 	rxq->host_entry[ i ].rpd_dma =
2279 	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2280 	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2281 
2282 	*rxq->host_entry[ i ].status = STATUS_FREE;
2283 
2284 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2285 			     &cp_entry[ i ].status_haddr);
2286 
2287 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2288 			     &cp_entry[ i ].rpd_haddr);
2289     }
2290 
2291     /* set the head entry of the queue */
2292     rxq->head = 0;
2293 
2294     fore200e->state = FORE200E_STATE_INIT_RXQ;
2295     return 0;
2296 }
2297 
2298 
2299 static int fore200e_init_tx_queue(struct fore200e *fore200e)
2300 {
2301     struct host_txq*     txq =  &fore200e->host_txq;
2302     struct cp_txq_entry __iomem * cp_entry;
2303     int i;
2304 
2305     DPRINTK(2, "transmit queue is being initialized\n");
2306 
2307     /* allocate and align the array of status words */
2308     if (fore200e->bus->dma_chunk_alloc(fore200e,
2309 				       &txq->status,
2310 				       sizeof(enum status),
2311 				       QUEUE_SIZE_TX,
2312 				       fore200e->bus->status_alignment) < 0) {
2313 	return -ENOMEM;
2314     }
2315 
2316     /* allocate and align the array of transmit PDU descriptors */
2317     if (fore200e->bus->dma_chunk_alloc(fore200e,
2318 				       &txq->tpd,
2319 				       sizeof(struct tpd),
2320 				       QUEUE_SIZE_TX,
2321 				       fore200e->bus->descr_alignment) < 0) {
2322 
2323 	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2324 	return -ENOMEM;
2325     }
2326 
2327     /* get the base address of the cp resident tx queue entries */
2328     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2329 
2330     /* fill the host resident and cp resident tx entries */
2331     for (i=0; i < QUEUE_SIZE_TX; i++) {
2332 
2333 	txq->host_entry[ i ].status =
2334 	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
2335 	txq->host_entry[ i ].tpd =
2336 	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2337 	txq->host_entry[ i ].tpd_dma  =
2338                              FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2339 	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2340 
2341 	*txq->host_entry[ i ].status = STATUS_FREE;
2342 
2343 	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2344 			     &cp_entry[ i ].status_haddr);
2345 
2346         /* although there is a one-to-one mapping of tx queue entries and tpds,
2347 	   we do not write the DMA (physical) base address of each tpd into the
2348 	   related cp resident entry here, because the cp relies on that write
2349 	   operation to detect that a new pdu has been submitted for tx */
2350     }
2351 
2352     /* set the head and tail entries of the queue */
2353     txq->head = 0;
2354     txq->tail = 0;
2355 
2356     fore200e->state = FORE200E_STATE_INIT_TXQ;
2357     return 0;
2358 }
2359 
2360 
2361 static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2362 {
2363     struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
2364     struct cp_cmdq_entry __iomem * cp_entry;
2365     int i;
2366 
2367     DPRINTK(2, "command queue is being initialized\n");
2368 
2369     /* allocate and align the array of status words */
2370     if (fore200e->bus->dma_chunk_alloc(fore200e,
2371 				       &cmdq->status,
2372 				       sizeof(enum status),
2373 				       QUEUE_SIZE_CMD,
2374 				       fore200e->bus->status_alignment) < 0) {
2375 	return -ENOMEM;
2376     }
2377 
2378     /* get the base address of the cp resident cmd queue entries */
2379     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2380 
2381     /* fill the host resident and cp resident cmd entries */
2382     for (i=0; i < QUEUE_SIZE_CMD; i++) {
2383 
2384 	cmdq->host_entry[ i ].status   =
2385                               FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2386 	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2387 
2388 	*cmdq->host_entry[ i ].status = STATUS_FREE;
2389 
2390 	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2391                              &cp_entry[ i ].status_haddr);
2392     }
2393 
2394     /* set the head entry of the queue */
2395     cmdq->head = 0;
2396 
2397     fore200e->state = FORE200E_STATE_INIT_CMDQ;
2398     return 0;
2399 }
2400 
2401 
2402 static void fore200e_param_bs_queue(struct fore200e *fore200e,
2403 				    enum buffer_scheme scheme,
2404 				    enum buffer_magn magn, int queue_length,
2405 				    int pool_size, int supply_blksize)
2406 {
2407     struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2408 
2409     fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
2410     fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2411     fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
2412     fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
2413 }
2414 
2415 
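/*
 * After enabling cp-to-host interrupts, fill in the initialization
 * parameter block shared with the cp (queue lengths, RSD/TSD extensions,
 * buffer supply queue parameters), then issue OPCODE_INITIALIZE and poll
 * the status word until the firmware reports completion.
 */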
2416 static int fore200e_initialize(struct fore200e *fore200e)
2417 {
2418     struct cp_queues __iomem * cpq;
2419     int               ok, scheme, magn;
2420 
2421     DPRINTK(2, "device %s being initialized\n", fore200e->name);
2422 
2423     mutex_init(&fore200e->rate_mtx);
2424     spin_lock_init(&fore200e->q_lock);
2425 
2426     cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2427 
2428     /* enable cp to host interrupts */
2429     fore200e->bus->write(1, &cpq->imask);
2430 
2431     if (fore200e->bus->irq_enable)
2432 	fore200e->bus->irq_enable(fore200e);
2433 
2434     fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2435 
2436     fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2437     fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
2438     fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);
2439 
2440     fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
2441     fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);
2442 
2443     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2444 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2445 	    fore200e_param_bs_queue(fore200e, scheme, magn,
2446 				    QUEUE_SIZE_BS,
2447 				    fore200e_rx_buf_nbr[ scheme ][ magn ],
2448 				    RBD_BLK_SIZE);
2449 
2450     /* issue the initialize command */
2451     fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
2452     fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2453 
2454     ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2455     if (ok == 0) {
2456 	printk(FORE200E "device %s initialization failed\n", fore200e->name);
2457 	return -ENODEV;
2458     }
2459 
2460     printk(FORE200E "device %s initialized\n", fore200e->name);
2461 
2462     fore200e->state = FORE200E_STATE_INITIALIZE;
2463     return 0;
2464 }
2465 
2466 
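/*
 * Minimal "soft UART" used to talk to the i960 boot monitor through two
 * mailbox words in the cp monitor area: a character is sent by writing it
 * together with FORE200E_CP_MONITOR_UART_AVAIL, and received characters are
 * acknowledged by writing FORE200E_CP_MONITOR_UART_FREE back.
 */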
2467 static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2468 {
2469     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2470 
2471 #if 0
2472     printk("%c", c);
2473 #endif
2474     fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2475 }
2476 
2477 
2478 static int fore200e_monitor_getc(struct fore200e *fore200e)
2479 {
2480     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2481     unsigned long      timeout = jiffies + msecs_to_jiffies(50);
2482     int                c;
2483 
2484     while (time_before(jiffies, timeout)) {
2485 
2486 	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2487 
2488 	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2489 
2490 	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2491 #if 0
2492 	    printk("%c", c & 0xFF);
2493 #endif
2494 	    return c & 0xFF;
2495 	}
2496     }
2497 
2498     return -1;
2499 }
2500 
2501 
2502 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2503 {
2504     while (*str) {
2505 
2506 	/* the i960 monitor doesn't accept any new character if it has something to say */
2507 	while (fore200e_monitor_getc(fore200e) >= 0);
2508 
2509 	fore200e_monitor_putc(fore200e, *str++);
2510     }
2511 
2512     while (fore200e_monitor_getc(fore200e) >= 0);
2513 }
2514 
2515 #ifdef __LITTLE_ENDIAN
2516 #define FW_EXT ".bin"
2517 #else
2518 #define FW_EXT "_ecd.bin2"
2519 #endif
2520 
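/*
 * Load and boot the on-board firmware.  The image name is derived from the
 * bus proc name plus FW_EXT (".bin" on little-endian hosts, "_ecd.bin2"
 * otherwise), the image is copied word by word to the load offset given in
 * its header, and the i960 monitor is then told to "go <start_offset>" over
 * the soft UART.  Successful startup is detected by polling bstat for
 * BSTAT_CP_RUNNING.
 */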
2521 static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2522 {
2523     const struct firmware *firmware;
2524     const struct fw_header *fw_header;
2525     const __le32 *fw_data;
2526     u32 fw_size;
2527     u32 __iomem *load_addr;
2528     char buf[48];
2529     int err;
2530 
2531     sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2532     if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2533 	printk(FORE200E "problem loading firmware image %s\n", buf);
2534 	return err;
2535     }
2536 
2537     fw_data = (const __le32 *)firmware->data;
2538     fw_size = firmware->size / sizeof(u32);
2539     fw_header = (const struct fw_header *)firmware->data;
2540     load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2541 
2542     DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2543 	    fore200e->name, load_addr, fw_size);
2544 
2545     if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2546 	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
	err = -EINVAL;
2547 	goto release;
2548     }
2549 
2550     for (; fw_size--; fw_data++, load_addr++)
2551 	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2552 
2553     DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2554 
2555 #if defined(__sparc_v9__)
2556     /* reported to be required by SBA cards on some sparc64 hosts */
2557     fore200e_spin(100);
2558 #endif
2559 
2560     sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2561     fore200e_monitor_puts(fore200e, buf);
2562 
2563     if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2564 	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
	err = -ENODEV;
2565 	goto release;
2566     }
2567 
2568     printk(FORE200E "device %s firmware started\n", fore200e->name);
2569 
2570     fore200e->state = FORE200E_STATE_START_FW;
2571     err = 0;
2572 
2573 release:
2574     release_firmware(firmware);
2575     return err;
2576 }
2577 
2578 
2579 static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2580 {
2581     struct atm_dev* atm_dev;
2582 
2583     DPRINTK(2, "device %s being registered\n", fore200e->name);
2584 
2585     atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2586                                -1, NULL);
2587     if (atm_dev == NULL) {
2588 	printk(FORE200E "unable to register device %s\n", fore200e->name);
2589 	return -ENODEV;
2590     }
2591 
2592     atm_dev->dev_data = fore200e;
2593     fore200e->atm_dev = atm_dev;
2594 
2595     atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2596     atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2597 
2598     fore200e->available_cell_rate = ATM_OC3_PCR;
2599 
2600     fore200e->state = FORE200E_STATE_REGISTER;
2601     return 0;
2602 }
2603 
2604 
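/*
 * Bring-up sequence common to PCA (PCI) and SBA (SBus) boards.  Each step
 * records its progress in fore200e->state, which presumably lets
 * fore200e_shutdown() (defined earlier in this file) unwind exactly the
 * steps that completed if a later step fails.
 */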
2605 static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2606 {
2607     if (fore200e_register(fore200e, parent) < 0)
2608 	return -ENODEV;
2609 
2610     if (fore200e->bus->configure(fore200e) < 0)
2611 	return -ENODEV;
2612 
2613     if (fore200e->bus->map(fore200e) < 0)
2614 	return -ENODEV;
2615 
2616     if (fore200e_reset(fore200e, 1) < 0)
2617 	return -ENODEV;
2618 
2619     if (fore200e_load_and_start_fw(fore200e) < 0)
2620 	return -ENODEV;
2621 
2622     if (fore200e_initialize(fore200e) < 0)
2623 	return -ENODEV;
2624 
2625     if (fore200e_init_cmd_queue(fore200e) < 0)
2626 	return -ENOMEM;
2627 
2628     if (fore200e_init_tx_queue(fore200e) < 0)
2629 	return -ENOMEM;
2630 
2631     if (fore200e_init_rx_queue(fore200e) < 0)
2632 	return -ENOMEM;
2633 
2634     if (fore200e_init_bs_queue(fore200e) < 0)
2635 	return -ENOMEM;
2636 
2637     if (fore200e_alloc_rx_buf(fore200e) < 0)
2638 	return -ENOMEM;
2639 
2640     if (fore200e_get_esi(fore200e) < 0)
2641 	return -EIO;
2642 
2643     if (fore200e_irq_request(fore200e) < 0)
2644 	return -EBUSY;
2645 
2646     fore200e_supply(fore200e);
2647 
2648     /* all done, board initialization is now complete */
2649     fore200e->state = FORE200E_STATE_COMPLETE;
2650     return 0;
2651 }
2652 
2653 #ifdef CONFIG_SBUS
2654 static const struct of_device_id fore200e_sba_match[];
2655 static int fore200e_sba_probe(struct platform_device *op)
2656 {
2657 	const struct of_device_id *match;
2658 	struct fore200e *fore200e;
2659 	static int index = 0;
2660 	int err;
2661 
2662 	match = of_match_device(fore200e_sba_match, &op->dev);
2663 	if (!match)
2664 		return -EINVAL;
2665 
2666 	fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2667 	if (!fore200e)
2668 		return -ENOMEM;
2669 
2670 	fore200e->bus = &fore200e_sbus_ops;
2671 	fore200e->dev = &op->dev;
2672 	fore200e->irq = op->archdata.irqs[0];
2673 	fore200e->phys_base = op->resource[0].start;
2674 
2675 	sprintf(fore200e->name, "SBA-200E-%d", index);
2676 
2677 	err = fore200e_init(fore200e, &op->dev);
2678 	if (err < 0) {
2679 		fore200e_shutdown(fore200e);
2680 		kfree(fore200e);
2681 		return err;
2682 	}
2683 
2684 	index++;
2685 	dev_set_drvdata(&op->dev, fore200e);
2686 
2687 	return 0;
2688 }
2689 
2690 static int fore200e_sba_remove(struct platform_device *op)
2691 {
2692 	struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2693 
2694 	fore200e_shutdown(fore200e);
2695 	kfree(fore200e);
2696 
2697 	return 0;
2698 }
2699 
2700 static const struct of_device_id fore200e_sba_match[] = {
2701 	{
2702 		.name = SBA200E_PROM_NAME,
2703 	},
2704 	{},
2705 };
2706 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2707 
2708 static struct platform_driver fore200e_sba_driver = {
2709 	.driver = {
2710 		.name = "fore_200e",
2711 		.of_match_table = fore200e_sba_match,
2712 	},
2713 	.probe		= fore200e_sba_probe,
2714 	.remove		= fore200e_sba_remove,
2715 };
2716 #endif
2717 
2718 #ifdef CONFIG_PCI
2719 static int fore200e_pca_detect(struct pci_dev *pci_dev,
2720 			       const struct pci_device_id *pci_ent)
2721 {
2722     struct fore200e* fore200e;
2723     int err = 0;
2724     static int index = 0;
2725 
2726     if (pci_enable_device(pci_dev)) {
2727 	err = -EINVAL;
2728 	goto out;
2729     }
2730 
2731     if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2732 	err = -EINVAL;
2733 	goto out_disable;
2734     }
2735 
2736     fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2737     if (fore200e == NULL) {
2738 	err = -ENOMEM;
2739 	goto out_disable;
2740     }
2741 
2742     fore200e->bus       = &fore200e_pci_ops;
2743     fore200e->dev	= &pci_dev->dev;
2744     fore200e->irq       = pci_dev->irq;
2745     fore200e->phys_base = pci_resource_start(pci_dev, 0);
2746 
2748 
2749     pci_set_master(pci_dev);
2750 
2751     printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2752 	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2753 
2754     sprintf(fore200e->name, "PCA-200E-%d", index);
2755 
2756     err = fore200e_init(fore200e, &pci_dev->dev);
2757     if (err < 0) {
2758 	fore200e_shutdown(fore200e);
2759 	goto out_free;
2760     }
2761 
2762     ++index;
2763     pci_set_drvdata(pci_dev, fore200e);
2764 
2765 out:
2766     return err;
2767 
2768 out_free:
2769     kfree(fore200e);
2770 out_disable:
2771     pci_disable_device(pci_dev);
2772     goto out;
2773 }
2774 
2775 
2776 static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2777 {
2778     struct fore200e *fore200e;
2779 
2780     fore200e = pci_get_drvdata(pci_dev);
2781 
2782     fore200e_shutdown(fore200e);
2783     kfree(fore200e);
2784     pci_disable_device(pci_dev);
2785 }
2786 
2787 
2788 static const struct pci_device_id fore200e_pca_tbl[] = {
2789     { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
2790     { 0, }
2791 };
2792 
2793 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2794 
2795 static struct pci_driver fore200e_pca_driver = {
2796     .name =     "fore_200e",
2797     .probe =    fore200e_pca_detect,
2798     .remove =   fore200e_pca_remove_one,
2799     .id_table = fore200e_pca_tbl,
2800 };
2801 #endif
2802 
2803 static int __init fore200e_module_init(void)
2804 {
2805 	int err = 0;
2806 
2807 	printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2808 
2809 #ifdef CONFIG_SBUS
2810 	err = platform_driver_register(&fore200e_sba_driver);
2811 	if (err)
2812 		return err;
2813 #endif
2814 
2815 #ifdef CONFIG_PCI
2816 	err = pci_register_driver(&fore200e_pca_driver);
2817 #endif
2818 
2819 #ifdef CONFIG_SBUS
2820 	if (err)
2821 		platform_driver_unregister(&fore200e_sba_driver);
2822 #endif
2823 
2824 	return err;
2825 }
2826 
2827 static void __exit fore200e_module_cleanup(void)
2828 {
2829 #ifdef CONFIG_PCI
2830 	pci_unregister_driver(&fore200e_pca_driver);
2831 #endif
2832 #ifdef CONFIG_SBUS
2833 	platform_driver_unregister(&fore200e_sba_driver);
2834 #endif
2835 }
2836 
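/*
 * /proc/atm read-out.  The ATM layer calls this repeatedly with an
 * increasing *pos; each "if (!left--)" block below emits one chunk of the
 * output, and returning 0 (after the per-VCC entries are exhausted) ends
 * the listing.
 */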
2837 static int
2838 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2839 {
2840     struct fore200e*     fore200e  = FORE200E_DEV(dev);
2841     struct fore200e_vcc* fore200e_vcc;
2842     struct atm_vcc*      vcc;
2843     int                  i, len, left = *pos;
2844     unsigned long        flags;
2845 
2846     if (!left--) {
2847 
2848 	if (fore200e_getstats(fore200e) < 0)
2849 	    return -EIO;
2850 
2851 	len = sprintf(page,"\n"
2852 		       " device:\n"
2853 		       "   internal name:\t\t%s\n", fore200e->name);
2854 
2855 	/* print bus-specific information */
2856 	if (fore200e->bus->proc_read)
2857 	    len += fore200e->bus->proc_read(fore200e, page + len);
2858 
2859 	len += sprintf(page + len,
2860 		"   interrupt line:\t\t%s\n"
2861 		"   physical base address:\t0x%p\n"
2862 		"   virtual base address:\t0x%p\n"
2863 		"   factory address (ESI):\t%pM\n"
2864 		"   board serial number:\t\t%d\n\n",
2865 		fore200e_irq_itoa(fore200e->irq),
2866 		(void*)fore200e->phys_base,
2867 		fore200e->virt_base,
2868 		fore200e->esi,
2869 		fore200e->esi[4] * 256 + fore200e->esi[5]);
2870 
2871 	return len;
2872     }
2873 
2874     if (!left--)
2875 	return sprintf(page,
2876 		       "   free small bufs, scheme 1:\t%d\n"
2877 		       "   free large bufs, scheme 1:\t%d\n"
2878 		       "   free small bufs, scheme 2:\t%d\n"
2879 		       "   free large bufs, scheme 2:\t%d\n",
2880 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2881 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2882 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2883 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2884 
2885     if (!left--) {
2886 	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2887 
2888 	len = sprintf(page,"\n\n"
2889 		      " cell processor:\n"
2890 		      "   heartbeat state:\t\t");
2891 
2892 	if (hb >> 16 != 0xDEAD)
2893 	    len += sprintf(page + len, "0x%08x\n", hb);
2894 	else
2895 	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2896 
2897 	return len;
2898     }
2899 
2900     if (!left--) {
2901 	static const char* media_name[] = {
2902 	    "unshielded twisted pair",
2903 	    "multimode optical fiber ST",
2904 	    "multimode optical fiber SC",
2905 	    "single-mode optical fiber ST",
2906 	    "single-mode optical fiber SC",
2907 	    "unknown"
2908 	};
2909 
2910 	static const char* oc3_mode[] = {
2911 	    "normal operation",
2912 	    "diagnostic loopback",
2913 	    "line loopback",
2914 	    "unknown"
2915 	};
2916 
2917 	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2918 	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2919 	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2920 	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2921 	u32 oc3_index;
2922 
2923 	if (media_index > 4)
2924 		media_index = 5;
2925 
2926 	switch (fore200e->loop_mode) {
2927 	    case ATM_LM_NONE:    oc3_index = 0;
2928 		                 break;
2929 	    case ATM_LM_LOC_PHY: oc3_index = 1;
2930 		                 break;
2931 	    case ATM_LM_RMT_PHY: oc3_index = 2;
2932 		                 break;
2933 	    default:             oc3_index = 3;
2934 	}
2935 
2936 	return sprintf(page,
2937 		       "   firmware release:\t\t%d.%d.%d\n"
2938 		       "   monitor release:\t\t%d.%d\n"
2939 		       "   media type:\t\t\t%s\n"
2940 		       "   OC-3 revision:\t\t0x%x\n"
2941                        "   OC-3 mode:\t\t\t%s",
2942 		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
2943 		       mon960_release >> 16, mon960_release << 16 >> 16,
2944 		       media_name[ media_index ],
2945 		       oc3_revision,
2946 		       oc3_mode[ oc3_index ]);
2947     }
2948 
2949     if (!left--) {
2950 	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2951 
2952 	return sprintf(page,
2953 		       "\n\n"
2954 		       " monitor:\n"
2955 		       "   version number:\t\t%d\n"
2956 		       "   boot status word:\t\t0x%08x\n",
2957 		       fore200e->bus->read(&cp_monitor->mon_version),
2958 		       fore200e->bus->read(&cp_monitor->bstat));
2959     }
2960 
2961     if (!left--)
2962 	return sprintf(page,
2963 		       "\n"
2964 		       " device statistics:\n"
2965 		       "  4b5b:\n"
2966 		       "     crc_header_errors:\t\t%10u\n"
2967 		       "     framing_errors:\t\t%10u\n",
2968 		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2969 		       be32_to_cpu(fore200e->stats->phy.framing_errors));
2970 
2971     if (!left--)
2972 	return sprintf(page, "\n"
2973 		       "  OC-3:\n"
2974 		       "     section_bip8_errors:\t%10u\n"
2975 		       "     path_bip8_errors:\t\t%10u\n"
2976 		       "     line_bip24_errors:\t\t%10u\n"
2977 		       "     line_febe_errors:\t\t%10u\n"
2978 		       "     path_febe_errors:\t\t%10u\n"
2979 		       "     corr_hcs_errors:\t\t%10u\n"
2980 		       "     ucorr_hcs_errors:\t\t%10u\n",
2981 		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2982 		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2983 		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2984 		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2985 		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2986 		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2987 		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2988 
2989     if (!left--)
2990 	return sprintf(page,"\n"
2991 		       "   ATM:\t\t\t\t     cells\n"
2992 		       "     TX:\t\t\t%10u\n"
2993 		       "     RX:\t\t\t%10u\n"
2994 		       "     vpi out of range:\t\t%10u\n"
2995 		       "     vpi no conn:\t\t%10u\n"
2996 		       "     vci out of range:\t\t%10u\n"
2997 		       "     vci no conn:\t\t%10u\n",
2998 		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2999 		       be32_to_cpu(fore200e->stats->atm.cells_received),
3000 		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
3001 		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
3002 		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
3003 		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));
3004 
3005     if (!left--)
3006 	return sprintf(page,"\n"
3007 		       "   AAL0:\t\t\t     cells\n"
3008 		       "     TX:\t\t\t%10u\n"
3009 		       "     RX:\t\t\t%10u\n"
3010 		       "     dropped:\t\t\t%10u\n",
3011 		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
3012 		       be32_to_cpu(fore200e->stats->aal0.cells_received),
3013 		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));
3014 
3015     if (!left--)
3016 	return sprintf(page,"\n"
3017 		       "   AAL3/4:\n"
3018 		       "     SAR sublayer:\t\t     cells\n"
3019 		       "       TX:\t\t\t%10u\n"
3020 		       "       RX:\t\t\t%10u\n"
3021 		       "       dropped:\t\t\t%10u\n"
3022 		       "       CRC errors:\t\t%10u\n"
3023 		       "       protocol errors:\t\t%10u\n\n"
3024 		       "     CS  sublayer:\t\t      PDUs\n"
3025 		       "       TX:\t\t\t%10u\n"
3026 		       "       RX:\t\t\t%10u\n"
3027 		       "       dropped:\t\t\t%10u\n"
3028 		       "       protocol errors:\t\t%10u\n",
3029 		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
3030 		       be32_to_cpu(fore200e->stats->aal34.cells_received),
3031 		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
3032 		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
3033 		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
3034 		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
3035 		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
3036 		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
3037 		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
3038 
3039     if (!left--)
3040 	return sprintf(page,"\n"
3041 		       "   AAL5:\n"
3042 		       "     SAR sublayer:\t\t     cells\n"
3043 		       "       TX:\t\t\t%10u\n"
3044 		       "       RX:\t\t\t%10u\n"
3045 		       "       dropped:\t\t\t%10u\n"
3046 		       "       congestions:\t\t%10u\n\n"
3047 		       "     CS  sublayer:\t\t      PDUs\n"
3048 		       "       TX:\t\t\t%10u\n"
3049 		       "       RX:\t\t\t%10u\n"
3050 		       "       dropped:\t\t\t%10u\n"
3051 		       "       CRC errors:\t\t%10u\n"
3052 		       "       protocol errors:\t\t%10u\n",
3053 		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
3054 		       be32_to_cpu(fore200e->stats->aal5.cells_received),
3055 		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
3056 		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
3057 		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
3058 		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
3059 		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
3060 		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
3061 		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
3062 
3063     if (!left--)
3064 	return sprintf(page,"\n"
3065 		       "   AUX:\t\t       allocation failures\n"
3066 		       "     small b1:\t\t\t%10u\n"
3067 		       "     large b1:\t\t\t%10u\n"
3068 		       "     small b2:\t\t\t%10u\n"
3069 		       "     large b2:\t\t\t%10u\n"
3070 		       "     RX PDUs:\t\t\t%10u\n"
3071 		       "     TX PDUs:\t\t\t%10lu\n",
3072 		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
3073 		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
3074 		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
3075 		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
3076 		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
3077 		       fore200e->tx_sat);
3078 
3079     if (!left--)
3080 	return sprintf(page,"\n"
3081 		       " receive carrier:\t\t\t%s\n",
3082 		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3083 
3084     if (!left--) {
3085         return sprintf(page,"\n"
3086 		       " VCCs:\n  address   VPI VCI   AAL "
3087 		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
3088     }
3089 
3090     for (i = 0; i < NBR_CONNECT; i++) {
3091 
3092 	vcc = fore200e->vc_map[i].vcc;
3093 
3094 	if (vcc == NULL)
3095 	    continue;
3096 
3097 	spin_lock_irqsave(&fore200e->q_lock, flags);
3098 
3099 	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3100 
3101 	    fore200e_vcc = FORE200E_VCC(vcc);
3102 	    ASSERT(fore200e_vcc);
3103 
3104 	    len = sprintf(page,
3105 			  "  %pK  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
3106 			  vcc,
3107 			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3108 			  fore200e_vcc->tx_pdu,
3109 			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3110 			  fore200e_vcc->tx_max_pdu,
3111 			  fore200e_vcc->rx_pdu,
3112 			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3113 			  fore200e_vcc->rx_max_pdu);
3114 
3115 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
3116 	    return len;
3117 	}
3118 
3119 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
3120     }
3121 
3122     return 0;
3123 }
3124 
3125 module_init(fore200e_module_init);
3126 module_exit(fore200e_module_cleanup);
3127 
3128 
3129 static const struct atmdev_ops fore200e_ops = {
3130 	.open       = fore200e_open,
3131 	.close      = fore200e_close,
3132 	.ioctl      = fore200e_ioctl,
3133 	.getsockopt = fore200e_getsockopt,
3134 	.setsockopt = fore200e_setsockopt,
3135 	.send       = fore200e_send,
3136 	.change_qos = fore200e_change_qos,
3137 	.proc_read  = fore200e_proc_read,
3138 	.owner      = THIS_MODULE
3139 };
3140 
3141 MODULE_LICENSE("GPL");
3142 #ifdef CONFIG_PCI
3143 #ifdef __LITTLE_ENDIAN
3144 MODULE_FIRMWARE("pca200e.bin");
3145 #else
3146 MODULE_FIRMWARE("pca200e_ecd.bin2");
3147 #endif
3148 #endif /* CONFIG_PCI */
3149 #ifdef CONFIG_SBUS
3150 MODULE_FIRMWARE("sba200e_ecd.bin2");
3151 #endif
3152