1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3   A FORE Systems 200E-series driver for ATM on Linux.
4   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
5 
6   Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
7 
8   This driver simultaneously supports PCA-200E and SBA-200E adapters
9   on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
10 
11 */
12 
13 
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/capability.h>
18 #include <linux/interrupt.h>
19 #include <linux/bitops.h>
20 #include <linux/pci.h>
21 #include <linux/module.h>
22 #include <linux/atmdev.h>
23 #include <linux/sonet.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/delay.h>
26 #include <linux/firmware.h>
27 #include <linux/pgtable.h>
28 #include <asm/io.h>
29 #include <asm/string.h>
30 #include <asm/page.h>
31 #include <asm/irq.h>
32 #include <asm/dma.h>
33 #include <asm/byteorder.h>
34 #include <linux/uaccess.h>
35 #include <linux/atomic.h>
36 
37 #ifdef CONFIG_SBUS
38 #include <linux/of.h>
39 #include <linux/of_device.h>
40 #include <asm/idprom.h>
41 #include <asm/openprom.h>
42 #include <asm/oplib.h>
43 #endif
44 
45 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
46 #define FORE200E_USE_TASKLET
47 #endif
48 
49 #if 0 /* enable the debugging code of the buffer supply queues */
50 #define FORE200E_BSQ_DEBUG
51 #endif
52 
53 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
54 #define FORE200E_52BYTE_AAL0_SDU
55 #endif
56 
57 #include "fore200e.h"
58 #include "suni.h"
59 
60 #define FORE200E_VERSION "0.3e"
61 
62 #define FORE200E         "fore200e: "
63 
64 #if 0 /* override .config */
65 #define CONFIG_ATM_FORE200E_DEBUG 1
66 #endif
67 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
68 #define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
69                                                   printk(FORE200E format, ##args); } while (0)
70 #else
71 #define DPRINTK(level, format, args...)  do {} while (0)
72 #endif
73 
74 
75 #define FORE200E_ALIGN(addr, alignment) \
76         ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
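/* note: FORE200E_ALIGN() evaluates to the byte offset needed to round 'addr' up to
   the next 'alignment' boundary, not to the aligned address itself; e.g. with
   addr == 0x1003 and alignment == 8 it yields 5, so addr + 5 is 8-byte aligned. */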
77 
78 #define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))
79 
80 #define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
81 
82 #define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
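/* note: FORE200E_NEXT_ENTRY() advances a circular queue index in place and also
   evaluates to the new value; it is used to walk the cmd, tx, rx and buffer
   supply rings. */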
83 
84 #if 1
85 #define ASSERT(expr)     if (!(expr)) { \
86 			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
87 				    __func__, __LINE__, #expr); \
88 			     panic(FORE200E "%s", __func__); \
89 			 }
90 #else
91 #define ASSERT(expr)     do {} while (0)
92 #endif
93 
94 
95 static const struct atmdev_ops   fore200e_ops;
96 
97 static LIST_HEAD(fore200e_boards);
98 
99 
100 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
101 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
102 
103 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
104     { BUFFER_S1_NBR, BUFFER_L1_NBR },
105     { BUFFER_S2_NBR, BUFFER_L2_NBR }
106 };
107 
108 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
109     { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
110     { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
111 };
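/* note: both tables are indexed by [ buffer scheme ][ buffer magnitude ]: two
   (identical) buffer schemes, each with a small and a large rx buffer pool, giving
   respectively the number of buffers and the byte size of each buffer. */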
112 
113 
114 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
115 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
116 #endif
117 
118 
119 #if 0 /* currently unused */
120 static int
121 fore200e_fore2atm_aal(enum fore200e_aal aal)
122 {
123     switch(aal) {
124     case FORE200E_AAL0:  return ATM_AAL0;
125     case FORE200E_AAL34: return ATM_AAL34;
126     case FORE200E_AAL5:  return ATM_AAL5;
127     }
128 
129     return -EINVAL;
130 }
131 #endif
132 
133 
134 static enum fore200e_aal
135 fore200e_atm2fore_aal(int aal)
136 {
137     switch(aal) {
138     case ATM_AAL0:  return FORE200E_AAL0;
139     case ATM_AAL34: return FORE200E_AAL34;
140     case ATM_AAL1:
141     case ATM_AAL2:
142     case ATM_AAL5:  return FORE200E_AAL5;
143     }
144 
145     return -EINVAL;
146 }
147 
148 
149 static char*
150 fore200e_irq_itoa(int irq)
151 {
152     static char str[8];
153     sprintf(str, "%d", irq);
154     return str;
155 }
156 
157 
158 /* allocate and align a chunk of memory intended to hold the data being exchanged
159    between the driver and the adapter (using streaming DVMA) */
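/* implementation note: the chunk is over-allocated by 'alignment' bytes and
   align_addr is offset into the allocation so that it starts on an 'alignment'
   boundary; only the aligned 'size' bytes are then DMA-mapped. */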
160 
161 static int
162 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
163 {
164     unsigned long offset = 0;
165 
166     if (alignment <= sizeof(int))
167 	alignment = 0;
168 
169     chunk->alloc_size = size + alignment;
170     chunk->direction  = direction;
171 
172     chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
173     if (chunk->alloc_addr == NULL)
174 	return -ENOMEM;
175 
176     if (alignment > 0)
177 	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
178 
179     chunk->align_addr = chunk->alloc_addr + offset;
180 
181     chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
182 				     size, direction);
183     if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
184 	kfree(chunk->alloc_addr);
185 	return -ENOMEM;
186     }
187     return 0;
188 }
189 
190 
191 /* free a chunk of memory */
192 
193 static void
194 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
195 {
196     dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
197 		     chunk->direction);
198     kfree(chunk->alloc_addr);
199 }
200 
201 /*
202  * Allocate a DMA consistent chunk of memory intended to act as a communication
203  * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
204  * and the adapter.
205  */
206 static int
207 fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
208 		int size, int nbr, int alignment)
209 {
210 	/* returned chunks are page-aligned */
211 	chunk->alloc_size = size * nbr;
212 	chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
213 					       &chunk->dma_addr, GFP_KERNEL);
214 	if (!chunk->alloc_addr)
215 		return -ENOMEM;
216 	chunk->align_addr = chunk->alloc_addr;
217 	return 0;
218 }
219 
220 /*
221  * Free a DMA consistent chunk of memory.
222  */
223 static void
224 fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
225 {
226 	dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
227 			  chunk->dma_addr);
228 }
229 
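/* note: fore200e_spin() is a busy-wait that simply burns CPU until 'msecs' have
   elapsed; it is used by the bus reset handlers below. */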
230 static void
231 fore200e_spin(int msecs)
232 {
233     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
234     while (time_before(jiffies, timeout));
235 }
236 
237 
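/* fore200e_poll() spins on a host-resident status word shared with the firmware:
   the driver sets it to STATUS_PENDING before issuing a command, the firmware
   overwrites it with STATUS_COMPLETE (or flags STATUS_ERROR) when done, and the
   driver then releases the queue entry by writing STATUS_FREE. */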
238 static int
239 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
240 {
241     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
242     int           ok;
243 
244     mb();
245     do {
246 	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
247 	    break;
248 
249     } while (time_before(jiffies, timeout));
250 
251 #if 1
252     if (!ok) {
253 	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
254 	       *addr, val);
255     }
256 #endif
257 
258     return ok;
259 }
260 
261 
262 static int
263 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
264 {
265     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
266     int           ok;
267 
268     do {
269 	if ((ok = (fore200e->bus->read(addr) == val)))
270 	    break;
271 
272     } while (time_before(jiffies, timeout));
273 
274 #if 1
275     if (!ok) {
276 	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
277 	       fore200e->bus->read(addr), val);
278     }
279 #endif
280 
281     return ok;
282 }
283 
284 
285 static void
286 fore200e_free_rx_buf(struct fore200e* fore200e)
287 {
288     int scheme, magn, nbr;
289     struct buffer* buffer;
290 
291     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
292 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
293 
294 	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
295 
296 		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
297 
298 		    struct chunk* data = &buffer[ nbr ].data;
299 
300 		    if (data->alloc_addr != NULL)
301 			fore200e_chunk_free(fore200e, data);
302 		}
303 	    }
304 	}
305     }
306 }
307 
308 
309 static void
310 fore200e_uninit_bs_queue(struct fore200e* fore200e)
311 {
312     int scheme, magn;
313 
314     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
315 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
316 
317 	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
318 	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
319 
320 	    if (status->alloc_addr)
321 		fore200e_dma_chunk_free(fore200e, status);
322 
323 	    if (rbd_block->alloc_addr)
324 		fore200e_dma_chunk_free(fore200e, rbd_block);
325 	}
326     }
327 }
328 
329 
330 static int
331 fore200e_reset(struct fore200e* fore200e, int diag)
332 {
333     int ok;
334 
335     fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
336 
337     fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
338 
339     fore200e->bus->reset(fore200e);
340 
341     if (diag) {
342 	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
343 	if (ok == 0) {
344 
345 	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
346 	    return -ENODEV;
347 	}
348 
349 	printk(FORE200E "device %s self-test passed\n", fore200e->name);
350 
351 	fore200e->state = FORE200E_STATE_RESET;
352     }
353 
354     return 0;
355 }
356 
357 
358 static void
359 fore200e_shutdown(struct fore200e* fore200e)
360 {
361     printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
362 	   fore200e->name, fore200e->phys_base,
363 	   fore200e_irq_itoa(fore200e->irq));
364 
365     if (fore200e->state > FORE200E_STATE_RESET) {
366 	/* first, reset the board to prevent further interrupts or data transfers */
367 	fore200e_reset(fore200e, 0);
368     }
369 
370     /* then, release all allocated resources */
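    /* the switch below deliberately falls through from the most advanced state
       reached during initialization, undoing each setup step in reverse order */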
371     switch(fore200e->state) {
372 
373     case FORE200E_STATE_COMPLETE:
374 	kfree(fore200e->stats);
375 
376 	fallthrough;
377     case FORE200E_STATE_IRQ:
378 	free_irq(fore200e->irq, fore200e->atm_dev);
379 
380 	fallthrough;
381     case FORE200E_STATE_ALLOC_BUF:
382 	fore200e_free_rx_buf(fore200e);
383 
384 	fallthrough;
385     case FORE200E_STATE_INIT_BSQ:
386 	fore200e_uninit_bs_queue(fore200e);
387 
388 	fallthrough;
389     case FORE200E_STATE_INIT_RXQ:
390 	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
391 	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
392 
393 	fallthrough;
394     case FORE200E_STATE_INIT_TXQ:
395 	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
396 	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
397 
398 	fallthrough;
399     case FORE200E_STATE_INIT_CMDQ:
400 	fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
401 
402 	fallthrough;
403     case FORE200E_STATE_INITIALIZE:
404 	/* nothing to do for that state */
405 
406     case FORE200E_STATE_START_FW:
407 	/* nothing to do for that state */
408 
409     case FORE200E_STATE_RESET:
410 	/* nothing to do for that state */
411 
412     case FORE200E_STATE_MAP:
413 	fore200e->bus->unmap(fore200e);
414 
415 	fallthrough;
416     case FORE200E_STATE_CONFIGURE:
417 	/* nothing to do for that state */
418 
419     case FORE200E_STATE_REGISTER:
420 	/* XXX shouldn't we *start* by deregistering the device? */
421 	atm_dev_deregister(fore200e->atm_dev);
422 
423 	fallthrough;
424     case FORE200E_STATE_BLANK:
425 	/* nothing to do for that state */
426 	break;
427     }
428 }
429 
430 
431 #ifdef CONFIG_PCI
432 
433 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
434 {
435     /* on big-endian hosts, the board is configured to convert
436        the endianness of slave RAM accesses  */
437     return le32_to_cpu(readl(addr));
438 }
439 
440 
441 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
442 {
443     /* on big-endian hosts, the board is configured to convert
444        the endianness of slave RAM accesses  */
445     writel(cpu_to_le32(val), addr);
446 }
447 
448 static int
449 fore200e_pca_irq_check(struct fore200e* fore200e)
450 {
451     /* this is a 1-bit register */
452     int irq_posted = readl(fore200e->regs.pca.psr);
453 
454 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
455     if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
456 	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
457     }
458 #endif
459 
460     return irq_posted;
461 }
462 
463 
464 static void
465 fore200e_pca_irq_ack(struct fore200e* fore200e)
466 {
467     writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
468 }
469 
470 
471 static void
472 fore200e_pca_reset(struct fore200e* fore200e)
473 {
474     writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
475     fore200e_spin(10);
476     writel(0, fore200e->regs.pca.hcr);
477 }
478 
479 
480 static int fore200e_pca_map(struct fore200e* fore200e)
481 {
482     DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
483 
484     fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
485 
486     if (fore200e->virt_base == NULL) {
487 	printk(FORE200E "can't map device %s\n", fore200e->name);
488 	return -EFAULT;
489     }
490 
491     DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
492 
493     /* gain access to the PCA specific registers  */
494     fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
495     fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
496     fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
497 
498     fore200e->state = FORE200E_STATE_MAP;
499     return 0;
500 }
501 
502 
503 static void
504 fore200e_pca_unmap(struct fore200e* fore200e)
505 {
506     DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
507 
508     if (fore200e->virt_base != NULL)
509 	iounmap(fore200e->virt_base);
510 }
511 
512 
513 static int fore200e_pca_configure(struct fore200e *fore200e)
514 {
515     struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
516     u8              master_ctrl, latency;
517 
518     DPRINTK(2, "device %s being configured\n", fore200e->name);
519 
520     if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
521 	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
522 	return -EIO;
523     }
524 
525     pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
526 
527     master_ctrl = master_ctrl
528 #if defined(__BIG_ENDIAN)
529 	/* request the PCA board to convert the endianness of slave RAM accesses */
530 	| PCA200E_CTRL_CONVERT_ENDIAN
531 #endif
532 #if 0
533         | PCA200E_CTRL_DIS_CACHE_RD
534         | PCA200E_CTRL_DIS_WRT_INVAL
535         | PCA200E_CTRL_ENA_CONT_REQ_MODE
536         | PCA200E_CTRL_2_CACHE_WRT_INVAL
537 #endif
538 	| PCA200E_CTRL_LARGE_PCI_BURSTS;
539 
540     pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
541 
542     /* raise latency from 32 (default) to 192, as this seems to prevent NIC
543        lockups (under heavy rx loads) due to a continuous 'FIFO OUT full' condition.
544        this may impact the performance of other PCI devices on the same bus, though */
545     latency = 192;
546     pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
547 
548     fore200e->state = FORE200E_STATE_CONFIGURE;
549     return 0;
550 }
551 
552 
553 static int __init
554 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
555 {
556     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
557     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
558     struct prom_opcode      opcode;
559     int                     ok;
560     u32                     prom_dma;
561 
562     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
563 
564     opcode.opcode = OPCODE_GET_PROM;
565     opcode.pad    = 0;
566 
567     prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
568 			      DMA_FROM_DEVICE);
569     if (dma_mapping_error(fore200e->dev, prom_dma))
570 	return -ENOMEM;
571 
572     fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
573 
574     *entry->status = STATUS_PENDING;
575 
576     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
577 
578     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
579 
580     *entry->status = STATUS_FREE;
581 
582     dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
583 
584     if (ok == 0) {
585 	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
586 	return -EIO;
587     }
588 
589 #if defined(__BIG_ENDIAN)
590 
591 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
592 
593     /* MAC address is stored as little-endian */
594     swap_here(&prom->mac_addr[0]);
595     swap_here(&prom->mac_addr[4]);
596 #endif
597 
598     return 0;
599 }
600 
601 
602 static int
603 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
604 {
605     struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
606 
607     return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
608 		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
609 }
610 
611 static const struct fore200e_bus fore200e_pci_ops = {
612 	.model_name		= "PCA-200E",
613 	.proc_name		= "pca200e",
614 	.descr_alignment	= 32,
615 	.buffer_alignment	= 4,
616 	.status_alignment	= 32,
617 	.read			= fore200e_pca_read,
618 	.write			= fore200e_pca_write,
619 	.configure		= fore200e_pca_configure,
620 	.map			= fore200e_pca_map,
621 	.reset			= fore200e_pca_reset,
622 	.prom_read		= fore200e_pca_prom_read,
623 	.unmap			= fore200e_pca_unmap,
624 	.irq_check		= fore200e_pca_irq_check,
625 	.irq_ack		= fore200e_pca_irq_ack,
626 	.proc_read		= fore200e_pca_proc_read,
627 };
628 #endif /* CONFIG_PCI */
629 
630 #ifdef CONFIG_SBUS
631 
632 static u32 fore200e_sba_read(volatile u32 __iomem *addr)
633 {
634     return sbus_readl(addr);
635 }
636 
637 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
638 {
639     sbus_writel(val, addr);
640 }
641 
642 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
643 {
644 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
645 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
646 }
647 
648 static int fore200e_sba_irq_check(struct fore200e *fore200e)
649 {
650 	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
651 }
652 
653 static void fore200e_sba_irq_ack(struct fore200e *fore200e)
654 {
655 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
656 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
657 }
658 
659 static void fore200e_sba_reset(struct fore200e *fore200e)
660 {
661 	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
662 	fore200e_spin(10);
663 	fore200e->bus->write(0, fore200e->regs.sba.hcr);
664 }
665 
666 static int __init fore200e_sba_map(struct fore200e *fore200e)
667 {
668 	struct platform_device *op = to_platform_device(fore200e->dev);
669 	unsigned int bursts;
670 
671 	/* gain access to the SBA specific registers  */
672 	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
673 	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
674 	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
675 	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
676 
677 	if (!fore200e->virt_base) {
678 		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
679 		return -EFAULT;
680 	}
681 
682 	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
683 
684 	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
685 
686 	/* get the supported DVMA burst sizes */
687 	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
688 
689 	if (sbus_can_dma_64bit())
690 		sbus_set_sbus64(&op->dev, bursts);
691 
692 	fore200e->state = FORE200E_STATE_MAP;
693 	return 0;
694 }
695 
696 static void fore200e_sba_unmap(struct fore200e *fore200e)
697 {
698 	struct platform_device *op = to_platform_device(fore200e->dev);
699 
700 	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
701 	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
702 	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
703 	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
704 }
705 
706 static int __init fore200e_sba_configure(struct fore200e *fore200e)
707 {
708 	fore200e->state = FORE200E_STATE_CONFIGURE;
709 	return 0;
710 }
711 
712 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
713 {
714 	struct platform_device *op = to_platform_device(fore200e->dev);
715 	const u8 *prop;
716 	int len;
717 
718 	prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
719 	if (!prop)
720 		return -ENODEV;
721 	memcpy(&prom->mac_addr[4], prop, 4);
722 
723 	prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
724 	if (!prop)
725 		return -ENODEV;
726 	memcpy(&prom->mac_addr[2], prop, 4);
727 
728 	prom->serial_number = of_getintprop_default(op->dev.of_node,
729 						    "serialnumber", 0);
730 	prom->hw_revision = of_getintprop_default(op->dev.of_node,
731 						  "promversion", 0);
732 
733 	return 0;
734 }
735 
736 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
737 {
738 	struct platform_device *op = to_platform_device(fore200e->dev);
739 	const struct linux_prom_registers *regs;
740 
741 	regs = of_get_property(op->dev.of_node, "reg", NULL);
742 
743 	return sprintf(page, "   SBUS slot/device:\t\t%d/'%pOFn'\n",
744 		       (regs ? regs->which_io : 0), op->dev.of_node);
745 }
746 
747 static const struct fore200e_bus fore200e_sbus_ops = {
748 	.model_name		= "SBA-200E",
749 	.proc_name		= "sba200e",
750 	.descr_alignment	= 32,
751 	.buffer_alignment	= 64,
752 	.status_alignment	= 32,
753 	.read			= fore200e_sba_read,
754 	.write			= fore200e_sba_write,
755 	.configure		= fore200e_sba_configure,
756 	.map			= fore200e_sba_map,
757 	.reset			= fore200e_sba_reset,
758 	.prom_read		= fore200e_sba_prom_read,
759 	.unmap			= fore200e_sba_unmap,
760 	.irq_enable		= fore200e_sba_irq_enable,
761 	.irq_check		= fore200e_sba_irq_check,
762 	.irq_ack		= fore200e_sba_irq_ack,
763 	.proc_read		= fore200e_sba_proc_read,
764 };
765 #endif /* CONFIG_SBUS */
766 
767 static void
768 fore200e_tx_irq(struct fore200e* fore200e)
769 {
770     struct host_txq*        txq = &fore200e->host_txq;
771     struct host_txq_entry*  entry;
772     struct atm_vcc*         vcc;
773     struct fore200e_vc_map* vc_map;
774 
775     if (fore200e->host_txq.txing == 0)
776 	return;
777 
778     for (;;) {
779 
780 	entry = &txq->host_entry[ txq->tail ];
781 
782         if ((*entry->status & STATUS_COMPLETE) == 0) {
783 	    break;
784 	}
785 
786 	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
787 		entry, txq->tail, entry->vc_map, entry->skb);
788 
789 	/* free copy of misaligned data */
790 	kfree(entry->data);
791 
792 	/* remove DMA mapping */
793 	dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
794 				 DMA_TO_DEVICE);
795 
796 	vc_map = entry->vc_map;
797 
798 	/* vcc closed since the time the entry was submitted for tx? */
799 	if ((vc_map->vcc == NULL) ||
800 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
801 
802 	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
803 		    fore200e->atm_dev->number);
804 
805 	    dev_kfree_skb_any(entry->skb);
806 	}
807 	else {
808 	    ASSERT(vc_map->vcc);
809 
810 	    /* vcc closed then immediately re-opened? */
811 	    if (vc_map->incarn != entry->incarn) {
812 
813 		/* when a vcc is closed, some PDUs may still be pending in the tx queue.
814 		   if the same vcc is immediately re-opened, those pending PDUs must
815 		   not be popped after the completion of their emission, as they refer
816 		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
817 		   would be decremented by the size of the (unrelated) skb, possibly
818 		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
819 		   we thus bind the tx entry to the current incarnation of the vcc
820 		   when the entry is submitted for tx. When the tx later completes,
821 		   if the incarnation number of the tx entry does not match the one
822 		   of the vcc, then this implies that the vcc has been closed then re-opened.
823 		   we thus just drop the skb here. */
824 
825 		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
826 			fore200e->atm_dev->number);
827 
828 		dev_kfree_skb_any(entry->skb);
829 	    }
830 	    else {
831 		vcc = vc_map->vcc;
832 		ASSERT(vcc);
833 
834 		/* notify tx completion */
835 		if (vcc->pop) {
836 		    vcc->pop(vcc, entry->skb);
837 		}
838 		else {
839 		    dev_kfree_skb_any(entry->skb);
840 		}
841 
842 		/* check error condition */
843 		if (*entry->status & STATUS_ERROR)
844 		    atomic_inc(&vcc->stats->tx_err);
845 		else
846 		    atomic_inc(&vcc->stats->tx);
847 	    }
848 	}
849 
850 	*entry->status = STATUS_FREE;
851 
852 	fore200e->host_txq.txing--;
853 
854 	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
855     }
856 }
857 
858 
859 #ifdef FORE200E_BSQ_DEBUG
860 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
861 {
862     struct buffer* buffer;
863     int count = 0;
864 
865     buffer = bsq->freebuf;
866     while (buffer) {
867 
868 	if (buffer->supplied) {
869 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
870 		   where, scheme, magn, buffer->index);
871 	}
872 
873 	if (buffer->magn != magn) {
874 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
875 		   where, scheme, magn, buffer->index, buffer->magn);
876 	}
877 
878 	if (buffer->scheme != scheme) {
879 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
880 		   where, scheme, magn, buffer->index, buffer->scheme);
881 	}
882 
883 	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
884 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
885 		   where, scheme, magn, buffer->index);
886 	}
887 
888 	count++;
889 	buffer = buffer->next;
890     }
891 
892     if (count != bsq->freebuf_count) {
893 	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
894 	       where, scheme, magn, count, bsq->freebuf_count);
895     }
896     return 0;
897 }
898 #endif
899 
900 
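/* replenish the receive buffer supply queues: whenever at least RBD_BLK_SIZE free
   host buffers are available for a given (scheme, magnitude) pair, their DMA
   addresses are packed into a receive buffer descriptor block and handed to the
   firmware in a single supply operation. */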
901 static void
902 fore200e_supply(struct fore200e* fore200e)
903 {
904     int  scheme, magn, i;
905 
906     struct host_bsq*       bsq;
907     struct host_bsq_entry* entry;
908     struct buffer*         buffer;
909 
910     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
911 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
912 
913 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
914 
915 #ifdef FORE200E_BSQ_DEBUG
916 	    bsq_audit(1, bsq, scheme, magn);
917 #endif
918 	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {
919 
920 		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
921 			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
922 
923 		entry = &bsq->host_entry[ bsq->head ];
924 
925 		for (i = 0; i < RBD_BLK_SIZE; i++) {
926 
927 		    /* take the first buffer in the free buffer list */
928 		    buffer = bsq->freebuf;
929 		    if (!buffer) {
930 			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
931 			       scheme, magn, bsq->freebuf_count);
932 			return;
933 		    }
934 		    bsq->freebuf = buffer->next;
935 
936 #ifdef FORE200E_BSQ_DEBUG
937 		    if (buffer->supplied)
938 			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
939 			       scheme, magn, buffer->index);
940 		    buffer->supplied = 1;
941 #endif
942 		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
943 		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
944 		}
945 
946 		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
947 
948  		/* decrease the number of free rx buffers accordingly */
949 		bsq->freebuf_count -= RBD_BLK_SIZE;
950 
951 		*entry->status = STATUS_PENDING;
952 		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
953 	    }
954 	}
955     }
956 }
957 
958 
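/* reassemble a received PDU from its receive segments into a freshly allocated skb
   and push it up the ATM stack; with FORE200E_52BYTE_AAL0_SDU, raw AAL0 cells get
   the 4-byte cell header rebuilt from the rpd prepended, so that atmdump-like
   applications see the expected 52-byte SDU. */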
959 static int
960 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
961 {
962     struct sk_buff*      skb;
963     struct buffer*       buffer;
964     struct fore200e_vcc* fore200e_vcc;
965     int                  i, pdu_len = 0;
966 #ifdef FORE200E_52BYTE_AAL0_SDU
967     u32                  cell_header = 0;
968 #endif
969 
970     ASSERT(vcc);
971 
972     fore200e_vcc = FORE200E_VCC(vcc);
973     ASSERT(fore200e_vcc);
974 
975 #ifdef FORE200E_52BYTE_AAL0_SDU
976     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
977 
978 	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
979 	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
980                       (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
981                       (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
982                        rpd->atm_header.clp;
983 	pdu_len = 4;
984     }
985 #endif
986 
987     /* compute total PDU length */
988     for (i = 0; i < rpd->nseg; i++)
989 	pdu_len += rpd->rsd[ i ].length;
990 
991     skb = alloc_skb(pdu_len, GFP_ATOMIC);
992     if (skb == NULL) {
993 	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
994 
995 	atomic_inc(&vcc->stats->rx_drop);
996 	return -ENOMEM;
997     }
998 
999     __net_timestamp(skb);
1000 
1001 #ifdef FORE200E_52BYTE_AAL0_SDU
1002     if (cell_header) {
1003 	*((u32*)skb_put(skb, 4)) = cell_header;
1004     }
1005 #endif
1006 
1007     /* reassemble segments */
1008     for (i = 0; i < rpd->nseg; i++) {
1009 
1010 	/* rebuild rx buffer address from rsd handle */
1011 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1012 
1013 	/* Make device DMA transfer visible to CPU.  */
1014 	dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
1015 				rpd->rsd[i].length, DMA_FROM_DEVICE);
1016 
1017 	skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
1018 
1019 	/* Now let the device get at it again.  */
1020 	dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
1021 				   rpd->rsd[i].length, DMA_FROM_DEVICE);
1022     }
1023 
1024     DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1025 
1026     if (pdu_len < fore200e_vcc->rx_min_pdu)
1027 	fore200e_vcc->rx_min_pdu = pdu_len;
1028     if (pdu_len > fore200e_vcc->rx_max_pdu)
1029 	fore200e_vcc->rx_max_pdu = pdu_len;
1030     fore200e_vcc->rx_pdu++;
1031 
1032     /* push PDU */
1033     if (atm_charge(vcc, skb->truesize) == 0) {
1034 
1035 	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1036 		vcc->itf, vcc->vpi, vcc->vci);
1037 
1038 	dev_kfree_skb_any(skb);
1039 
1040 	atomic_inc(&vcc->stats->rx_drop);
1041 	return -ENOMEM;
1042     }
1043 
1044     vcc->push(vcc, skb);
1045     atomic_inc(&vcc->stats->rx);
1046 
1047     return 0;
1048 }
1049 
1050 
1051 static void
1052 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1053 {
1054     struct host_bsq* bsq;
1055     struct buffer*   buffer;
1056     int              i;
1057 
1058     for (i = 0; i < rpd->nseg; i++) {
1059 
1060 	/* rebuild rx buffer address from rsd handle */
1061 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1062 
1063 	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1064 
1065 #ifdef FORE200E_BSQ_DEBUG
1066 	bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1067 
1068 	if (buffer->supplied == 0)
1069 	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1070 		   buffer->scheme, buffer->magn, buffer->index);
1071 	buffer->supplied = 0;
1072 #endif
1073 
1074 	/* re-insert the buffer into the free buffer list */
1075 	buffer->next = bsq->freebuf;
1076 	bsq->freebuf = buffer;
1077 
1078 	/* then increment the number of free rx buffers */
1079 	bsq->freebuf_count++;
1080     }
1081 }
1082 
1083 
1084 static void
1085 fore200e_rx_irq(struct fore200e* fore200e)
1086 {
1087     struct host_rxq*        rxq = &fore200e->host_rxq;
1088     struct host_rxq_entry*  entry;
1089     struct atm_vcc*         vcc;
1090     struct fore200e_vc_map* vc_map;
1091 
1092     for (;;) {
1093 
1094 	entry = &rxq->host_entry[ rxq->head ];
1095 
1096 	/* no more received PDUs */
1097 	if ((*entry->status & STATUS_COMPLETE) == 0)
1098 	    break;
1099 
1100 	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1101 
1102 	if ((vc_map->vcc == NULL) ||
1103 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1104 
1105 	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1106 		    fore200e->atm_dev->number,
1107 		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1108 	}
1109 	else {
1110 	    vcc = vc_map->vcc;
1111 	    ASSERT(vcc);
1112 
1113 	    if ((*entry->status & STATUS_ERROR) == 0) {
1114 
1115 		fore200e_push_rpd(fore200e, vcc, entry->rpd);
1116 	    }
1117 	    else {
1118 		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1119 			fore200e->atm_dev->number,
1120 			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1121 		atomic_inc(&vcc->stats->rx_err);
1122 	    }
1123 	}
1124 
1125 	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1126 
1127 	fore200e_collect_rpd(fore200e, entry->rpd);
1128 
1129 	/* rewrite the rpd address to ack the received PDU */
1130 	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1131 	*entry->status = STATUS_FREE;
1132 
1133 	fore200e_supply(fore200e);
1134     }
1135 }
1136 
1137 
1138 #ifndef FORE200E_USE_TASKLET
1139 static void
1140 fore200e_irq(struct fore200e* fore200e)
1141 {
1142     unsigned long flags;
1143 
1144     spin_lock_irqsave(&fore200e->q_lock, flags);
1145     fore200e_rx_irq(fore200e);
1146     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1147 
1148     spin_lock_irqsave(&fore200e->q_lock, flags);
1149     fore200e_tx_irq(fore200e);
1150     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1151 }
1152 #endif
1153 
1154 
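/* top-half interrupt handler: first check whether this board actually raised the
   interrupt (the IRQ line may be shared), then either run rx/tx completion inline
   or defer it to the tasklets, and finally ack the interrupt at board level. */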
1155 static irqreturn_t
1156 fore200e_interrupt(int irq, void* dev)
1157 {
1158     struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1159 
1160     if (fore200e->bus->irq_check(fore200e) == 0) {
1161 
1162 	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1163 	return IRQ_NONE;
1164     }
1165     DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1166 
1167 #ifdef FORE200E_USE_TASKLET
1168     tasklet_schedule(&fore200e->tx_tasklet);
1169     tasklet_schedule(&fore200e->rx_tasklet);
1170 #else
1171     fore200e_irq(fore200e);
1172 #endif
1173 
1174     fore200e->bus->irq_ack(fore200e);
1175     return IRQ_HANDLED;
1176 }
1177 
1178 
1179 #ifdef FORE200E_USE_TASKLET
1180 static void
1181 fore200e_tx_tasklet(unsigned long data)
1182 {
1183     struct fore200e* fore200e = (struct fore200e*) data;
1184     unsigned long flags;
1185 
1186     DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1187 
1188     spin_lock_irqsave(&fore200e->q_lock, flags);
1189     fore200e_tx_irq(fore200e);
1190     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1191 }
1192 
1193 
1194 static void
1195 fore200e_rx_tasklet(unsigned long data)
1196 {
1197     struct fore200e* fore200e = (struct fore200e*) data;
1198     unsigned long    flags;
1199 
1200     DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1201 
1202     spin_lock_irqsave(&fore200e->q_lock, flags);
1203     fore200e_rx_irq((struct fore200e*) data);
1204     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1205 }
1206 #endif
1207 
1208 
1209 static int
1210 fore200e_select_scheme(struct atm_vcc* vcc)
1211 {
1212     /* fairly balance the VCs over (identical) buffer schemes */
1213     int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1214 
1215     DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1216 	    vcc->itf, vcc->vpi, vcc->vci, scheme);
1217 
1218     return scheme;
1219 }
1220 
1221 
1222 static int
1223 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1224 {
1225     struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
1226     struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
1227     struct activate_opcode   activ_opcode;
1228     struct deactivate_opcode deactiv_opcode;
1229     struct vpvc              vpvc;
1230     int                      ok;
1231     enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);
1232 
1233     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1234 
1235     if (activate) {
1236 	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1237 
1238 	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1239 	activ_opcode.aal    = aal;
1240 	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1241 	activ_opcode.pad    = 0;
1242     }
1243     else {
1244 	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1245 	deactiv_opcode.pad    = 0;
1246     }
1247 
1248     vpvc.vci = vcc->vci;
1249     vpvc.vpi = vcc->vpi;
1250 
1251     *entry->status = STATUS_PENDING;
1252 
1253     if (activate) {
1254 
1255 #ifdef FORE200E_52BYTE_AAL0_SDU
1256 	mtu = 48;
1257 #endif
1258 	/* the MTU is not used by the cp, except in the case of AAL0 */
1259 	fore200e->bus->write(mtu,                        &entry->cp_entry->cmd.activate_block.mtu);
1260 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1261 	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1262     }
1263     else {
1264 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1265 	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1266     }
1267 
1268     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1269 
1270     *entry->status = STATUS_FREE;
1271 
1272     if (ok == 0) {
1273 	printk(FORE200E "unable to %s VC %d.%d.%d\n",
1274 	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1275 	return -EIO;
1276     }
1277 
1278     DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1279 	    activate ? "open" : "clos");
1280 
1281     return 0;
1282 }
1283 
1284 
1285 #define FORE200E_MAX_BACK2BACK_CELLS 255    /* XXX depends on CDVT */
1286 
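/* derive the pseudo-CBR shaping parameters: out of every window of
   FORE200E_MAX_BACK2BACK_CELLS cells, 'data_cells' carry data and 'idle_cells' are
   forced idle. As a rough example (assuming the standard ATM_OC3_PCR line rate), a
   requested PCR of half the OC-3 rate yields data_cells ~= 127 and idle_cells ~= 128;
   requesting the full line rate or more disables shaping altogether. */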
1287 static void
1288 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1289 {
1290     if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1291 
1292 	/* compute the data cells to idle cells ratio from the tx PCR */
1293 	rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1294 	rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1295     }
1296     else {
1297 	/* disable rate control */
1298 	rate->data_cells = rate->idle_cells = 0;
1299     }
1300 }
1301 
1302 
1303 static int
1304 fore200e_open(struct atm_vcc *vcc)
1305 {
1306     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1307     struct fore200e_vcc*    fore200e_vcc;
1308     struct fore200e_vc_map* vc_map;
1309     unsigned long	    flags;
1310     int			    vci = vcc->vci;
1311     short		    vpi = vcc->vpi;
1312 
1313     ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1314     ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1315 
1316     spin_lock_irqsave(&fore200e->q_lock, flags);
1317 
1318     vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1319     if (vc_map->vcc) {
1320 
1321 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1322 
1323 	printk(FORE200E "VC %d.%d.%d already in use\n",
1324 	       fore200e->atm_dev->number, vpi, vci);
1325 
1326 	return -EINVAL;
1327     }
1328 
1329     vc_map->vcc = vcc;
1330 
1331     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1332 
1333     fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1334     if (fore200e_vcc == NULL) {
1335 	vc_map->vcc = NULL;
1336 	return -ENOMEM;
1337     }
1338 
1339     DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1340 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1341 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1342 	    fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1343 	    vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1344 	    fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1345 	    vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1346 
1347     /* pseudo-CBR bandwidth requested? */
1348     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1349 
1350 	mutex_lock(&fore200e->rate_mtx);
1351 	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1352 	    mutex_unlock(&fore200e->rate_mtx);
1353 
1354 	    kfree(fore200e_vcc);
1355 	    vc_map->vcc = NULL;
1356 	    return -EAGAIN;
1357 	}
1358 
1359 	/* reserve bandwidth */
1360 	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1361 	mutex_unlock(&fore200e->rate_mtx);
1362     }
1363 
1364     vcc->itf = vcc->dev->number;
1365 
1366     set_bit(ATM_VF_PARTIAL,&vcc->flags);
1367     set_bit(ATM_VF_ADDR, &vcc->flags);
1368 
1369     vcc->dev_data = fore200e_vcc;
1370 
1371     if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1372 
1373 	vc_map->vcc = NULL;
1374 
1375 	clear_bit(ATM_VF_ADDR, &vcc->flags);
1376 	clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1377 
1378 	vcc->dev_data = NULL;
1379 
1380 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1381 
1382 	kfree(fore200e_vcc);
1383 	return -EINVAL;
1384     }
1385 
1386     /* compute rate control parameters */
1387     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1388 
1389 	fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1390 	set_bit(ATM_VF_HASQOS, &vcc->flags);
1391 
1392 	DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1393 		vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1394 		vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1395 		fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1396     }
1397 
1398     fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1399     fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1400     fore200e_vcc->tx_pdu     = fore200e_vcc->rx_pdu     = 0;
1401 
1402     /* new incarnation of the vcc */
1403     vc_map->incarn = ++fore200e->incarn_count;
1404 
1405     /* VC unusable before this flag is set */
1406     set_bit(ATM_VF_READY, &vcc->flags);
1407 
1408     return 0;
1409 }
1410 
1411 
1412 static void
1413 fore200e_close(struct atm_vcc* vcc)
1414 {
1415     struct fore200e_vcc*    fore200e_vcc;
1416     struct fore200e*        fore200e;
1417     struct fore200e_vc_map* vc_map;
1418     unsigned long           flags;
1419 
1420     ASSERT(vcc);
1421     fore200e = FORE200E_DEV(vcc->dev);
1422 
1423     ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1424     ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1425 
1426     DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1427 
1428     clear_bit(ATM_VF_READY, &vcc->flags);
1429 
1430     fore200e_activate_vcin(fore200e, 0, vcc, 0);
1431 
1432     spin_lock_irqsave(&fore200e->q_lock, flags);
1433 
1434     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1435 
1436     /* the vc is no longer considered as "in use" by fore200e_open() */
1437     vc_map->vcc = NULL;
1438 
1439     vcc->itf = vcc->vci = vcc->vpi = 0;
1440 
1441     fore200e_vcc = FORE200E_VCC(vcc);
1442     vcc->dev_data = NULL;
1443 
1444     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1445 
1446     /* release reserved bandwidth, if any */
1447     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1448 
1449 	mutex_lock(&fore200e->rate_mtx);
1450 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1451 	mutex_unlock(&fore200e->rate_mtx);
1452 
1453 	clear_bit(ATM_VF_HASQOS, &vcc->flags);
1454     }
1455 
1456     clear_bit(ATM_VF_ADDR, &vcc->flags);
1457     clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1458 
1459     ASSERT(fore200e_vcc);
1460     kfree(fore200e_vcc);
1461 }
1462 
1463 
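/* transmit path: the PDU is copied into a bounce buffer when its data is not
   32-bit aligned or when an AAL0 PDU is not a whole number of 48-byte cell
   payloads (the copy is then zero-padded); the buffer is DMA-mapped, described by
   a transmit PDU descriptor, and queued to the firmware, with tx completion
   handled later in fore200e_tx_irq(). */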
1464 static int
1465 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1466 {
1467     struct fore200e*        fore200e;
1468     struct fore200e_vcc*    fore200e_vcc;
1469     struct fore200e_vc_map* vc_map;
1470     struct host_txq*        txq;
1471     struct host_txq_entry*  entry;
1472     struct tpd*             tpd;
1473     struct tpd_haddr        tpd_haddr;
1474     int                     retry        = CONFIG_ATM_FORE200E_TX_RETRY;
1475     int                     tx_copy      = 0;
1476     int                     tx_len       = skb->len;
1477     u32*                    cell_header  = NULL;
1478     unsigned char*          skb_data;
1479     int                     skb_len;
1480     unsigned char*          data;
1481     unsigned long           flags;
1482 
1483     if (!vcc)
1484         return -EINVAL;
1485 
1486     fore200e = FORE200E_DEV(vcc->dev);
1487     fore200e_vcc = FORE200E_VCC(vcc);
1488 
1489     if (!fore200e)
1490         return -EINVAL;
1491 
1492     txq = &fore200e->host_txq;
1493     if (!fore200e_vcc)
1494         return -EINVAL;
1495 
1496     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1497 	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1498 	dev_kfree_skb_any(skb);
1499 	return -EINVAL;
1500     }
1501 
1502 #ifdef FORE200E_52BYTE_AAL0_SDU
1503     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1504 	cell_header = (u32*) skb->data;
1505 	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
1506 	skb_len     = tx_len = skb->len  - 4;
1507 
1508 	DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1509     }
1510     else
1511 #endif
1512     {
1513 	skb_data = skb->data;
1514 	skb_len  = skb->len;
1515     }
1516 
1517     if (((unsigned long)skb_data) & 0x3) {
1518 
1519 	DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1520 	tx_copy = 1;
1521 	tx_len  = skb_len;
1522     }
1523 
1524     if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1525 
1526         /* this simply NUKES the PCA board */
1527 	DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1528 	tx_copy = 1;
1529 	tx_len  = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1530     }
1531 
1532     if (tx_copy) {
1533 	data = kmalloc(tx_len, GFP_ATOMIC);
1534 	if (data == NULL) {
1535 	    if (vcc->pop) {
1536 		vcc->pop(vcc, skb);
1537 	    }
1538 	    else {
1539 		dev_kfree_skb_any(skb);
1540 	    }
1541 	    return -ENOMEM;
1542 	}
1543 
1544 	memcpy(data, skb_data, skb_len);
1545 	if (skb_len < tx_len)
1546 	    memset(data + skb_len, 0x00, tx_len - skb_len);
1547     }
1548     else {
1549 	data = skb_data;
1550     }
1551 
1552     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1553     ASSERT(vc_map->vcc == vcc);
1554 
1555   retry_here:
1556 
1557     spin_lock_irqsave(&fore200e->q_lock, flags);
1558 
1559     entry = &txq->host_entry[ txq->head ];
1560 
1561     if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1562 
1563 	/* try to free completed tx queue entries */
1564 	fore200e_tx_irq(fore200e);
1565 
1566 	if (*entry->status != STATUS_FREE) {
1567 
1568 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1569 
1570 	    /* retry once again? */
1571 	    if (--retry > 0) {
1572 		udelay(50);
1573 		goto retry_here;
1574 	    }
1575 
1576 	    atomic_inc(&vcc->stats->tx_err);
1577 
1578 	    fore200e->tx_sat++;
1579 	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1580 		    fore200e->name, fore200e->cp_queues->heartbeat);
1581 	    if (vcc->pop) {
1582 		vcc->pop(vcc, skb);
1583 	    }
1584 	    else {
1585 		dev_kfree_skb_any(skb);
1586 	    }
1587 
1588 	    if (tx_copy)
1589 		kfree(data);
1590 
1591 	    return -ENOBUFS;
1592 	}
1593     }
1594 
1595     entry->incarn = vc_map->incarn;
1596     entry->vc_map = vc_map;
1597     entry->skb    = skb;
1598     entry->data   = tx_copy ? data : NULL;
1599 
1600     tpd = entry->tpd;
1601     tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1602 					  DMA_TO_DEVICE);
1603     if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
1604 	if (tx_copy)
1605 	    kfree(data);
1606 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1607 	return -ENOMEM;
1608     }
1609     tpd->tsd[ 0 ].length = tx_len;
1610 
1611     FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1612     txq->txing++;
1613 
1614     /* The dma_map call above implies a dma_sync so the device can use it,
1615      * thus no explicit dma_sync call is necessary here.
1616      */
1617 
1618     DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1619 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1620 	    tpd->tsd[0].length, skb_len);
1621 
1622     if (skb_len < fore200e_vcc->tx_min_pdu)
1623 	fore200e_vcc->tx_min_pdu = skb_len;
1624     if (skb_len > fore200e_vcc->tx_max_pdu)
1625 	fore200e_vcc->tx_max_pdu = skb_len;
1626     fore200e_vcc->tx_pdu++;
1627 
1628     /* set tx rate control information */
1629     tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1630     tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1631 
1632     if (cell_header) {
1633 	tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1634 	tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1635 	tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1636 	tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1637 	tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1638     }
1639     else {
1640 	/* set the ATM header, common to all cells conveying the PDU */
1641 	tpd->atm_header.clp = 0;
1642 	tpd->atm_header.plt = 0;
1643 	tpd->atm_header.vci = vcc->vci;
1644 	tpd->atm_header.vpi = vcc->vpi;
1645 	tpd->atm_header.gfc = 0;
1646     }
1647 
1648     tpd->spec.length = tx_len;
1649     tpd->spec.nseg   = 1;
1650     tpd->spec.aal    = fore200e_atm2fore_aal(vcc->qos.aal);
1651     tpd->spec.intr   = 1;
1652 
1653     tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);  /* size is expressed in 32 byte blocks */
1654     tpd_haddr.pad   = 0;
1655     tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;          /* shift the address, as we are in a bitfield */
1656 
1657     *entry->status = STATUS_PENDING;
1658     fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1659 
1660     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1661 
1662     return 0;
1663 }
1664 
1665 
1666 static int
1667 fore200e_getstats(struct fore200e* fore200e)
1668 {
1669     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1670     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1671     struct stats_opcode     opcode;
1672     int                     ok;
1673     u32                     stats_dma_addr;
1674 
1675     if (fore200e->stats == NULL) {
1676 	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
1677 	if (fore200e->stats == NULL)
1678 	    return -ENOMEM;
1679     }
1680 
1681     stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
1682 				    sizeof(struct stats), DMA_FROM_DEVICE);
1683     if (dma_mapping_error(fore200e->dev, stats_dma_addr))
1684     	return -ENOMEM;
1685 
1686     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1687 
1688     opcode.opcode = OPCODE_GET_STATS;
1689     opcode.pad    = 0;
1690 
1691     fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1692 
1693     *entry->status = STATUS_PENDING;
1694 
1695     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1696 
1697     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1698 
1699     *entry->status = STATUS_FREE;
1700 
1701     dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1702 
1703     if (ok == 0) {
1704 	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1705 	return -EIO;
1706     }
1707 
1708     return 0;
1709 }
1710 
1711 #if 0 /* currently unused */
1712 static int
1713 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1714 {
1715     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1716     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1717     struct oc3_opcode       opcode;
1718     int                     ok;
1719     u32                     oc3_regs_dma_addr;
1720 
1721     oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1722 
1723     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1724 
1725     opcode.opcode = OPCODE_GET_OC3;
1726     opcode.reg    = 0;
1727     opcode.value  = 0;
1728     opcode.mask   = 0;
1729 
1730     fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1731 
1732     *entry->status = STATUS_PENDING;
1733 
1734     fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1735 
1736     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1737 
1738     *entry->status = STATUS_FREE;
1739 
1740     fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1741 
1742     if (ok == 0) {
1743 	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1744 	return -EIO;
1745     }
1746 
1747     return 0;
1748 }
1749 #endif
1750 
1751 
1752 static int
1753 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1754 {
1755     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1756     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1757     struct oc3_opcode       opcode;
1758     int                     ok;
1759 
1760     DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1761 
1762     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1763 
1764     opcode.opcode = OPCODE_SET_OC3;
1765     opcode.reg    = reg;
1766     opcode.value  = value;
1767     opcode.mask   = mask;
1768 
1769     fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1770 
1771     *entry->status = STATUS_PENDING;
1772 
1773     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1774 
1775     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1776 
1777     *entry->status = STATUS_FREE;
1778 
1779     if (ok == 0) {
1780 	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1781 	return -EIO;
1782     }
1783 
1784     return 0;
1785 }
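
/*
 * Informal usage sketch: fore200e_set_oc3() asks the firmware to perform a
 * masked update of a SUNI PHY register.  A typical call, as in
 * fore200e_setloop() below, toggles the loopback bits of the master control
 * register:
 *
 *     fore200e_set_oc3(fore200e, SUNI_MCT, SUNI_MCT_DLE, SUNI_MCT_DLE);
 *     fore200e_set_oc3(fore200e, SUNI_MCT, 0, SUNI_MCT_DLE | SUNI_MCT_LLE);
 *
 * The first call sets diagnostic (local) loopback, the second clears both
 * loopback bits again.
 */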
1786 
1787 
1788 static int
1789 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1790 {
1791     u32 mct_value, mct_mask;
1792     int error;
1793 
1794     if (!capable(CAP_NET_ADMIN))
1795 	return -EPERM;
1796 
1797     switch (loop_mode) {
1798 
1799     case ATM_LM_NONE:
1800 	mct_value = 0;
1801 	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
1802 	break;
1803 
1804     case ATM_LM_LOC_PHY:
1805 	mct_value = mct_mask = SUNI_MCT_DLE;
1806 	break;
1807 
1808     case ATM_LM_RMT_PHY:
1809 	mct_value = mct_mask = SUNI_MCT_LLE;
1810 	break;
1811 
1812     default:
1813 	return -EINVAL;
1814     }
1815 
1816     error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1817     if (error == 0)
1818 	fore200e->loop_mode = loop_mode;
1819 
1820     return error;
1821 }
1822 
1823 
1824 static int
1825 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1826 {
1827     struct sonet_stats tmp;
1828 
1829     if (fore200e_getstats(fore200e) < 0)
1830 	return -EIO;
1831 
1832     tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1833     tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1834     tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1835     tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1836     tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1837     tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1838     tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1839     tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
1840 	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1841 	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1842     tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)     +
1843 	              be32_to_cpu(fore200e->stats->aal34.cells_received)    +
1844 	              be32_to_cpu(fore200e->stats->aal5.cells_received);
1845 
1846     if (arg)
1847 	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1848 
1849     return 0;
1850 }
1851 
1852 
1853 static int
1854 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1855 {
1856     struct fore200e* fore200e = FORE200E_DEV(dev);
1857 
1858     DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1859 
1860     switch (cmd) {
1861 
1862     case SONET_GETSTAT:
1863 	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1864 
1865     case SONET_GETDIAG:
1866 	return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1867 
1868     case ATM_SETLOOP:
1869 	return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1870 
1871     case ATM_GETLOOP:
1872 	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1873 
1874     case ATM_QUERYLOOP:
1875 	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1876     }
1877 
1878     return -ENOSYS; /* not implemented */
1879 }
1880 
1881 
1882 static int
1883 fore200e_change_qos(struct atm_vcc* vcc, struct atm_qos* qos, int flags)
1884 {
1885     struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1886     struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);
1887 
1888     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1889 	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
1890 	return -EINVAL;
1891     }
1892 
1893     DPRINTK(2, "change_qos %d.%d.%d, "
1894 	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1895 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1896 	    "available_cell_rate = %u",
1897 	    vcc->itf, vcc->vpi, vcc->vci,
1898 	    fore200e_traffic_class[ qos->txtp.traffic_class ],
1899 	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1900 	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
1901 	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1902 	    flags, fore200e->available_cell_rate);
1903 
1904     if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1905 
1906 	mutex_lock(&fore200e->rate_mtx);
1907 	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
1908 	    mutex_unlock(&fore200e->rate_mtx);
1909 	    return -EAGAIN;
1910 	}
1911 
1912 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1913 	fore200e->available_cell_rate -= qos->txtp.max_pcr;
1914 
1915 	mutex_unlock(&fore200e->rate_mtx);
1916 
1917 	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1918 
1919 	/* update rate control parameters */
1920 	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1921 
1922 	set_bit(ATM_VF_HASQOS, &vcc->flags);
1923 
1924 	return 0;
1925     }
1926 
1927     return -EINVAL;
1928 }
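
/*
 * Worked example of the CBR accounting above: with
 * available_cell_rate = 100000 cells/s, an existing reservation of
 * max_pcr = 10000 and a requested max_pcr = 20000, the admission check
 * 100000 + 10000 >= 20000 succeeds and the pool becomes
 * 100000 + 10000 - 20000 = 90000 cells/s.  The old reservation is always
 * credited back before the new one is debited.
 */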
1929 
1930 
1931 static int fore200e_irq_request(struct fore200e *fore200e)
1932 {
1933     if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1934 
1935 	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1936 	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
1937 	return -EBUSY;
1938     }
1939 
1940     printk(FORE200E "IRQ %s reserved for device %s\n",
1941 	   fore200e_irq_itoa(fore200e->irq), fore200e->name);
1942 
1943 #ifdef FORE200E_USE_TASKLET
1944     tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1945     tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1946 #endif
1947 
1948     fore200e->state = FORE200E_STATE_IRQ;
1949     return 0;
1950 }
1951 
1952 
1953 static int fore200e_get_esi(struct fore200e *fore200e)
1954 {
1955     struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
1956     int ok, i;
1957 
1958     if (!prom)
1959 	return -ENOMEM;
1960 
1961     ok = fore200e->bus->prom_read(fore200e, prom);
1962     if (ok < 0) {
1963 	kfree(prom);
1964 	return -EBUSY;
1965     }
1966 
1967     printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1968 	   fore200e->name,
1969 	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
1970 	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1971 
1972     for (i = 0; i < ESI_LEN; i++) {
1973 	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
1974     }
1975 
1976     kfree(prom);
1977 
1978     return 0;
1979 }
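
/*
 * Note: the ESI is taken from the last ESI_LEN bytes of the PROM's 8-byte
 * MAC field (prom->mac_addr[2..7]) and copied both into fore200e->esi and
 * into the registered atm_dev, so the ATM stack can derive addresses from it.
 */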
1980 
1981 
1982 static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
1983 {
1984     int scheme, magn, nbr, size, i;
1985 
1986     struct host_bsq* bsq;
1987     struct buffer*   buffer;
1988 
1989     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1990 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1991 
1992 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
1993 
1994 	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
1995 	    size = fore200e_rx_buf_size[ scheme ][ magn ];
1996 
1997 	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
1998 
1999 	    /* allocate the array of receive buffers */
2000 	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
2001                                            GFP_KERNEL);
2002 
2003 	    if (buffer == NULL)
2004 		return -ENOMEM;
2005 
2006 	    bsq->freebuf = NULL;
2007 
2008 	    for (i = 0; i < nbr; i++) {
2009 
2010 		buffer[ i ].scheme = scheme;
2011 		buffer[ i ].magn   = magn;
2012 #ifdef FORE200E_BSQ_DEBUG
2013 		buffer[ i ].index  = i;
2014 		buffer[ i ].supplied = 0;
2015 #endif
2016 
2017 		/* allocate the receive buffer body */
2018 		if (fore200e_chunk_alloc(fore200e,
2019 					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2020 					 DMA_FROM_DEVICE) < 0) {
2021 
2022 		    while (i > 0)
2023 			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2024 		    kfree(buffer);
2025 
2026 		    return -ENOMEM;
2027 		}
2028 
2029 		/* insert the buffer into the free buffer list */
2030 		buffer[ i ].next = bsq->freebuf;
2031 		bsq->freebuf = &buffer[ i ];
2032 	    }
2033 	    /* all the buffers are free, initially */
2034 	    bsq->freebuf_count = nbr;
2035 
2036 #ifdef FORE200E_BSQ_DEBUG
2037 	    bsq_audit(3, bsq, scheme, magn);
2038 #endif
2039 	}
2040     }
2041 
2042     fore200e->state = FORE200E_STATE_ALLOC_BUF;
2043     return 0;
2044 }
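
/*
 * Informal note on the pool layout built above: free buffers form a simple
 * LIFO list threaded through buffer->next, with bsq->freebuf as the head and
 * bsq->freebuf_count tracking the population.  Popping a buffer elsewhere in
 * the driver looks roughly like:
 *
 *     buffer = bsq->freebuf;
 *     bsq->freebuf = buffer->next;
 *     bsq->freebuf_count--;
 */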
2045 
2046 
2047 static int fore200e_init_bs_queue(struct fore200e *fore200e)
2048 {
2049     int scheme, magn, i;
2050 
2051     struct host_bsq*     bsq;
2052     struct cp_bsq_entry __iomem * cp_entry;
2053 
2054     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2055 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2056 
2057 	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2058 
2059 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2060 
2061 	    /* allocate and align the array of status words */
2062 	    if (fore200e_dma_chunk_alloc(fore200e,
2063 					       &bsq->status,
2064 					       sizeof(enum status),
2065 					       QUEUE_SIZE_BS,
2066 					       fore200e->bus->status_alignment) < 0) {
2067 		return -ENOMEM;
2068 	    }
2069 
2070 	    /* allocate and align the array of receive buffer descriptors */
2071 	    if (fore200e_dma_chunk_alloc(fore200e,
2072 					       &bsq->rbd_block,
2073 					       sizeof(struct rbd_block),
2074 					       QUEUE_SIZE_BS,
2075 					       fore200e->bus->descr_alignment) < 0) {
2076 
2077 		fore200e_dma_chunk_free(fore200e, &bsq->status);
2078 		return -ENOMEM;
2079 	    }
2080 
2081 	    /* get the base address of the cp resident buffer supply queue entries */
2082 	    cp_entry = fore200e->virt_base +
2083 		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2084 
2085 	    /* fill the host resident and cp resident buffer supply queue entries */
2086 	    for (i = 0; i < QUEUE_SIZE_BS; i++) {
2087 
2088 		bsq->host_entry[ i ].status =
2089 		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2090 	        bsq->host_entry[ i ].rbd_block =
2091 		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2092 		bsq->host_entry[ i ].rbd_block_dma =
2093 		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2094 		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2095 
2096 		*bsq->host_entry[ i ].status = STATUS_FREE;
2097 
2098 		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2099 				     &cp_entry[ i ].status_haddr);
2100 	    }
2101 	}
2102     }
2103 
2104     fore200e->state = FORE200E_STATE_INIT_BSQ;
2105     return 0;
2106 }
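
/*
 * Informal note: each host_entry mirrors one cp resident entry.  The host
 * keeps the virtual and DMA addresses of its status word and rbd block, while
 * the cp entry is only given the DMA address of the status word
 * (status_haddr), so the firmware can flag completions directly in host
 * memory.  The rx, tx and cmd queues below follow the same layout.
 */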
2107 
2108 
2109 static int fore200e_init_rx_queue(struct fore200e *fore200e)
2110 {
2111     struct host_rxq*     rxq =  &fore200e->host_rxq;
2112     struct cp_rxq_entry __iomem * cp_entry;
2113     int i;
2114 
2115     DPRINTK(2, "receive queue is being initialized\n");
2116 
2117     /* allocate and align the array of status words */
2118     if (fore200e_dma_chunk_alloc(fore200e,
2119 				       &rxq->status,
2120 				       sizeof(enum status),
2121 				       QUEUE_SIZE_RX,
2122 				       fore200e->bus->status_alignment) < 0) {
2123 	return -ENOMEM;
2124     }
2125 
2126     /* allocate and align the array of receive PDU descriptors */
2127     if (fore200e_dma_chunk_alloc(fore200e,
2128 				       &rxq->rpd,
2129 				       sizeof(struct rpd),
2130 				       QUEUE_SIZE_RX,
2131 				       fore200e->bus->descr_alignment) < 0) {
2132 
2133 	fore200e_dma_chunk_free(fore200e, &rxq->status);
2134 	return -ENOMEM;
2135     }
2136 
2137     /* get the base address of the cp resident rx queue entries */
2138     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2139 
2140     /* fill the host resident and cp resident rx entries */
2141     for (i=0; i < QUEUE_SIZE_RX; i++) {
2142 
2143 	rxq->host_entry[ i ].status =
2144 	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2145 	rxq->host_entry[ i ].rpd =
2146 	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2147 	rxq->host_entry[ i ].rpd_dma =
2148 	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2149 	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2150 
2151 	*rxq->host_entry[ i ].status = STATUS_FREE;
2152 
2153 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2154 			     &cp_entry[ i ].status_haddr);
2155 
2156 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2157 			     &cp_entry[ i ].rpd_haddr);
2158     }
2159 
2160     /* set the head entry of the queue */
2161     rxq->head = 0;
2162 
2163     fore200e->state = FORE200E_STATE_INIT_RXQ;
2164     return 0;
2165 }
2166 
2167 
2168 static int fore200e_init_tx_queue(struct fore200e *fore200e)
2169 {
2170     struct host_txq*     txq =  &fore200e->host_txq;
2171     struct cp_txq_entry __iomem * cp_entry;
2172     int i;
2173 
2174     DPRINTK(2, "transmit queue is being initialized\n");
2175 
2176     /* allocate and align the array of status words */
2177     if (fore200e_dma_chunk_alloc(fore200e,
2178 				       &txq->status,
2179 				       sizeof(enum status),
2180 				       QUEUE_SIZE_TX,
2181 				       fore200e->bus->status_alignment) < 0) {
2182 	return -ENOMEM;
2183     }
2184 
2185     /* allocate and align the array of transmit PDU descriptors */
2186     if (fore200e_dma_chunk_alloc(fore200e,
2187 				       &txq->tpd,
2188 				       sizeof(struct tpd),
2189 				       QUEUE_SIZE_TX,
2190 				       fore200e->bus->descr_alignment) < 0) {
2191 
2192 	fore200e_dma_chunk_free(fore200e, &txq->status);
2193 	return -ENOMEM;
2194     }
2195 
2196     /* get the base address of the cp resident tx queue entries */
2197     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2198 
2199     /* fill the host resident and cp resident tx entries */
2200     for (i=0; i < QUEUE_SIZE_TX; i++) {
2201 
2202 	txq->host_entry[ i ].status =
2203 	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
2204 	txq->host_entry[ i ].tpd =
2205 	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2206 	txq->host_entry[ i ].tpd_dma  =
2207                              FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2208 	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2209 
2210 	*txq->host_entry[ i ].status = STATUS_FREE;
2211 
2212 	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2213 			     &cp_entry[ i ].status_haddr);
2214 
2215         /* although there is a one-to-one mapping of tx queue entries and tpds,
2216 	   we do not write here the DMA (physical) base address of each tpd into
2217 	   the related cp resident entry, because the cp relies on this write
2218 	   operation to detect that a new pdu has been submitted for tx */
2219     }
2220 
2221     /* set the head and tail entries of the queue */
2222     txq->head = 0;
2223     txq->tail = 0;
2224 
2225     fore200e->state = FORE200E_STATE_INIT_TXQ;
2226     return 0;
2227 }
2228 
2229 
2230 static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2231 {
2232     struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
2233     struct cp_cmdq_entry __iomem * cp_entry;
2234     int i;
2235 
2236     DPRINTK(2, "command queue is being initialized\n");
2237 
2238     /* allocate and align the array of status words */
2239     if (fore200e_dma_chunk_alloc(fore200e,
2240 				       &cmdq->status,
2241 				       sizeof(enum status),
2242 				       QUEUE_SIZE_CMD,
2243 				       fore200e->bus->status_alignment) < 0) {
2244 	return -ENOMEM;
2245     }
2246 
2247     /* get the base address of the cp resident cmd queue entries */
2248     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2249 
2250     /* fill the host resident and cp resident cmd entries */
2251     for (i=0; i < QUEUE_SIZE_CMD; i++) {
2252 
2253 	cmdq->host_entry[ i ].status   =
2254                               FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2255 	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2256 
2257 	*cmdq->host_entry[ i ].status = STATUS_FREE;
2258 
2259 	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2260                              &cp_entry[ i ].status_haddr);
2261     }
2262 
2263     /* set the head entry of the queue */
2264     cmdq->head = 0;
2265 
2266     fore200e->state = FORE200E_STATE_INIT_CMDQ;
2267     return 0;
2268 }
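
/*
 * Informal sketch of how the command queue set up above is used by
 * fore200e_getstats() and fore200e_set_oc3() earlier in this file
 * ("opcode_word" and "opcode_reg" are placeholders for the command-specific
 * fields):
 *
 *     entry = &cmdq->host_entry[ cmdq->head ];
 *     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
 *     ... fill the command block ...
 *     *entry->status = STATUS_PENDING;
 *     fore200e->bus->write(opcode_word, opcode_reg);
 *     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
 *     *entry->status = STATUS_FREE;
 */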
2269 
2270 
2271 static void fore200e_param_bs_queue(struct fore200e *fore200e,
2272 				    enum buffer_scheme scheme,
2273 				    enum buffer_magn magn, int queue_length,
2274 				    int pool_size, int supply_blksize)
2275 {
2276     struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2277 
2278     fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
2279     fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2280     fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
2281     fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
2282 }
2283 
2284 
2285 static int fore200e_initialize(struct fore200e *fore200e)
2286 {
2287     struct cp_queues __iomem * cpq;
2288     int               ok, scheme, magn;
2289 
2290     DPRINTK(2, "device %s being initialized\n", fore200e->name);
2291 
2292     mutex_init(&fore200e->rate_mtx);
2293     spin_lock_init(&fore200e->q_lock);
2294 
2295     cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2296 
2297     /* enable cp to host interrupts */
2298     fore200e->bus->write(1, &cpq->imask);
2299 
2300     if (fore200e->bus->irq_enable)
2301 	fore200e->bus->irq_enable(fore200e);
2302 
2303     fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2304 
2305     fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2306     fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
2307     fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);
2308 
2309     fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
2310     fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);
2311 
2312     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2313 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2314 	    fore200e_param_bs_queue(fore200e, scheme, magn,
2315 				    QUEUE_SIZE_BS,
2316 				    fore200e_rx_buf_nbr[ scheme ][ magn ],
2317 				    RBD_BLK_SIZE);
2318 
2319     /* issue the initialize command */
2320     fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
2321     fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2322 
2323     ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2324     if (ok == 0) {
2325 	printk(FORE200E "device %s initialization failed\n", fore200e->name);
2326 	return -ENODEV;
2327     }
2328 
2329     printk(FORE200E "device %s initialized\n", fore200e->name);
2330 
2331     fore200e->state = FORE200E_STATE_INITIALIZE;
2332     return 0;
2333 }
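
/*
 * Informal note: initialization does not go through the command queue; it
 * uses the dedicated init block in the shared cp_queues area.  The host
 * writes the queue sizes and buffer pool parameters, sets init.status to
 * STATUS_PENDING, writes OPCODE_INITIALIZE and then polls the status word
 * with fore200e_io_poll() until the firmware reports STATUS_COMPLETE.
 */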
2334 
2335 
2336 static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2337 {
2338     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2339 
2340 #if 0
2341     printk("%c", c);
2342 #endif
2343     fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2344 }
2345 
2346 
2347 static int fore200e_monitor_getc(struct fore200e *fore200e)
2348 {
2349     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2350     unsigned long      timeout = jiffies + msecs_to_jiffies(50);
2351     int                c;
2352 
2353     while (time_before(jiffies, timeout)) {
2354 
2355 	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2356 
2357 	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2358 
2359 	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2360 #if 0
2361 	    printk("%c", c & 0xFF);
2362 #endif
2363 	    return c & 0xFF;
2364 	}
2365     }
2366 
2367     return -1;
2368 }
2369 
2370 
2371 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2372 {
2373     while (*str) {
2374 
2375 	/* the i960 monitor doesn't accept any new character if it has something to say */
2376 	while (fore200e_monitor_getc(fore200e) >= 0);
2377 
2378 	fore200e_monitor_putc(fore200e, *str++);
2379     }
2380 
2381     while (fore200e_monitor_getc(fore200e) >= 0);
2382 }
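
/*
 * Informal note on the "soft UART" used above: it is a pair of mailbox words
 * in cp memory.  A sender writes the character with the
 * FORE200E_CP_MONITOR_UART_AVAIL flag set; a receiver polls for that flag,
 * acknowledges it by writing FORE200E_CP_MONITOR_UART_FREE and keeps the low
 * 8 bits as the character.  fore200e_monitor_puts() drains any pending
 * monitor output before each character it sends.
 */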
2383 
2384 #ifdef __LITTLE_ENDIAN
2385 #define FW_EXT ".bin"
2386 #else
2387 #define FW_EXT "_ecd.bin2"
2388 #endif
2389 
2390 static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2391 {
2392     const struct firmware *firmware;
2393     const struct fw_header *fw_header;
2394     const __le32 *fw_data;
2395     u32 fw_size;
2396     u32 __iomem *load_addr;
2397     char buf[48];
2398     int err;
2399 
2400     sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2401     if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2402 	printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2403 	return err;
2404     }
2405 
2406     fw_data = (const __le32 *)firmware->data;
2407     fw_size = firmware->size / sizeof(u32);
2408     fw_header = (const struct fw_header *)firmware->data;
2409     load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2410 
2411     DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2412 	    fore200e->name, load_addr, fw_size);
2413 
2414     if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2415 	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2416 	goto release;
2417     }
2418 
2419     for (; fw_size--; fw_data++, load_addr++)
2420 	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2421 
2422     DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2423 
2424 #if defined(__sparc_v9__)
2425     /* reported to be required by SBA cards on some sparc64 hosts */
2426     fore200e_spin(100);
2427 #endif
2428 
2429     sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2430     fore200e_monitor_puts(fore200e, buf);
2431 
2432     if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2433 	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2434 	goto release;
2435     }
2436 
2437     printk(FORE200E "device %s firmware started\n", fore200e->name);
2438 
2439     fore200e->state = FORE200E_STATE_START_FW;
2440     err = 0;
2441 
2442 release:
2443     release_firmware(firmware);
2444     return err;
2445 }
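
/*
 * Informal note on the firmware image layout assumed above: it starts with a
 * struct fw_header carrying a magic word, the load offset and the start
 * offset, followed by an array of little-endian 32-bit words that are
 * byte-swapped with le32_to_cpu() and copied one at a time through
 * fore200e->bus->write().  The image is then started by typing
 *
 *     go <start_offset>
 *
 * at the i960 monitor prompt via the soft UART helpers above.
 */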
2446 
2447 
2448 static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2449 {
2450     struct atm_dev* atm_dev;
2451 
2452     DPRINTK(2, "device %s being registered\n", fore200e->name);
2453 
2454     atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2455                                -1, NULL);
2456     if (atm_dev == NULL) {
2457 	printk(FORE200E "unable to register device %s\n", fore200e->name);
2458 	return -ENODEV;
2459     }
2460 
2461     atm_dev->dev_data = fore200e;
2462     fore200e->atm_dev = atm_dev;
2463 
2464     atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2465     atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2466 
2467     fore200e->available_cell_rate = ATM_OC3_PCR;
2468 
2469     fore200e->state = FORE200E_STATE_REGISTER;
2470     return 0;
2471 }
2472 
2473 
2474 static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2475 {
2476     if (fore200e_register(fore200e, parent) < 0)
2477 	return -ENODEV;
2478 
2479     if (fore200e->bus->configure(fore200e) < 0)
2480 	return -ENODEV;
2481 
2482     if (fore200e->bus->map(fore200e) < 0)
2483 	return -ENODEV;
2484 
2485     if (fore200e_reset(fore200e, 1) < 0)
2486 	return -ENODEV;
2487 
2488     if (fore200e_load_and_start_fw(fore200e) < 0)
2489 	return -ENODEV;
2490 
2491     if (fore200e_initialize(fore200e) < 0)
2492 	return -ENODEV;
2493 
2494     if (fore200e_init_cmd_queue(fore200e) < 0)
2495 	return -ENOMEM;
2496 
2497     if (fore200e_init_tx_queue(fore200e) < 0)
2498 	return -ENOMEM;
2499 
2500     if (fore200e_init_rx_queue(fore200e) < 0)
2501 	return -ENOMEM;
2502 
2503     if (fore200e_init_bs_queue(fore200e) < 0)
2504 	return -ENOMEM;
2505 
2506     if (fore200e_alloc_rx_buf(fore200e) < 0)
2507 	return -ENOMEM;
2508 
2509     if (fore200e_get_esi(fore200e) < 0)
2510 	return -EIO;
2511 
2512     if (fore200e_irq_request(fore200e) < 0)
2513 	return -EBUSY;
2514 
2515     fore200e_supply(fore200e);
2516 
2517     /* all done, board initialization is now complete */
2518     fore200e->state = FORE200E_STATE_COMPLETE;
2519     return 0;
2520 }
2521 
2522 #ifdef CONFIG_SBUS
2523 static const struct of_device_id fore200e_sba_match[];
2524 static int fore200e_sba_probe(struct platform_device *op)
2525 {
2526 	const struct of_device_id *match;
2527 	struct fore200e *fore200e;
2528 	static int index = 0;
2529 	int err;
2530 
2531 	match = of_match_device(fore200e_sba_match, &op->dev);
2532 	if (!match)
2533 		return -EINVAL;
2534 
2535 	fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2536 	if (!fore200e)
2537 		return -ENOMEM;
2538 
2539 	fore200e->bus = &fore200e_sbus_ops;
2540 	fore200e->dev = &op->dev;
2541 	fore200e->irq = op->archdata.irqs[0];
2542 	fore200e->phys_base = op->resource[0].start;
2543 
2544 	sprintf(fore200e->name, "SBA-200E-%d", index);
2545 
2546 	err = fore200e_init(fore200e, &op->dev);
2547 	if (err < 0) {
2548 		fore200e_shutdown(fore200e);
2549 		kfree(fore200e);
2550 		return err;
2551 	}
2552 
2553 	index++;
2554 	dev_set_drvdata(&op->dev, fore200e);
2555 
2556 	return 0;
2557 }
2558 
2559 static int fore200e_sba_remove(struct platform_device *op)
2560 {
2561 	struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2562 
2563 	fore200e_shutdown(fore200e);
2564 	kfree(fore200e);
2565 
2566 	return 0;
2567 }
2568 
2569 static const struct of_device_id fore200e_sba_match[] = {
2570 	{
2571 		.name = SBA200E_PROM_NAME,
2572 	},
2573 	{},
2574 };
2575 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2576 
2577 static struct platform_driver fore200e_sba_driver = {
2578 	.driver = {
2579 		.name = "fore_200e",
2580 		.of_match_table = fore200e_sba_match,
2581 	},
2582 	.probe		= fore200e_sba_probe,
2583 	.remove		= fore200e_sba_remove,
2584 };
2585 #endif
2586 
2587 #ifdef CONFIG_PCI
2588 static int fore200e_pca_detect(struct pci_dev *pci_dev,
2589 			       const struct pci_device_id *pci_ent)
2590 {
2591     struct fore200e* fore200e;
2592     int err = 0;
2593     static int index = 0;
2594 
2595     if (pci_enable_device(pci_dev)) {
2596 	err = -EINVAL;
2597 	goto out;
2598     }
2599 
2600     if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2601 	err = -EINVAL;
2602 	goto out;
2603     }
2604 
2605     fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2606     if (fore200e == NULL) {
2607 	err = -ENOMEM;
2608 	goto out_disable;
2609     }
2610 
2611     fore200e->bus       = &fore200e_pci_ops;
2612     fore200e->dev	= &pci_dev->dev;
2613     fore200e->irq       = pci_dev->irq;
2614     fore200e->phys_base = pci_resource_start(pci_dev, 0);
2615 
2616     sprintf(fore200e->name, "PCA-200E-%d", index - 1);
2617 
2618     pci_set_master(pci_dev);
2619 
2620     printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2621 	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2622 
2623     sprintf(fore200e->name, "PCA-200E-%d", index);
2624 
2625     err = fore200e_init(fore200e, &pci_dev->dev);
2626     if (err < 0) {
2627 	fore200e_shutdown(fore200e);
2628 	goto out_free;
2629     }
2630 
2631     ++index;
2632     pci_set_drvdata(pci_dev, fore200e);
2633 
2634 out:
2635     return err;
2636 
2637 out_free:
2638     kfree(fore200e);
2639 out_disable:
2640     pci_disable_device(pci_dev);
2641     goto out;
2642 }
2643 
2644 
2645 static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2646 {
2647     struct fore200e *fore200e;
2648 
2649     fore200e = pci_get_drvdata(pci_dev);
2650 
2651     fore200e_shutdown(fore200e);
2652     kfree(fore200e);
2653     pci_disable_device(pci_dev);
2654 }
2655 
2656 
2657 static const struct pci_device_id fore200e_pca_tbl[] = {
2658     { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
2659     { 0, }
2660 };
2661 
2662 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2663 
2664 static struct pci_driver fore200e_pca_driver = {
2665     .name =     "fore_200e",
2666     .probe =    fore200e_pca_detect,
2667     .remove =   fore200e_pca_remove_one,
2668     .id_table = fore200e_pca_tbl,
2669 };
2670 #endif
2671 
2672 static int __init fore200e_module_init(void)
2673 {
2674 	int err = 0;
2675 
2676 	printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2677 
2678 #ifdef CONFIG_SBUS
2679 	err = platform_driver_register(&fore200e_sba_driver);
2680 	if (err)
2681 		return err;
2682 #endif
2683 
2684 #ifdef CONFIG_PCI
2685 	err = pci_register_driver(&fore200e_pca_driver);
2686 #endif
2687 
2688 #ifdef CONFIG_SBUS
2689 	if (err)
2690 		platform_driver_unregister(&fore200e_sba_driver);
2691 #endif
2692 
2693 	return err;
2694 }
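
/*
 * Note: when both bus types are configured, a failure to register the PCI
 * driver unregisters the already-registered SBus platform driver before the
 * error is returned, so a failed module load does not leave a half-registered
 * state behind.
 */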
2695 
2696 static void __exit fore200e_module_cleanup(void)
2697 {
2698 #ifdef CONFIG_PCI
2699 	pci_unregister_driver(&fore200e_pca_driver);
2700 #endif
2701 #ifdef CONFIG_SBUS
2702 	platform_driver_unregister(&fore200e_sba_driver);
2703 #endif
2704 }
2705 
2706 static int
2707 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2708 {
2709     struct fore200e*     fore200e  = FORE200E_DEV(dev);
2710     struct fore200e_vcc* fore200e_vcc;
2711     struct atm_vcc*      vcc;
2712     int                  i, len, left = *pos;
2713     unsigned long        flags;
2714 
2715     if (!left--) {
2716 
2717 	if (fore200e_getstats(fore200e) < 0)
2718 	    return -EIO;
2719 
2720 	len = sprintf(page,"\n"
2721 		       " device:\n"
2722 		       "   internal name:\t\t%s\n", fore200e->name);
2723 
2724 	/* print bus-specific information */
2725 	if (fore200e->bus->proc_read)
2726 	    len += fore200e->bus->proc_read(fore200e, page + len);
2727 
2728 	len += sprintf(page + len,
2729 		"   interrupt line:\t\t%s\n"
2730 		"   physical base address:\t0x%p\n"
2731 		"   virtual base address:\t0x%p\n"
2732 		"   factory address (ESI):\t%pM\n"
2733 		"   board serial number:\t\t%d\n\n",
2734 		fore200e_irq_itoa(fore200e->irq),
2735 		(void*)fore200e->phys_base,
2736 		fore200e->virt_base,
2737 		fore200e->esi,
2738 		fore200e->esi[4] * 256 + fore200e->esi[5]);
2739 
2740 	return len;
2741     }
2742 
2743     if (!left--)
2744 	return sprintf(page,
2745 		       "   free small bufs, scheme 1:\t%d\n"
2746 		       "   free large bufs, scheme 1:\t%d\n"
2747 		       "   free small bufs, scheme 2:\t%d\n"
2748 		       "   free large bufs, scheme 2:\t%d\n",
2749 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2750 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2751 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2752 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2753 
2754     if (!left--) {
2755 	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2756 
2757 	len = sprintf(page,"\n\n"
2758 		      " cell processor:\n"
2759 		      "   heartbeat state:\t\t");
2760 
2761 	if (hb >> 16 != 0xDEAD)
2762 	    len += sprintf(page + len, "0x%08x\n", hb);
2763 	else
2764 	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2765 
2766 	return len;
2767     }
2768 
2769     if (!left--) {
2770 	static const char* media_name[] = {
2771 	    "unshielded twisted pair",
2772 	    "multimode optical fiber ST",
2773 	    "multimode optical fiber SC",
2774 	    "single-mode optical fiber ST",
2775 	    "single-mode optical fiber SC",
2776 	    "unknown"
2777 	};
2778 
2779 	static const char* oc3_mode[] = {
2780 	    "normal operation",
2781 	    "diagnostic loopback",
2782 	    "line loopback",
2783 	    "unknown"
2784 	};
2785 
2786 	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2787 	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2788 	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2789 	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2790 	u32 oc3_index;
2791 
2792 	if (media_index > 4)
2793 		media_index = 5;
2794 
2795 	switch (fore200e->loop_mode) {
2796 	    case ATM_LM_NONE:    oc3_index = 0;
2797 		                 break;
2798 	    case ATM_LM_LOC_PHY: oc3_index = 1;
2799 		                 break;
2800 	    case ATM_LM_RMT_PHY: oc3_index = 2;
2801 		                 break;
2802 	    default:             oc3_index = 3;
2803 	}
2804 
2805 	return sprintf(page,
2806 		       "   firmware release:\t\t%d.%d.%d\n"
2807 		       "   monitor release:\t\t%d.%d\n"
2808 		       "   media type:\t\t\t%s\n"
2809 		       "   OC-3 revision:\t\t0x%x\n"
2810                        "   OC-3 mode:\t\t\t%s",
2811 		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
2812 		       mon960_release >> 16, mon960_release << 16 >> 16,
2813 		       media_name[ media_index ],
2814 		       oc3_revision,
2815 		       oc3_mode[ oc3_index ]);
2816     }
2817 
2818     if (!left--) {
2819 	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2820 
2821 	return sprintf(page,
2822 		       "\n\n"
2823 		       " monitor:\n"
2824 		       "   version number:\t\t%d\n"
2825 		       "   boot status word:\t\t0x%08x\n",
2826 		       fore200e->bus->read(&cp_monitor->mon_version),
2827 		       fore200e->bus->read(&cp_monitor->bstat));
2828     }
2829 
2830     if (!left--)
2831 	return sprintf(page,
2832 		       "\n"
2833 		       " device statistics:\n"
2834 		       "  4b5b:\n"
2835 		       "     crc_header_errors:\t\t%10u\n"
2836 		       "     framing_errors:\t\t%10u\n",
2837 		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2838 		       be32_to_cpu(fore200e->stats->phy.framing_errors));
2839 
2840     if (!left--)
2841 	return sprintf(page, "\n"
2842 		       "  OC-3:\n"
2843 		       "     section_bip8_errors:\t%10u\n"
2844 		       "     path_bip8_errors:\t\t%10u\n"
2845 		       "     line_bip24_errors:\t\t%10u\n"
2846 		       "     line_febe_errors:\t\t%10u\n"
2847 		       "     path_febe_errors:\t\t%10u\n"
2848 		       "     corr_hcs_errors:\t\t%10u\n"
2849 		       "     ucorr_hcs_errors:\t\t%10u\n",
2850 		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2851 		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2852 		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2853 		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2854 		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2855 		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2856 		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2857 
2858     if (!left--)
2859 	return sprintf(page,"\n"
2860 		       "   ATM:\t\t\t\t     cells\n"
2861 		       "     TX:\t\t\t%10u\n"
2862 		       "     RX:\t\t\t%10u\n"
2863 		       "     vpi out of range:\t\t%10u\n"
2864 		       "     vpi no conn:\t\t%10u\n"
2865 		       "     vci out of range:\t\t%10u\n"
2866 		       "     vci no conn:\t\t%10u\n",
2867 		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2868 		       be32_to_cpu(fore200e->stats->atm.cells_received),
2869 		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2870 		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2871 		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2872 		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2873 
2874     if (!left--)
2875 	return sprintf(page,"\n"
2876 		       "   AAL0:\t\t\t     cells\n"
2877 		       "     TX:\t\t\t%10u\n"
2878 		       "     RX:\t\t\t%10u\n"
2879 		       "     dropped:\t\t\t%10u\n",
2880 		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2881 		       be32_to_cpu(fore200e->stats->aal0.cells_received),
2882 		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));
2883 
2884     if (!left--)
2885 	return sprintf(page,"\n"
2886 		       "   AAL3/4:\n"
2887 		       "     SAR sublayer:\t\t     cells\n"
2888 		       "       TX:\t\t\t%10u\n"
2889 		       "       RX:\t\t\t%10u\n"
2890 		       "       dropped:\t\t\t%10u\n"
2891 		       "       CRC errors:\t\t%10u\n"
2892 		       "       protocol errors:\t\t%10u\n\n"
2893 		       "     CS  sublayer:\t\t      PDUs\n"
2894 		       "       TX:\t\t\t%10u\n"
2895 		       "       RX:\t\t\t%10u\n"
2896 		       "       dropped:\t\t\t%10u\n"
2897 		       "       protocol errors:\t\t%10u\n",
2898 		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
2899 		       be32_to_cpu(fore200e->stats->aal34.cells_received),
2900 		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
2901 		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
2902 		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
2903 		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
2904 		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
2905 		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
2906 		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
2907 
2908     if (!left--)
2909 	return sprintf(page,"\n"
2910 		       "   AAL5:\n"
2911 		       "     SAR sublayer:\t\t     cells\n"
2912 		       "       TX:\t\t\t%10u\n"
2913 		       "       RX:\t\t\t%10u\n"
2914 		       "       dropped:\t\t\t%10u\n"
2915 		       "       congestions:\t\t%10u\n\n"
2916 		       "     CS  sublayer:\t\t      PDUs\n"
2917 		       "       TX:\t\t\t%10u\n"
2918 		       "       RX:\t\t\t%10u\n"
2919 		       "       dropped:\t\t\t%10u\n"
2920 		       "       CRC errors:\t\t%10u\n"
2921 		       "       protocol errors:\t\t%10u\n",
2922 		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
2923 		       be32_to_cpu(fore200e->stats->aal5.cells_received),
2924 		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
2925 		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
2926 		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
2927 		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
2928 		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
2929 		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
2930 		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
2931 
2932     if (!left--)
2933 	return sprintf(page,"\n"
2934 		       "   AUX:\t\t       allocation failures\n"
2935 		       "     small b1:\t\t\t%10u\n"
2936 		       "     large b1:\t\t\t%10u\n"
2937 		       "     small b2:\t\t\t%10u\n"
2938 		       "     large b2:\t\t\t%10u\n"
2939 		       "     RX PDUs:\t\t\t%10u\n"
2940 		       "     TX PDUs:\t\t\t%10lu\n",
2941 		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
2942 		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
2943 		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
2944 		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
2945 		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
2946 		       fore200e->tx_sat);
2947 
2948     if (!left--)
2949 	return sprintf(page,"\n"
2950 		       " receive carrier:\t\t\t%s\n",
2951 		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
2952 
2953     if (!left--) {
2954         return sprintf(page,"\n"
2955 		       " VCCs:\n  address   VPI VCI   AAL "
2956 		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
2957     }
2958 
2959     for (i = 0; i < NBR_CONNECT; i++) {
2960 
2961 	vcc = fore200e->vc_map[i].vcc;
2962 
2963 	if (vcc == NULL)
2964 	    continue;
2965 
2966 	spin_lock_irqsave(&fore200e->q_lock, flags);
2967 
2968 	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
2969 
2970 	    fore200e_vcc = FORE200E_VCC(vcc);
2971 	    ASSERT(fore200e_vcc);
2972 
2973 	    len = sprintf(page,
2974 			  "  %pK  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
2975 			  vcc,
2976 			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
2977 			  fore200e_vcc->tx_pdu,
2978 			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
2979 			  fore200e_vcc->tx_max_pdu,
2980 			  fore200e_vcc->rx_pdu,
2981 			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
2982 			  fore200e_vcc->rx_max_pdu);
2983 
2984 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
2985 	    return len;
2986 	}
2987 
2988 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
2989     }
2990 
2991     return 0;
2992 }
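
/*
 * Informal note: the ATM core calls proc_read() repeatedly with an increasing
 * *pos.  Each "if (!left--)" block above emits exactly one chunk of output for
 * the matching position, and reaching the final "return 0" signals the end of
 * the file.
 */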
2993 
2994 module_init(fore200e_module_init);
2995 module_exit(fore200e_module_cleanup);
2996 
2997 
2998 static const struct atmdev_ops fore200e_ops = {
2999 	.open       = fore200e_open,
3000 	.close      = fore200e_close,
3001 	.ioctl      = fore200e_ioctl,
3002 	.send       = fore200e_send,
3003 	.change_qos = fore200e_change_qos,
3004 	.proc_read  = fore200e_proc_read,
3005 	.owner      = THIS_MODULE
3006 };
3007 
3008 MODULE_LICENSE("GPL");
3009 #ifdef CONFIG_PCI
3010 #ifdef __LITTLE_ENDIAN__
3011 MODULE_FIRMWARE("pca200e.bin");
3012 #else
3013 MODULE_FIRMWARE("pca200e_ecd.bin2");
3014 #endif
3015 #endif /* CONFIG_PCI */
3016 #ifdef CONFIG_SBUS
3017 MODULE_FIRMWARE("sba200e_ecd.bin2");
3018 #endif
3019