/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
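
/* a note on the table above: each CLK_LOW/CLK_HIGH pair clocks one opcode
 * bit out on the serial input line (SI left low = 0, SI_HIGH = 1), so after
 * the chip select toggle the table shifts out 0000 0011b MSB-first -- the
 * EEPROM read opcode named in the comment. */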

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
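
/* worked example (illustrative values, not the only legal split): assuming
 * a 2/10 vpi/vci split of the 12-bit cid space implied by the 4096
 * connections noted in the header, he_mkcid(dev, 1, 32) =
 * (1 << 10) | 32 = 0x420. */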

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

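/* __find_vcc walks the global vcc hash unlocked; callers are expected to
 * hold vcc_sklist_lock around it (he_service_rbrq below takes the read
 * lock before calling in). */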
static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	memset(he_dev, 0, sizeof(struct he_dev));

	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
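
/* worked example: the atm forum format encodes 2^exp * (512 + mantissa) / 512
 * cells per second, with bit 14 set for any nonzero rate.  feeding in the
 * oc-3 cell rate, rate_to_atmf(353207) shifts 353207 << 9 = 180841984 right
 * 18 times down to 689, giving exp = 18 and mantissa = 689 & 0x1ff = 0xb1,
 * i.e. 0x4000 | (18 << 9) | 0xb1 = 0x64b1 -- the same constant that appears
 * in the 155 Mb/s cs block tables below (e.g. CS_WCRMAX). */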

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
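
/* a note on the three loops above: each local-buffer descriptor in RCM is
 * two words (buffer address, next index), and the two rx free pools
 * interleave their descriptors -- pool 0 takes even indices, pool 1 odd
 * ones -- which is why the rx loops advance lbufd_index by 2 and lbm_offset
 * by 4.  the r0/r1 pools are sized identically by he_start, so the tx free
 * pool starts contiguously after them (at r0_numbuffs + r1_numbuffs) and
 * advances by 1 and 2 instead. */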

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
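
	/* e.g. on a 155 Mb/s card: clock = 50000000 and rate starts at the
	 * oc-3 cell rate of 353207 cps, so the first reload value is
	 * 50000000 / 353207 = 141 clock cycles, and each of the 16 registers
	 * steps the rate down by delta = 353207 / 32 = 11037 cps. */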

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on the rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to __udivdi3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits);
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this isn't pretty, but it avoids __udivdi3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
788 
789 static int __init
790 he_init_group(struct he_dev *he_dev, int group)
791 {
792 	int i;
793 
794 #ifdef USE_RBPS
795 	/* small buffer pool */
796 #ifdef USE_RBPS_POOL
797 	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
798 			CONFIG_RBPS_BUFSIZE, 8, 0);
799 	if (he_dev->rbps_pool == NULL) {
800 		hprintk("unable to create rbps pages\n");
801 		return -ENOMEM;
802 	}
803 #else /* !USE_RBPS_POOL */
804 	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
805 		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
806 	if (he_dev->rbps_pages == NULL) {
807 		hprintk("unable to create rbps page pool\n");
808 		return -ENOMEM;
809 	}
810 #endif /* USE_RBPS_POOL */
811 
812 	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
813 		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
814 	if (he_dev->rbps_base == NULL) {
815 		hprintk("failed to alloc rbps\n");
816 		return -ENOMEM;
817 	}
818 	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
819 	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
820 
821 	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
822 		dma_addr_t dma_handle;
823 		void *cpuaddr;
824 
825 #ifdef USE_RBPS_POOL
826 		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
827 		if (cpuaddr == NULL)
828 			return -ENOMEM;
829 #else
830 		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
831 		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
832 #endif
833 
834 		he_dev->rbps_virt[i].virt = cpuaddr;
835 		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
836 		he_dev->rbps_base[i].phys = dma_handle;
837 
838 	}
839 	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
840 
841 	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
842 	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
843 						G0_RBPS_T + (group * 32));
844 	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
845 						G0_RBPS_BS + (group * 32));
846 	he_writel(he_dev,
847 			RBP_THRESH(CONFIG_RBPS_THRESH) |
848 			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
849 			RBP_INT_ENB,
850 						G0_RBPS_QI + (group * 32));
851 #else /* !USE_RBPS */
852 	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
853 	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
854 	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
855 	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
856 						G0_RBPS_BS + (group * 32));
857 #endif /* USE_RBPS */
858 
859 	/* large buffer pool */
860 #ifdef USE_RBPL_POOL
861 	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
862 			CONFIG_RBPL_BUFSIZE, 8, 0);
863 	if (he_dev->rbpl_pool == NULL) {
864 		hprintk("unable to create rbpl pool\n");
865 		return -ENOMEM;
866 	}
867 #else /* !USE_RBPL_POOL */
868 	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
869 		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
870 	if (he_dev->rbpl_pages == NULL) {
871 		hprintk("unable to create rbpl pages\n");
872 		return -ENOMEM;
873 	}
874 #endif /* USE_RBPL_POOL */
875 
876 	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
877 		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
878 	if (he_dev->rbpl_base == NULL) {
879 		hprintk("failed to alloc rbpl\n");
880 		return -ENOMEM;
881 	}
882 	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
883 	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
884 
885 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
886 		dma_addr_t dma_handle;
887 		void *cpuaddr;
888 
889 #ifdef USE_RBPL_POOL
890 		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
891 		if (cpuaddr == NULL)
892 			return -ENOMEM;
893 #else
894 		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
895 		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
896 #endif
897 
898 		he_dev->rbpl_virt[i].virt = cpuaddr;
899 		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
900 		he_dev->rbpl_base[i].phys = dma_handle;
901 	}
902 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
903 
904 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
905 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
906 						G0_RBPL_T + (group * 32));
907 	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
908 						G0_RBPL_BS + (group * 32));
909 	he_writel(he_dev,
910 			RBP_THRESH(CONFIG_RBPL_THRESH) |
911 			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
912 			RBP_INT_ENB,
913 						G0_RBPL_QI + (group * 32));
914 
915 	/* rx buffer ready queue */
916 
917 	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
918 		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
919 	if (he_dev->rbrq_base == NULL) {
920 		hprintk("failed to allocate rbrq\n");
921 		return -ENOMEM;
922 	}
923 	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
924 
925 	he_dev->rbrq_head = he_dev->rbrq_base;
926 	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
927 	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
928 	he_writel(he_dev,
929 		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
930 						G0_RBRQ_Q + (group * 16));
931 	if (irq_coalesce) {
932 		hprintk("coalescing interrupts\n");
933 		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
934 						G0_RBRQ_I + (group * 16));
935 	} else
936 		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
937 						G0_RBRQ_I + (group * 16));
938 
939 	/* tx buffer ready queue */
940 
941 	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
942 		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
943 	if (he_dev->tbrq_base == NULL) {
944 		hprintk("failed to allocate tbrq\n");
945 		return -ENOMEM;
946 	}
947 	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
948 
949 	he_dev->tbrq_head = he_dev->tbrq_base;
950 
951 	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
952 	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
953 	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
954 	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
955 
956 	return 0;
957 }

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	int err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 * i.e. 1 + 16 + 192 = 209 clock cycles, hence the value below
	 */
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	mdelay(16);	/* 16 ms; udelay() can overflow with arguments this large */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

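	/* sanity check on the numbers above (155 card): 512 rows * 20
	 * cells/row / 4 cells/lbuf = 2560 rx buffers per pool, exactly at
	 * the 2560 cap, and 1018 * 20 / 4 = 5090 tx buffers, just under the
	 * 5120 cap; the 622 card lands on the same rx figure via
	 * 256 * 40 / 4 and hits the tx cap exactly via 512 * 40 / 4. */
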
	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;
#if defined(USE_RBPL_POOL) || defined(USE_RBPS_POOL)
	int i;		/* only used when draining the pci pools below */
#endif

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
		kfree(he_dev->rbpl_virt);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
		kfree(he_dev->rbps_virt);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
1722 
1723 static struct he_tpd *
1724 __alloc_tpd(struct he_dev *he_dev)
1725 {
1726 #ifdef USE_TPD_POOL
1727 	struct he_tpd *tpd;
1728 	dma_addr_t dma_handle;
1729 
1730 	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
1731 	if (tpd == NULL)
1732 		return NULL;
1733 
1734 	tpd->status = TPD_ADDR(dma_handle);
1735 	tpd->reserved = 0;
1736 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1737 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1738 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1739 
1740 	return tpd;
1741 #else
1742 	int i;
1743 
1744 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1745 		++he_dev->tpd_head;
1746 		if (he_dev->tpd_head > he_dev->tpd_end) {
1747 			he_dev->tpd_head = he_dev->tpd_base;
1748 		}
1749 
1750 		if (!he_dev->tpd_head->inuse) {
1751 			he_dev->tpd_head->inuse = 1;
1752 			he_dev->tpd_head->status &= TPD_MASK;
1753 			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1754 			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1755 			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1756 			return he_dev->tpd_head;
1757 		}
1758 	}
1759 	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1760 	return NULL;
1761 #endif
1762 }
1763 
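/*
 * the aal5 cpcs-pdu trailer occupies the last eight bytes of the
 * padded pdu: uu (1), cpi (1), length (2, big endian) and crc-32 (4),
 * so AAL5_LEN() below picks the length field out of offsets len-6
 * and len-5.
 */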
1764 #define AAL5_LEN(buf,len) 						\
1765 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1766 				(((unsigned char *)(buf))[(len)-5]))
1767 
1768 /* 2.10.1.2 receive
1769  *
1770  * aal5 packets can optionally return the tcp checksum in the lower
1771  * 16 bits of the crc (RSR0_TCP_CKSUM)
1772  */
1773 
1774 #define TCP_CKSUM(buf,len) 						\
1775 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1776 				(((unsigned char *)(buf))[(len)-1]))
1777 
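/*
 * he_service_rbrq() drains the receive buffer return queue for a
 * group: each entry is appended to the owning vcc's iovec list until
 * END_PDU, when the fragments are copied into a freshly charged skb
 * and pushed up the stack.  the pdu count returned tells the caller
 * whether the rbpl/rbps free pools need replenishing.
 */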
1778 static int
1779 he_service_rbrq(struct he_dev *he_dev, int group)
1780 {
1781 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1782 				((unsigned long)he_dev->rbrq_base |
1783 					he_dev->hsp->group[group].rbrq_tail);
1784 	struct he_rbp *rbp = NULL;
1785 	unsigned cid, lastcid = -1;
1786 	unsigned buf_len = 0;
1787 	struct sk_buff *skb;
1788 	struct atm_vcc *vcc = NULL;
1789 	struct he_vcc *he_vcc;
1790 	struct he_iovec *iov;
1791 	int pdus_assembled = 0;
1792 	int updated = 0;
1793 
1794 	read_lock(&vcc_sklist_lock);
1795 	while (he_dev->rbrq_head != rbrq_tail) {
1796 		++updated;
1797 
1798 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1799 			he_dev->rbrq_head, group,
1800 			RBRQ_ADDR(he_dev->rbrq_head),
1801 			RBRQ_BUFLEN(he_dev->rbrq_head),
1802 			RBRQ_CID(he_dev->rbrq_head),
1803 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1804 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1805 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1806 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1807 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1808 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1809 
1810 #ifdef USE_RBPS
1811 		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1812 			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1813 		else
1814 #endif
1815 			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1816 
1817 		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1818 		cid = RBRQ_CID(he_dev->rbrq_head);
1819 
1820 		if (cid != lastcid)
1821 			vcc = __find_vcc(he_dev, cid);
1822 		lastcid = cid;
1823 
1824 		if (vcc == NULL) {
1825 			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
1826 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1827 				rbp->status &= ~RBP_LOANED;
1828 
1829 			goto next_rbrq_entry;
1830 		}
1831 
1832 		he_vcc = HE_VCC(vcc);
1833 		if (he_vcc == NULL) {
1834 			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
1835 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1836 				rbp->status &= ~RBP_LOANED;
1837 			goto next_rbrq_entry;
1838 		}
1839 
1840 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1841 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1842 			atomic_inc(&vcc->stats->rx_drop);
1843 			goto return_host_buffers;
1844 		}
1845 
1846 		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1847 		he_vcc->iov_tail->iov_len = buf_len;
1848 		he_vcc->pdu_len += buf_len;
1849 		++he_vcc->iov_tail;
1850 
1851 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1852 			lastcid = -1;
1853 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1854 			wake_up(&he_vcc->rx_waitq);
1855 			goto return_host_buffers;
1856 		}
1857 
1858 #ifdef notdef
1859 		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1860 			hprintk("iovec full!  cid 0x%x\n", cid);
1861 			goto return_host_buffers;
1862 		}
1863 #endif
1864 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1865 			goto next_rbrq_entry;
1866 
1867 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1868 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1869 			HPRINTK("%s%s (%d.%d)\n",
1870 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1871 							? "CRC_ERR " : "",
1872 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1873 							? "LEN_ERR" : "",
1874 							vcc->vpi, vcc->vci);
1875 			atomic_inc(&vcc->stats->rx_err);
1876 			goto return_host_buffers;
1877 		}
1878 
1879 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1880 							GFP_ATOMIC);
1881 		if (!skb) {
1882 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1883 			goto return_host_buffers;
1884 		}
1885 
1886 		if (rx_skb_reserve > 0)
1887 			skb_reserve(skb, rx_skb_reserve);
1888 
1889 		__net_timestamp(skb);
1890 
1891 		for (iov = he_vcc->iov_head;
1892 				iov < he_vcc->iov_tail; ++iov) {
1893 #ifdef USE_RBPS
1894 			if (iov->iov_base & RBP_SMALLBUF)
1895 				memcpy(skb_put(skb, iov->iov_len),
1896 					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1897 			else
1898 #endif
1899 				memcpy(skb_put(skb, iov->iov_len),
1900 					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1901 		}
1902 
1903 		switch (vcc->qos.aal) {
1904 			case ATM_AAL0:
1905 				/* 2.10.1.5 raw cell receive */
1906 				skb->len = ATM_AAL0_SDU;
1907 				skb->tail = skb->data + skb->len;
1908 				break;
1909 			case ATM_AAL5:
1910 				/* 2.10.1.2 aal5 receive */
1911 
1912 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1913 				skb->tail = skb->data + skb->len;
1914 #ifdef USE_CHECKSUM_HW
1915 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1916 					skb->ip_summed = CHECKSUM_HW;
1917 					skb->csum = TCP_CKSUM(skb->data,
1918 							he_vcc->pdu_len);
1919 				}
1920 #endif
1921 				break;
1922 		}
1923 
1924 #ifdef should_never_happen
1925 		if (skb->len > vcc->qos.rxtp.max_sdu)
1926 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1927 #endif
1928 
1929 #ifdef notdef
1930 		ATM_SKB(skb)->vcc = vcc;
1931 #endif
1932 		vcc->push(vcc, skb);
1933 
1934 		atomic_inc(&vcc->stats->rx);
1935 
1936 return_host_buffers:
1937 		++pdus_assembled;
1938 
1939 		for (iov = he_vcc->iov_head;
1940 				iov < he_vcc->iov_tail; ++iov) {
1941 #ifdef USE_RBPS
1942 			if (iov->iov_base & RBP_SMALLBUF)
1943 				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1944 			else
1945 #endif
1946 				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1947 
1948 			rbp->status &= ~RBP_LOANED;
1949 		}
1950 
1951 		he_vcc->iov_tail = he_vcc->iov_head;
1952 		he_vcc->pdu_len = 0;
1953 
1954 next_rbrq_entry:
1955 		he_dev->rbrq_head = (struct he_rbrq *)
1956 				((unsigned long) he_dev->rbrq_base |
1957 					RBRQ_MASK(++he_dev->rbrq_head));
1958 
1959 	}
1960 	read_unlock(&vcc_sklist_lock);
1961 
1962 	if (updated) {
1963 		if (updated > he_dev->rbrq_peak)
1964 			he_dev->rbrq_peak = updated;
1965 
1966 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1967 						G0_RBRQ_H + (group * 16));
1968 	}
1969 
1970 	return pdus_assembled;
1971 }
1972 
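/*
 * he_service_tbrq() walks the transmit buffer return queue, matching
 * each returned dma address back to its tpd (via the outstanding_tpds
 * list with USE_TPD_POOL, or by index otherwise), unmapping the iovec
 * buffers and popping or freeing the completed skb.
 */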
1973 static void
1974 he_service_tbrq(struct he_dev *he_dev, int group)
1975 {
1976 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1977 				((unsigned long)he_dev->tbrq_base |
1978 					he_dev->hsp->group[group].tbrq_tail);
1979 	struct he_tpd *tpd;
1980 	int slot, updated = 0;
1981 #ifdef USE_TPD_POOL
1982 	struct he_tpd *__tpd;
1983 #endif
1984 
1985 	/* 2.1.6 transmit buffer return queue */
1986 
1987 	while (he_dev->tbrq_head != tbrq_tail) {
1988 		++updated;
1989 
1990 		HPRINTK("tbrq%d 0x%x%s%s\n",
1991 			group,
1992 			TBRQ_TPD(he_dev->tbrq_head),
1993 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1994 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1995 #ifdef USE_TPD_POOL
1996 		tpd = NULL;
1997 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1998 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1999 				tpd = __tpd;
2000 				list_del(&__tpd->entry);
2001 				break;
2002 			}
2003 		}
2004 
2005 		if (tpd == NULL) {
2006 			hprintk("unable to locate tpd for dma buffer %x\n",
2007 						TBRQ_TPD(he_dev->tbrq_head));
2008 			goto next_tbrq_entry;
2009 		}
2010 #else
2011 		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2012 #endif
2013 
2014 		if (TBRQ_EOS(he_dev->tbrq_head)) {
2015 			if (tpd->vcc) {
2016 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n", he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2017 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2018 			}
2019 
2020 			goto next_tbrq_entry;
2021 		}
2022 
2023 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2024 			if (tpd->iovec[slot].addr)
2025 				pci_unmap_single(he_dev->pci_dev,
2026 					tpd->iovec[slot].addr,
2027 					tpd->iovec[slot].len & TPD_LEN_MASK,
2028 							PCI_DMA_TODEVICE);
2029 			if (tpd->iovec[slot].len & TPD_LST)
2030 				break;
2031 
2032 		}
2033 
2034 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2035 			if (tpd->vcc && tpd->vcc->pop)
2036 				tpd->vcc->pop(tpd->vcc, tpd->skb);
2037 			else
2038 				dev_kfree_skb_any(tpd->skb);
2039 		}
2040 
2041 next_tbrq_entry:
2042 #ifdef USE_TPD_POOL
2043 		if (tpd)
2044 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2045 #else
2046 		tpd->inuse = 0;
2047 #endif
2048 		he_dev->tbrq_head = (struct he_tbrq *)
2049 				((unsigned long) he_dev->tbrq_base |
2050 					TBRQ_MASK(++he_dev->tbrq_head));
2051 	}
2052 
2053 	if (updated) {
2054 		if (updated > he_dev->tbrq_peak)
2055 			he_dev->tbrq_peak = updated;
2056 
2057 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2058 						G0_TBRQ_H + (group * 16));
2059 	}
2060 }
2061 
2062 
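/*
 * he_service_rbpl() loans free large buffers back to the adapter by
 * advancing the rbpl tail, marking each entry RBP_LOANED, and stopping
 * before the tail would catch the hardware head pointer (table 3.42).
 */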
2063 static void
2064 he_service_rbpl(struct he_dev *he_dev, int group)
2065 {
2066 	struct he_rbp *newtail;
2067 	struct he_rbp *rbpl_head;
2068 	int moved = 0;
2069 
2070 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2071 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2072 
2073 	for (;;) {
2074 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2075 						RBPL_MASK(he_dev->rbpl_tail+1));
2076 
2077 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2078 		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2079 			break;
2080 
2081 		newtail->status |= RBP_LOANED;
2082 		he_dev->rbpl_tail = newtail;
2083 		++moved;
2084 	}
2085 
2086 	if (moved)
2087 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2088 }
2089 
2090 #ifdef USE_RBPS
2091 static void
2092 he_service_rbps(struct he_dev *he_dev, int group)
2093 {
2094 	struct he_rbp *newtail;
2095 	struct he_rbp *rbps_head;
2096 	int moved = 0;
2097 
2098 	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2099 					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2100 
2101 	for (;;) {
2102 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2103 						RBPS_MASK(he_dev->rbps_tail+1));
2104 
2105 		/* table 3.42 -- rbps_tail should never be set to rbps_head */
2106 		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2107 			break;
2108 
2109 		newtail->status |= RBP_LOANED;
2110 		he_dev->rbps_tail = newtail;
2111 		++moved;
2112 	}
2113 
2114 	if (moved)
2115 		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2116 }
2117 #endif /* USE_RBPS */
2118 
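/*
 * he_tasklet() is the deferred half of interrupt handling: it decodes
 * each interrupt status word into a (type, group) pair, dispatches to
 * the matching service routine and finally acknowledges the new queue
 * position to the adapter.  without USE_TASKLET it is called directly
 * from the hard irq handler, which already holds global_lock.
 */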
2119 static void
2120 he_tasklet(unsigned long data)
2121 {
2122 	unsigned long flags;
2123 	struct he_dev *he_dev = (struct he_dev *) data;
2124 	int group, type;
2125 	int updated = 0;
2126 
2127 	HPRINTK("tasklet (0x%lx)\n", data);
2128 #ifdef USE_TASKLET
2129 	spin_lock_irqsave(&he_dev->global_lock, flags);
2130 #endif
2131 
2132 	while (he_dev->irq_head != he_dev->irq_tail) {
2133 		++updated;
2134 
2135 		type = ITYPE_TYPE(he_dev->irq_head->isw);
2136 		group = ITYPE_GROUP(he_dev->irq_head->isw);
2137 
2138 		switch (type) {
2139 			case ITYPE_RBRQ_THRESH:
2140 				HPRINTK("rbrq%d threshold\n", group);
2141 				/* fall through */
2142 			case ITYPE_RBRQ_TIMER:
2143 				if (he_service_rbrq(he_dev, group)) {
2144 					he_service_rbpl(he_dev, group);
2145 #ifdef USE_RBPS
2146 					he_service_rbps(he_dev, group);
2147 #endif /* USE_RBPS */
2148 				}
2149 				break;
2150 			case ITYPE_TBRQ_THRESH:
2151 				HPRINTK("tbrq%d threshold\n", group);
2152 				/* fall through */
2153 			case ITYPE_TPD_COMPLETE:
2154 				he_service_tbrq(he_dev, group);
2155 				break;
2156 			case ITYPE_RBPL_THRESH:
2157 				he_service_rbpl(he_dev, group);
2158 				break;
2159 			case ITYPE_RBPS_THRESH:
2160 #ifdef USE_RBPS
2161 				he_service_rbps(he_dev, group);
2162 #endif /* USE_RBPS */
2163 				break;
2164 			case ITYPE_PHY:
2165 				HPRINTK("phy interrupt\n");
2166 #ifdef CONFIG_ATM_HE_USE_SUNI
2167 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2168 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2169 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2170 				spin_lock_irqsave(&he_dev->global_lock, flags);
2171 #endif
2172 				break;
2173 			case ITYPE_OTHER:
2174 				switch (type|group) {
2175 					case ITYPE_PARITY:
2176 						hprintk("parity error\n");
2177 						break;
2178 					case ITYPE_ABORT:
2179 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2180 						break;
2181 				}
2182 				break;
2183 			case ITYPE_TYPE(ITYPE_INVALID):
2184 				/* see 8.1.1 -- check all queues */
2185 
2186 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2187 
2188 				he_service_rbrq(he_dev, 0);
2189 				he_service_rbpl(he_dev, 0);
2190 #ifdef USE_RBPS
2191 				he_service_rbps(he_dev, 0);
2192 #endif /* USE_RBPS */
2193 				he_service_tbrq(he_dev, 0);
2194 				break;
2195 			default:
2196 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2197 		}
2198 
2199 		he_dev->irq_head->isw = ITYPE_INVALID;
2200 
2201 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2202 	}
2203 
2204 	if (updated) {
2205 		if (updated > he_dev->irq_peak)
2206 			he_dev->irq_peak = updated;
2207 
2208 		he_writel(he_dev,
2209 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2210 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2211 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2212 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2213 	}
2214 #ifdef USE_TASKLET
2215 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2216 #endif
2217 }
2218 
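/*
 * hard interrupt handler: snapshot the adapter's irq tail offset,
 * fall back to re-reading it from the adapter if it looks stale
 * (8.1.2 controller errata), then defer the real work to the tasklet
 * and clear the interrupt.
 */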
2219 static irqreturn_t
2220 he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2221 {
2222 	unsigned long flags;
2223 	struct he_dev *he_dev = (struct he_dev * )dev_id;
2224 	int handled = 0;
2225 
2226 	if (he_dev == NULL)
2227 		return IRQ_NONE;
2228 
2229 	spin_lock_irqsave(&he_dev->global_lock, flags);
2230 
2231 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2232 						(*he_dev->irq_tailoffset << 2));
2233 
2234 	if (he_dev->irq_tail == he_dev->irq_head) {
2235 		HPRINTK("tailoffset not updated?\n");
2236 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2237 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2238 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2239 	}
2240 
2241 #ifdef DEBUG
2242 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2243 		hprintk("spurious (or shared) interrupt?\n");
2244 #endif
2245 
2246 	if (he_dev->irq_head != he_dev->irq_tail) {
2247 		handled = 1;
2248 #ifdef USE_TASKLET
2249 		tasklet_schedule(&he_dev->tasklet);
2250 #else
2251 		he_tasklet((unsigned long) he_dev);
2252 #endif
2253 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2254 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2255 	}
2256 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2257 	return IRQ_RETVAL(handled);
2259 }
2260 
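/*
 * __enqueue_tpd() posts one tpd on the transmit packet descriptor
 * ready queue.  called with he_dev->global_lock held; if the ring is
 * truly full the pdu is dropped (see the FIXME below).
 */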
2261 static __inline__ void
2262 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2263 {
2264 	struct he_tpdrq *new_tail;
2265 
2266 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2267 					tpd, cid, he_dev->tpdrq_tail);
2268 
2269 	/* new_tail = he_dev->tpdrq_tail; */
2270 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2271 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2272 
2273 	/*
2274 	 * check to see if we are about to set the tail == head
2275 	 * if true, update the head pointer from the adapter
2276 	 * to see if this is really the case (reading the queue
2277 	 * head for every enqueue would be unnecessarily slow)
2278 	 */
2279 
2280 	if (new_tail == he_dev->tpdrq_head) {
2281 		he_dev->tpdrq_head = (struct he_tpdrq *)
2282 			(((unsigned long)he_dev->tpdrq_base) |
2283 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2284 
2285 		if (new_tail == he_dev->tpdrq_head) {
2286 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2287 			/*
2288 			 * FIXME
2289 			 * push tpd onto a transmit backlog queue
2290 			 * after service_tbrq, service the backlog
2291 			 * for now, we just drop the pdu
2292 			 */
2293 			if (tpd->skb) {
2294 				if (tpd->vcc->pop)
2295 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2296 				else
2297 					dev_kfree_skb_any(tpd->skb);
2298 				atomic_inc(&tpd->vcc->stats->tx_err);
2299 			}
2300 #ifdef USE_TPD_POOL
2301 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2302 #else
2303 			tpd->inuse = 0;
2304 #endif
2305 			return;
2306 		}
2307 	}
2308 
2309 	/* 2.1.5 transmit packet descriptor ready queue */
2310 #ifdef USE_TPD_POOL
2311 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2312 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2313 #else
2314 	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2315 				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2316 #endif
2317 	he_dev->tpdrq_tail->cid = cid;
2318 	wmb();
2319 
2320 	he_dev->tpdrq_tail = new_tail;
2321 
2322 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2323 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2324 }
2325 
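/*
 * he_open() binds a vcc to the adapter.  on the tx side it validates
 * the aal, checks that the connection is idle and programs the
 * per-connection tsr registers -- cbr additionally claims one of the
 * shared cs_stper rate registers, subject to the 90% link admission
 * limit (8.2.3).  on the rx side it programs rsr1/rsr4 and writes the
 * open bit in rsr0 last (5.1.11).
 */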
2326 static int
2327 he_open(struct atm_vcc *vcc)
2328 {
2329 	unsigned long flags;
2330 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2331 	struct he_vcc *he_vcc;
2332 	int err = 0;
2333 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2334 	short vpi = vcc->vpi;
2335 	int vci = vcc->vci;
2336 
2337 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2338 		return 0;
2339 
2340 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2341 
2342 	set_bit(ATM_VF_ADDR, &vcc->flags);
2343 
2344 	cid = he_mkcid(he_dev, vpi, vci);
2345 
2346 	he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2347 	if (he_vcc == NULL) {
2348 		hprintk("unable to allocate he_vcc during open\n");
2349 		return -ENOMEM;
2350 	}
2351 
2352 	he_vcc->iov_tail = he_vcc->iov_head;
2353 	he_vcc->pdu_len = 0;
2354 	he_vcc->rc_index = -1;
2355 
2356 	init_waitqueue_head(&he_vcc->rx_waitq);
2357 	init_waitqueue_head(&he_vcc->tx_waitq);
2358 
2359 	vcc->dev_data = he_vcc;
2360 
2361 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2362 		int pcr_goal;
2363 
2364 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2365 		if (pcr_goal == 0)
2366 			pcr_goal = he_dev->atm_dev->link_rate;
2367 		if (pcr_goal < 0)	/* means round down, technically */
2368 			pcr_goal = -pcr_goal;
2369 
2370 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2371 
2372 		switch (vcc->qos.aal) {
2373 			case ATM_AAL5:
2374 				tsr0_aal = TSR0_AAL5;
2375 				tsr4 = TSR4_AAL5;
2376 				break;
2377 			case ATM_AAL0:
2378 				tsr0_aal = TSR0_AAL0_SDU;
2379 				tsr4 = TSR4_AAL0_SDU;
2380 				break;
2381 			default:
2382 				err = -EINVAL;
2383 				goto open_failed;
2384 		}
2385 
2386 		spin_lock_irqsave(&he_dev->global_lock, flags);
2387 		tsr0 = he_readl_tsr0(he_dev, cid);
2388 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2389 
2390 		if (TSR0_CONN_STATE(tsr0) != 0) {
2391 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2392 			err = -EBUSY;
2393 			goto open_failed;
2394 		}
2395 
2396 		switch (vcc->qos.txtp.traffic_class) {
2397 			case ATM_UBR:
2398 				/* 2.3.3.1 open connection ubr */
2399 
2400 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2401 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2402 				break;
2403 
2404 			case ATM_CBR:
2405 				/* 2.3.3.2 open connection cbr */
2406 
2407 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2408 				if ((he_dev->total_bw + pcr_goal)
2409 					> (he_dev->atm_dev->link_rate * 9 / 10))
2410 				{
2411 					err = -EBUSY;
2412 					goto open_failed;
2413 				}
2414 
2415 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2416 
2417 				/* find an unused cs_stper register */
2418 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2419 					if (he_dev->cs_stper[reg].inuse == 0 ||
2420 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2421 						break;
2422 
2423 				if (reg == HE_NUM_CS_STPER) {
2424 					err = -EBUSY;
2425 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2426 					goto open_failed;
2427 				}
2428 
2429 				he_dev->total_bw += pcr_goal;
2430 
2431 				he_vcc->rc_index = reg;
2432 				++he_dev->cs_stper[reg].inuse;
2433 				he_dev->cs_stper[reg].pcr = pcr_goal;
2434 
2435 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2436 				period = clock / pcr_goal;
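				/* e.g. with the non-622 (50 MHz) clock above,
				 * pcr_goal = 100000 cells/s gives period = 500
				 * and CS_STPER is loaded with rate_to_atmf(250)
				 */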
2437 
2438 				HPRINTK("rc_index = %d period = %d\n",
2439 								reg, period);
2440 
2441 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2442 							CS_STPER0 + reg);
2443 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2444 
2445 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2446 							TSR0_RC_INDEX(reg);
2447 
2448 				break;
2449 			default:
2450 				err = -EINVAL;
2451 				goto open_failed;
2452 		}
2453 
2454 		spin_lock_irqsave(&he_dev->global_lock, flags);
2455 
2456 		he_writel_tsr0(he_dev, tsr0, cid);
2457 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2458 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2459 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2460 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2461 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2462 
2463 		he_writel_tsr3(he_dev, 0x0, cid);
2464 		he_writel_tsr5(he_dev, 0x0, cid);
2465 		he_writel_tsr6(he_dev, 0x0, cid);
2466 		he_writel_tsr7(he_dev, 0x0, cid);
2467 		he_writel_tsr8(he_dev, 0x0, cid);
2468 		he_writel_tsr10(he_dev, 0x0, cid);
2469 		he_writel_tsr11(he_dev, 0x0, cid);
2470 		he_writel_tsr12(he_dev, 0x0, cid);
2471 		he_writel_tsr13(he_dev, 0x0, cid);
2472 		he_writel_tsr14(he_dev, 0x0, cid);
2473 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2474 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2475 	}
2476 
2477 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2478 		unsigned aal;
2479 
2480 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2481 				&HE_VCC(vcc)->rx_waitq);
2482 
2483 		switch (vcc->qos.aal) {
2484 			case ATM_AAL5:
2485 				aal = RSR0_AAL5;
2486 				break;
2487 			case ATM_AAL0:
2488 				aal = RSR0_RAWCELL;
2489 				break;
2490 			default:
2491 				err = -EINVAL;
2492 				goto open_failed;
2493 		}
2494 
2495 		spin_lock_irqsave(&he_dev->global_lock, flags);
2496 
2497 		rsr0 = he_readl_rsr0(he_dev, cid);
2498 		if (rsr0 & RSR0_OPEN_CONN) {
2499 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2500 
2501 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2502 			err = -EBUSY;
2503 			goto open_failed;
2504 		}
2505 
2506 #ifdef USE_RBPS
2507 		rsr1 = RSR1_GROUP(0);
2508 		rsr4 = RSR4_GROUP(0);
2509 #else /* !USE_RBPS */
2510 		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2511 		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2512 #endif /* USE_RBPS */
2513 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2514 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2515 
2516 #ifdef USE_CHECKSUM_HW
2517 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2518 			rsr0 |= RSR0_TCP_CKSUM;
2519 #endif
2520 
2521 		he_writel_rsr4(he_dev, rsr4, cid);
2522 		he_writel_rsr1(he_dev, rsr1, cid);
2523 		/* 5.1.11 last parameter initialized should be
2524 			  the open/closed indication in rsr0 */
2525 		he_writel_rsr0(he_dev,
2526 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2527 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2528 
2529 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2530 	}
2531 
2532 open_failed:
2533 
2534 	if (err) {
2535 		kfree(he_vcc);
2536 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2537 	}
2538 	else
2539 		set_bit(ATM_VF_READY, &vcc->flags);
2540 
2541 	return err;
2542 }
2543 
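/*
 * he_close() mirrors he_open().  the rx side issues RSR0_CLOSE_CONN
 * plus an RXCON_CLOSE mailbox command and sleeps on rx_waitq; the tx
 * side waits for outstanding skbs to drain, queues an end-of-stream
 * tpd, and then polls tsr4/tsr0 until the session has ended (2.1.2,
 * 2.3.1.1).
 */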
2544 static void
2545 he_close(struct atm_vcc *vcc)
2546 {
2547 	unsigned long flags;
2548 	DECLARE_WAITQUEUE(wait, current);
2549 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2550 	struct he_tpd *tpd;
2551 	unsigned cid;
2552 	struct he_vcc *he_vcc = HE_VCC(vcc);
2553 #define MAX_RETRY 30
2554 	int retry = 0, sleep = 1, tx_inuse;
2555 
2556 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2557 
2558 	clear_bit(ATM_VF_READY, &vcc->flags);
2559 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2560 
2561 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2562 		int timeout;
2563 
2564 		HPRINTK("close rx cid 0x%x\n", cid);
2565 
2566 		/* 2.7.2.2 close receive operation */
2567 
2568 		/* wait for previous close (if any) to finish */
2569 
2570 		spin_lock_irqsave(&he_dev->global_lock, flags);
2571 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2572 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2573 			udelay(250);
2574 		}
2575 
2576 		set_current_state(TASK_UNINTERRUPTIBLE);
2577 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2578 
2579 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2580 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2581 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2582 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2583 
2584 		timeout = schedule_timeout(30*HZ);
2585 
2586 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2587 		set_current_state(TASK_RUNNING);
2588 
2589 		if (timeout == 0)
2590 			hprintk("close rx timeout cid 0x%x\n", cid);
2591 
2592 		HPRINTK("close rx cid 0x%x complete\n", cid);
2593 
2594 	}
2595 
2596 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2597 		volatile unsigned tsr4, tsr0;
2598 		int timeout;
2599 
2600 		HPRINTK("close tx cid 0x%x\n", cid);
2601 
2602 		/* 2.1.2
2603 		 *
2604 		 * ... the host must first stop queueing packets to the TPDRQ
2605 		 * on the connection to be closed, then wait for all outstanding
2606 		 * packets to be transmitted and their buffers returned to the
2607 		 * TBRQ. When the last packet on the connection arrives in the
2608 		 * TBRQ, the host issues the close command to the adapter.
2609 		 */
2610 
2611 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2612 		       (retry < MAX_RETRY)) {
2613 			msleep(sleep);
2614 			if (sleep < 250)
2615 				sleep = sleep * 2;
2616 
2617 			++retry;
2618 		}
2619 
2620 		if (tx_inuse)
2621 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2622 
2623 		/* 2.3.1.1 generic close operations with flush */
2624 
2625 		spin_lock_irqsave(&he_dev->global_lock, flags);
2626 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2627 					/* also clears TSR4_SESSION_ENDED */
2628 
2629 		switch (vcc->qos.txtp.traffic_class) {
2630 			case ATM_UBR:
2631 				he_writel_tsr1(he_dev,
2632 					TSR1_MCR(rate_to_atmf(200000))
2633 					| TSR1_PCR(0), cid);
2634 				break;
2635 			case ATM_CBR:
2636 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2637 				break;
2638 		}
2639 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2640 
2641 		tpd = __alloc_tpd(he_dev);
2642 		if (tpd == NULL) {
2643 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2644 			goto close_tx_incomplete;
2645 		}
2646 		tpd->status |= TPD_EOS | TPD_INT;
2647 		tpd->skb = NULL;
2648 		tpd->vcc = vcc;
2649 		wmb();
2650 
2651 		set_current_state(TASK_UNINTERRUPTIBLE);
2652 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2653 		__enqueue_tpd(he_dev, tpd, cid);
2654 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2655 
2656 		timeout = schedule_timeout(30*HZ);
2657 
2658 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2659 		set_current_state(TASK_RUNNING);
2660 
2661 		spin_lock_irqsave(&he_dev->global_lock, flags);
2662 
2663 		if (timeout == 0) {
2664 			hprintk("close tx timeout cid 0x%x\n", cid);
2665 			goto close_tx_incomplete;
2666 		}
2667 
2668 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2669 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2670 			udelay(250);
2671 		}
2672 
2673 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2674 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2675 			udelay(250);
2676 		}
2677 
2678 close_tx_incomplete:
2679 
2680 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2681 			int reg = he_vcc->rc_index;
2682 
2683 			HPRINTK("cs_stper reg = %d\n", reg);
2684 
2685 			if (he_dev->cs_stper[reg].inuse == 0)
2686 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2687 			else
2688 				--he_dev->cs_stper[reg].inuse;
2689 
2690 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2691 		}
2692 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2693 
2694 		HPRINTK("close tx cid 0x%x complete\n", cid);
2695 	}
2696 
2697 	kfree(he_vcc);
2698 
2699 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2700 }
2701 
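/*
 * he_send() maps the skb for dma and hands it to the adapter as a
 * single tpd (or a chain of tpds with USE_SCATTERGATHER).  for aal0
 * the pti/clp bits are lifted from the 4-byte cell header supplied by
 * the caller before it is stripped down to the 48-byte payload.
 */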
2702 static int
2703 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2704 {
2705 	unsigned long flags;
2706 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2707 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2708 	struct he_tpd *tpd;
2709 #ifdef USE_SCATTERGATHER
2710 	int i, slot = 0;
2711 #endif
2712 
2713 #define HE_TPD_BUFSIZE 0xffff
2714 
2715 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2716 
2717 	if ((skb->len > HE_TPD_BUFSIZE) ||
2718 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2719 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2720 		if (vcc->pop)
2721 			vcc->pop(vcc, skb);
2722 		else
2723 			dev_kfree_skb_any(skb);
2724 		atomic_inc(&vcc->stats->tx_err);
2725 		return -EINVAL;
2726 	}
2727 
2728 #ifndef USE_SCATTERGATHER
2729 	if (skb_shinfo(skb)->nr_frags) {
2730 		hprintk("no scatter/gather support\n");
2731 		if (vcc->pop)
2732 			vcc->pop(vcc, skb);
2733 		else
2734 			dev_kfree_skb_any(skb);
2735 		atomic_inc(&vcc->stats->tx_err);
2736 		return -EINVAL;
2737 	}
2738 #endif
2739 	spin_lock_irqsave(&he_dev->global_lock, flags);
2740 
2741 	tpd = __alloc_tpd(he_dev);
2742 	if (tpd == NULL) {
2743 		if (vcc->pop)
2744 			vcc->pop(vcc, skb);
2745 		else
2746 			dev_kfree_skb_any(skb);
2747 		atomic_inc(&vcc->stats->tx_err);
2748 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2749 		return -ENOMEM;
2750 	}
2751 
2752 	if (vcc->qos.aal == ATM_AAL5)
2753 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2754 	else {
2755 		char *pti_clp = (void *) (skb->data + 3);
2756 		int clp, pti;
2757 
2758 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2759 		clp = (*pti_clp & ATM_HDR_CLP);
2760 		tpd->status |= TPD_CELLTYPE(pti);
2761 		if (clp)
2762 			tpd->status |= TPD_CLP;
2763 
2764 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2765 	}
2766 
2767 #ifdef USE_SCATTERGATHER
2768 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2769 				skb->len - skb->data_len, PCI_DMA_TODEVICE);
2770 	tpd->iovec[slot].len = skb->len - skb->data_len;
2771 	++slot;
2772 
2773 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2774 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2775 
2776 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2777 			tpd->vcc = vcc;
2778 			tpd->skb = NULL;	/* not the last fragment
2779 						   so don't ->push() yet */
2780 			wmb();
2781 
2782 			__enqueue_tpd(he_dev, tpd, cid);
2783 			tpd = __alloc_tpd(he_dev);
2784 			if (tpd == NULL) {
2785 				if (vcc->pop)
2786 					vcc->pop(vcc, skb);
2787 				else
2788 					dev_kfree_skb_any(skb);
2789 				atomic_inc(&vcc->stats->tx_err);
2790 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2791 				return -ENOMEM;
2792 			}
2793 			tpd->status |= TPD_USERCELL;
2794 			slot = 0;
2795 		}
2796 
2797 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2798 			(void *) page_address(frag->page) + frag->page_offset,
2799 				frag->size, PCI_DMA_TODEVICE);
2800 		tpd->iovec[slot].len = frag->size;
2801 		++slot;
2802 
2803 	}
2804 
2805 	tpd->iovec[slot - 1].len |= TPD_LST;
2806 #else
2807 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2808 	tpd->length0 = skb->len | TPD_LST;
2809 #endif
2810 	tpd->status |= TPD_INT;
2811 
2812 	tpd->vcc = vcc;
2813 	tpd->skb = skb;
2814 	wmb();
2815 	ATM_SKB(skb)->vcc = vcc;
2816 
2817 	__enqueue_tpd(he_dev, tpd, cid);
2818 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2819 
2820 	atomic_inc(&vcc->stats->tx);
2821 
2822 	return 0;
2823 }
2824 
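/*
 * he_ioctl() implements HE_GET_REG, a debugging hook that lets a
 * CAP_NET_ADMIN process read one pci, rcm, tcm or mailbox register
 * through a struct he_ioctl_reg exchange; anything else is passed to
 * the phy driver when one is attached.
 */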
2825 static int
2826 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2827 {
2828 	unsigned long flags;
2829 	struct he_dev *he_dev = HE_DEV(atm_dev);
2830 	struct he_ioctl_reg reg;
2831 	int err = 0;
2832 
2833 	switch (cmd) {
2834 		case HE_GET_REG:
2835 			if (!capable(CAP_NET_ADMIN))
2836 				return -EPERM;
2837 
2838 			if (copy_from_user(&reg, arg,
2839 					   sizeof(struct he_ioctl_reg)))
2840 				return -EFAULT;
2841 
2842 			spin_lock_irqsave(&he_dev->global_lock, flags);
2843 			switch (reg.type) {
2844 				case HE_REGTYPE_PCI:
2845 					reg.val = he_readl(he_dev, reg.addr);
2846 					break;
2847 				case HE_REGTYPE_RCM:
2848 					reg.val =
2849 						he_readl_rcm(he_dev, reg.addr);
2850 					break;
2851 				case HE_REGTYPE_TCM:
2852 					reg.val =
2853 						he_readl_tcm(he_dev, reg.addr);
2854 					break;
2855 				case HE_REGTYPE_MBOX:
2856 					reg.val =
2857 						he_readl_mbox(he_dev, reg.addr);
2858 					break;
2859 				default:
2860 					err = -EINVAL;
2861 					break;
2862 			}
2863 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2864 			if (err == 0)
2865 				if (copy_to_user(arg, &reg,
2866 							sizeof(struct he_ioctl_reg)))
2867 					return -EFAULT;
2868 			break;
2869 		default:
2870 #ifdef CONFIG_ATM_HE_USE_SUNI
2871 			if (atm_dev->phy && atm_dev->phy->ioctl)
2872 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2873 #else /* CONFIG_ATM_HE_USE_SUNI */
2874 			err = -EINVAL;
2875 #endif /* CONFIG_ATM_HE_USE_SUNI */
2876 			break;
2877 	}
2878 
2879 	return err;
2880 }
2881 
2882 static void
2883 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2884 {
2885 	unsigned long flags;
2886 	struct he_dev *he_dev = HE_DEV(atm_dev);
2887 
2888 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2889 
2890 	spin_lock_irqsave(&he_dev->global_lock, flags);
2891 	he_writel(he_dev, val, FRAMER + (addr*4));
2892 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2893 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2894 }
2895 
2896 
2897 static unsigned char
2898 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2899 {
2900 	unsigned long flags;
2901 	struct he_dev *he_dev = HE_DEV(atm_dev);
2902 	unsigned reg;
2903 
2904 	spin_lock_irqsave(&he_dev->global_lock, flags);
2905 	reg = he_readl(he_dev, FRAMER + (addr*4));
2906 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2907 
2908 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2909 	return reg;
2910 }
2911 
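/*
 * /proc read callback: emits one line per call, selected by *pos --
 * the version banner, the error counters (mcc/oec/dcc/cec accumulate
 * across reads), queue sizes and peaks, and the cbr rate-controller
 * table.
 */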
2912 static int
2913 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2914 {
2915 	unsigned long flags;
2916 	struct he_dev *he_dev = HE_DEV(dev);
2917 	int left, i;
2918 #ifdef notdef
2919 	struct he_rbrq *rbrq_tail;
2920 	struct he_tpdrq *tpdrq_head;
2921 	int rbpl_head, rbpl_tail;
2922 #endif
2923 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2924 
2925 
2926 	left = *pos;
2927 	if (!left--)
2928 		return sprintf(page, "%s\n", version);
2929 
2930 	if (!left--)
2931 		return sprintf(page, "%s%s\n\n",
2932 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2933 
2934 	if (!left--)
2935 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2936 
2937 	spin_lock_irqsave(&he_dev->global_lock, flags);
2938 	mcc += he_readl(he_dev, MCC);
2939 	oec += he_readl(he_dev, OEC);
2940 	dcc += he_readl(he_dev, DCC);
2941 	cec += he_readl(he_dev, CEC);
2942 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2943 
2944 	if (!left--)
2945 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2946 							mcc, oec, dcc, cec);
2947 
2948 	if (!left--)
2949 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2950 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2951 
2952 	if (!left--)
2953 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2954 						CONFIG_TPDRQ_SIZE);
2955 
2956 	if (!left--)
2957 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2958 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2959 
2960 	if (!left--)
2961 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2962 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2963 
2964 
2965 #ifdef notdef
2966 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2967 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2968 
2969 	inuse = rbpl_head - rbpl_tail;
2970 	if (inuse < 0)
2971 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2972 	inuse /= sizeof(struct he_rbp);
2973 
2974 	if (!left--)
2975 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2976 						CONFIG_RBPL_SIZE, inuse);
2977 #endif
2978 
2979 	if (!left--)
2980 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2981 
2982 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2983 		if (!left--)
2984 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2985 						he_dev->cs_stper[i].pcr,
2986 						he_dev->cs_stper[i].inuse);
2987 
2988 	if (!left--)
2989 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2990 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2991 
2992 	return 0;
2993 }
2994 
2995 /* eeprom routines  -- see 4.7 */
2996 
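/*
 * read_prom_byte() bit-bangs the serial eeprom through HOST_CNTL:
 * raise write enable, clock out the READ opcode from readtab[], clock
 * out the 8-bit address msb first, then clock the data byte back in
 * one bit at a time through ID_DOUT.
 */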
2997 u8
2998 read_prom_byte(struct he_dev *he_dev, int addr)
2999 {
3000 	u32 val = 0, tmp_read = 0;
3001 	int i, j = 0;
3002 	u8 byte_read = 0;
3003 
3004 	val = readl(he_dev->membase + HOST_CNTL);
3005 	val &= 0xFFFFE0FF;
3006 
3007 	/* Turn on write enable */
3008 	val |= 0x800;
3009 	he_writel(he_dev, val, HOST_CNTL);
3010 
3011 	/* Send READ instruction */
3012 	for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
3013 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
3014 		udelay(EEPROM_DELAY);
3015 	}
3016 
3017 	/* Next, we need to send the byte address to read from */
3018 	for (i = 7; i >= 0; i--) {
3019 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3020 		udelay(EEPROM_DELAY);
3021 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3022 		udelay(EEPROM_DELAY);
3023 	}
3024 
3025 	j = 0;
3026 
3027 	val &= 0xFFFFF7FF;      /* Turn off write enable */
3028 	he_writel(he_dev, val, HOST_CNTL);
3029 
3030 	/* Now, we can read data from the EEPROM by clocking it in */
3031 	for (i = 7; i >= 0; i--) {
3032 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3033 		udelay(EEPROM_DELAY);
3034 		tmp_read = he_readl(he_dev, HOST_CNTL);
3035 		byte_read |= (unsigned char)
3036 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3037 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3038 		udelay(EEPROM_DELAY);
3039 	}
3040 
3041 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
3042 	udelay(EEPROM_DELAY);
3043 
3044 	return byte_read;
3045 }
3046 
3047 MODULE_LICENSE("GPL");
3048 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3049 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3050 module_param(disable64, bool, 0);
3051 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3052 module_param(nvpibits, short, 0);
3053 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3054 module_param(nvcibits, short, 0);
3055 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3056 module_param(rx_skb_reserve, short, 0);
3057 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3058 module_param(irq_coalesce, bool, 0);
3059 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3060 module_param(sdh, bool, 0);
3061 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3062 
3063 static struct pci_device_id he_pci_tbl[] = {
3064 	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3065 	  0, 0, 0 },
3066 	{ 0, }
3067 };
3068 
3069 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3070 
3071 static struct pci_driver he_driver = {
3072 	.name =		"he",
3073 	.probe =	he_init_one,
3074 	.remove =	__devexit_p(he_remove_one),
3075 	.id_table =	he_pci_tbl,
3076 };
3077 
3078 static int __init he_init(void)
3079 {
3080 	return pci_register_driver(&he_driver);
3081 }
3082 
3083 static void __exit he_cleanup(void)
3084 {
3085 	pci_unregister_driver(&he_driver);
3086 }
3087 
3088 module_init(he_init);
3089 module_exit(he_cleanup);
3090