/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
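
/* NB: both macros expand to a printk() that references a variable named
 * 'he_dev', so they can only be used in functions where such a local
 * (or parameter) is in scope */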

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
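
/* each CLK_LOW/CLK_HIGH pair above clocks one opcode bit out to the
 * serial EEPROM, most significant bit first; entries that include
 * SI_HIGH hold the data-in line high for that bit, so the table shifts
 * out the READ opcode 0000 0011b noted in the comment above */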

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
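
/* 17 entries: eight full clock pulses, one per data bit shifted in or
 * out, leaving the clock parked low at the end */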

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
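
/* CON_CTL/CON_DAT form an indirect window into the connection memories
 * (TCM, RCM) and the mailbox registers: a write stages the data in
 * CON_DAT, then kicks CON_CTL with the target address and flags and
 * polls CON_CTL_BUSY until the access retires.  he_readl_internal()
 * below is the mirror image -- kick, poll, then fetch the result from
 * CON_DAT. */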

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
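
/* for illustration: assuming, say, 10 vci bits, vpi 1/vci 32 maps to
 * cid = (1 << 10) | 32 = 0x420; the 0x1fff mask bounds the result to
 * the connection id space */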

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
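
/* note the address arithmetic in the macros above: TSRA allots 8 words
 * per cid (cid << 3), TSRB 4 (cid << 2), TSRC 2 (cid << 1) and TSRD 1,
 * matching the progressively smaller banks in the transmit connection
 * memory map drawn in he_start() below */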

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
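
/* __find_vcc() walks the shared vcc hash without taking any lock of
 * its own, so callers are expected to hold vcc_sklist_lock -- e.g.
 * he_service_rbrq() takes it for reading around its lookups */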

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kmalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	memset(he_dev, 0, sizeof(struct he_dev));

	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
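
/* worked example (for illustration): at OC-3, ATM_OC3_PCR is 353207
 * cps; 353207 << 9 normalizes to 689 after 18 right shifts, so
 * exp = 18, the stored mantissa is 689 & 0x1ff = 0xb1, and the
 * encoding is 0x4000 | (18 << 9) | 0xb1 = 0x64b1 -- the same constant
 * the 155 Mb/s branch of he_init_cs_block() below loads into several
 * of the CS block registers (e.g. CS_WCRMAX) */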

static void __init
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __init
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __init
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __init
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __init
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid;
		 * each value is the period (in clock cycles) of the timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}

static int __init
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support ABR connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on the rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		 * instead of '/ 512', use '>> 9' to avoid emitting a call
		 * to the 64-bit division helper (__udivdi3) on 32-bit x86
		 */
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits); there are two table entries
		 * in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is not as pretty, but it avoids __udivdi3 and is
		   mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __init
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	int err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
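/* i.e. 209 = 1 + 16 (AVG_LAT) + 192 (BURST_SIZE/BUS_SIZE) */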
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
		he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM",
		dev->esi[0], dev->esi[1], dev->esi[2],
		dev->esi[3], dev->esi[4], dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs),		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif
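
	/* with USE_TPD_POOL (the default, see the top of this file) tpds
	 * are carved from a pci_pool and outstanding ones tracked on a
	 * list; the #else branch instead uses a static ring of
	 * CONFIG_NUMTPDS descriptors recycled via their inuse flag (see
	 * __alloc_tpd() below) */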

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		/* must match the allocation in he_init_tpdrq() */
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
1721 
1722 static struct he_tpd *
1723 __alloc_tpd(struct he_dev *he_dev)
1724 {
1725 #ifdef USE_TPD_POOL
1726 	struct he_tpd *tpd;
1727 	dma_addr_t dma_handle;
1728 
1729 	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
1730 	if (tpd == NULL)
1731 		return NULL;
1732 
1733 	tpd->status = TPD_ADDR(dma_handle);
1734 	tpd->reserved = 0;
1735 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1736 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1737 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1738 
1739 	return tpd;
1740 #else
1741 	int i;
1742 
1743 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1744 		++he_dev->tpd_head;
1745 		if (he_dev->tpd_head > he_dev->tpd_end) {
1746 			he_dev->tpd_head = he_dev->tpd_base;
1747 		}
1748 
1749 		if (!he_dev->tpd_head->inuse) {
1750 			he_dev->tpd_head->inuse = 1;
1751 			he_dev->tpd_head->status &= TPD_MASK;
1752 			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1753 			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1754 			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1755 			return he_dev->tpd_head;
1756 		}
1757 	}
1758 	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1759 	return NULL;
1760 #endif
1761 }
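
/*
 * two tpd allocation strategies above: with USE_TPD_POOL the tpds
 * come from a pci_pool and are tracked on he_dev->outstanding_tpds
 * until the adapter hands them back through the tbrq; without it, a
 * fixed ring of CONFIG_NUMTPDS descriptors is scanned for a free
 * (!inuse) slot starting at tpd_head.  the pool variant trades an
 * address lookup at completion time for not having to size the ring
 * up front.
 */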
1762 
1763 #define AAL5_LEN(buf,len) 						\
1764 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1765 				(((unsigned char *)(buf))[(len)-5]))
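
/*
 * per the AAL5 CPCS (ITU-T I.363.5), the last eight bytes of the
 * reassembled pdu are the trailer: UU(1), CPI(1), Length(2), CRC(4).
 * AAL5_LEN() reads the two big-endian length bytes at len-6/len-5,
 * e.g. a trailer ending ... 0x05 0xdc <crc32> yields 0x05dc (1500).
 */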
1766 
1767 /* 2.10.1.2 receive
1768  *
1769  * aal5 packets can optionally return the tcp checksum in the lower
1770  * 16 bits of the crc (RSR0_TCP_CKSUM)
1771  */
1772 
1773 #define TCP_CKSUM(buf,len) 						\
1774 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1775 				(((unsigned char *)(buf))[(len-1)]))
1776 
1777 static int
1778 he_service_rbrq(struct he_dev *he_dev, int group)
1779 {
1780 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1781 				((unsigned long)he_dev->rbrq_base |
1782 					he_dev->hsp->group[group].rbrq_tail);
1783 	struct he_rbp *rbp = NULL;
1784 	unsigned cid, lastcid = -1;
1785 	unsigned buf_len = 0;
1786 	struct sk_buff *skb;
1787 	struct atm_vcc *vcc = NULL;
1788 	struct he_vcc *he_vcc;
1789 	struct he_iovec *iov;
1790 	int pdus_assembled = 0;
1791 	int updated = 0;
1792 
1793 	read_lock(&vcc_sklist_lock);
1794 	while (he_dev->rbrq_head != rbrq_tail) {
1795 		++updated;
1796 
1797 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1798 			he_dev->rbrq_head, group,
1799 			RBRQ_ADDR(he_dev->rbrq_head),
1800 			RBRQ_BUFLEN(he_dev->rbrq_head),
1801 			RBRQ_CID(he_dev->rbrq_head),
1802 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1803 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1804 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1805 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1806 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1807 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1808 
1809 #ifdef USE_RBPS
1810 		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1811 			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1812 		else
1813 #endif
1814 			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1815 
1816 		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1817 		cid = RBRQ_CID(he_dev->rbrq_head);
1818 
1819 		if (cid != lastcid)
1820 			vcc = __find_vcc(he_dev, cid);
1821 		lastcid = cid;
1822 
1823 		if (vcc == NULL) {
1824 			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
1825 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
1827 
1828 			goto next_rbrq_entry;
1829 		}
1830 
1831 		he_vcc = HE_VCC(vcc);
1832 		if (he_vcc == NULL) {
1833 			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
1834 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
1836 			goto next_rbrq_entry;
1837 		}
1838 
1839 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1840 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
1842 			goto return_host_buffers;
1843 		}
1844 
1845 		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1846 		he_vcc->iov_tail->iov_len = buf_len;
1847 		he_vcc->pdu_len += buf_len;
1848 		++he_vcc->iov_tail;
1849 
1850 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1851 			lastcid = -1;
1852 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1853 			wake_up(&he_vcc->rx_waitq);
1854 			goto return_host_buffers;
1855 		}
1856 
1857 #ifdef notdef
1858 		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1859 			hprintk("iovec full!  cid 0x%x\n", cid);
1860 			goto return_host_buffers;
1861 		}
1862 #endif
1863 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1864 			goto next_rbrq_entry;
1865 
1866 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1867 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1868 			HPRINTK("%s%s (%d.%d)\n",
1869 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1870 							? "CRC_ERR " : "",
1871 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1872 							? "LEN_ERR" : "",
1873 							vcc->vpi, vcc->vci);
1874 			atomic_inc(&vcc->stats->rx_err);
1875 			goto return_host_buffers;
1876 		}
1877 
1878 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1879 							GFP_ATOMIC);
1880 		if (!skb) {
1881 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1882 			goto return_host_buffers;
1883 		}
1884 
1885 		if (rx_skb_reserve > 0)
1886 			skb_reserve(skb, rx_skb_reserve);
1887 
1888 		__net_timestamp(skb);
1889 
1890 		for (iov = he_vcc->iov_head;
1891 				iov < he_vcc->iov_tail; ++iov) {
1892 #ifdef USE_RBPS
1893 			if (iov->iov_base & RBP_SMALLBUF)
1894 				memcpy(skb_put(skb, iov->iov_len),
1895 					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1896 			else
1897 #endif
1898 				memcpy(skb_put(skb, iov->iov_len),
1899 					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1900 		}
1901 
1902 		switch (vcc->qos.aal) {
1903 			case ATM_AAL0:
1904 				/* 2.10.1.5 raw cell receive */
1905 				skb->len = ATM_AAL0_SDU;
1906 				skb->tail = skb->data + skb->len;
1907 				break;
1908 			case ATM_AAL5:
1909 				/* 2.10.1.2 aal5 receive */
1910 
1911 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1912 				skb->tail = skb->data + skb->len;
1913 #ifdef USE_CHECKSUM_HW
1914 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1915 					skb->ip_summed = CHECKSUM_COMPLETE;
1916 					skb->csum = TCP_CKSUM(skb->data,
1917 							he_vcc->pdu_len);
1918 				}
1919 #endif
1920 				break;
1921 		}
1922 
1923 #ifdef should_never_happen
1924 		if (skb->len > vcc->qos.rxtp.max_sdu)
1925 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1926 #endif
1927 
1928 #ifdef notdef
1929 		ATM_SKB(skb)->vcc = vcc;
1930 #endif
1931 		spin_unlock(&he_dev->global_lock);
1932 		vcc->push(vcc, skb);
1933 		spin_lock(&he_dev->global_lock);
1934 
1935 		atomic_inc(&vcc->stats->rx);
1936 
1937 return_host_buffers:
1938 		++pdus_assembled;
1939 
1940 		for (iov = he_vcc->iov_head;
1941 				iov < he_vcc->iov_tail; ++iov) {
1942 #ifdef USE_RBPS
1943 			if (iov->iov_base & RBP_SMALLBUF)
1944 				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1945 			else
1946 #endif
1947 				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1948 
1949 			rbp->status &= ~RBP_LOANED;
1950 		}
1951 
1952 		he_vcc->iov_tail = he_vcc->iov_head;
1953 		he_vcc->pdu_len = 0;
1954 
1955 next_rbrq_entry:
1956 		he_dev->rbrq_head = (struct he_rbrq *)
1957 				((unsigned long) he_dev->rbrq_base |
1958 					RBRQ_MASK(++he_dev->rbrq_head));
1959 
1960 	}
1961 	read_unlock(&vcc_sklist_lock);
1962 
1963 	if (updated) {
1964 		if (updated > he_dev->rbrq_peak)
1965 			he_dev->rbrq_peak = updated;
1966 
1967 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1968 						G0_RBRQ_H + (group * 16));
1969 	}
1970 
1971 	return pdus_assembled;
1972 }
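
/*
 * note the ring-walk idiom used above and in the other service
 * routines: the queues are power-of-two sized and size-aligned, so
 * the next entry is computed by masking the incremented pointer and
 * or-ing it back onto the base address, e.g.
 *
 *	head = (struct he_rbrq *) ((unsigned long) rbrq_base |
 *			RBRQ_MASK(++head));
 *
 * which wraps to the first entry when the end of the ring is reached
 */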
1973 
1974 static void
1975 he_service_tbrq(struct he_dev *he_dev, int group)
1976 {
1977 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1978 				((unsigned long)he_dev->tbrq_base |
1979 					he_dev->hsp->group[group].tbrq_tail);
1980 	struct he_tpd *tpd;
1981 	int slot, updated = 0;
1982 #ifdef USE_TPD_POOL
1983 	struct he_tpd *__tpd;
1984 #endif
1985 
1986 	/* 2.1.6 transmit buffer return queue */
1987 
1988 	while (he_dev->tbrq_head != tbrq_tail) {
1989 		++updated;
1990 
1991 		HPRINTK("tbrq%d 0x%x%s%s\n",
1992 			group,
1993 			TBRQ_TPD(he_dev->tbrq_head),
1994 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1995 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1996 #ifdef USE_TPD_POOL
1997 		tpd = NULL;
1998 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1999 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
2000 				tpd = __tpd;
2001 				list_del(&__tpd->entry);
2002 				break;
2003 			}
2004 		}
2005 
2006 		if (tpd == NULL) {
2007 			hprintk("unable to locate tpd for dma buffer %x\n",
2008 						TBRQ_TPD(he_dev->tbrq_head));
2009 			goto next_tbrq_entry;
2010 		}
2011 #else
2012 		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2013 #endif
2014 
		if (TBRQ_EOS(he_dev->tbrq_head)) {
			if (tpd->vcc) {
				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
			}

			goto next_tbrq_entry;
		}
2023 
2024 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2025 			if (tpd->iovec[slot].addr)
2026 				pci_unmap_single(he_dev->pci_dev,
2027 					tpd->iovec[slot].addr,
2028 					tpd->iovec[slot].len & TPD_LEN_MASK,
2029 							PCI_DMA_TODEVICE);
2030 			if (tpd->iovec[slot].len & TPD_LST)
2031 				break;
2032 
2033 		}
2034 
2035 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2036 			if (tpd->vcc && tpd->vcc->pop)
2037 				tpd->vcc->pop(tpd->vcc, tpd->skb);
2038 			else
2039 				dev_kfree_skb_any(tpd->skb);
2040 		}
2041 
2042 next_tbrq_entry:
2043 #ifdef USE_TPD_POOL
2044 		if (tpd)
2045 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2046 #else
2047 		tpd->inuse = 0;
2048 #endif
2049 		he_dev->tbrq_head = (struct he_tbrq *)
2050 				((unsigned long) he_dev->tbrq_base |
2051 					TBRQ_MASK(++he_dev->tbrq_head));
2052 	}
2053 
2054 	if (updated) {
2055 		if (updated > he_dev->tbrq_peak)
2056 			he_dev->tbrq_peak = updated;
2057 
2058 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2059 						G0_TBRQ_H + (group * 16));
2060 	}
2061 }
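
/*
 * with USE_TPD_POOL, the adapter returns only the dma address of a
 * completed tpd, so he_service_tbrq() walks he_dev->outstanding_tpds
 * to map it back to a host virtual address -- an O(n) search, but n
 * is bounded by the number of tpds in flight
 */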
2062 
2063 
2064 static void
2065 he_service_rbpl(struct he_dev *he_dev, int group)
2066 {
2067 	struct he_rbp *newtail;
2068 	struct he_rbp *rbpl_head;
2069 	int moved = 0;
2070 
2071 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2072 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2073 
2074 	for (;;) {
2075 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2076 						RBPL_MASK(he_dev->rbpl_tail+1));
2077 
2078 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2079 		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2080 			break;
2081 
2082 		newtail->status |= RBP_LOANED;
2083 		he_dev->rbpl_tail = newtail;
2084 		++moved;
2085 	}
2086 
2087 	if (moved)
2088 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2089 }
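
/*
 * buffer loaning: RBP_LOANED marks an rbp entry whose buffer is
 * currently owned by the adapter.  he_service_rbrq() clears the bit
 * as buffers come back on the rbrq; the routine above then advances
 * the rbpl tail over any reclaimed entries, stopping short of the
 * head (table 3.42), and writes the new tail back to the adapter.
 */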
2090 
2091 #ifdef USE_RBPS
2092 static void
2093 he_service_rbps(struct he_dev *he_dev, int group)
2094 {
2095 	struct he_rbp *newtail;
2096 	struct he_rbp *rbps_head;
2097 	int moved = 0;
2098 
2099 	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2100 					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2101 
2102 	for (;;) {
2103 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2104 						RBPS_MASK(he_dev->rbps_tail+1));
2105 
2106 		/* table 3.42 -- rbps_tail should never be set to rbps_head */
2107 		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2108 			break;
2109 
2110 		newtail->status |= RBP_LOANED;
2111 		he_dev->rbps_tail = newtail;
2112 		++moved;
2113 	}
2114 
2115 	if (moved)
2116 		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2117 }
2118 #endif /* USE_RBPS */
2119 
2120 static void
2121 he_tasklet(unsigned long data)
2122 {
2123 	unsigned long flags;
2124 	struct he_dev *he_dev = (struct he_dev *) data;
2125 	int group, type;
2126 	int updated = 0;
2127 
2128 	HPRINTK("tasklet (0x%lx)\n", data);
2129 #ifdef USE_TASKLET
2130 	spin_lock_irqsave(&he_dev->global_lock, flags);
2131 #endif
2132 
2133 	while (he_dev->irq_head != he_dev->irq_tail) {
2134 		++updated;
2135 
2136 		type = ITYPE_TYPE(he_dev->irq_head->isw);
2137 		group = ITYPE_GROUP(he_dev->irq_head->isw);
2138 
2139 		switch (type) {
2140 			case ITYPE_RBRQ_THRESH:
2141 				HPRINTK("rbrq%d threshold\n", group);
2142 				/* fall through */
2143 			case ITYPE_RBRQ_TIMER:
2144 				if (he_service_rbrq(he_dev, group)) {
2145 					he_service_rbpl(he_dev, group);
2146 #ifdef USE_RBPS
2147 					he_service_rbps(he_dev, group);
2148 #endif /* USE_RBPS */
2149 				}
2150 				break;
2151 			case ITYPE_TBRQ_THRESH:
2152 				HPRINTK("tbrq%d threshold\n", group);
2153 				/* fall through */
2154 			case ITYPE_TPD_COMPLETE:
2155 				he_service_tbrq(he_dev, group);
2156 				break;
2157 			case ITYPE_RBPL_THRESH:
2158 				he_service_rbpl(he_dev, group);
2159 				break;
2160 			case ITYPE_RBPS_THRESH:
2161 #ifdef USE_RBPS
2162 				he_service_rbps(he_dev, group);
2163 #endif /* USE_RBPS */
2164 				break;
2165 			case ITYPE_PHY:
2166 				HPRINTK("phy interrupt\n");
2167 #ifdef CONFIG_ATM_HE_USE_SUNI
2168 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2169 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2170 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2171 				spin_lock_irqsave(&he_dev->global_lock, flags);
2172 #endif
2173 				break;
2174 			case ITYPE_OTHER:
2175 				switch (type|group) {
2176 					case ITYPE_PARITY:
2177 						hprintk("parity error\n");
2178 						break;
2179 					case ITYPE_ABORT:
2180 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2181 						break;
2182 				}
2183 				break;
2184 			case ITYPE_TYPE(ITYPE_INVALID):
2185 				/* see 8.1.1 -- check all queues */
2186 
2187 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2188 
2189 				he_service_rbrq(he_dev, 0);
2190 				he_service_rbpl(he_dev, 0);
2191 #ifdef USE_RBPS
2192 				he_service_rbps(he_dev, 0);
2193 #endif /* USE_RBPS */
2194 				he_service_tbrq(he_dev, 0);
2195 				break;
2196 			default:
2197 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2198 		}
2199 
2200 		he_dev->irq_head->isw = ITYPE_INVALID;
2201 
2202 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2203 	}
2204 
2205 	if (updated) {
2206 		if (updated > he_dev->irq_peak)
2207 			he_dev->irq_peak = updated;
2208 
2209 		he_writel(he_dev,
2210 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2211 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2212 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2213 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2214 	}
2215 #ifdef USE_TASKLET
2216 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2217 #endif
2218 }
2219 
2220 static irqreturn_t
2221 he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2222 {
2223 	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) dev_id;
2225 	int handled = 0;
2226 
2227 	if (he_dev == NULL)
2228 		return IRQ_NONE;
2229 
2230 	spin_lock_irqsave(&he_dev->global_lock, flags);
2231 
2232 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2233 						(*he_dev->irq_tailoffset << 2));
2234 
2235 	if (he_dev->irq_tail == he_dev->irq_head) {
2236 		HPRINTK("tailoffset not updated?\n");
2237 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2238 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2239 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2240 	}
2241 
2242 #ifdef DEBUG
2243 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2244 		hprintk("spurious (or shared) interrupt?\n");
2245 #endif
2246 
2247 	if (he_dev->irq_head != he_dev->irq_tail) {
2248 		handled = 1;
2249 #ifdef USE_TASKLET
2250 		tasklet_schedule(&he_dev->tasklet);
2251 #else
2252 		he_tasklet((unsigned long) he_dev);
2253 #endif
2254 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2255 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2256 	}
2257 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2258 	return IRQ_RETVAL(handled);
2259 
2260 }
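
/*
 * interrupt flow: the adapter appends entries to the in-memory irq
 * queue and dma's the new tail offset to *irq_tailoffset, so the
 * handler normally finds its work without reading a device register;
 * it just schedules the tasklet (or runs it inline when USE_TASKLET
 * is off) and acks the interrupt fifo
 */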
2261 
2262 static __inline__ void
2263 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2264 {
2265 	struct he_tpdrq *new_tail;
2266 
2267 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2268 					tpd, cid, he_dev->tpdrq_tail);
2269 
2270 	/* new_tail = he_dev->tpdrq_tail; */
2271 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2272 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2273 
2274 	/*
2275 	 * check to see if we are about to set the tail == head
2276 	 * if true, update the head pointer from the adapter
2277 	 * to see if this is really the case (reading the queue
2278 	 * head for every enqueue would be unnecessarily slow)
2279 	 */
2280 
2281 	if (new_tail == he_dev->tpdrq_head) {
2282 		he_dev->tpdrq_head = (struct he_tpdrq *)
2283 			(((unsigned long)he_dev->tpdrq_base) |
2284 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2285 
2286 		if (new_tail == he_dev->tpdrq_head) {
2287 			int slot;
2288 
2289 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2290 			/*
2291 			 * FIXME
2292 			 * push tpd onto a transmit backlog queue
2293 			 * after service_tbrq, service the backlog
2294 			 * for now, we just drop the pdu
2295 			 */
2296 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2297 				if (tpd->iovec[slot].addr)
2298 					pci_unmap_single(he_dev->pci_dev,
2299 						tpd->iovec[slot].addr,
2300 						tpd->iovec[slot].len & TPD_LEN_MASK,
2301 								PCI_DMA_TODEVICE);
2302 			}
2303 			if (tpd->skb) {
2304 				if (tpd->vcc->pop)
2305 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2306 				else
2307 					dev_kfree_skb_any(tpd->skb);
2308 				atomic_inc(&tpd->vcc->stats->tx_err);
2309 			}
2310 #ifdef USE_TPD_POOL
2311 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2312 #else
2313 			tpd->inuse = 0;
2314 #endif
2315 			return;
2316 		}
2317 	}
2318 
2319 	/* 2.1.5 transmit packet descriptor ready queue */
2320 #ifdef USE_TPD_POOL
2321 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2322 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2323 #else
2324 	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2325 				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2326 #endif
2327 	he_dev->tpdrq_tail->cid = cid;
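	/*
	 * order the tpd and tpdrq entry stores before the tail update --
	 * the adapter may fetch the new entry as soon as the TPDRQ_T
	 * write below lands
	 */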
2328 	wmb();
2329 
2330 	he_dev->tpdrq_tail = new_tail;
2331 
2332 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2333 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2334 }
2335 
2336 static int
2337 he_open(struct atm_vcc *vcc)
2338 {
2339 	unsigned long flags;
2340 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2341 	struct he_vcc *he_vcc;
2342 	int err = 0;
2343 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2344 	short vpi = vcc->vpi;
2345 	int vci = vcc->vci;
2346 
2347 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2348 		return 0;
2349 
2350 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2351 
2352 	set_bit(ATM_VF_ADDR, &vcc->flags);
2353 
2354 	cid = he_mkcid(he_dev, vpi, vci);
2355 
	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		clear_bit(ATM_VF_ADDR, &vcc->flags);	/* match the open_failed path */
		return -ENOMEM;
	}
2361 
2362 	he_vcc->iov_tail = he_vcc->iov_head;
2363 	he_vcc->pdu_len = 0;
2364 	he_vcc->rc_index = -1;
2365 
2366 	init_waitqueue_head(&he_vcc->rx_waitq);
2367 	init_waitqueue_head(&he_vcc->tx_waitq);
2368 
2369 	vcc->dev_data = he_vcc;
2370 
2371 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2372 		int pcr_goal;
2373 
2374 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2375 		if (pcr_goal == 0)
2376 			pcr_goal = he_dev->atm_dev->link_rate;
2377 		if (pcr_goal < 0)	/* means round down, technically */
2378 			pcr_goal = -pcr_goal;
2379 
2380 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2381 
2382 		switch (vcc->qos.aal) {
2383 			case ATM_AAL5:
2384 				tsr0_aal = TSR0_AAL5;
2385 				tsr4 = TSR4_AAL5;
2386 				break;
2387 			case ATM_AAL0:
2388 				tsr0_aal = TSR0_AAL0_SDU;
2389 				tsr4 = TSR4_AAL0_SDU;
2390 				break;
2391 			default:
2392 				err = -EINVAL;
2393 				goto open_failed;
2394 		}
2395 
2396 		spin_lock_irqsave(&he_dev->global_lock, flags);
2397 		tsr0 = he_readl_tsr0(he_dev, cid);
2398 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2399 
2400 		if (TSR0_CONN_STATE(tsr0) != 0) {
2401 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2402 			err = -EBUSY;
2403 			goto open_failed;
2404 		}
2405 
2406 		switch (vcc->qos.txtp.traffic_class) {
2407 			case ATM_UBR:
2408 				/* 2.3.3.1 open connection ubr */
2409 
2410 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2411 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2412 				break;
2413 
2414 			case ATM_CBR:
2415 				/* 2.3.3.2 open connection cbr */
2416 
2417 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2418 				if ((he_dev->total_bw + pcr_goal)
2419 					> (he_dev->atm_dev->link_rate * 9 / 10))
2420 				{
2421 					err = -EBUSY;
2422 					goto open_failed;
2423 				}
2424 
2425 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2426 
2427 				/* find an unused cs_stper register */
2428 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2429 					if (he_dev->cs_stper[reg].inuse == 0 ||
2430 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2431 							break;
2432 
2433 				if (reg == HE_NUM_CS_STPER) {
2434 					err = -EBUSY;
2435 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2436 					goto open_failed;
2437 				}
2438 
2439 				he_dev->total_bw += pcr_goal;
2440 
2441 				he_vcc->rc_index = reg;
2442 				++he_dev->cs_stper[reg].inuse;
2443 				he_dev->cs_stper[reg].pcr = pcr_goal;
2444 
2445 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2446 				period = clock / pcr_goal;
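				/*
				 * period is in internal clock ticks per
				 * cell, e.g. a 155Mbps card (50Mhz clock)
				 * asked for 100000 cells/sec gets
				 * 50000000 / 100000 = 500
				 */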
2447 
2448 				HPRINTK("rc_index = %d period = %d\n",
2449 								reg, period);
2450 
2451 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2452 							CS_STPER0 + reg);
2453 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2454 
2455 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2456 							TSR0_RC_INDEX(reg);
2457 
2458 				break;
2459 			default:
2460 				err = -EINVAL;
2461 				goto open_failed;
2462 		}
2463 
2464 		spin_lock_irqsave(&he_dev->global_lock, flags);
2465 
2466 		he_writel_tsr0(he_dev, tsr0, cid);
2467 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2468 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2469 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2470 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2471 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2472 
2473 		he_writel_tsr3(he_dev, 0x0, cid);
2474 		he_writel_tsr5(he_dev, 0x0, cid);
2475 		he_writel_tsr6(he_dev, 0x0, cid);
2476 		he_writel_tsr7(he_dev, 0x0, cid);
2477 		he_writel_tsr8(he_dev, 0x0, cid);
2478 		he_writel_tsr10(he_dev, 0x0, cid);
2479 		he_writel_tsr11(he_dev, 0x0, cid);
2480 		he_writel_tsr12(he_dev, 0x0, cid);
2481 		he_writel_tsr13(he_dev, 0x0, cid);
2482 		he_writel_tsr14(he_dev, 0x0, cid);
2483 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2484 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2485 	}
2486 
2487 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2488 		unsigned aal;
2489 
2490 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
					&HE_VCC(vcc)->rx_waitq);
2492 
2493 		switch (vcc->qos.aal) {
2494 			case ATM_AAL5:
2495 				aal = RSR0_AAL5;
2496 				break;
2497 			case ATM_AAL0:
2498 				aal = RSR0_RAWCELL;
2499 				break;
2500 			default:
2501 				err = -EINVAL;
2502 				goto open_failed;
2503 		}
2504 
2505 		spin_lock_irqsave(&he_dev->global_lock, flags);
2506 
2507 		rsr0 = he_readl_rsr0(he_dev, cid);
2508 		if (rsr0 & RSR0_OPEN_CONN) {
2509 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2510 
2511 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2512 			err = -EBUSY;
2513 			goto open_failed;
2514 		}
2515 
2516 #ifdef USE_RBPS
2517 		rsr1 = RSR1_GROUP(0);
2518 		rsr4 = RSR4_GROUP(0);
2519 #else /* !USE_RBPS */
2520 		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2521 		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2522 #endif /* USE_RBPS */
2523 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2524 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2525 
2526 #ifdef USE_CHECKSUM_HW
2527 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2528 			rsr0 |= RSR0_TCP_CKSUM;
2529 #endif
2530 
2531 		he_writel_rsr4(he_dev, rsr4, cid);
2532 		he_writel_rsr1(he_dev, rsr1, cid);
2533 		/* 5.1.11 last parameter initialized should be
2534 			  the open/closed indication in rsr0 */
2535 		he_writel_rsr0(he_dev,
2536 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2537 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2538 
2539 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2540 	}
2541 
2542 open_failed:
2543 
2544 	if (err) {
2545 		kfree(he_vcc);
2546 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2547 	}
2548 	else
2549 		set_bit(ATM_VF_READY, &vcc->flags);
2550 
2551 	return err;
2552 }
2553 
2554 static void
2555 he_close(struct atm_vcc *vcc)
2556 {
2557 	unsigned long flags;
2558 	DECLARE_WAITQUEUE(wait, current);
2559 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2560 	struct he_tpd *tpd;
2561 	unsigned cid;
2562 	struct he_vcc *he_vcc = HE_VCC(vcc);
2563 #define MAX_RETRY 30
2564 	int retry = 0, sleep = 1, tx_inuse;
2565 
2566 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2567 
2568 	clear_bit(ATM_VF_READY, &vcc->flags);
2569 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2570 
2571 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2572 		int timeout;
2573 
2574 		HPRINTK("close rx cid 0x%x\n", cid);
2575 
2576 		/* 2.7.2.2 close receive operation */
2577 
2578 		/* wait for previous close (if any) to finish */
2579 
2580 		spin_lock_irqsave(&he_dev->global_lock, flags);
2581 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2582 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2583 			udelay(250);
2584 		}
2585 
2586 		set_current_state(TASK_UNINTERRUPTIBLE);
2587 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2588 
2589 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2590 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2591 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2592 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2593 
2594 		timeout = schedule_timeout(30*HZ);
2595 
2596 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2597 		set_current_state(TASK_RUNNING);
2598 
2599 		if (timeout == 0)
2600 			hprintk("close rx timeout cid 0x%x\n", cid);
2601 
2602 		HPRINTK("close rx cid 0x%x complete\n", cid);
2603 
2604 	}
2605 
2606 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2607 		volatile unsigned tsr4, tsr0;
2608 		int timeout;
2609 
2610 		HPRINTK("close tx cid 0x%x\n", cid);
2611 
2612 		/* 2.1.2
2613 		 *
2614 		 * ... the host must first stop queueing packets to the TPDRQ
2615 		 * on the connection to be closed, then wait for all outstanding
2616 		 * packets to be transmitted and their buffers returned to the
2617 		 * TBRQ. When the last packet on the connection arrives in the
2618 		 * TBRQ, the host issues the close command to the adapter.
2619 		 */
2620 
2621 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2622 		       (retry < MAX_RETRY)) {
2623 			msleep(sleep);
2624 			if (sleep < 250)
2625 				sleep = sleep * 2;
2626 
2627 			++retry;
2628 		}
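		/*
		 * the loop above polls sk_wmem_alloc with exponential
		 * backoff (1, 2, 4, ... ms, topping out at 256ms) for up
		 * to MAX_RETRY iterations, giving in-flight tx buffers a
		 * chance to drain before the connection is flushed
		 */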
2629 
2630 		if (tx_inuse)
2631 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2632 
2633 		/* 2.3.1.1 generic close operations with flush */
2634 
2635 		spin_lock_irqsave(&he_dev->global_lock, flags);
2636 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2637 					/* also clears TSR4_SESSION_ENDED */
2638 
2639 		switch (vcc->qos.txtp.traffic_class) {
2640 			case ATM_UBR:
2641 				he_writel_tsr1(he_dev,
2642 					TSR1_MCR(rate_to_atmf(200000))
2643 					| TSR1_PCR(0), cid);
2644 				break;
2645 			case ATM_CBR:
2646 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2647 				break;
2648 		}
2649 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2650 
2651 		tpd = __alloc_tpd(he_dev);
2652 		if (tpd == NULL) {
2653 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2654 			goto close_tx_incomplete;
2655 		}
2656 		tpd->status |= TPD_EOS | TPD_INT;
2657 		tpd->skb = NULL;
2658 		tpd->vcc = vcc;
2659 		wmb();
2660 
2661 		set_current_state(TASK_UNINTERRUPTIBLE);
2662 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2663 		__enqueue_tpd(he_dev, tpd, cid);
2664 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2665 
2666 		timeout = schedule_timeout(30*HZ);
2667 
2668 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2669 		set_current_state(TASK_RUNNING);
2670 
2671 		spin_lock_irqsave(&he_dev->global_lock, flags);
2672 
2673 		if (timeout == 0) {
2674 			hprintk("close tx timeout cid 0x%x\n", cid);
2675 			goto close_tx_incomplete;
2676 		}
2677 
2678 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2679 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2680 			udelay(250);
2681 		}
2682 
2683 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2684 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2685 			udelay(250);
2686 		}
2687 
2688 close_tx_incomplete:
2689 
2690 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2691 			int reg = he_vcc->rc_index;
2692 
2693 			HPRINTK("cs_stper reg = %d\n", reg);
2694 
2695 			if (he_dev->cs_stper[reg].inuse == 0)
2696 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2697 			else
2698 				--he_dev->cs_stper[reg].inuse;
2699 
2700 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2701 		}
2702 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2703 
2704 		HPRINTK("close tx cid 0x%x complete\n", cid);
2705 	}
2706 
2707 	kfree(he_vcc);
2708 
2709 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2710 }
2711 
2712 static int
2713 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2714 {
2715 	unsigned long flags;
2716 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2717 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2718 	struct he_tpd *tpd;
2719 #ifdef USE_SCATTERGATHER
2720 	int i, slot = 0;
2721 #endif
2722 
2723 #define HE_TPD_BUFSIZE 0xffff
2724 
2725 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2726 
2727 	if ((skb->len > HE_TPD_BUFSIZE) ||
2728 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2729 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2730 		if (vcc->pop)
2731 			vcc->pop(vcc, skb);
2732 		else
2733 			dev_kfree_skb_any(skb);
2734 		atomic_inc(&vcc->stats->tx_err);
2735 		return -EINVAL;
2736 	}
2737 
2738 #ifndef USE_SCATTERGATHER
2739 	if (skb_shinfo(skb)->nr_frags) {
2740 		hprintk("no scatter/gather support\n");
2741 		if (vcc->pop)
2742 			vcc->pop(vcc, skb);
2743 		else
2744 			dev_kfree_skb_any(skb);
2745 		atomic_inc(&vcc->stats->tx_err);
2746 		return -EINVAL;
2747 	}
2748 #endif
2749 	spin_lock_irqsave(&he_dev->global_lock, flags);
2750 
2751 	tpd = __alloc_tpd(he_dev);
2752 	if (tpd == NULL) {
2753 		if (vcc->pop)
2754 			vcc->pop(vcc, skb);
2755 		else
2756 			dev_kfree_skb_any(skb);
2757 		atomic_inc(&vcc->stats->tx_err);
2758 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2759 		return -ENOMEM;
2760 	}
2761 
2762 	if (vcc->qos.aal == ATM_AAL5)
2763 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2764 	else {
2765 		char *pti_clp = (void *) (skb->data + 3);
2766 		int clp, pti;
2767 
2768 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2769 		clp = (*pti_clp & ATM_HDR_CLP);
2770 		tpd->status |= TPD_CELLTYPE(pti);
2771 		if (clp)
2772 			tpd->status |= TPD_CLP;
2773 
2774 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2775 	}
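
	/*
	 * for raw cells the skb holds an ATM_AAL0_SDU-sized cell: a
	 * 4-byte header (no HEC) followed by the 48-byte payload.  byte 3
	 * of the header carries PTI and CLP, copied into the tpd above;
	 * the header is then pulled off so only the payload is mapped
	 * for the adapter.
	 */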
2776 
2777 #ifdef USE_SCATTERGATHER
2778 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2779 				skb->len - skb->data_len, PCI_DMA_TODEVICE);
2780 	tpd->iovec[slot].len = skb->len - skb->data_len;
2781 	++slot;
2782 
2783 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2784 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2785 
2786 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2787 			tpd->vcc = vcc;
2788 			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
2790 			wmb();
2791 
2792 			__enqueue_tpd(he_dev, tpd, cid);
2793 			tpd = __alloc_tpd(he_dev);
2794 			if (tpd == NULL) {
2795 				if (vcc->pop)
2796 					vcc->pop(vcc, skb);
2797 				else
2798 					dev_kfree_skb_any(skb);
2799 				atomic_inc(&vcc->stats->tx_err);
2800 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2801 				return -ENOMEM;
2802 			}
			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2804 			slot = 0;
2805 		}
2806 
2807 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2808 			(void *) page_address(frag->page) + frag->page_offset,
2809 				frag->size, PCI_DMA_TODEVICE);
2810 		tpd->iovec[slot].len = frag->size;
2811 		++slot;
2812 
2813 	}
2814 
2815 	tpd->iovec[slot - 1].len |= TPD_LST;
2816 #else
2817 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2818 	tpd->length0 = skb->len | TPD_LST;
2819 #endif
2820 	tpd->status |= TPD_INT;
2821 
2822 	tpd->vcc = vcc;
2823 	tpd->skb = skb;
2824 	wmb();
2825 	ATM_SKB(skb)->vcc = vcc;
2826 
2827 	__enqueue_tpd(he_dev, tpd, cid);
2828 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2829 
2830 	atomic_inc(&vcc->stats->tx);
2831 
2832 	return 0;
2833 }
2834 
2835 static int
2836 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2837 {
2838 	unsigned long flags;
2839 	struct he_dev *he_dev = HE_DEV(atm_dev);
2840 	struct he_ioctl_reg reg;
2841 	int err = 0;
2842 
2843 	switch (cmd) {
2844 		case HE_GET_REG:
2845 			if (!capable(CAP_NET_ADMIN))
2846 				return -EPERM;
2847 
2848 			if (copy_from_user(&reg, arg,
2849 					   sizeof(struct he_ioctl_reg)))
2850 				return -EFAULT;
2851 
2852 			spin_lock_irqsave(&he_dev->global_lock, flags);
2853 			switch (reg.type) {
2854 				case HE_REGTYPE_PCI:
2855 					reg.val = he_readl(he_dev, reg.addr);
2856 					break;
2857 				case HE_REGTYPE_RCM:
2858 					reg.val =
2859 						he_readl_rcm(he_dev, reg.addr);
2860 					break;
2861 				case HE_REGTYPE_TCM:
2862 					reg.val =
2863 						he_readl_tcm(he_dev, reg.addr);
2864 					break;
2865 				case HE_REGTYPE_MBOX:
2866 					reg.val =
2867 						he_readl_mbox(he_dev, reg.addr);
2868 					break;
2869 				default:
2870 					err = -EINVAL;
2871 					break;
2872 			}
2873 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2874 			if (err == 0)
2875 				if (copy_to_user(arg, &reg,
2876 							sizeof(struct he_ioctl_reg)))
2877 					return -EFAULT;
2878 			break;
2879 		default:
2880 #ifdef CONFIG_ATM_HE_USE_SUNI
2881 			if (atm_dev->phy && atm_dev->phy->ioctl)
2882 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2883 #else /* CONFIG_ATM_HE_USE_SUNI */
2884 			err = -EINVAL;
2885 #endif /* CONFIG_ATM_HE_USE_SUNI */
2886 			break;
2887 	}
2888 
2889 	return err;
2890 }
2891 
2892 static void
2893 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2894 {
2895 	unsigned long flags;
2896 	struct he_dev *he_dev = HE_DEV(atm_dev);
2897 
2898 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2899 
2900 	spin_lock_irqsave(&he_dev->global_lock, flags);
2901 	he_writel(he_dev, val, FRAMER + (addr*4));
2902 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2903 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2904 }
2905 
2906 
2907 static unsigned char
2908 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2909 {
2910 	unsigned long flags;
2911 	struct he_dev *he_dev = HE_DEV(atm_dev);
2912 	unsigned reg;
2913 
2914 	spin_lock_irqsave(&he_dev->global_lock, flags);
2915 	reg = he_readl(he_dev, FRAMER + (addr*4));
2916 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2917 
2918 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2919 	return reg;
2920 }
2921 
2922 static int
2923 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2924 {
2925 	unsigned long flags;
2926 	struct he_dev *he_dev = HE_DEV(dev);
2927 	int left, i;
2928 #ifdef notdef
2929 	struct he_rbrq *rbrq_tail;
2930 	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail, inuse;
2932 #endif
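	/* note: static accumulators -- these totals are per-module,
	   not per-device */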
2933 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2934 
2935 
2936 	left = *pos;
2937 	if (!left--)
2938 		return sprintf(page, "%s\n", version);
2939 
2940 	if (!left--)
2941 		return sprintf(page, "%s%s\n\n",
2942 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2943 
2944 	if (!left--)
2945 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2946 
2947 	spin_lock_irqsave(&he_dev->global_lock, flags);
2948 	mcc += he_readl(he_dev, MCC);
2949 	oec += he_readl(he_dev, OEC);
2950 	dcc += he_readl(he_dev, DCC);
2951 	cec += he_readl(he_dev, CEC);
2952 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2953 
2954 	if (!left--)
2955 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2956 							mcc, oec, dcc, cec);
2957 
2958 	if (!left--)
2959 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2960 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2961 
2962 	if (!left--)
2963 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2964 						CONFIG_TPDRQ_SIZE);
2965 
2966 	if (!left--)
2967 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2968 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2969 
2970 	if (!left--)
2971 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2972 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2973 
2974 
2975 #ifdef notdef
2976 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2977 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2978 
2979 	inuse = rbpl_head - rbpl_tail;
2980 	if (inuse < 0)
2981 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2982 	inuse /= sizeof(struct he_rbp);
2983 
2984 	if (!left--)
2985 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2986 						CONFIG_RBPL_SIZE, inuse);
2987 #endif
2988 
2989 	if (!left--)
2990 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2991 
2992 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2993 		if (!left--)
2994 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2995 						he_dev->cs_stper[i].pcr,
2996 						he_dev->cs_stper[i].inuse);
2997 
2998 	if (!left--)
2999 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
3001 
3002 	return 0;
3003 }
3004 
3005 /* eeprom routines  -- see 4.7 */
3006 
3007 u8
3008 read_prom_byte(struct he_dev *he_dev, int addr)
3009 {
3010 	u32 val = 0, tmp_read = 0;
3011 	int i, j = 0;
3012 	u8 byte_read = 0;
3013 
3014 	val = readl(he_dev->membase + HOST_CNTL);
3015 	val &= 0xFFFFE0FF;
3016 
3017 	/* Turn on write enable */
3018 	val |= 0x800;
3019 	he_writel(he_dev, val, HOST_CNTL);
3020 
3021 	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3023 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
3024 		udelay(EEPROM_DELAY);
3025 	}
3026 
3027 	/* Next, we need to send the byte address to read from */
3028 	for (i = 7; i >= 0; i--) {
3029 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3030 		udelay(EEPROM_DELAY);
3031 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3032 		udelay(EEPROM_DELAY);
3033 	}
3034 
3035 	j = 0;
3036 
3037 	val &= 0xFFFFF7FF;      /* Turn off write enable */
3038 	he_writel(he_dev, val, HOST_CNTL);
3039 
3040 	/* Now, we can read data from the EEPROM by clocking it in */
3041 	for (i = 7; i >= 0; i--) {
3042 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3043 		udelay(EEPROM_DELAY);
3044 		tmp_read = he_readl(he_dev, HOST_CNTL);
3045 		byte_read |= (unsigned char)
3046 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3047 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3048 		udelay(EEPROM_DELAY);
3049 	}
3050 
3051 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
3052 	udelay(EEPROM_DELAY);
3053 
3054 	return byte_read;
3055 }
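
/*
 * read_prom_byte() bit-bangs a microwire-style serial eeprom through
 * HOST_CNTL: the read opcode is clocked out via readtab[], each
 * address bit is presented on the data-in line across a pair of
 * clocktab[] writes, and eight more clock pairs then shift the data
 * byte out through ID_DOUT, msb first.
 */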
3056 
3057 MODULE_LICENSE("GPL");
3058 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3059 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3060 module_param(disable64, bool, 0);
3061 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3062 module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3064 module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3066 module_param(rx_skb_reserve, short, 0);
3067 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3068 module_param(irq_coalesce, bool, 0);
3069 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3070 module_param(sdh, bool, 0);
3071 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3072 
3073 static struct pci_device_id he_pci_tbl[] = {
3074 	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3075 	  0, 0, 0 },
3076 	{ 0, }
3077 };
3078 
3079 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3080 
3081 static struct pci_driver he_driver = {
3082 	.name =		"he",
3083 	.probe =	he_init_one,
3084 	.remove =	__devexit_p(he_remove_one),
3085 	.id_table =	he_pci_tbl,
3086 };
3087 
3088 static int __init he_init(void)
3089 {
3090 	return pci_register_driver(&he_driver);
3091 }
3092 
3093 static void __exit he_cleanup(void)
3094 {
3095 	pci_unregister_driver(&he_driver);
3096 }
3097 
3098 module_init(he_init);
3099 module_exit(he_cleanup);
3100