1 /* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
2 
3 /*
4 
5   he.c
6 
7   ForeRunnerHE ATM Adapter driver for ATM on Linux
8   Copyright (C) 1999-2001  Naval Research Laboratory
9 
10   This library is free software; you can redistribute it and/or
11   modify it under the terms of the GNU Lesser General Public
12   License as published by the Free Software Foundation; either
13   version 2.1 of the License, or (at your option) any later version.
14 
15   This library is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   Lesser General Public License for more details.
19 
20   You should have received a copy of the GNU Lesser General Public
21   License along with this library; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23 
24 */
25 
26 /*
27 
28   he.c
29 
30   ForeRunnerHE ATM Adapter driver for ATM on Linux
31   Copyright (C) 1999-2001  Naval Research Laboratory
32 
33   Permission to use, copy, modify and distribute this software and its
34   documentation is hereby granted, provided that both the copyright
35   notice and this permission notice appear in all copies of the software,
36   derivative works or modified versions, and any portions thereof, and
37   that both notices appear in supporting documentation.
38 
39   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
40   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
41   RESULTING FROM THE USE OF THIS SOFTWARE.
42 
43   This driver was written using the "Programmer's Reference Manual for
44   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45 
46   AUTHORS:
47 	chas williams <chas@cmf.nrl.navy.mil>
48 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
49 
50   NOTES:
51 	4096 supported 'connections'
52 	group 0 is used for all traffic
53 	interrupt queue 0 is used for all interrupts
54 	aal0 support (based on work from ulrich.u.muller@nokia.com)
55 
56  */
57 
58 #include <linux/config.h>
59 #include <linux/module.h>
60 #include <linux/version.h>
61 #include <linux/kernel.h>
62 #include <linux/skbuff.h>
63 #include <linux/pci.h>
64 #include <linux/errno.h>
65 #include <linux/types.h>
66 #include <linux/string.h>
67 #include <linux/delay.h>
68 #include <linux/init.h>
69 #include <linux/mm.h>
70 #include <linux/sched.h>
71 #include <linux/timer.h>
72 #include <linux/interrupt.h>
73 #include <linux/dma-mapping.h>
74 #include <asm/io.h>
75 #include <asm/byteorder.h>
76 #include <asm/uaccess.h>
77 
78 #include <linux/atmdev.h>
79 #include <linux/atm.h>
80 #include <linux/sonet.h>
81 
82 #define USE_TASKLET
83 #undef USE_SCATTERGATHER
84 #undef USE_CHECKSUM_HW			/* still confused about this */
85 #define USE_RBPS
86 #undef USE_RBPS_POOL			/* if memory is tight try this */
87 #undef USE_RBPL_POOL			/* if memory is tight try this */
88 #define USE_TPD_POOL
89 /* #undef CONFIG_ATM_HE_USE_SUNI */
90 /* #undef HE_DEBUG */
91 
92 #include "he.h"
93 #include "suni.h"
94 #include <linux/atm_he.h>
95 
96 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
97 
98 #ifdef HE_DEBUG
99 #define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
100 #else /* !HE_DEBUG */
101 #define HPRINTK(fmt,args...)	do { } while (0)
102 #endif /* HE_DEBUG */
103 
104 /* version definition */
105 
106 static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
107 
108 /* declarations */
109 
110 static int he_open(struct atm_vcc *vcc);
111 static void he_close(struct atm_vcc *vcc);
112 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
113 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
114 static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
115 static void he_tasklet(unsigned long data);
116 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
117 static int he_start(struct atm_dev *dev);
118 static void he_stop(struct he_dev *dev);
119 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
120 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
121 
122 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
123 
124 /* globals */
125 
126 static struct he_dev *he_devs;
127 static int disable64;
128 static short nvpibits = -1;
129 static short nvcibits = -1;
130 static short rx_skb_reserve = 16;
131 static int irq_coalesce = 1;
132 static int sdh = 0;
133 
134 /* Read from EEPROM = 0000 0011b */
135 static unsigned int readtab[] = {
136 	CS_HIGH | CLK_HIGH,
137 	CS_LOW | CLK_LOW,
138 	CLK_HIGH,               /* 0 */
139 	CLK_LOW,
140 	CLK_HIGH,               /* 0 */
141 	CLK_LOW,
142 	CLK_HIGH,               /* 0 */
143 	CLK_LOW,
144 	CLK_HIGH,               /* 0 */
145 	CLK_LOW,
146 	CLK_HIGH,               /* 0 */
147 	CLK_LOW,
148 	CLK_HIGH,               /* 0 */
149 	CLK_LOW | SI_HIGH,
150 	CLK_HIGH | SI_HIGH,     /* 1 */
151 	CLK_LOW | SI_HIGH,
152 	CLK_HIGH | SI_HIGH      /* 1 */
153 };
154 
155 /* Clock to read from/write to the EEPROM */
156 static unsigned int clocktab[] = {
157 	CLK_LOW,
158 	CLK_HIGH,
159 	CLK_LOW,
160 	CLK_HIGH,
161 	CLK_LOW,
162 	CLK_HIGH,
163 	CLK_LOW,
164 	CLK_HIGH,
165 	CLK_LOW,
166 	CLK_HIGH,
167 	CLK_LOW,
168 	CLK_HIGH,
169 	CLK_LOW,
170 	CLK_HIGH,
171 	CLK_LOW,
172 	CLK_HIGH,
173 	CLK_LOW
174 };
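/* A minimal sketch of how these tables get replayed against the HOST_CNTL
 * register to bit-bang the serial EEPROM.  eeprom_play() is an illustrative
 * name only -- the real work is done by read_prom_byte(), defined later in
 * the driver, which also handles the chip-select and write-enable
 * housekeeping; EEPROM_DELAY is assumed to be the settle time from he.h.
 */
#ifdef notdef
static void
eeprom_play(struct he_dev *he_dev, unsigned int *tab, int len)
{
	int i;
	u32 host_cntl = he_readl(he_dev, HOST_CNTL) & ~(CLK_HIGH | SI_HIGH);

	for (i = 0; i < len; ++i) {
		he_writel(he_dev, host_cntl | tab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
}
#endif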
175 
176 static struct atmdev_ops he_ops =
177 {
178 	.open =		he_open,
179 	.close =	he_close,
180 	.ioctl =	he_ioctl,
181 	.send =		he_send,
182 	.phy_put =	he_phy_put,
183 	.phy_get =	he_phy_get,
184 	.proc_read =	he_proc_read,
185 	.owner =	THIS_MODULE
186 };
187 
188 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
189 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
190 
191 /* section 2.12 connection memory access */
192 
193 static __inline__ void
194 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
195 								unsigned flags)
196 {
197 	he_writel(he_dev, val, CON_DAT);
198 	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
199 	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* spin until the write completes */
201 }
202 
203 #define he_writel_rcm(dev, val, reg) 				\
204 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
205 
206 #define he_writel_tcm(dev, val, reg) 				\
207 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
208 
209 #define he_writel_mbox(dev, val, reg) 				\
210 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
211 
212 static unsigned
213 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
214 {
215 	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* spin until the read completes */
217 	return he_readl(he_dev, CON_DAT);
218 }
219 
220 #define he_readl_rcm(dev, reg) \
221 			he_readl_internal(dev, reg, CON_CTL_RCM)
222 
223 #define he_readl_tcm(dev, reg) \
224 			he_readl_internal(dev, reg, CON_CTL_TCM)
225 
226 #define he_readl_mbox(dev, reg) \
227 			he_readl_internal(dev, reg, CON_CTL_MBOX)
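/* The accessors above spin forever if the adapter never clears
 * CON_CTL_BUSY.  A bounded variant -- purely a sketch, with a made-up
 * name and an arbitrary poll budget -- might look like this:
 */
#ifdef notdef
static int
he_readl_internal_timeout(struct he_dev *he_dev, unsigned addr,
					unsigned flags, unsigned *val)
{
	int budget = 1000;

	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY) {
		if (--budget == 0)
			return -ETIMEDOUT;	/* adapter wedged */
		udelay(1);
	}
	*val = he_readl(he_dev, CON_DAT);
	return 0;
}
#endif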
228 
229 
230 /* figure 2.2 connection id */
231 
#define he_mkcid(dev, vpi, vci)		((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
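/* Worked example (assuming, say, 10 vcibits): vpi = 1, vci = 32 gives
 * cid = ((1 << 10) | 32) & 0x1fff = 0x420.  __find_vcc() below inverts
 * the mapping with vpi = cid >> vcibits, vci = cid & ((1 << vcibits) - 1).
 */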
233 
234 /* 2.5.1 per connection transmit state registers */
235 
236 #define he_writel_tsr0(dev, val, cid) \
237 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
238 #define he_readl_tsr0(dev, cid) \
239 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
240 
241 #define he_writel_tsr1(dev, val, cid) \
242 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
243 
244 #define he_writel_tsr2(dev, val, cid) \
245 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
246 
247 #define he_writel_tsr3(dev, val, cid) \
248 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
249 
250 #define he_writel_tsr4(dev, val, cid) \
251 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
252 
253 	/* from page 2-20
254 	 *
255 	 * NOTE While the transmit connection is active, bits 23 through 0
256 	 *      of this register must not be written by the host.  Byte
257 	 *      enables should be used during normal operation when writing
258 	 *      the most significant byte.
259 	 */
260 
261 #define he_writel_tsr4_upper(dev, val, cid) \
262 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
263 							CON_CTL_TCM \
264 							| CON_BYTE_DISABLE_2 \
265 							| CON_BYTE_DISABLE_1 \
266 							| CON_BYTE_DISABLE_0)
267 
268 #define he_readl_tsr4(dev, cid) \
269 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
270 
271 #define he_writel_tsr5(dev, val, cid) \
272 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
273 
274 #define he_writel_tsr6(dev, val, cid) \
275 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
276 
277 #define he_writel_tsr7(dev, val, cid) \
278 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
279 
280 
281 #define he_writel_tsr8(dev, val, cid) \
282 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
283 
284 #define he_writel_tsr9(dev, val, cid) \
285 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
286 
287 #define he_writel_tsr10(dev, val, cid) \
288 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
289 
290 #define he_writel_tsr11(dev, val, cid) \
291 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
292 
293 
294 #define he_writel_tsr12(dev, val, cid) \
295 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
296 
297 #define he_writel_tsr13(dev, val, cid) \
298 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
299 
300 
301 #define he_writel_tsr14(dev, val, cid) \
302 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
303 
304 #define he_writel_tsr14_upper(dev, val, cid) \
305 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
306 							CON_CTL_TCM \
307 							| CON_BYTE_DISABLE_2 \
308 							| CON_BYTE_DISABLE_1 \
309 							| CON_BYTE_DISABLE_0)
310 
311 /* 2.7.1 per connection receive state registers */
312 
313 #define he_writel_rsr0(dev, val, cid) \
314 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
315 #define he_readl_rsr0(dev, cid) \
316 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
317 
318 #define he_writel_rsr1(dev, val, cid) \
319 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
320 
321 #define he_writel_rsr2(dev, val, cid) \
322 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
323 
324 #define he_writel_rsr3(dev, val, cid) \
325 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
326 
327 #define he_writel_rsr4(dev, val, cid) \
328 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
329 
330 #define he_writel_rsr5(dev, val, cid) \
331 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
332 
333 #define he_writel_rsr6(dev, val, cid) \
334 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
335 
336 #define he_writel_rsr7(dev, val, cid) \
337 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
338 
339 static __inline__ struct atm_vcc*
340 __find_vcc(struct he_dev *he_dev, unsigned cid)
341 {
342 	struct hlist_head *head;
343 	struct atm_vcc *vcc;
344 	struct hlist_node *node;
345 	struct sock *s;
346 	short vpi;
347 	int vci;
348 
349 	vpi = cid >> he_dev->vcibits;
350 	vci = cid & ((1 << he_dev->vcibits) - 1);
351 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
352 
353 	sk_for_each(s, node, head) {
354 		vcc = atm_sk(s);
355 		if (vcc->dev == he_dev->atm_dev &&
356 		    vcc->vci == vci && vcc->vpi == vpi &&
357 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
359 		}
360 	}
361 	return NULL;
362 }
363 
364 static int __devinit
365 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
366 {
367 	struct atm_dev *atm_dev = NULL;
368 	struct he_dev *he_dev = NULL;
369 	int err = 0;
370 
371 	printk(KERN_INFO "he: %s\n", version);
372 
373 	if (pci_enable_device(pci_dev))
374 		return -EIO;
375 	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
376 		printk(KERN_WARNING "he: no suitable dma available\n");
377 		err = -EIO;
378 		goto init_one_failure;
379 	}
380 
381 	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
382 	if (!atm_dev) {
383 		err = -ENODEV;
384 		goto init_one_failure;
385 	}
386 	pci_set_drvdata(pci_dev, atm_dev);
387 
	he_dev = kmalloc(sizeof(struct he_dev), GFP_KERNEL);
390 	if (!he_dev) {
391 		err = -ENOMEM;
392 		goto init_one_failure;
393 	}
394 	memset(he_dev, 0, sizeof(struct he_dev));
395 
396 	he_dev->pci_dev = pci_dev;
397 	he_dev->atm_dev = atm_dev;
	atm_dev->dev_data = he_dev;
400 	he_dev->number = atm_dev->number;
401 	if (he_start(atm_dev)) {
402 		he_stop(he_dev);
403 		err = -ENODEV;
404 		goto init_one_failure;
405 	}
406 	he_dev->next = NULL;
407 	if (he_devs)
408 		he_dev->next = he_devs;
409 	he_devs = he_dev;
410 	return 0;
411 
412 init_one_failure:
413 	if (atm_dev)
414 		atm_dev_deregister(atm_dev);
415 	kfree(he_dev);
416 	pci_disable_device(pci_dev);
417 	return err;
418 }
419 
420 static void __devexit
421 he_remove_one (struct pci_dev *pci_dev)
422 {
423 	struct atm_dev *atm_dev;
424 	struct he_dev *he_dev;
425 
426 	atm_dev = pci_get_drvdata(pci_dev);
427 	he_dev = HE_DEV(atm_dev);
428 
	/* unlink from he_devs so the module-wide list doesn't keep a
	 * dangling pointer */
	if (he_devs == he_dev)
		he_devs = he_dev->next;
	else {
		struct he_dev *walk;

		for (walk = he_devs; walk; walk = walk->next)
			if (walk->next == he_dev) {
				walk->next = he_dev->next;
				break;
			}
	}

431 	he_stop(he_dev);
432 	atm_dev_deregister(atm_dev);
433 	kfree(he_dev);
434 
435 	pci_set_drvdata(pci_dev, NULL);
436 	pci_disable_device(pci_dev);
437 }
438 
439 
440 static unsigned
441 rate_to_atmf(unsigned rate)		/* cps to atm forum format */
442 {
443 #define NONZERO (1 << 14)
444 
445 	unsigned exp = 0;
446 
447 	if (rate == 0)
448 		return 0;
449 
450 	rate <<= 9;
451 	while (rate > 0x3ff) {
452 		++exp;
453 		rate >>= 1;
454 	}
455 
456 	return (NONZERO | (exp << 9) | (rate & 0x1ff));
457 }
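/* Worked example: rate = 353207 cps (OC-3) becomes 353207 << 9 =
 * 180841984; eighteen right shifts bring that below 0x400, leaving
 * exp = 18 and a low 9 bits of 177, so the encoding represents roughly
 * 2^18 * (1 + 177/512) = 352768 cps -- the truncation is inherent in
 * the format.
 */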
458 
459 static void __init
460 he_init_rx_lbfp0(struct he_dev *he_dev)
461 {
462 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
463 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
464 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
465 	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
466 
467 	lbufd_index = 0;
468 	lbm_offset = he_readl(he_dev, RCMLBM_BA);
469 
470 	he_writel(he_dev, lbufd_index, RLBF0_H);
471 
472 	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
473 		lbufd_index += 2;
474 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
475 
476 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
477 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
478 
479 		if (++lbuf_count == lbufs_per_row) {
480 			lbuf_count = 0;
481 			row_offset += he_dev->bytes_per_row;
482 		}
483 		lbm_offset += 4;
484 	}
485 
486 	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
487 	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
488 }
489 
490 static void __init
491 he_init_rx_lbfp1(struct he_dev *he_dev)
492 {
493 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
494 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
495 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
496 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
497 
498 	lbufd_index = 1;
499 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
500 
501 	he_writel(he_dev, lbufd_index, RLBF1_H);
502 
503 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
504 		lbufd_index += 2;
505 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
506 
507 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
508 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
509 
510 		if (++lbuf_count == lbufs_per_row) {
511 			lbuf_count = 0;
512 			row_offset += he_dev->bytes_per_row;
513 		}
514 		lbm_offset += 4;
515 	}
516 
517 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
518 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
519 }
520 
521 static void __init
522 he_init_tx_lbfp(struct he_dev *he_dev)
523 {
524 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
525 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
526 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
527 	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
528 
529 	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
530 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
531 
532 	he_writel(he_dev, lbufd_index, TLBF_H);
533 
534 	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
535 		lbufd_index += 1;
536 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
537 
538 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
539 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
540 
541 		if (++lbuf_count == lbufs_per_row) {
542 			lbuf_count = 0;
543 			row_offset += he_dev->bytes_per_row;
544 		}
545 		lbm_offset += 2;
546 	}
547 
548 	he_writel(he_dev, lbufd_index - 1, TLBF_T);
549 }
550 
551 static int __init
552 he_init_tpdrq(struct he_dev *he_dev)
553 {
554 	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
555 		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
556 	if (he_dev->tpdrq_base == NULL) {
557 		hprintk("failed to alloc tpdrq\n");
558 		return -ENOMEM;
559 	}
560 	memset(he_dev->tpdrq_base, 0,
561 				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
562 
563 	he_dev->tpdrq_tail = he_dev->tpdrq_base;
564 	he_dev->tpdrq_head = he_dev->tpdrq_base;
565 
566 	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
567 	he_writel(he_dev, 0, TPDRQ_T);
568 	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
569 
570 	return 0;
571 }
572 
573 static void __init
574 he_init_cs_block(struct he_dev *he_dev)
575 {
576 	unsigned clock, rate, delta;
577 	int reg;
578 
579 	/* 5.1.7 cs block initialization */
580 
581 	for (reg = 0; reg < 0x20; ++reg)
582 		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
583 
584 	/* rate grid timer reload values */
585 
586 	clock = he_is622(he_dev) ? 66667000 : 50000000;
587 	rate = he_dev->atm_dev->link_rate;
588 	delta = rate / 16 / 2;
589 
590 	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are the period (in clock cycles) of the timer
		 */
596 		unsigned period = clock / rate;
597 
598 		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
599 		rate -= delta;
600 	}
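	/* e.g. on a 155 card: clock = 50000000 and rate = 353207 cps give a
	 * first reload value of 50000000 / 353207 = 141 clock cycles, with
	 * each subsequent register programmed for a rate another
	 * link_rate/32 lower.
	 */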
601 
602 	if (he_is622(he_dev)) {
603 		/* table 5.2 (4 cells per lbuf) */
604 		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
605 		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
606 		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
607 		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
608 		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
609 
610 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
611 		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
612 		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
613 		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
614 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
615 		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
616 		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
617 
618 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
619 
620 		/* table 5.8 */
621 		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
622 		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
623 		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
624 		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
625 		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
626 		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
627 
628 		/* table 5.9 */
629 		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
630 		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
631 	} else {
632 		/* table 5.1 (4 cells per lbuf) */
633 		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
634 		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
635 		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
636 		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
637 		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
638 
639 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
640 		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
641 		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
642 		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
643 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
644 		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
645 		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
646 
647 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
648 
649 		/* table 5.8 */
650 		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
651 		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
652 		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
653 		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
654 		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
655 		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
656 
657 		/* table 5.9 */
658 		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
659 		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
660 	}
661 
662 	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
663 
664 	for (reg = 0; reg < 0x8; ++reg)
665 		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
666 
667 }
668 
669 static int __init
670 he_init_cs_block_rcm(struct he_dev *he_dev)
671 {
672 	unsigned (*rategrid)[16][16];
673 	unsigned rate, delta;
674 	int i, j, reg;
675 
676 	unsigned rate_atmf, exp, man;
677 	unsigned long long rate_cps;
678 	int mult, buf, buf_limit = 4;
679 
	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
681 	if (!rategrid)
682 		return -ENOMEM;
683 
684 	/* initialize rate grid group table */
685 
686 	for (reg = 0x0; reg < 0xff; ++reg)
687 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
688 
689 	/* initialize rate controller groups */
690 
691 	for (reg = 0x100; reg < 0x1ff; ++reg)
692 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
693 
694 	/* initialize tNrm lookup table */
695 
	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */
699 
700 	/* initialize rate to group table */
701 
702 	rate = he_dev->atm_dev->link_rate;
703 	delta = rate / 32;
704 
705 	/*
706 	 * 2.4 transmit internal functions
707 	 *
708 	 * we construct a copy of the rate grid used by the scheduler
709 	 * in order to construct the rate to group table below
710 	 */
711 
712 	for (j = 0; j < 16; j++) {
713 		(*rategrid)[0][j] = rate;
714 		rate -= delta;
715 	}
716 
717 	for (i = 1; i < 16; i++)
718 		for (j = 0; j < 16; j++)
719 			if (i > 14)
720 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
721 			else
722 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
723 
724 	/*
725 	 * 2.4 transmit internal function
726 	 *
727 	 * this table maps the upper 5 bits of exponent and mantissa
728 	 * of the atm forum representation of the rate into an index
729 	 * on rate grid
730 	 */
731 
732 	rate_atmf = 0;
733 	while (rate_atmf < 0x400) {
734 		man = (rate_atmf & 0x1f) << 4;
735 		exp = rate_atmf >> 5;
736 
		/*
		 * instead of '/ 512', use '>> 9' to prevent a call
		 * to the 64-bit divide helper (__udivdi3) on 32-bit
		 * x86 platforms
		 */
741 		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
742 
743 		if (rate_cps < 10)
744 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
745 
746 		for (i = 255; i > 0; i--)
747 			if ((*rategrid)[i/16][i%16] >= rate_cps)
748 				break;	 /* pick nearest rate instead? */
749 
		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits); two entries are packed into
		 * each 32-bit register, even-numbered codes in the upper
		 * half-word
		 */
755 
756 #ifdef notdef
757 		buf = rate_cps * he_dev->tx_numbuffs /
758 				(he_dev->atm_dev->link_rate * 2);
759 #else
		/* this isn't pretty, but it avoids __udivdi3 and is mostly correct */
761 		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
762 		if (rate_cps > (272 * mult))
763 			buf = 4;
764 		else if (rate_cps > (204 * mult))
765 			buf = 3;
766 		else if (rate_cps > (136 * mult))
767 			buf = 2;
768 		else if (rate_cps > (68 * mult))
769 			buf = 1;
770 		else
771 			buf = 0;
772 #endif
773 		if (buf > buf_limit)
774 			buf = buf_limit;
775 		reg = (reg << 16) | ((i << 8) | buf);
776 
777 #define RTGTBL_OFFSET 0x400
778 
779 		if (rate_atmf & 0x1)
780 			he_writel_rcm(he_dev, reg,
781 				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
782 
783 		++rate_atmf;
784 	}
785 
786 	kfree(rategrid);
787 	return 0;
788 }
789 
790 static int __init
791 he_init_group(struct he_dev *he_dev, int group)
792 {
793 	int i;
794 
795 #ifdef USE_RBPS
796 	/* small buffer pool */
797 #ifdef USE_RBPS_POOL
798 	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
799 			CONFIG_RBPS_BUFSIZE, 8, 0);
800 	if (he_dev->rbps_pool == NULL) {
801 		hprintk("unable to create rbps pages\n");
802 		return -ENOMEM;
803 	}
804 #else /* !USE_RBPS_POOL */
805 	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
806 		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
807 	if (he_dev->rbps_pages == NULL) {
808 		hprintk("unable to create rbps page pool\n");
809 		return -ENOMEM;
810 	}
811 #endif /* USE_RBPS_POOL */
812 
813 	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
814 		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
815 	if (he_dev->rbps_base == NULL) {
816 		hprintk("failed to alloc rbps\n");
817 		return -ENOMEM;
818 	}
819 	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}
821 
822 	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
823 		dma_addr_t dma_handle;
824 		void *cpuaddr;
825 
826 #ifdef USE_RBPS_POOL
827 		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
828 		if (cpuaddr == NULL)
829 			return -ENOMEM;
830 #else
831 		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
832 		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
833 #endif
834 
835 		he_dev->rbps_virt[i].virt = cpuaddr;
836 		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
837 		he_dev->rbps_base[i].phys = dma_handle;
838 
839 	}
840 	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
841 
842 	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
843 	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
844 						G0_RBPS_T + (group * 32));
845 	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
846 						G0_RBPS_BS + (group * 32));
847 	he_writel(he_dev,
848 			RBP_THRESH(CONFIG_RBPS_THRESH) |
849 			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
850 			RBP_INT_ENB,
851 						G0_RBPS_QI + (group * 32));
852 #else /* !USE_RBPS */
853 	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
854 	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
855 	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
856 	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
857 						G0_RBPS_BS + (group * 32));
858 #endif /* USE_RBPS */
859 
860 	/* large buffer pool */
861 #ifdef USE_RBPL_POOL
862 	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
863 			CONFIG_RBPL_BUFSIZE, 8, 0);
864 	if (he_dev->rbpl_pool == NULL) {
865 		hprintk("unable to create rbpl pool\n");
866 		return -ENOMEM;
867 	}
868 #else /* !USE_RBPL_POOL */
869 	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
870 		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
871 	if (he_dev->rbpl_pages == NULL) {
872 		hprintk("unable to create rbpl pages\n");
873 		return -ENOMEM;
874 	}
875 #endif /* USE_RBPL_POOL */
876 
877 	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
878 		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
879 	if (he_dev->rbpl_base == NULL) {
880 		hprintk("failed to alloc rbpl\n");
881 		return -ENOMEM;
882 	}
883 	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}
885 
886 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
887 		dma_addr_t dma_handle;
888 		void *cpuaddr;
889 
890 #ifdef USE_RBPL_POOL
891 		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
892 		if (cpuaddr == NULL)
893 			return -ENOMEM;
894 #else
895 		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
896 		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
897 #endif
898 
899 		he_dev->rbpl_virt[i].virt = cpuaddr;
900 		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
901 		he_dev->rbpl_base[i].phys = dma_handle;
902 	}
903 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
904 
905 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
906 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
907 						G0_RBPL_T + (group * 32));
908 	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
909 						G0_RBPL_BS + (group * 32));
910 	he_writel(he_dev,
911 			RBP_THRESH(CONFIG_RBPL_THRESH) |
912 			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
913 			RBP_INT_ENB,
914 						G0_RBPL_QI + (group * 32));
915 
916 	/* rx buffer ready queue */
917 
918 	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
919 		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
920 	if (he_dev->rbrq_base == NULL) {
921 		hprintk("failed to allocate rbrq\n");
922 		return -ENOMEM;
923 	}
924 	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
925 
926 	he_dev->rbrq_head = he_dev->rbrq_base;
927 	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
928 	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
929 	he_writel(he_dev,
930 		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
931 						G0_RBRQ_Q + (group * 16));
932 	if (irq_coalesce) {
933 		hprintk("coalescing interrupts\n");
934 		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
935 						G0_RBRQ_I + (group * 16));
936 	} else
937 		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
938 						G0_RBRQ_I + (group * 16));
939 
940 	/* tx buffer ready queue */
941 
942 	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
943 		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
944 	if (he_dev->tbrq_base == NULL) {
945 		hprintk("failed to allocate tbrq\n");
946 		return -ENOMEM;
947 	}
948 	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
949 
950 	he_dev->tbrq_head = he_dev->tbrq_base;
951 
952 	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
953 	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
954 	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
955 	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
956 
957 	return 0;
958 }
959 
960 static int __init
961 he_init_irq(struct he_dev *he_dev)
962 {
963 	int i;
964 
965 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
966 		    end of the interrupt queue */
967 
968 	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
969 			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
970 	if (he_dev->irq_base == NULL) {
971 		hprintk("failed to allocate irq\n");
972 		return -ENOMEM;
973 	}
974 	he_dev->irq_tailoffset = (unsigned *)
975 					&he_dev->irq_base[CONFIG_IRQ_SIZE];
976 	*he_dev->irq_tailoffset = 0;
977 	he_dev->irq_head = he_dev->irq_base;
978 	he_dev->irq_tail = he_dev->irq_base;
979 
980 	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
981 		he_dev->irq_base[i].isw = ITYPE_INVALID;
982 
983 	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
984 	he_writel(he_dev,
985 		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
986 								IRQ0_HEAD);
987 	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
988 	he_writel(he_dev, 0x0, IRQ0_DATA);
989 
990 	he_writel(he_dev, 0x0, IRQ1_BASE);
991 	he_writel(he_dev, 0x0, IRQ1_HEAD);
992 	he_writel(he_dev, 0x0, IRQ1_CNTL);
993 	he_writel(he_dev, 0x0, IRQ1_DATA);
994 
995 	he_writel(he_dev, 0x0, IRQ2_BASE);
996 	he_writel(he_dev, 0x0, IRQ2_HEAD);
997 	he_writel(he_dev, 0x0, IRQ2_CNTL);
998 	he_writel(he_dev, 0x0, IRQ2_DATA);
999 
1000 	he_writel(he_dev, 0x0, IRQ3_BASE);
1001 	he_writel(he_dev, 0x0, IRQ3_HEAD);
1002 	he_writel(he_dev, 0x0, IRQ3_CNTL);
1003 	he_writel(he_dev, 0x0, IRQ3_DATA);
1004 
1005 	/* 2.9.3.2 interrupt queue mapping registers */
1006 
1007 	he_writel(he_dev, 0x0, GRP_10_MAP);
1008 	he_writel(he_dev, 0x0, GRP_32_MAP);
1009 	he_writel(he_dev, 0x0, GRP_54_MAP);
1010 	he_writel(he_dev, 0x0, GRP_76_MAP);
1011 
1012 	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
1013 		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
1014 		return -EINVAL;
1015 	}
1016 
1017 	he_dev->irq = he_dev->pci_dev->irq;
1018 
1019 	return 0;
1020 }
1021 
1022 static int __init
1023 he_start(struct atm_dev *dev)
1024 {
1025 	struct he_dev *he_dev;
1026 	struct pci_dev *pci_dev;
1027 	unsigned long membase;
1028 
1029 	u16 command;
1030 	u32 gen_cntl_0, host_cntl, lb_swap;
1031 	u8 cache_size, timer;
1032 
1033 	unsigned err;
1034 	unsigned int status, reg;
1035 	int i, group;
1036 
1037 	he_dev = HE_DEV(dev);
1038 	pci_dev = he_dev->pci_dev;
1039 
1040 	membase = pci_resource_start(pci_dev, 0);
1041 	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
1042 
1043 	/*
1044 	 * pci bus controller initialization
1045 	 */
1046 
1047 	/* 4.3 pci bus controller-specific initialization */
1048 	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1049 		hprintk("can't read GEN_CNTL_0\n");
1050 		return -EINVAL;
1051 	}
1052 	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1053 	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1054 		hprintk("can't write GEN_CNTL_0.\n");
1055 		return -EINVAL;
1056 	}
1057 
1058 	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1059 		hprintk("can't read PCI_COMMAND.\n");
1060 		return -EINVAL;
1061 	}
1062 
1063 	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1064 	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1065 		hprintk("can't enable memory.\n");
1066 		return -EINVAL;
1067 	}
1068 
1069 	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1070 		hprintk("can't read cache line size?\n");
1071 		return -EINVAL;
1072 	}
1073 
1074 	if (cache_size < 16) {
1075 		cache_size = 16;
1076 		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1077 			hprintk("can't set cache line size to %d\n", cache_size);
1078 	}
1079 
1080 	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1081 		hprintk("can't read latency timer?\n");
1082 		return -EINVAL;
1083 	}
1084 
	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: the average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 * hence LAT_TIMER = 1 + 16 + 192 = 209
	 */
1093 #define LAT_TIMER 209
1094 	if (timer < LAT_TIMER) {
1095 		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1096 		timer = LAT_TIMER;
1097 		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1098 			hprintk("can't set latency timer to %d\n", timer);
1099 	}
1100 
1101 	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1102 		hprintk("can't set up page mapping\n");
1103 		return -EINVAL;
1104 	}
1105 
1106 	/* 4.4 card reset */
1107 	he_writel(he_dev, 0x0, RESET_CNTL);
1108 	he_writel(he_dev, 0xff, RESET_CNTL);
1109 
	mdelay(16);		/* 16 ms */
1111 	status = he_readl(he_dev, RESET_CNTL);
1112 	if ((status & BOARD_RST_STATUS) == 0) {
1113 		hprintk("reset failed\n");
1114 		return -EINVAL;
1115 	}
1116 
1117 	/* 4.5 set bus width */
1118 	host_cntl = he_readl(he_dev, HOST_CNTL);
1119 	if (host_cntl & PCI_BUS_SIZE64)
1120 		gen_cntl_0 |= ENBL_64;
1121 	else
1122 		gen_cntl_0 &= ~ENBL_64;
1123 
1124 	if (disable64 == 1) {
1125 		hprintk("disabling 64-bit pci bus transfers\n");
1126 		gen_cntl_0 &= ~ENBL_64;
1127 	}
1128 
1129 	if (gen_cntl_0 & ENBL_64)
1130 		hprintk("64-bit transfers enabled\n");
1131 
1132 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1133 
1134 	/* 4.7 read prom contents */
1135 	for (i = 0; i < PROD_ID_LEN; ++i)
1136 		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1137 
1138 	he_dev->media = read_prom_byte(he_dev, MEDIA);
1139 
1140 	for (i = 0; i < 6; ++i)
1141 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1142 
	hprintk("%s%s, %02x:%02x:%02x:%02x:%02x:%02x\n",
		he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM",
		dev->esi[0], dev->esi[1], dev->esi[2],
		dev->esi[3], dev->esi[4], dev->esi[5]);
1152 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1153 						ATM_OC12_PCR : ATM_OC3_PCR;
1154 
	/* 4.6 set host endianness */
1156 	lb_swap = he_readl(he_dev, LB_SWAP);
1157 	if (he_is622(he_dev))
1158 		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1159 	else
1160 		lb_swap |= XFER_SIZE;		/* 8 cells */
1161 #ifdef __BIG_ENDIAN
1162 	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1163 #else
1164 	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1165 			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1166 #endif /* __BIG_ENDIAN */
1167 	he_writel(he_dev, lb_swap, LB_SWAP);
1168 
1169 	/* 4.8 sdram controller initialization */
1170 	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1171 
1172 	/* 4.9 initialize rnum value */
1173 	lb_swap |= SWAP_RNUM_MAX(0xf);
1174 	he_writel(he_dev, lb_swap, LB_SWAP);
1175 
1176 	/* 4.10 initialize the interrupt queues */
1177 	if ((err = he_init_irq(he_dev)) != 0)
1178 		return err;
1179 
1180 #ifdef USE_TASKLET
1181 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
1182 #endif
1183 	spin_lock_init(&he_dev->global_lock);
1184 
1185 	/* 4.11 enable pci bus controller state machines */
1186 	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1187 				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1188 	he_writel(he_dev, host_cntl, HOST_CNTL);
1189 
1190 	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1191 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1192 
1193 	/*
1194 	 * atm network controller initialization
1195 	 */
1196 
1197 	/* 5.1.1 generic configuration state */
1198 
1199 	/*
1200 	 *		local (cell) buffer memory map
1201 	 *
1202 	 *             HE155                          HE622
1203 	 *
1204 	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1205 	 *         |            |            |                   |   |
1206 	 *         |  utility   |            |        rx0        |   |
1207 	 *        5|____________|         255|___________________| u |
1208 	 *        6|            |         256|                   | t |
1209 	 *         |            |            |                   | i |
1210 	 *         |    rx0     |     row    |        tx         | l |
1211 	 *         |            |            |                   | i |
1212 	 *         |            |         767|___________________| t |
1213 	 *      517|____________|         768|                   | y |
1214 	 * row  518|            |            |        rx1        |   |
1215 	 *         |            |        1023|___________________|___|
1216 	 *         |            |
1217 	 *         |    tx      |
1218 	 *         |            |
1219 	 *         |            |
1220 	 *     1535|____________|
1221 	 *     1536|            |
1222 	 *         |    rx1     |
1223 	 *     2047|____________|
1224 	 *
1225 	 */
1226 
1227 	/* total 4096 connections */
1228 	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1229 	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1230 
1231 	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1232 		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1233 		return -ENODEV;
1234 	}
1235 
1236 	if (nvpibits != -1) {
1237 		he_dev->vpibits = nvpibits;
1238 		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1239 	}
1240 
1241 	if (nvcibits != -1) {
1242 		he_dev->vcibits = nvcibits;
1243 		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1244 	}
1245 
1246 
1247 	if (he_is622(he_dev)) {
1248 		he_dev->cells_per_row = 40;
1249 		he_dev->bytes_per_row = 2048;
1250 		he_dev->r0_numrows = 256;
1251 		he_dev->tx_numrows = 512;
1252 		he_dev->r1_numrows = 256;
1253 		he_dev->r0_startrow = 0;
1254 		he_dev->tx_startrow = 256;
1255 		he_dev->r1_startrow = 768;
1256 	} else {
1257 		he_dev->cells_per_row = 20;
1258 		he_dev->bytes_per_row = 1024;
1259 		he_dev->r0_numrows = 512;
1260 		he_dev->tx_numrows = 1018;
1261 		he_dev->r1_numrows = 512;
1262 		he_dev->r0_startrow = 6;
1263 		he_dev->tx_startrow = 518;
1264 		he_dev->r1_startrow = 1536;
1265 	}
1266 
1267 	he_dev->cells_per_lbuf = 4;
1268 	he_dev->buffer_limit = 4;
1269 	he_dev->r0_numbuffs = he_dev->r0_numrows *
1270 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1271 	if (he_dev->r0_numbuffs > 2560)
1272 		he_dev->r0_numbuffs = 2560;
1273 
1274 	he_dev->r1_numbuffs = he_dev->r1_numrows *
1275 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1276 	if (he_dev->r1_numbuffs > 2560)
1277 		he_dev->r1_numbuffs = 2560;
1278 
1279 	he_dev->tx_numbuffs = he_dev->tx_numrows *
1280 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1281 	if (he_dev->tx_numbuffs > 5120)
1282 		he_dev->tx_numbuffs = 5120;
1283 
1284 	/* 5.1.2 configure hardware dependent registers */
1285 
1286 	he_writel(he_dev,
1287 		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1288 		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1289 		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1290 		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1291 								LBARB);
1292 
1293 	he_writel(he_dev, BANK_ON |
1294 		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1295 								SDRAMCON);
1296 
1297 	he_writel(he_dev,
1298 		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1299 						RM_RW_WAIT(1), RCMCONFIG);
1300 	he_writel(he_dev,
1301 		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1302 						TM_RW_WAIT(1), TCMCONFIG);
1303 
1304 	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1305 
1306 	he_writel(he_dev,
1307 		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1308 		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1309 		RX_VALVP(he_dev->vpibits) |
1310 		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1311 
1312 	he_writel(he_dev, DRF_THRESH(0x20) |
1313 		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1314 		TX_VCI_MASK(he_dev->vcibits) |
1315 		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1316 
1317 	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1318 
1319 	he_writel(he_dev, PHY_INT_ENB |
1320 		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1321 								RH_CONFIG);
1322 
1323 	/* 5.1.3 initialize connection memory */
1324 
1325 	for (i = 0; i < TCM_MEM_SIZE; ++i)
1326 		he_writel_tcm(he_dev, 0, i);
1327 
1328 	for (i = 0; i < RCM_MEM_SIZE; ++i)
1329 		he_writel_rcm(he_dev, 0, i);
1330 
1331 	/*
1332 	 *	transmit connection memory map
1333 	 *
1334 	 *                  tx memory
1335 	 *          0x0 ___________________
1336 	 *             |                   |
1337 	 *             |                   |
1338 	 *             |       TSRa        |
1339 	 *             |                   |
1340 	 *             |                   |
1341 	 *       0x8000|___________________|
1342 	 *             |                   |
1343 	 *             |       TSRb        |
1344 	 *       0xc000|___________________|
1345 	 *             |                   |
1346 	 *             |       TSRc        |
1347 	 *       0xe000|___________________|
1348 	 *             |       TSRd        |
1349 	 *       0xf000|___________________|
1350 	 *             |       tmABR       |
1351 	 *      0x10000|___________________|
1352 	 *             |                   |
1353 	 *             |       tmTPD       |
1354 	 *             |___________________|
1355 	 *             |                   |
1356 	 *                      ....
1357 	 *      0x1ffff|___________________|
1358 	 *
1359 	 *
1360 	 */
1361 
1362 	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1363 	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1364 	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1365 	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1366 	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1367 
1368 
1369 	/*
1370 	 *	receive connection memory map
1371 	 *
1372 	 *          0x0 ___________________
1373 	 *             |                   |
1374 	 *             |                   |
1375 	 *             |       RSRa        |
1376 	 *             |                   |
1377 	 *             |                   |
1378 	 *       0x8000|___________________|
1379 	 *             |                   |
1380 	 *             |             rx0/1 |
1381 	 *             |       LBM         |   link lists of local
1382 	 *             |             tx    |   buffer memory
1383 	 *             |                   |
1384 	 *       0xd000|___________________|
1385 	 *             |                   |
1386 	 *             |      rmABR        |
1387 	 *       0xe000|___________________|
1388 	 *             |                   |
1389 	 *             |       RSRb        |
1390 	 *             |___________________|
1391 	 *             |                   |
1392 	 *                      ....
1393 	 *       0xffff|___________________|
1394 	 */
1395 
1396 	he_writel(he_dev, 0x08000, RCMLBM_BA);
1397 	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1398 	he_writel(he_dev, 0x0d800, RCMABR_BA);
1399 
1400 	/* 5.1.4 initialize local buffer free pools linked lists */
1401 
1402 	he_init_rx_lbfp0(he_dev);
1403 	he_init_rx_lbfp1(he_dev);
1404 
1405 	he_writel(he_dev, 0x0, RLBC_H);
1406 	he_writel(he_dev, 0x0, RLBC_T);
1407 	he_writel(he_dev, 0x0, RLBC_H2);
1408 
1409 	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1410 	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1411 
1412 	he_init_tx_lbfp(he_dev);
1413 
1414 	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1415 
1416 	/* 5.1.5 initialize intermediate receive queues */
1417 
1418 	if (he_is622(he_dev)) {
1419 		he_writel(he_dev, 0x000f, G0_INMQ_S);
1420 		he_writel(he_dev, 0x200f, G0_INMQ_L);
1421 
1422 		he_writel(he_dev, 0x001f, G1_INMQ_S);
1423 		he_writel(he_dev, 0x201f, G1_INMQ_L);
1424 
1425 		he_writel(he_dev, 0x002f, G2_INMQ_S);
1426 		he_writel(he_dev, 0x202f, G2_INMQ_L);
1427 
1428 		he_writel(he_dev, 0x003f, G3_INMQ_S);
1429 		he_writel(he_dev, 0x203f, G3_INMQ_L);
1430 
1431 		he_writel(he_dev, 0x004f, G4_INMQ_S);
1432 		he_writel(he_dev, 0x204f, G4_INMQ_L);
1433 
1434 		he_writel(he_dev, 0x005f, G5_INMQ_S);
1435 		he_writel(he_dev, 0x205f, G5_INMQ_L);
1436 
1437 		he_writel(he_dev, 0x006f, G6_INMQ_S);
1438 		he_writel(he_dev, 0x206f, G6_INMQ_L);
1439 
1440 		he_writel(he_dev, 0x007f, G7_INMQ_S);
1441 		he_writel(he_dev, 0x207f, G7_INMQ_L);
1442 	} else {
1443 		he_writel(he_dev, 0x0000, G0_INMQ_S);
1444 		he_writel(he_dev, 0x0008, G0_INMQ_L);
1445 
1446 		he_writel(he_dev, 0x0001, G1_INMQ_S);
1447 		he_writel(he_dev, 0x0009, G1_INMQ_L);
1448 
1449 		he_writel(he_dev, 0x0002, G2_INMQ_S);
1450 		he_writel(he_dev, 0x000a, G2_INMQ_L);
1451 
1452 		he_writel(he_dev, 0x0003, G3_INMQ_S);
1453 		he_writel(he_dev, 0x000b, G3_INMQ_L);
1454 
1455 		he_writel(he_dev, 0x0004, G4_INMQ_S);
1456 		he_writel(he_dev, 0x000c, G4_INMQ_L);
1457 
1458 		he_writel(he_dev, 0x0005, G5_INMQ_S);
1459 		he_writel(he_dev, 0x000d, G5_INMQ_L);
1460 
1461 		he_writel(he_dev, 0x0006, G6_INMQ_S);
1462 		he_writel(he_dev, 0x000e, G6_INMQ_L);
1463 
1464 		he_writel(he_dev, 0x0007, G7_INMQ_S);
1465 		he_writel(he_dev, 0x000f, G7_INMQ_L);
1466 	}
1467 
1468 	/* 5.1.6 application tunable parameters */
1469 
1470 	he_writel(he_dev, 0x0, MCC);
1471 	he_writel(he_dev, 0x0, OEC);
1472 	he_writel(he_dev, 0x0, DCC);
1473 	he_writel(he_dev, 0x0, CEC);
1474 
1475 	/* 5.1.7 cs block initialization */
1476 
1477 	he_init_cs_block(he_dev);
1478 
1479 	/* 5.1.8 cs block connection memory initialization */
1480 
1481 	if (he_init_cs_block_rcm(he_dev) < 0)
1482 		return -ENOMEM;
1483 
1484 	/* 5.1.10 initialize host structures */
1485 
1486 	he_init_tpdrq(he_dev);
1487 
1488 #ifdef USE_TPD_POOL
1489 	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1490 		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1491 	if (he_dev->tpd_pool == NULL) {
1492 		hprintk("unable to create tpd pci_pool\n");
1493 		return -ENOMEM;
1494 	}
1495 
1496 	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1497 #else
1498 	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1499 			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1500 	if (!he_dev->tpd_base)
1501 		return -ENOMEM;
1502 
1503 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1504 		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1505 		he_dev->tpd_base[i].inuse = 0;
1506 	}
1507 
1508 	he_dev->tpd_head = he_dev->tpd_base;
1509 	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1510 #endif
1511 
1512 	if (he_init_group(he_dev, 0) != 0)
1513 		return -ENOMEM;
1514 
1515 	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1516 		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1517 		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1518 		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1519 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1520 						G0_RBPS_BS + (group * 32));
1521 
1522 		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1523 		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1524 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1525 						G0_RBPL_QI + (group * 32));
1526 		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1527 
1528 		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1529 		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1530 		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1531 						G0_RBRQ_Q + (group * 16));
1532 		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1533 
1534 		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1535 		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1536 		he_writel(he_dev, TBRQ_THRESH(0x1),
1537 						G0_TBRQ_THRESH + (group * 16));
1538 		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1539 	}
1540 
1541 	/* host status page */
1542 
1543 	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1544 				sizeof(struct he_hsp), &he_dev->hsp_phys);
1545 	if (he_dev->hsp == NULL) {
1546 		hprintk("failed to allocate host status page\n");
1547 		return -ENOMEM;
1548 	}
1549 	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1550 	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1551 
1552 	/* initialize framer */
1553 
1554 #ifdef CONFIG_ATM_HE_USE_SUNI
1555 	suni_init(he_dev->atm_dev);
1556 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1557 		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1558 #endif /* CONFIG_ATM_HE_USE_SUNI */
1559 
1560 	if (sdh) {
1561 		/* this really should be in suni.c but for now... */
1562 		int val;
1563 
1564 		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1565 		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1566 		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1567 	}
1568 
1569 	/* 5.1.12 enable transmit and receive */
1570 
1571 	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1572 	reg |= TX_ENABLE|ER_ENABLE;
1573 	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1574 
1575 	reg = he_readl(he_dev, RC_CONFIG);
1576 	reg |= RX_ENABLE;
1577 	he_writel(he_dev, reg, RC_CONFIG);
1578 
1579 	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1580 		he_dev->cs_stper[i].inuse = 0;
1581 		he_dev->cs_stper[i].pcr = -1;
1582 	}
1583 	he_dev->total_bw = 0;
1584 
1585 
1586 	/* atm linux initialization */
1587 
1588 	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1589 	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1590 
1591 	he_dev->irq_peak = 0;
1592 	he_dev->rbrq_peak = 0;
1593 	he_dev->rbpl_peak = 0;
1594 	he_dev->tbrq_peak = 0;
1595 
1596 	HPRINTK("hell bent for leather!\n");
1597 
1598 	return 0;
1599 }
1600 
1601 static void
1602 he_stop(struct he_dev *he_dev)
1603 {
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;
#if defined(USE_RBPL_POOL) || defined(USE_RBPS_POOL)
	int i;			/* for the buffer-pool teardown loops below */
#endif
1607 
1608 	pci_dev = he_dev->pci_dev;
1609 
1610 	/* disable interrupts */
1611 
1612 	if (he_dev->membase) {
1613 		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1614 		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1615 		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1616 
1617 #ifdef USE_TASKLET
1618 		tasklet_disable(&he_dev->tasklet);
1619 #endif
1620 
1621 		/* disable recv and transmit */
1622 
1623 		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1624 		reg &= ~(TX_ENABLE|ER_ENABLE);
1625 		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1626 
1627 		reg = he_readl(he_dev, RC_CONFIG);
1628 		reg &= ~(RX_ENABLE);
1629 		he_writel(he_dev, reg, RC_CONFIG);
1630 	}
1631 
1632 #ifdef CONFIG_ATM_HE_USE_SUNI
1633 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1634 		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1635 #endif /* CONFIG_ATM_HE_USE_SUNI */
1636 
1637 	if (he_dev->irq)
1638 		free_irq(he_dev->irq, he_dev);
1639 
1640 	if (he_dev->irq_base)
1641 		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1642 			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1643 
1644 	if (he_dev->hsp)
1645 		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1646 						he_dev->hsp, he_dev->hsp_phys);
1647 
1648 	if (he_dev->rbpl_base) {
1649 #ifdef USE_RBPL_POOL
1650 		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1651 			void *cpuaddr = he_dev->rbpl_virt[i].virt;
1652 			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1653 
1654 			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1655 		}
1656 #else
1657 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1658 			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1659 #endif
1660 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1661 			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1662 	}
1663 
1664 #ifdef USE_RBPL_POOL
1665 	if (he_dev->rbpl_pool)
1666 		pci_pool_destroy(he_dev->rbpl_pool);
1667 #endif
1668 
1669 #ifdef USE_RBPS
1670 	if (he_dev->rbps_base) {
1671 #ifdef USE_RBPS_POOL
1672 		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1673 			void *cpuaddr = he_dev->rbps_virt[i].virt;
1674 			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1675 
1676 			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1677 		}
1678 #else
1679 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1680 			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1681 #endif
1682 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1683 			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1684 	}
1685 
1686 #ifdef USE_RBPS_POOL
1687 	if (he_dev->rbps_pool)
1688 		pci_pool_destroy(he_dev->rbps_pool);
1689 #endif
1690 
1691 #endif /* USE_RBPS */
1692 
1693 	if (he_dev->rbrq_base)
1694 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1695 							he_dev->rbrq_base, he_dev->rbrq_phys);
1696 
1697 	if (he_dev->tbrq_base)
1698 		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1699 							he_dev->tbrq_base, he_dev->tbrq_phys);
1700 
1701 	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);
1704 
1705 #ifdef USE_TPD_POOL
1706 	if (he_dev->tpd_pool)
1707 		pci_pool_destroy(he_dev->tpd_pool);
1708 #else
1709 	if (he_dev->tpd_base)
1710 		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1711 							he_dev->tpd_base, he_dev->tpd_base_phys);
1712 #endif
1713 
1714 	if (he_dev->pci_dev) {
1715 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1716 		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1717 		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1718 	}
1719 
1720 	if (he_dev->membase)
1721 		iounmap(he_dev->membase);
1722 }
1723 
1724 static struct he_tpd *
1725 __alloc_tpd(struct he_dev *he_dev)
1726 {
1727 #ifdef USE_TPD_POOL
1728 	struct he_tpd *tpd;
1729 	dma_addr_t dma_handle;
1730 
1731 	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
1732 	if (tpd == NULL)
1733 		return NULL;
1734 
1735 	tpd->status = TPD_ADDR(dma_handle);
1736 	tpd->reserved = 0;
1737 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1738 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1739 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1740 
1741 	return tpd;
1742 #else
1743 	int i;
1744 
1745 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1746 		++he_dev->tpd_head;
1747 		if (he_dev->tpd_head > he_dev->tpd_end) {
1748 			he_dev->tpd_head = he_dev->tpd_base;
1749 		}
1750 
1751 		if (!he_dev->tpd_head->inuse) {
1752 			he_dev->tpd_head->inuse = 1;
1753 			he_dev->tpd_head->status &= TPD_MASK;
1754 			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1755 			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1756 			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1757 			return he_dev->tpd_head;
1758 		}
1759 	}
1760 	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1761 	return NULL;
1762 #endif
1763 }
1764 
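/*
 * the last eight bytes of an aal5 pdu are the trailer: uu (1 byte),
 * cpi (1 byte), a 16-bit length and the 32-bit crc.  AAL5_LEN reads
 * the length field from bytes len-6 and len-5 of the reassembled
 * buffer.
 */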
1765 #define AAL5_LEN(buf,len) 						\
1766 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1767 				(((unsigned char *)(buf))[(len)-5]))
1768 
1769 /* 2.10.1.2 receive
1770  *
1771  * aal5 packets can optionally return the tcp checksum in the lower
1772  * 16 bits of the crc (RSR0_TCP_CKSUM)
1773  */
1774 
1775 #define TCP_CKSUM(buf,len) 						\
1776 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1777 				(((unsigned char *)(buf))[(len)-1]))
1778 
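/*
 * walk the receive buffer return queue from our head to the tail
 * reported in the host status page.  buffers belonging to a
 * connection are collected in the he_vcc iovec until END_PDU, then
 * copied into a single skb and pushed up the stack; the loaned host
 * buffers are handed back to the adapter by clearing RBP_LOANED.
 */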
1779 static int
1780 he_service_rbrq(struct he_dev *he_dev, int group)
1781 {
1782 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1783 				((unsigned long)he_dev->rbrq_base |
1784 					he_dev->hsp->group[group].rbrq_tail);
1785 	struct he_rbp *rbp = NULL;
1786 	unsigned cid, lastcid = -1;
1787 	unsigned buf_len = 0;
1788 	struct sk_buff *skb;
1789 	struct atm_vcc *vcc = NULL;
1790 	struct he_vcc *he_vcc;
1791 	struct he_iovec *iov;
1792 	int pdus_assembled = 0;
1793 	int updated = 0;
1794 
1795 	read_lock(&vcc_sklist_lock);
1796 	while (he_dev->rbrq_head != rbrq_tail) {
1797 		++updated;
1798 
1799 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1800 			he_dev->rbrq_head, group,
1801 			RBRQ_ADDR(he_dev->rbrq_head),
1802 			RBRQ_BUFLEN(he_dev->rbrq_head),
1803 			RBRQ_CID(he_dev->rbrq_head),
1804 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1805 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1806 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1807 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1808 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1809 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1810 
1811 #ifdef USE_RBPS
1812 		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1813 			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1814 		else
1815 #endif
1816 			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1817 
1818 		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1819 		cid = RBRQ_CID(he_dev->rbrq_head);
1820 
1821 		if (cid != lastcid)
1822 			vcc = __find_vcc(he_dev, cid);
1823 		lastcid = cid;
1824 
1825 		if (vcc == NULL) {
1826 			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
1827 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1828 				rbp->status &= ~RBP_LOANED;
1829 
1830 			goto next_rbrq_entry;
1831 		}
1832 
1833 		he_vcc = HE_VCC(vcc);
1834 		if (he_vcc == NULL) {
1835 			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
1836 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1837 				rbp->status &= ~RBP_LOANED;
1838 			goto next_rbrq_entry;
1839 		}
1840 
1841 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1842 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1843 			atomic_inc(&vcc->stats->rx_drop);
1844 			goto return_host_buffers;
1845 		}
1846 
1847 		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1848 		he_vcc->iov_tail->iov_len = buf_len;
1849 		he_vcc->pdu_len += buf_len;
1850 		++he_vcc->iov_tail;
1851 
1852 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1853 			lastcid = -1;
1854 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1855 			wake_up(&he_vcc->rx_waitq);
1856 			goto return_host_buffers;
1857 		}
1858 
1859 #ifdef notdef
1860 		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1861 			hprintk("iovec full!  cid 0x%x\n", cid);
1862 			goto return_host_buffers;
1863 		}
1864 #endif
1865 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1866 			goto next_rbrq_entry;
1867 
1868 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1869 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1870 			HPRINTK("%s%s (%d.%d)\n",
1871 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1872 							? "CRC_ERR " : "",
1873 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1874 							? "LEN_ERR" : "",
1875 							vcc->vpi, vcc->vci);
1876 			atomic_inc(&vcc->stats->rx_err);
1877 			goto return_host_buffers;
1878 		}
1879 
1880 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1881 							GFP_ATOMIC);
1882 		if (!skb) {
1883 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1884 			goto return_host_buffers;
1885 		}
1886 
1887 		if (rx_skb_reserve > 0)
1888 			skb_reserve(skb, rx_skb_reserve);
1889 
1890 		do_gettimeofday(&skb->stamp);
1891 
1892 		for (iov = he_vcc->iov_head;
1893 				iov < he_vcc->iov_tail; ++iov) {
1894 #ifdef USE_RBPS
1895 			if (iov->iov_base & RBP_SMALLBUF)
1896 				memcpy(skb_put(skb, iov->iov_len),
1897 					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1898 			else
1899 #endif
1900 				memcpy(skb_put(skb, iov->iov_len),
1901 					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1902 		}
1903 
1904 		switch (vcc->qos.aal) {
1905 			case ATM_AAL0:
1906 				/* 2.10.1.5 raw cell receive */
1907 				skb->len = ATM_AAL0_SDU;
1908 				skb->tail = skb->data + skb->len;
1909 				break;
1910 			case ATM_AAL5:
1911 				/* 2.10.1.2 aal5 receive */
1912 
1913 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1914 				skb->tail = skb->data + skb->len;
1915 #ifdef USE_CHECKSUM_HW
1916 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1917 					skb->ip_summed = CHECKSUM_HW;
1918 					skb->csum = TCP_CKSUM(skb->data,
1919 							he_vcc->pdu_len);
1920 				}
1921 #endif
1922 				break;
1923 		}
1924 
1925 #ifdef should_never_happen
1926 		if (skb->len > vcc->qos.rxtp.max_sdu)
1927 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1928 #endif
1929 
1930 #ifdef notdef
1931 		ATM_SKB(skb)->vcc = vcc;
1932 #endif
1933 		vcc->push(vcc, skb);
1934 
1935 		atomic_inc(&vcc->stats->rx);
1936 
1937 return_host_buffers:
1938 		++pdus_assembled;
1939 
1940 		for (iov = he_vcc->iov_head;
1941 				iov < he_vcc->iov_tail; ++iov) {
1942 #ifdef USE_RBPS
1943 			if (iov->iov_base & RBP_SMALLBUF)
1944 				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1945 			else
1946 #endif
1947 				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1948 
1949 			rbp->status &= ~RBP_LOANED;
1950 		}
1951 
1952 		he_vcc->iov_tail = he_vcc->iov_head;
1953 		he_vcc->pdu_len = 0;
1954 
1955 next_rbrq_entry:
1956 		he_dev->rbrq_head = (struct he_rbrq *)
1957 				((unsigned long) he_dev->rbrq_base |
1958 					RBRQ_MASK(++he_dev->rbrq_head));
1959 
1960 	}
1961 	read_unlock(&vcc_sklist_lock);
1962 
1963 	if (updated) {
1964 		if (updated > he_dev->rbrq_peak)
1965 			he_dev->rbrq_peak = updated;
1966 
1967 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1968 						G0_RBRQ_H + (group * 16));
1969 	}
1970 
1971 	return pdus_assembled;
1972 }
1973 
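/*
 * service the transmit buffer return queue: match each returned tpd
 * address against the outstanding list (or, without USE_TPD_POOL,
 * index into the static tpd ring), unmap its dma buffers, pop or
 * free the skb and recycle the descriptor.  an EOS entry instead
 * wakes the closer sleeping in he_close().
 */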
1974 static void
1975 he_service_tbrq(struct he_dev *he_dev, int group)
1976 {
1977 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1978 				((unsigned long)he_dev->tbrq_base |
1979 					he_dev->hsp->group[group].tbrq_tail);
1980 	struct he_tpd *tpd;
1981 	int slot, updated = 0;
1982 #ifdef USE_TPD_POOL
1983 	struct he_tpd *__tpd;
1984 #endif
1985 
1986 	/* 2.1.6 transmit buffer return queue */
1987 
1988 	while (he_dev->tbrq_head != tbrq_tail) {
1989 		++updated;
1990 
1991 		HPRINTK("tbrq%d 0x%x%s%s\n",
1992 			group,
1993 			TBRQ_TPD(he_dev->tbrq_head),
1994 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1995 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1996 #ifdef USE_TPD_POOL
1997 		tpd = NULL;
1998 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1999 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
2000 				tpd = __tpd;
2001 				list_del(&__tpd->entry);
2002 				break;
2003 			}
2004 		}
2005 
2006 		if (tpd == NULL) {
2007 			hprintk("unable to locate tpd for dma buffer %x\n",
2008 						TBRQ_TPD(he_dev->tbrq_head));
2009 			goto next_tbrq_entry;
2010 		}
2011 #else
2012 		tpd = &he_dev->tpd_base[TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head))];
2013 #endif
2014 
2015 		if (TBRQ_EOS(he_dev->tbrq_head)) {
2016 			HPRINTK("wake_up(tx_waitq) cid 0x%x\n", tpd->vcc ?
2017 				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci) : 0);
2018 			if (tpd->vcc)
2019 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2020 
2021 			goto next_tbrq_entry;
2022 		}
2023 
2024 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2025 			if (tpd->iovec[slot].addr)
2026 				pci_unmap_single(he_dev->pci_dev,
2027 					tpd->iovec[slot].addr,
2028 					tpd->iovec[slot].len & TPD_LEN_MASK,
2029 							PCI_DMA_TODEVICE);
2030 			if (tpd->iovec[slot].len & TPD_LST)
2031 				break;
2032 
2033 		}
2034 
2035 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2036 			if (tpd->vcc && tpd->vcc->pop)
2037 				tpd->vcc->pop(tpd->vcc, tpd->skb);
2038 			else
2039 				dev_kfree_skb_any(tpd->skb);
2040 		}
2041 
2042 next_tbrq_entry:
2043 #ifdef USE_TPD_POOL
2044 		if (tpd)
2045 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2046 #else
2047 		tpd->inuse = 0;
2048 #endif
2049 		he_dev->tbrq_head = (struct he_tbrq *)
2050 				((unsigned long) he_dev->tbrq_base |
2051 					TBRQ_MASK(++he_dev->tbrq_head));
2052 	}
2053 
2054 	if (updated) {
2055 		if (updated > he_dev->tbrq_peak)
2056 			he_dev->tbrq_peak = updated;
2057 
2058 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2059 						G0_TBRQ_H + (group * 16));
2060 	}
2061 }
2062 
2063 
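/*
 * replenish the large receive buffer pool: advance the rbpl tail,
 * re-loaning every buffer the driver has reclaimed (RBP_LOANED
 * clear), stopping short of the adapter's head pointer (table 3.42).
 * he_service_rbps below does the same for the small buffer pool.
 */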
2064 static void
2065 he_service_rbpl(struct he_dev *he_dev, int group)
2066 {
2067 	struct he_rbp *newtail;
2068 	struct he_rbp *rbpl_head;
2069 	int moved = 0;
2070 
2071 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2072 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2073 
2074 	for (;;) {
2075 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2076 						RBPL_MASK(he_dev->rbpl_tail+1));
2077 
2078 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2079 		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2080 			break;
2081 
2082 		newtail->status |= RBP_LOANED;
2083 		he_dev->rbpl_tail = newtail;
2084 		++moved;
2085 	}
2086 
2087 	if (moved)
2088 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2089 }
2090 
2091 #ifdef USE_RBPS
2092 static void
2093 he_service_rbps(struct he_dev *he_dev, int group)
2094 {
2095 	struct he_rbp *newtail;
2096 	struct he_rbp *rbps_head;
2097 	int moved = 0;
2098 
2099 	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2100 					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2101 
2102 	for (;;) {
2103 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2104 						RBPS_MASK(he_dev->rbps_tail+1));
2105 
2106 		/* table 3.42 -- rbps_tail should never be set to rbps_head */
2107 		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2108 			break;
2109 
2110 		newtail->status |= RBP_LOANED;
2111 		he_dev->rbps_tail = newtail;
2112 		++moved;
2113 	}
2114 
2115 	if (moved)
2116 		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2117 }
2118 #endif /* USE_RBPS */
2119 
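/*
 * each interrupt queue entry's isw word encodes an event type and a
 * group; dispatch to the matching queue service routine.  an isw
 * still reading ITYPE_INVALID means the adapter had not updated the
 * entry yet (8.1.1), in which case all queues are polled.
 */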
2120 static void
2121 he_tasklet(unsigned long data)
2122 {
2123 	unsigned long flags;
2124 	struct he_dev *he_dev = (struct he_dev *) data;
2125 	int group, type;
2126 	int updated = 0;
2127 
2128 	HPRINTK("tasklet (0x%lx)\n", data);
2129 #ifdef USE_TASKLET
2130 	spin_lock_irqsave(&he_dev->global_lock, flags);
2131 #endif
2132 
2133 	while (he_dev->irq_head != he_dev->irq_tail) {
2134 		++updated;
2135 
2136 		type = ITYPE_TYPE(he_dev->irq_head->isw);
2137 		group = ITYPE_GROUP(he_dev->irq_head->isw);
2138 
2139 		switch (type) {
2140 			case ITYPE_RBRQ_THRESH:
2141 				HPRINTK("rbrq%d threshold\n", group);
2142 				/* fall through */
2143 			case ITYPE_RBRQ_TIMER:
2144 				if (he_service_rbrq(he_dev, group)) {
2145 					he_service_rbpl(he_dev, group);
2146 #ifdef USE_RBPS
2147 					he_service_rbps(he_dev, group);
2148 #endif /* USE_RBPS */
2149 				}
2150 				break;
2151 			case ITYPE_TBRQ_THRESH:
2152 				HPRINTK("tbrq%d threshold\n", group);
2153 				/* fall through */
2154 			case ITYPE_TPD_COMPLETE:
2155 				he_service_tbrq(he_dev, group);
2156 				break;
2157 			case ITYPE_RBPL_THRESH:
2158 				he_service_rbpl(he_dev, group);
2159 				break;
2160 			case ITYPE_RBPS_THRESH:
2161 #ifdef USE_RBPS
2162 				he_service_rbps(he_dev, group);
2163 #endif /* USE_RBPS */
2164 				break;
2165 			case ITYPE_PHY:
2166 				HPRINTK("phy interrupt\n");
2167 #ifdef CONFIG_ATM_HE_USE_SUNI
2168 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2169 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2170 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2171 				spin_lock_irqsave(&he_dev->global_lock, flags);
2172 #endif
2173 				break;
2174 			case ITYPE_OTHER:
2175 				switch (type|group) {
2176 					case ITYPE_PARITY:
2177 						hprintk("parity error\n");
2178 						break;
2179 					case ITYPE_ABORT:
2180 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2181 						break;
2182 				}
2183 				break;
2184 			case ITYPE_TYPE(ITYPE_INVALID):
2185 				/* see 8.1.1 -- check all queues */
2186 
2187 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2188 
2189 				he_service_rbrq(he_dev, 0);
2190 				he_service_rbpl(he_dev, 0);
2191 #ifdef USE_RBPS
2192 				he_service_rbps(he_dev, 0);
2193 #endif /* USE_RBPS */
2194 				he_service_tbrq(he_dev, 0);
2195 				break;
2196 			default:
2197 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2198 		}
2199 
2200 		he_dev->irq_head->isw = ITYPE_INVALID;
2201 
2202 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2203 	}
2204 
2205 	if (updated) {
2206 		if (updated > he_dev->irq_peak)
2207 			he_dev->irq_peak = updated;
2208 
2209 		he_writel(he_dev,
2210 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2211 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2212 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2213 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2214 	}
2215 #ifdef USE_TASKLET
2216 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2217 #endif
2218 }
2219 
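/*
 * the interrupt handler itself only snapshots the irq queue tail
 * written by the adapter (falling back to IRQ0_BASE if the tail
 * offset was not updated -- 8.1.2 errata) and defers the queue walk
 * to he_tasklet.
 */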
2220 static irqreturn_t
2221 he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2222 {
2223 	unsigned long flags;
2224 	struct he_dev *he_dev = (struct he_dev * )dev_id;
2225 	int handled = 0;
2226 
2227 	if (he_dev == NULL)
2228 		return IRQ_NONE;
2229 
2230 	spin_lock_irqsave(&he_dev->global_lock, flags);
2231 
2232 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2233 						(*he_dev->irq_tailoffset << 2));
2234 
2235 	if (he_dev->irq_tail == he_dev->irq_head) {
2236 		HPRINTK("tailoffset not updated?\n");
2237 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2238 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2239 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2240 	}
2241 
2242 #ifdef DEBUG
2243 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2244 		hprintk("spurious (or shared) interrupt?\n");
2245 #endif
2246 
2247 	if (he_dev->irq_head != he_dev->irq_tail) {
2248 		handled = 1;
2249 #ifdef USE_TASKLET
2250 		tasklet_schedule(&he_dev->tasklet);
2251 #else
2252 		he_tasklet((unsigned long) he_dev);
2253 #endif
2254 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2255 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2256 	}
2257 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2258 	return IRQ_RETVAL(handled);
2259 
2260 }
2261 
2262 static __inline__ void
2263 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2264 {
2265 	struct he_tpdrq *new_tail;
2266 
2267 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2268 					tpd, cid, he_dev->tpdrq_tail);
2269 
2270 	/* new_tail = he_dev->tpdrq_tail; */
2271 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2272 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2273 
2274 	/*
2275 	 * check to see if we are about to set the tail == head
2276 	 * if true, update the head pointer from the adapter
2277 	 * to see if this is really the case (reading the queue
2278 	 * head for every enqueue would be unnecessarily slow)
2279 	 */
2280 
2281 	if (new_tail == he_dev->tpdrq_head) {
2282 		he_dev->tpdrq_head = (struct he_tpdrq *)
2283 			(((unsigned long)he_dev->tpdrq_base) |
2284 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2285 
2286 		if (new_tail == he_dev->tpdrq_head) {
2287 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2288 			/*
2289 			 * FIXME
2290 			 * push tpd onto a transmit backlog queue
2291 			 * after service_tbrq, service the backlog
2292 			 * for now, we just drop the pdu
2293 			 */
2294 			if (tpd->skb) {
2295 				if (tpd->vcc->pop)
2296 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2297 				else
2298 					dev_kfree_skb_any(tpd->skb);
2299 				atomic_inc(&tpd->vcc->stats->tx_err);
2300 			}
2301 #ifdef USE_TPD_POOL
2302 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2303 #else
2304 			tpd->inuse = 0;
2305 #endif
2306 			return;
2307 		}
2308 	}
2309 
2310 	/* 2.1.5 transmit packet descriptor ready queue */
2311 #ifdef USE_TPD_POOL
2312 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2313 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2314 #else
2315 	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2316 				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2317 #endif
2318 	he_dev->tpdrq_tail->cid = cid;
2319 	wmb();
2320 
2321 	he_dev->tpdrq_tail = new_tail;
2322 
2323 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2324 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2325 }
2326 
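/*
 * open a connection.  the cid indexes the connection state in the
 * adapter's tcm/rcm; as a sketch, assuming the usual packing of the
 * vpi bits above the vci bits:
 *
 *	cid = (vpi << he_dev->vcibits) | vci;
 *
 * a tx open programs the transmit state registers (tsr0..tsr14) for
 * ubr or cbr, reserving a cs_stper rate register for cbr; an rx open
 * programs rsr0/rsr1/rsr4, writing the open indication in rsr0 last
 * (5.1.11).
 */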
2327 static int
2328 he_open(struct atm_vcc *vcc)
2329 {
2330 	unsigned long flags;
2331 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2332 	struct he_vcc *he_vcc;
2333 	int err = 0;
2334 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2335 	short vpi = vcc->vpi;
2336 	int vci = vcc->vci;
2337 
2338 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2339 		return 0;
2340 
2341 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2342 
2343 	set_bit(ATM_VF_ADDR, &vcc->flags);
2344 
2345 	cid = he_mkcid(he_dev, vpi, vci);
2346 
2347 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2348 	if (he_vcc == NULL) {
2349 		hprintk("unable to allocate he_vcc during open\n");
2350 		return -ENOMEM;
2351 	}
2352 
2353 	he_vcc->iov_tail = he_vcc->iov_head;
2354 	he_vcc->pdu_len = 0;
2355 	he_vcc->rc_index = -1;
2356 
2357 	init_waitqueue_head(&he_vcc->rx_waitq);
2358 	init_waitqueue_head(&he_vcc->tx_waitq);
2359 
2360 	vcc->dev_data = he_vcc;
2361 
2362 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2363 		int pcr_goal;
2364 
2365 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2366 		if (pcr_goal == 0)
2367 			pcr_goal = he_dev->atm_dev->link_rate;
2368 		if (pcr_goal < 0)	/* means round down, technically */
2369 			pcr_goal = -pcr_goal;
2370 
2371 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2372 
2373 		switch (vcc->qos.aal) {
2374 			case ATM_AAL5:
2375 				tsr0_aal = TSR0_AAL5;
2376 				tsr4 = TSR4_AAL5;
2377 				break;
2378 			case ATM_AAL0:
2379 				tsr0_aal = TSR0_AAL0_SDU;
2380 				tsr4 = TSR4_AAL0_SDU;
2381 				break;
2382 			default:
2383 				err = -EINVAL;
2384 				goto open_failed;
2385 		}
2386 
2387 		spin_lock_irqsave(&he_dev->global_lock, flags);
2388 		tsr0 = he_readl_tsr0(he_dev, cid);
2389 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2390 
2391 		if (TSR0_CONN_STATE(tsr0) != 0) {
2392 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2393 			err = -EBUSY;
2394 			goto open_failed;
2395 		}
2396 
2397 		switch (vcc->qos.txtp.traffic_class) {
2398 			case ATM_UBR:
2399 				/* 2.3.3.1 open connection ubr */
2400 
2401 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2402 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2403 				break;
2404 
2405 			case ATM_CBR:
2406 				/* 2.3.3.2 open connection cbr */
2407 
2408 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2409 				if ((he_dev->total_bw + pcr_goal)
2410 					> (he_dev->atm_dev->link_rate * 9 / 10))
2411 				{
2412 					err = -EBUSY;
2413 					goto open_failed;
2414 				}
2415 
2416 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2417 
2418 				/* find an unused cs_stper register */
2419 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2420 					if (he_dev->cs_stper[reg].inuse == 0 ||
2421 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2422 							break;
2423 
2424 				if (reg == HE_NUM_CS_STPER) {
2425 					err = -EBUSY;
2426 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2427 					goto open_failed;
2428 				}
2429 
2430 				he_dev->total_bw += pcr_goal;
2431 
2432 				he_vcc->rc_index = reg;
2433 				++he_dev->cs_stper[reg].inuse;
2434 				he_dev->cs_stper[reg].pcr = pcr_goal;
2435 
2436 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2437 				period = clock / pcr_goal;
2438 
2439 				HPRINTK("rc_index = %d period = %d\n",
2440 								reg, period);
2441 
2442 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2443 							CS_STPER0 + reg);
2444 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2445 
2446 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2447 							TSR0_RC_INDEX(reg);
2448 
2449 				break;
2450 			default:
2451 				err = -EINVAL;
2452 				goto open_failed;
2453 		}
2454 
2455 		spin_lock_irqsave(&he_dev->global_lock, flags);
2456 
2457 		he_writel_tsr0(he_dev, tsr0, cid);
2458 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2459 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2460 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2461 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2462 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2463 
2464 		he_writel_tsr3(he_dev, 0x0, cid);
2465 		he_writel_tsr5(he_dev, 0x0, cid);
2466 		he_writel_tsr6(he_dev, 0x0, cid);
2467 		he_writel_tsr7(he_dev, 0x0, cid);
2468 		he_writel_tsr8(he_dev, 0x0, cid);
2469 		he_writel_tsr10(he_dev, 0x0, cid);
2470 		he_writel_tsr11(he_dev, 0x0, cid);
2471 		he_writel_tsr12(he_dev, 0x0, cid);
2472 		he_writel_tsr13(he_dev, 0x0, cid);
2473 		he_writel_tsr14(he_dev, 0x0, cid);
2474 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2475 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2476 	}
2477 
2478 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2479 		unsigned aal;
2480 
2481 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2482 					&HE_VCC(vcc)->rx_waitq);
2483 
2484 		switch (vcc->qos.aal) {
2485 			case ATM_AAL5:
2486 				aal = RSR0_AAL5;
2487 				break;
2488 			case ATM_AAL0:
2489 				aal = RSR0_RAWCELL;
2490 				break;
2491 			default:
2492 				err = -EINVAL;
2493 				goto open_failed;
2494 		}
2495 
2496 		spin_lock_irqsave(&he_dev->global_lock, flags);
2497 
2498 		rsr0 = he_readl_rsr0(he_dev, cid);
2499 		if (rsr0 & RSR0_OPEN_CONN) {
2500 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2501 
2502 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2503 			err = -EBUSY;
2504 			goto open_failed;
2505 		}
2506 
2507 #ifdef USE_RBPS
2508 		rsr1 = RSR1_GROUP(0);
2509 		rsr4 = RSR4_GROUP(0);
2510 #else /* !USE_RBPS */
2511 		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2512 		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2513 #endif /* USE_RBPS */
2514 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2515 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2516 
2517 #ifdef USE_CHECKSUM_HW
2518 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2519 			rsr0 |= RSR0_TCP_CKSUM;
2520 #endif
2521 
2522 		he_writel_rsr4(he_dev, rsr4, cid);
2523 		he_writel_rsr1(he_dev, rsr1, cid);
2524 		/* 5.1.11 the open/closed indication in rsr0 must be the
2525 			  last parameter initialized */
2526 		he_writel_rsr0(he_dev,
2527 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2528 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2529 
2530 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2531 	}
2532 
2533 open_failed:
2534 
2535 	if (err) {
2536 		kfree(he_vcc);
2537 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2538 	} else
2540 		set_bit(ATM_VF_READY, &vcc->flags);
2541 
2542 	return err;
2543 }
2544 
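/*
 * close a connection.  rx: issue RSR0_CLOSE_CONN and the RXCON_CLOSE
 * mailbox command, then sleep on rx_waitq until he_service_rbrq sees
 * CON_CLOSED.  tx: wait for the socket's outstanding transmit memory
 * to drain (exponential backoff starting at 1 ms, at most MAX_RETRY
 * tries), flush the connection, queue an EOS tpd and sleep on
 * tx_waitq until he_service_tbrq returns it, then poll for
 * TSR4_SESSION_ENDED and a zero TSR0_CONN_STATE.
 */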
2545 static void
2546 he_close(struct atm_vcc *vcc)
2547 {
2548 	unsigned long flags;
2549 	DECLARE_WAITQUEUE(wait, current);
2550 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2551 	struct he_tpd *tpd;
2552 	unsigned cid;
2553 	struct he_vcc *he_vcc = HE_VCC(vcc);
2554 #define MAX_RETRY 30
2555 	int retry = 0, sleep = 1, tx_inuse;
2556 
2557 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2558 
2559 	clear_bit(ATM_VF_READY, &vcc->flags);
2560 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2561 
2562 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2563 		int timeout;
2564 
2565 		HPRINTK("close rx cid 0x%x\n", cid);
2566 
2567 		/* 2.7.2.2 close receive operation */
2568 
2569 		/* wait for previous close (if any) to finish */
2570 
2571 		spin_lock_irqsave(&he_dev->global_lock, flags);
2572 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2573 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2574 			udelay(250);
2575 		}
2576 
2577 		set_current_state(TASK_UNINTERRUPTIBLE);
2578 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2579 
2580 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2581 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2582 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2583 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2584 
2585 		timeout = schedule_timeout(30*HZ);
2586 
2587 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2588 		set_current_state(TASK_RUNNING);
2589 
2590 		if (timeout == 0)
2591 			hprintk("close rx timeout cid 0x%x\n", cid);
2592 
2593 		HPRINTK("close rx cid 0x%x complete\n", cid);
2594 
2595 	}
2596 
2597 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2598 		volatile unsigned tsr4, tsr0;
2599 		int timeout;
2600 
2601 		HPRINTK("close tx cid 0x%x\n", cid);
2602 
2603 		/* 2.1.2
2604 		 *
2605 		 * ... the host must first stop queueing packets to the TPDRQ
2606 		 * on the connection to be closed, then wait for all outstanding
2607 		 * packets to be transmitted and their buffers returned to the
2608 		 * TBRQ. When the last packet on the connection arrives in the
2609 		 * TBRQ, the host issues the close command to the adapter.
2610 		 */
2611 
2612 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2613 		       (retry < MAX_RETRY)) {
2614 			msleep(sleep);
2615 			if (sleep < 250)
2616 				sleep = sleep * 2;
2617 
2618 			++retry;
2619 		}
2620 
2621 		if (tx_inuse)
2622 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2623 
2624 		/* 2.3.1.1 generic close operations with flush */
2625 
2626 		spin_lock_irqsave(&he_dev->global_lock, flags);
2627 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2628 					/* also clears TSR4_SESSION_ENDED */
2629 
2630 		switch (vcc->qos.txtp.traffic_class) {
2631 			case ATM_UBR:
2632 				he_writel_tsr1(he_dev,
2633 					TSR1_MCR(rate_to_atmf(200000))
2634 					| TSR1_PCR(0), cid);
2635 				break;
2636 			case ATM_CBR:
2637 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2638 				break;
2639 		}
2640 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2641 
2642 		tpd = __alloc_tpd(he_dev);
2643 		if (tpd == NULL) {
2644 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2645 			goto close_tx_incomplete;
2646 		}
2647 		tpd->status |= TPD_EOS | TPD_INT;
2648 		tpd->skb = NULL;
2649 		tpd->vcc = vcc;
2650 		wmb();
2651 
2652 		set_current_state(TASK_UNINTERRUPTIBLE);
2653 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2654 		__enqueue_tpd(he_dev, tpd, cid);
2655 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2656 
2657 		timeout = schedule_timeout(30*HZ);
2658 
2659 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2660 		set_current_state(TASK_RUNNING);
2661 
2662 		spin_lock_irqsave(&he_dev->global_lock, flags);
2663 
2664 		if (timeout == 0) {
2665 			hprintk("close tx timeout cid 0x%x\n", cid);
2666 			goto close_tx_incomplete;
2667 		}
2668 
2669 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2670 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2671 			udelay(250);
2672 		}
2673 
2674 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2675 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2676 			udelay(250);
2677 		}
2678 
2679 close_tx_incomplete:
2680 
2681 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2682 			int reg = he_vcc->rc_index;
2683 
2684 			HPRINTK("cs_stper reg = %d\n", reg);
2685 
2686 			if (he_dev->cs_stper[reg].inuse == 0)
2687 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2688 			else
2689 				--he_dev->cs_stper[reg].inuse;
2690 
2691 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2692 		}
2693 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2694 
2695 		HPRINTK("close tx cid 0x%x complete\n", cid);
2696 	}
2697 
2698 	kfree(he_vcc);
2699 
2700 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2701 }
2702 
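/*
 * queue a pdu for transmit.  aal5 pdus are sent as user cells; for
 * aal0, the pti and clp bits are lifted from byte 3 of the cell
 * header into the tpd before the header is stripped, i.e. roughly:
 *
 *	pti = (hdr[3] & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
 *	clp =  hdr[3] & ATM_HDR_CLP;
 *
 * the skb data (and, with USE_SCATTERGATHER, each fragment) is dma
 * mapped into the tpd iovecs, the last iovec is tagged TPD_LST, and
 * the tpd is handed to the adapter via __enqueue_tpd().
 */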
2703 static int
2704 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2705 {
2706 	unsigned long flags;
2707 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2708 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2709 	struct he_tpd *tpd;
2710 #ifdef USE_SCATTERGATHER
2711 	int i, slot = 0;
2712 #endif
2713 
2714 #define HE_TPD_BUFSIZE 0xffff
2715 
2716 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2717 
2718 	if ((skb->len > HE_TPD_BUFSIZE) ||
2719 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2720 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2721 		if (vcc->pop)
2722 			vcc->pop(vcc, skb);
2723 		else
2724 			dev_kfree_skb_any(skb);
2725 		atomic_inc(&vcc->stats->tx_err);
2726 		return -EINVAL;
2727 	}
2728 
2729 #ifndef USE_SCATTERGATHER
2730 	if (skb_shinfo(skb)->nr_frags) {
2731 		hprintk("no scatter/gather support\n");
2732 		if (vcc->pop)
2733 			vcc->pop(vcc, skb);
2734 		else
2735 			dev_kfree_skb_any(skb);
2736 		atomic_inc(&vcc->stats->tx_err);
2737 		return -EINVAL;
2738 	}
2739 #endif
2740 	spin_lock_irqsave(&he_dev->global_lock, flags);
2741 
2742 	tpd = __alloc_tpd(he_dev);
2743 	if (tpd == NULL) {
2744 		if (vcc->pop)
2745 			vcc->pop(vcc, skb);
2746 		else
2747 			dev_kfree_skb_any(skb);
2748 		atomic_inc(&vcc->stats->tx_err);
2749 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2750 		return -ENOMEM;
2751 	}
2752 
2753 	if (vcc->qos.aal == ATM_AAL5)
2754 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2755 	else {
2756 		char *pti_clp = (void *) (skb->data + 3);
2757 		int clp, pti;
2758 
2759 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2760 		clp = (*pti_clp & ATM_HDR_CLP);
2761 		tpd->status |= TPD_CELLTYPE(pti);
2762 		if (clp)
2763 			tpd->status |= TPD_CLP;
2764 
2765 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2766 	}
2767 
2768 #ifdef USE_SCATTERGATHER
2769 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2770 				skb->len - skb->data_len, PCI_DMA_TODEVICE);
2771 	tpd->iovec[slot].len = skb->len - skb->data_len;
2772 	++slot;
2773 
2774 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2775 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2776 
2777 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2778 			tpd->vcc = vcc;
2779 			tpd->skb = NULL;	/* not the last fragment
2780 						   so don't ->push() yet */
2781 			wmb();
2782 
2783 			__enqueue_tpd(he_dev, tpd, cid);
2784 			tpd = __alloc_tpd(he_dev);
2785 			if (tpd == NULL) {
2786 				if (vcc->pop)
2787 					vcc->pop(vcc, skb);
2788 				else
2789 					dev_kfree_skb_any(skb);
2790 				atomic_inc(&vcc->stats->tx_err);
2791 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2792 				return -ENOMEM;
2793 			}
2794 			tpd->status |= TPD_USERCELL;
2795 			slot = 0;
2796 		}
2797 
2798 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2799 			(void *) page_address(frag->page) + frag->page_offset,
2800 				frag->size, PCI_DMA_TODEVICE);
2801 		tpd->iovec[slot].len = frag->size;
2802 		++slot;
2803 
2804 	}
2805 
2806 	tpd->iovec[slot - 1].len |= TPD_LST;
2807 #else
2808 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2809 	tpd->length0 = skb->len | TPD_LST;
2810 #endif
2811 	tpd->status |= TPD_INT;
2812 
2813 	tpd->vcc = vcc;
2814 	tpd->skb = skb;
2815 	wmb();
2816 	ATM_SKB(skb)->vcc = vcc;
2817 
2818 	__enqueue_tpd(he_dev, tpd, cid);
2819 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2820 
2821 	atomic_inc(&vcc->stats->tx);
2822 
2823 	return 0;
2824 }
2825 
2826 static int
2827 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2828 {
2829 	unsigned long flags;
2830 	struct he_dev *he_dev = HE_DEV(atm_dev);
2831 	struct he_ioctl_reg reg;
2832 	int err = 0;
2833 
2834 	switch (cmd) {
2835 		case HE_GET_REG:
2836 			if (!capable(CAP_NET_ADMIN))
2837 				return -EPERM;
2838 
2839 			if (copy_from_user(&reg, arg,
2840 					   sizeof(struct he_ioctl_reg)))
2841 				return -EFAULT;
2842 
2843 			spin_lock_irqsave(&he_dev->global_lock, flags);
2844 			switch (reg.type) {
2845 				case HE_REGTYPE_PCI:
2846 					reg.val = he_readl(he_dev, reg.addr);
2847 					break;
2848 				case HE_REGTYPE_RCM:
2849 					reg.val =
2850 						he_readl_rcm(he_dev, reg.addr);
2851 					break;
2852 				case HE_REGTYPE_TCM:
2853 					reg.val =
2854 						he_readl_tcm(he_dev, reg.addr);
2855 					break;
2856 				case HE_REGTYPE_MBOX:
2857 					reg.val =
2858 						he_readl_mbox(he_dev, reg.addr);
2859 					break;
2860 				default:
2861 					err = -EINVAL;
2862 					break;
2863 			}
2864 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2865 			if (err == 0)
2866 				if (copy_to_user(arg, &reg,
2867 							sizeof(struct he_ioctl_reg)))
2868 					return -EFAULT;
2869 			break;
2870 		default:
2871 #ifdef CONFIG_ATM_HE_USE_SUNI
2872 			if (atm_dev->phy && atm_dev->phy->ioctl)
2873 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2874 #else /* CONFIG_ATM_HE_USE_SUNI */
2875 			err = -EINVAL;
2876 #endif /* CONFIG_ATM_HE_USE_SUNI */
2877 			break;
2878 	}
2879 
2880 	return err;
2881 }
2882 
2883 static void
2884 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2885 {
2886 	unsigned long flags;
2887 	struct he_dev *he_dev = HE_DEV(atm_dev);
2888 
2889 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2890 
2891 	spin_lock_irqsave(&he_dev->global_lock, flags);
2892 	he_writel(he_dev, val, FRAMER + (addr*4));
2893 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2894 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2895 }
2896 
2897 
2898 static unsigned char
2899 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2900 {
2901 	unsigned long flags;
2902 	struct he_dev *he_dev = HE_DEV(atm_dev);
2903 	unsigned reg;
2904 
2905 	spin_lock_irqsave(&he_dev->global_lock, flags);
2906 	reg = he_readl(he_dev, FRAMER + (addr*4));
2907 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2908 
2909 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2910 	return reg;
2911 }
2912 
2913 static int
2914 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2915 {
2916 	unsigned long flags;
2917 	struct he_dev *he_dev = HE_DEV(dev);
2918 	int left, i;
2919 #ifdef notdef
2920 	struct he_rbrq *rbrq_tail;
2921 	struct he_tpdrq *tpdrq_head;
2922 	int rbpl_head, rbpl_tail, inuse;
2923 #endif
2924 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2925 
2926 
2927 	left = *pos;
2928 	if (!left--)
2929 		return sprintf(page, "%s\n", version);
2930 
2931 	if (!left--)
2932 		return sprintf(page, "%s%s\n\n",
2933 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2934 
2935 	if (!left--)
2936 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2937 
2938 	spin_lock_irqsave(&he_dev->global_lock, flags);
2939 	mcc += he_readl(he_dev, MCC);
2940 	oec += he_readl(he_dev, OEC);
2941 	dcc += he_readl(he_dev, DCC);
2942 	cec += he_readl(he_dev, CEC);
2943 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2944 
2945 	if (!left--)
2946 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2947 							mcc, oec, dcc, cec);
2948 
2949 	if (!left--)
2950 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2951 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2952 
2953 	if (!left--)
2954 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2955 						CONFIG_TPDRQ_SIZE);
2956 
2957 	if (!left--)
2958 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2959 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2960 
2961 	if (!left--)
2962 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2963 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2964 
2965 
2966 #ifdef notdef
2967 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2968 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2969 
2970 	inuse = rbpl_head - rbpl_tail;
2971 	if (inuse < 0)
2972 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2973 	inuse /= sizeof(struct he_rbp);
2974 
2975 	if (!left--)
2976 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2977 						CONFIG_RBPL_SIZE, inuse);
2978 #endif
2979 
2980 	if (!left--)
2981 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2982 
2983 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2984 		if (!left--)
2985 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2986 						he_dev->cs_stper[i].pcr,
2987 						he_dev->cs_stper[i].inuse);
2988 
2989 	if (!left--)
2990 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2991 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2992 
2993 	return 0;
2994 }
2995 
2996 /* eeprom routines  -- see 4.7 */
2997 
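/*
 * read_prom_byte bit-bangs the serial eeprom through HOST_CNTL:
 * raise the write-enable bit, clock out the READ opcode (readtab[])
 * and the 8-bit address msb first, then drop write enable and clock
 * the eight data bits back in via ID_DOUT, finishing with an ID_CS
 * write.
 */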
2998 u8
2999 read_prom_byte(struct he_dev *he_dev, int addr)
3000 {
3001 	u32 val = 0, tmp_read = 0;
3002 	int i, j = 0;
3003 	u8 byte_read = 0;
3004 
3005 	val = readl(he_dev->membase + HOST_CNTL);
3006 	val &= 0xFFFFE0FF;
3007 
3008 	/* Turn on write enable */
3009 	val |= 0x800;
3010 	he_writel(he_dev, val, HOST_CNTL);
3011 
3012 	/* Send READ instruction */
3013 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3014 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
3015 		udelay(EEPROM_DELAY);
3016 	}
3017 
3018 	/* Next, we need to send the byte address to read from */
3019 	for (i = 7; i >= 0; i--) {
3020 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3021 		udelay(EEPROM_DELAY);
3022 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3023 		udelay(EEPROM_DELAY);
3024 	}
3025 
3026 	j = 0;
3027 
3028 	val &= 0xFFFFF7FF;      /* Turn off write enable */
3029 	he_writel(he_dev, val, HOST_CNTL);
3030 
3031 	/* Now, we can read data from the EEPROM by clocking it in */
3032 	for (i = 7; i >= 0; i--) {
3033 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3034 		udelay(EEPROM_DELAY);
3035 		tmp_read = he_readl(he_dev, HOST_CNTL);
3036 		byte_read |= (unsigned char)
3037 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3038 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3039 		udelay(EEPROM_DELAY);
3040 	}
3041 
3042 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
3043 	udelay(EEPROM_DELAY);
3044 
3045 	return byte_read;
3046 }
3047 
3048 MODULE_LICENSE("GPL");
3049 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3050 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3051 module_param(disable64, bool, 0);
3052 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3053 module_param(nvpibits, short, 0);
3054 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3055 module_param(nvcibits, short, 0);
3056 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3057 module_param(rx_skb_reserve, short, 0);
3058 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3059 module_param(irq_coalesce, bool, 0);
3060 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3061 module_param(sdh, bool, 0);
3062 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3063 
3064 static struct pci_device_id he_pci_tbl[] = {
3065 	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3066 	  0, 0, 0 },
3067 	{ 0, }
3068 };
3069 
3070 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3071 
3072 static struct pci_driver he_driver = {
3073 	.name =		"he",
3074 	.probe =	he_init_one,
3075 	.remove =	__devexit_p(he_remove_one),
3076 	.id_table =	he_pci_tbl,
3077 };
3078 
3079 static int __init he_init(void)
3080 {
3081 	return pci_register_driver(&he_driver);
3082 }
3083 
3084 static void __exit he_cleanup(void)
3085 {
3086 	pci_unregister_driver(&he_driver);
3087 }
3088 
3089 module_init(he_init);
3090 module_exit(he_cleanup);
3091