/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = 1;
static bool sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
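
/* each opcode bit is presented on SI while CLK is low and latched by
 * the EEPROM on the rising CLK edge, so the table above clocks out the
 * 8-bit READ opcode (0000 0011b) msb first: six zeros with SI low,
 * then two ones with SI held high */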

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
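
/* the rcm, tcm and mailbox spaces are not memory mapped directly; they
 * are reached through the CON_DAT/CON_CTL window above, with the flags
 * argument selecting which of the three address spaces a transfer
 * targets */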


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
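/* e.g. with 10 vcibits, vpi 1/vci 32 maps to cid ((1 << 10) | 32) = 0x420 */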

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
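
	/* with byte enables 0, 1 and 2 disabled, the write above touches
	 * only the most significant byte of tsr4, as the note requires */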

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
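
/* __find_vcc() walks the global vcc hash table, so callers must hold
 * vcc_sklist_lock (he_service_rbrq() takes the read lock) */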

static int he_init_one(struct pci_dev *pci_dev,
		       const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
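
/* e.g. rate = 10 cps: 10 << 9 = 5120; three halvings give 640 <= 0x3ff,
 * so exp = 3 and the stored mantissa is 640 & 0x1ff = 128.  the inverse,
 * (1 << exp) * (mantissa + 512) >> 9 = 8 * 640 / 512, recovers 10 cps
 * (see he_init_cs_block_rcm() below) */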

static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
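
/* note the strides in the three loops above: each lbuf descriptor is
 * two words (buffer address, next index), laid out four words apart for
 * the receive pools -- rx0 on even, rx1 on odd lbufd indices -- and two
 * words apart for transmit, whose indices simply follow the receive
 * pools */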

static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
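
	/* row 0 now runs from link_rate down in steps of link_rate/32;
	 * rows 1 through 14 each halve the row above, and row 15 quarters
	 * row 14, so the grid spans from full link rate down to a few
	 * cells per second */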

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * into the rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits);
		 * there are two table entries in each 32-bit register
		 */
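		/* reg accumulates the pair: the entry computed for an even
		 * rate_atmf is shifted into the upper halfword while the
		 * following odd rate_atmf is processed, and both halves are
		 * written out together on the odd index below */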

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is not pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			sizeof(struct he_rbrq), he_dev->rbrq_base,
			he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			sizeof(struct he_rbp), he_dev->rbpl_base,
			he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}

static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
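/* 209 = 1 + 16 (AVG_LAT) + 192 (BURST_SIZE/BUS_SIZE) */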
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
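
/* the aal5 trailer occupies the last eight bytes of the pdu: cpcs-uu,
 * cpi, a 16-bit length and the 32-bit crc.  AAL5_LEN() extracts the
 * length field from bytes len-6 and len-5 */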
1639 
1640 /* 2.10.1.2 receive
1641  *
1642  * aal5 packets can optionally return the tcp checksum in the lower
1643  * 16 bits of the crc (RSR0_TCP_CKSUM)
1644  */
1645 
1646 #define TCP_CKSUM(buf,len) 						\
1647 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1648 				(((unsigned char *)(buf))[(len-1)]))
1649 
1650 static int
1651 he_service_rbrq(struct he_dev *he_dev, int group)
1652 {
1653 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1654 				((unsigned long)he_dev->rbrq_base |
1655 					he_dev->hsp->group[group].rbrq_tail);
1656 	unsigned cid, lastcid = -1;
1657 	struct sk_buff *skb;
1658 	struct atm_vcc *vcc = NULL;
1659 	struct he_vcc *he_vcc;
1660 	struct he_buff *heb, *next;
1661 	int i;
1662 	int pdus_assembled = 0;
1663 	int updated = 0;
1664 
1665 	read_lock(&vcc_sklist_lock);
1666 	while (he_dev->rbrq_head != rbrq_tail) {
1667 		++updated;
1668 
1669 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1670 			he_dev->rbrq_head, group,
1671 			RBRQ_ADDR(he_dev->rbrq_head),
1672 			RBRQ_BUFLEN(he_dev->rbrq_head),
1673 			RBRQ_CID(he_dev->rbrq_head),
1674 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1675 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1676 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1677 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1678 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1679 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1680 
1681 		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1682 		heb = he_dev->rbpl_virt[i];
1683 
1684 		cid = RBRQ_CID(he_dev->rbrq_head);
1685 		if (cid != lastcid)
1686 			vcc = __find_vcc(he_dev, cid);
1687 		lastcid = cid;
1688 
1689 		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1690 			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1691 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1692 				clear_bit(i, he_dev->rbpl_table);
1693 				list_del(&heb->entry);
1694 				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1695 			}
1696 
1697 			goto next_rbrq_entry;
1698 		}
1699 
1700 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1701 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1702 				atomic_inc(&vcc->stats->rx_drop);
1703 			goto return_host_buffers;
1704 		}
1705 
1706 		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1707 		clear_bit(i, he_dev->rbpl_table);
1708 		list_move_tail(&heb->entry, &he_vcc->buffers);
1709 		he_vcc->pdu_len += heb->len;
1710 
1711 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1712 			lastcid = -1;
1713 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1714 			wake_up(&he_vcc->rx_waitq);
1715 			goto return_host_buffers;
1716 		}
1717 
1718 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1719 			goto next_rbrq_entry;
1720 
1721 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1722 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1723 			HPRINTK("%s%s (%d.%d)\n",
1724 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1725 							? "CRC_ERR " : "",
1726 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1727 							? "LEN_ERR" : "",
1728 							vcc->vpi, vcc->vci);
1729 			atomic_inc(&vcc->stats->rx_err);
1730 			goto return_host_buffers;
1731 		}
1732 
1733 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1734 							GFP_ATOMIC);
1735 		if (!skb) {
1736 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1737 			goto return_host_buffers;
1738 		}
1739 
1740 		if (rx_skb_reserve > 0)
1741 			skb_reserve(skb, rx_skb_reserve);
1742 
1743 		__net_timestamp(skb);
1744 
1745 		list_for_each_entry(heb, &he_vcc->buffers, entry)
1746 			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1747 
1748 		switch (vcc->qos.aal) {
1749 			case ATM_AAL0:
1750 				/* 2.10.1.5 raw cell receive */
1751 				skb->len = ATM_AAL0_SDU;
1752 				skb_set_tail_pointer(skb, skb->len);
1753 				break;
1754 			case ATM_AAL5:
1755 				/* 2.10.1.2 aal5 receive */
1756 
1757 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1758 				skb_set_tail_pointer(skb, skb->len);
1759 #ifdef USE_CHECKSUM_HW
1760 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1761 					skb->ip_summed = CHECKSUM_COMPLETE;
1762 					skb->csum = TCP_CKSUM(skb->data,
1763 							he_vcc->pdu_len);
1764 				}
1765 #endif
1766 				break;
1767 		}
1768 
1769 #ifdef should_never_happen
1770 		if (skb->len > vcc->qos.rxtp.max_sdu)
1771 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1772 #endif
1773 
1774 #ifdef notdef
1775 		ATM_SKB(skb)->vcc = vcc;
1776 #endif
1777 		spin_unlock(&he_dev->global_lock);
1778 		vcc->push(vcc, skb);
1779 		spin_lock(&he_dev->global_lock);
1780 
1781 		atomic_inc(&vcc->stats->rx);
1782 
1783 return_host_buffers:
1784 		++pdus_assembled;
1785 
1786 		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1787 			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1788 		INIT_LIST_HEAD(&he_vcc->buffers);
1789 		he_vcc->pdu_len = 0;
1790 
1791 next_rbrq_entry:
1792 		he_dev->rbrq_head = (struct he_rbrq *)
1793 				((unsigned long) he_dev->rbrq_base |
1794 					RBRQ_MASK(he_dev->rbrq_head + 1));
1795 
1796 	}
1797 	read_unlock(&vcc_sklist_lock);
1798 
1799 	if (updated) {
1800 		if (updated > he_dev->rbrq_peak)
1801 			he_dev->rbrq_peak = updated;
1802 
1803 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1804 						G0_RBRQ_H + (group * 16));
1805 	}
1806 
1807 	return pdus_assembled;
1808 }
1809 
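/*
 * he_service_tbrq -- drain the group's transmit buffer return queue.
 * Each entry names a completed tpd: match it against outstanding_tpds,
 * unmap its dma buffers, hand the skb back to the vcc, and recycle the
 * tpd.  An EOS entry marks the end of a close flush and wakes the
 * thread sleeping in he_close().
 */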
1810 static void
1811 he_service_tbrq(struct he_dev *he_dev, int group)
1812 {
1813 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1814 				((unsigned long)he_dev->tbrq_base |
1815 					he_dev->hsp->group[group].tbrq_tail);
1816 	struct he_tpd *tpd;
1817 	int slot, updated = 0;
1818 	struct he_tpd *__tpd;
1819 
1820 	/* 2.1.6 transmit buffer return queue */
1821 
1822 	while (he_dev->tbrq_head != tbrq_tail) {
1823 		++updated;
1824 
1825 		HPRINTK("tbrq%d 0x%x%s%s\n",
1826 			group,
1827 			TBRQ_TPD(he_dev->tbrq_head),
1828 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1829 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1830 		tpd = NULL;
1831 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1832 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1833 				tpd = __tpd;
1834 				list_del(&__tpd->entry);
1835 				break;
1836 			}
1837 		}
1838 
1839 		if (tpd == NULL) {
1840 			hprintk("unable to locate tpd for dma buffer %x\n",
1841 						TBRQ_TPD(he_dev->tbrq_head));
1842 			goto next_tbrq_entry;
1843 		}
1844 
1845 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1846 			if (tpd->vcc) {
1847 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1848 					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1849 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1850 			}
1851 			goto next_tbrq_entry;
1852 		}
1853 
1854 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1855 			if (tpd->iovec[slot].addr)
1856 				pci_unmap_single(he_dev->pci_dev,
1857 					tpd->iovec[slot].addr,
1858 					tpd->iovec[slot].len & TPD_LEN_MASK,
1859 							PCI_DMA_TODEVICE);
1860 			if (tpd->iovec[slot].len & TPD_LST)
1861 				break;
1863 		}
1864 
1865 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1866 			if (tpd->vcc && tpd->vcc->pop)
1867 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1868 			else
1869 				dev_kfree_skb_any(tpd->skb);
1870 		}
1871 
1872 next_tbrq_entry:
1873 		if (tpd)
1874 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1875 		he_dev->tbrq_head = (struct he_tbrq *)
1876 				((unsigned long) he_dev->tbrq_base |
1877 					TBRQ_MASK(he_dev->tbrq_head + 1));
1878 	}
1879 
1880 	if (updated) {
1881 		if (updated > he_dev->tbrq_peak)
1882 			he_dev->tbrq_peak = updated;
1883 
1884 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1885 						G0_TBRQ_H + (group * 16));
1886 	}
1887 }
1888 
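/*
 * he_service_rbpl -- replenish the group's receive buffer pool.
 * Allocates he_buff's from the pci pool, records each under a free
 * index from the rbpl_table bitmap (so rbrq entries can be mapped back
 * to their host buffer via rbpl_virt[]), and advances the queue tail.
 * Stops when the ring is full or allocation fails.
 */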
1889 static void
1890 he_service_rbpl(struct he_dev *he_dev, int group)
1891 {
1892 	struct he_rbp *new_tail;
1893 	struct he_rbp *rbpl_head;
1894 	struct he_buff *heb;
1895 	dma_addr_t mapping;
1896 	int i;
1897 	int moved = 0;
1898 
1899 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1900 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1901 
1902 	for (;;) {
1903 		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1904 						RBPL_MASK(he_dev->rbpl_tail+1));
1905 
1906 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1907 		if (new_tail == rbpl_head)
1908 			break;
1909 
1910 		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1911 		if (i > (RBPL_TABLE_SIZE - 1)) {
1912 			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1913 			if (i > (RBPL_TABLE_SIZE - 1))
1914 				break;
1915 		}
1916 		he_dev->rbpl_hint = i + 1;
1917 
1918 		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1919 		if (!heb)
1920 			break;
1921 		heb->mapping = mapping;
1922 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1923 		he_dev->rbpl_virt[i] = heb;
1924 		set_bit(i, he_dev->rbpl_table);
1925 		new_tail->idx = i << RBP_IDX_OFFSET;
1926 		new_tail->phys = mapping + offsetof(struct he_buff, data);
1927 
1928 		he_dev->rbpl_tail = new_tail;
1929 		++moved;
1930 	}
1931 
1932 	if (moved)
1933 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1934 }
1935 
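/*
 * he_tasklet -- bottom half of the interrupt handler.  Walks the
 * interrupt queue written by the adapter and dispatches on the event
 * type: receive/transmit queue service, buffer pool refills, PHY
 * events and hardware error reports; a stale (ITYPE_INVALID) entry
 * triggers a sweep of all queues per section 8.1.1.  The new head is
 * written back to the adapter once the queue has been drained.
 */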
1936 static void
1937 he_tasklet(unsigned long data)
1938 {
1939 	unsigned long flags;
1940 	struct he_dev *he_dev = (struct he_dev *) data;
1941 	int group, type;
1942 	int updated = 0;
1943 
1944 	HPRINTK("tasklet (0x%lx)\n", data);
1945 	spin_lock_irqsave(&he_dev->global_lock, flags);
1946 
1947 	while (he_dev->irq_head != he_dev->irq_tail) {
1948 		++updated;
1949 
1950 		type = ITYPE_TYPE(he_dev->irq_head->isw);
1951 		group = ITYPE_GROUP(he_dev->irq_head->isw);
1952 
1953 		switch (type) {
1954 			case ITYPE_RBRQ_THRESH:
1955 				HPRINTK("rbrq%d threshold\n", group);
1956 				/* fall through */
1957 			case ITYPE_RBRQ_TIMER:
1958 				if (he_service_rbrq(he_dev, group))
1959 					he_service_rbpl(he_dev, group);
1960 				break;
1961 			case ITYPE_TBRQ_THRESH:
1962 				HPRINTK("tbrq%d threshold\n", group);
1963 				/* fall through */
1964 			case ITYPE_TPD_COMPLETE:
1965 				he_service_tbrq(he_dev, group);
1966 				break;
1967 			case ITYPE_RBPL_THRESH:
1968 				he_service_rbpl(he_dev, group);
1969 				break;
1970 			case ITYPE_RBPS_THRESH:
1971 				/* shouldn't happen unless small buffers enabled */
1972 				break;
1973 			case ITYPE_PHY:
1974 				HPRINTK("phy interrupt\n");
1975 #ifdef CONFIG_ATM_HE_USE_SUNI
1976 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1977 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1978 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1979 				spin_lock_irqsave(&he_dev->global_lock, flags);
1980 #endif
1981 				break;
1982 			case ITYPE_OTHER:
1983 				switch (type|group) {
1984 					case ITYPE_PARITY:
1985 						hprintk("parity error\n");
1986 						break;
1987 					case ITYPE_ABORT:
1988 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1989 						break;
1990 				}
1991 				break;
1992 			case ITYPE_TYPE(ITYPE_INVALID):
1993 				/* see 8.1.1 -- check all queues */
1994 
1995 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1996 
1997 				he_service_rbrq(he_dev, 0);
1998 				he_service_rbpl(he_dev, 0);
1999 				he_service_tbrq(he_dev, 0);
2000 				break;
2001 			default:
2002 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2003 		}
2004 
2005 		he_dev->irq_head->isw = ITYPE_INVALID;
2006 
2007 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2008 	}
2009 
2010 	if (updated) {
2011 		if (updated > he_dev->irq_peak)
2012 			he_dev->irq_peak = updated;
2013 
2014 		he_writel(he_dev,
2015 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2016 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2017 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2018 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2019 	}
2020 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2021 }
2022 
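/*
 * he_irq_handler -- hard interrupt handler.  Snapshots the interrupt
 * queue tail the adapter dma'd into host memory (falling back to
 * reading IRQ0_BASE when the in-memory copy is stale, per errata
 * 8.1.2) and, if new entries are present, clears the interrupt and
 * defers the real work to the tasklet.
 */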
2023 static irqreturn_t
2024 he_irq_handler(int irq, void *dev_id)
2025 {
2026 	unsigned long flags;
2027 	struct he_dev *he_dev = (struct he_dev *)dev_id;
2028 	int handled = 0;
2029 
2030 	if (he_dev == NULL)
2031 		return IRQ_NONE;
2032 
2033 	spin_lock_irqsave(&he_dev->global_lock, flags);
2034 
2035 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2036 						(*he_dev->irq_tailoffset << 2));
2037 
2038 	if (he_dev->irq_tail == he_dev->irq_head) {
2039 		HPRINTK("tailoffset not updated?\n");
2040 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2041 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2042 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2043 	}
2044 
2045 #ifdef DEBUG
2046 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2047 		hprintk("spurious (or shared) interrupt?\n");
2048 #endif
2049 
2050 	if (he_dev->irq_head != he_dev->irq_tail) {
2051 		handled = 1;
2052 		tasklet_schedule(&he_dev->tasklet);
2053 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2054 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2055 	}
2056 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2057 	return IRQ_RETVAL(handled);
2058 
2059 }
2060 
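/*
 * __enqueue_tpd -- post a transmit packet descriptor to the tpdrq.
 * Caller must hold he_dev->global_lock.  If the ring looks full, the
 * head is re-read from the adapter before giving up; on a genuinely
 * full ring the pdu is unmapped and dropped (see the FIXME below).
 */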
2061 static __inline__ void
2062 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2063 {
2064 	struct he_tpdrq *new_tail;
2065 
2066 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2067 					tpd, cid, he_dev->tpdrq_tail);
2068 
2069 	/* new_tail = he_dev->tpdrq_tail; */
2070 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2071 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2072 
2073 	/*
2074 	 * check to see if we are about to set the tail == head
2075 	 * if true, update the head pointer from the adapter
2076 	 * to see if this is really the case (reading the queue
2077 	 * head for every enqueue would be unnecessarily slow)
2078 	 */
2079 
2080 	if (new_tail == he_dev->tpdrq_head) {
2081 		he_dev->tpdrq_head = (struct he_tpdrq *)
2082 			(((unsigned long)he_dev->tpdrq_base) |
2083 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2084 
2085 		if (new_tail == he_dev->tpdrq_head) {
2086 			int slot;
2087 
2088 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2089 			/*
2090 			 * FIXME
2091 			 * push tpd onto a transmit backlog queue
2092 			 * after service_tbrq, service the backlog
2093 			 * for now, we just drop the pdu
2094 			 */
2095 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2096 				if (tpd->iovec[slot].addr)
2097 					pci_unmap_single(he_dev->pci_dev,
2098 						tpd->iovec[slot].addr,
2099 						tpd->iovec[slot].len & TPD_LEN_MASK,
2100 								PCI_DMA_TODEVICE);
2101 			}
2102 			if (tpd->skb) {
2103 				if (tpd->vcc->pop)
2104 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2105 				else
2106 					dev_kfree_skb_any(tpd->skb);
2107 				atomic_inc(&tpd->vcc->stats->tx_err);
2108 			}
2109 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2110 			return;
2111 		}
2112 	}
2113 
2114 	/* 2.1.5 transmit packet descriptor ready queue */
2115 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2116 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2117 	he_dev->tpdrq_tail->cid = cid;
2118 	wmb();
2119 
2120 	he_dev->tpdrq_tail = new_tail;
2121 
2122 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2123 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2124 }
2125 
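/*
 * he_open -- open a vcc.  The transmit side programs the connection's
 * TSR registers for UBR or CBR service; CBR additionally claims a
 * rate-controller period register (cs_stper) and is admission-limited
 * to 90% of the link rate per errata 8.2.3.  The receive side programs
 * the RSR registers, writing the open indication in rsr0 last as
 * required by section 5.1.11.
 */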
2126 static int
2127 he_open(struct atm_vcc *vcc)
2128 {
2129 	unsigned long flags;
2130 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2131 	struct he_vcc *he_vcc;
2132 	int err = 0;
2133 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2134 	short vpi = vcc->vpi;
2135 	int vci = vcc->vci;
2136 
2137 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2138 		return 0;
2139 
2140 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2141 
2142 	set_bit(ATM_VF_ADDR, &vcc->flags);
2143 
2144 	cid = he_mkcid(he_dev, vpi, vci);
2145 
2146 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2147 	if (he_vcc == NULL) {
2148 		hprintk("unable to allocate he_vcc during open\n");
2149 		return -ENOMEM;
2150 	}
2151 
2152 	INIT_LIST_HEAD(&he_vcc->buffers);
2153 	he_vcc->pdu_len = 0;
2154 	he_vcc->rc_index = -1;
2155 
2156 	init_waitqueue_head(&he_vcc->rx_waitq);
2157 	init_waitqueue_head(&he_vcc->tx_waitq);
2158 
2159 	vcc->dev_data = he_vcc;
2160 
2161 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2162 		int pcr_goal;
2163 
2164 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2165 		if (pcr_goal == 0)
2166 			pcr_goal = he_dev->atm_dev->link_rate;
2167 		if (pcr_goal < 0)	/* means round down, technically */
2168 			pcr_goal = -pcr_goal;
2169 
2170 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2171 
2172 		switch (vcc->qos.aal) {
2173 			case ATM_AAL5:
2174 				tsr0_aal = TSR0_AAL5;
2175 				tsr4 = TSR4_AAL5;
2176 				break;
2177 			case ATM_AAL0:
2178 				tsr0_aal = TSR0_AAL0_SDU;
2179 				tsr4 = TSR4_AAL0_SDU;
2180 				break;
2181 			default:
2182 				err = -EINVAL;
2183 				goto open_failed;
2184 		}
2185 
2186 		spin_lock_irqsave(&he_dev->global_lock, flags);
2187 		tsr0 = he_readl_tsr0(he_dev, cid);
2188 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2189 
2190 		if (TSR0_CONN_STATE(tsr0) != 0) {
2191 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2192 			err = -EBUSY;
2193 			goto open_failed;
2194 		}
2195 
2196 		switch (vcc->qos.txtp.traffic_class) {
2197 			case ATM_UBR:
2198 				/* 2.3.3.1 open connection ubr */
2199 
2200 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2201 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2202 				break;
2203 
2204 			case ATM_CBR:
2205 				/* 2.3.3.2 open connection cbr */
2206 
2207 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2208 				if ((he_dev->total_bw + pcr_goal)
2209 					> (he_dev->atm_dev->link_rate * 9 / 10)) {
2211 					err = -EBUSY;
2212 					goto open_failed;
2213 				}
2214 
2215 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2216 
2217 				/* find an unused cs_stper register */
2218 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2219 					if (he_dev->cs_stper[reg].inuse == 0 ||
2220 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2221 							break;
2222 
2223 				if (reg == HE_NUM_CS_STPER) {
2224 					err = -EBUSY;
2225 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2226 					goto open_failed;
2227 				}
2228 
2229 				he_dev->total_bw += pcr_goal;
2230 
2231 				he_vcc->rc_index = reg;
2232 				++he_dev->cs_stper[reg].inuse;
2233 				he_dev->cs_stper[reg].pcr = pcr_goal;
2234 
2235 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2236 				period = clock / pcr_goal;
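				/*
				 * e.g. a non-622 (155 Mbit) card uses a
				 * 50 MHz clock, so pcr_goal = 50000
				 * cells/s gives period = 1000; half the
				 * period is what gets programmed into
				 * CS_STPER below.
				 */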
2237 
2238 				HPRINTK("rc_index = %d period = %d\n",
2239 								reg, period);
2240 
2241 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2242 							CS_STPER0 + reg);
2243 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2244 
2245 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2246 							TSR0_RC_INDEX(reg);
2247 
2248 				break;
2249 			default:
2250 				err = -EINVAL;
2251 				goto open_failed;
2252 		}
2253 
2254 		spin_lock_irqsave(&he_dev->global_lock, flags);
2255 
2256 		he_writel_tsr0(he_dev, tsr0, cid);
2257 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2258 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2259 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2260 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2261 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2262 
2263 		he_writel_tsr3(he_dev, 0x0, cid);
2264 		he_writel_tsr5(he_dev, 0x0, cid);
2265 		he_writel_tsr6(he_dev, 0x0, cid);
2266 		he_writel_tsr7(he_dev, 0x0, cid);
2267 		he_writel_tsr8(he_dev, 0x0, cid);
2268 		he_writel_tsr10(he_dev, 0x0, cid);
2269 		he_writel_tsr11(he_dev, 0x0, cid);
2270 		he_writel_tsr12(he_dev, 0x0, cid);
2271 		he_writel_tsr13(he_dev, 0x0, cid);
2272 		he_writel_tsr14(he_dev, 0x0, cid);
2273 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2274 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2275 	}
2276 
2277 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2278 		unsigned aal;
2279 
2280 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2281 				&HE_VCC(vcc)->rx_waitq);
2282 
2283 		switch (vcc->qos.aal) {
2284 			case ATM_AAL5:
2285 				aal = RSR0_AAL5;
2286 				break;
2287 			case ATM_AAL0:
2288 				aal = RSR0_RAWCELL;
2289 				break;
2290 			default:
2291 				err = -EINVAL;
2292 				goto open_failed;
2293 		}
2294 
2295 		spin_lock_irqsave(&he_dev->global_lock, flags);
2296 
2297 		rsr0 = he_readl_rsr0(he_dev, cid);
2298 		if (rsr0 & RSR0_OPEN_CONN) {
2299 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2300 
2301 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2302 			err = -EBUSY;
2303 			goto open_failed;
2304 		}
2305 
2306 		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2307 		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2308 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2309 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2310 
2311 #ifdef USE_CHECKSUM_HW
2312 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2313 			rsr0 |= RSR0_TCP_CKSUM;
2314 #endif
2315 
2316 		he_writel_rsr4(he_dev, rsr4, cid);
2317 		he_writel_rsr1(he_dev, rsr1, cid);
2318 		/* 5.1.11 last parameter initialized should be
2319 			  the open/closed indication in rsr0 */
2320 		he_writel_rsr0(he_dev,
2321 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2322 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2323 
2324 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2325 	}
2326 
2327 open_failed:
2328 
2329 	if (err) {
2330 		kfree(he_vcc);
2331 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2332 	}
2333 	else
2334 		set_bit(ATM_VF_READY, &vcc->flags);
2335 
2336 	return err;
2337 }
2338 
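/*
 * he_close -- tear down a vcc.  The receive side issues
 * RSR0_CLOSE_CONN plus a mailbox command and sleeps until
 * he_service_rbrq() sees CON_CLOSED (section 2.7.2.2).  The transmit
 * side waits for in-flight skbs to drain, flushes the connection with
 * a zero-length EOS tpd, and sleeps until he_service_tbrq() wakes it
 * (section 2.3.1.1), finally releasing any CBR rate-controller
 * bandwidth it had claimed.
 */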
2339 static void
2340 he_close(struct atm_vcc *vcc)
2341 {
2342 	unsigned long flags;
2343 	DECLARE_WAITQUEUE(wait, current);
2344 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2345 	struct he_tpd *tpd;
2346 	unsigned cid;
2347 	struct he_vcc *he_vcc = HE_VCC(vcc);
2348 #define MAX_RETRY 30
2349 	int retry = 0, sleep = 1, tx_inuse;
2350 
2351 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2352 
2353 	clear_bit(ATM_VF_READY, &vcc->flags);
2354 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2355 
2356 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2357 		int timeout;
2358 
2359 		HPRINTK("close rx cid 0x%x\n", cid);
2360 
2361 		/* 2.7.2.2 close receive operation */
2362 
2363 		/* wait for previous close (if any) to finish */
2364 
2365 		spin_lock_irqsave(&he_dev->global_lock, flags);
2366 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2367 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2368 			udelay(250);
2369 		}
2370 
2371 		set_current_state(TASK_UNINTERRUPTIBLE);
2372 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2373 
2374 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2375 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2376 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2377 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2378 
2379 		timeout = schedule_timeout(30*HZ);
2380 
2381 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2382 		set_current_state(TASK_RUNNING);
2383 
2384 		if (timeout == 0)
2385 			hprintk("close rx timeout cid 0x%x\n", cid);
2386 
2387 		HPRINTK("close rx cid 0x%x complete\n", cid);
2388 
2389 	}
2390 
2391 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2392 		volatile unsigned tsr4, tsr0;
2393 		int timeout;
2394 
2395 		HPRINTK("close tx cid 0x%x\n", cid);
2396 
2397 		/* 2.1.2
2398 		 *
2399 		 * ... the host must first stop queueing packets to the TPDRQ
2400 		 * on the connection to be closed, then wait for all outstanding
2401 		 * packets to be transmitted and their buffers returned to the
2402 		 * TBRQ. When the last packet on the connection arrives in the
2403 		 * TBRQ, the host issues the close command to the adapter.
2404 		 */
2405 
2406 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2407 		       (retry < MAX_RETRY)) {
2408 			msleep(sleep);
2409 			if (sleep < 250)
2410 				sleep = sleep * 2;
2411 
2412 			++retry;
2413 		}
2414 
2415 		if (tx_inuse > 1)
2416 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2417 
2418 		/* 2.3.1.1 generic close operations with flush */
2419 
2420 		spin_lock_irqsave(&he_dev->global_lock, flags);
2421 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2422 					/* also clears TSR4_SESSION_ENDED */
2423 
2424 		switch (vcc->qos.txtp.traffic_class) {
2425 			case ATM_UBR:
2426 				he_writel_tsr1(he_dev,
2427 					TSR1_MCR(rate_to_atmf(200000))
2428 					| TSR1_PCR(0), cid);
2429 				break;
2430 			case ATM_CBR:
2431 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2432 				break;
2433 		}
2434 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2435 
2436 		tpd = __alloc_tpd(he_dev);
2437 		if (tpd == NULL) {
2438 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2439 			goto close_tx_incomplete;
2440 		}
2441 		tpd->status |= TPD_EOS | TPD_INT;
2442 		tpd->skb = NULL;
2443 		tpd->vcc = vcc;
2444 		wmb();
2445 
2446 		set_current_state(TASK_UNINTERRUPTIBLE);
2447 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2448 		__enqueue_tpd(he_dev, tpd, cid);
2449 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2450 
2451 		timeout = schedule_timeout(30*HZ);
2452 
2453 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2454 		set_current_state(TASK_RUNNING);
2455 
2456 		spin_lock_irqsave(&he_dev->global_lock, flags);
2457 
2458 		if (timeout == 0) {
2459 			hprintk("close tx timeout cid 0x%x\n", cid);
2460 			goto close_tx_incomplete;
2461 		}
2462 
2463 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2464 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2465 			udelay(250);
2466 		}
2467 
2468 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2469 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2470 			udelay(250);
2471 		}
2472 
2473 close_tx_incomplete:
2474 
2475 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2476 			int reg = he_vcc->rc_index;
2477 
2478 			HPRINTK("cs_stper reg = %d\n", reg);
2479 
2480 			if (he_dev->cs_stper[reg].inuse == 0)
2481 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2482 			else
2483 				--he_dev->cs_stper[reg].inuse;
2484 
2485 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2486 		}
2487 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2488 
2489 		HPRINTK("close tx cid 0x%x complete\n", cid);
2490 	}
2491 
2492 	kfree(he_vcc);
2493 
2494 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2495 }
2496 
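/*
 * he_send -- transmit one skb.  Builds a tpd pointing at the packet
 * data; for AAL0 the 4-byte cell header travels in-band, so pti/clp
 * are lifted from it and the header is trimmed off to leave the
 * 48-byte payload before mapping.  The tpd is posted with TPD_INT set
 * so completion comes back through the tbrq.
 */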
2497 static int
2498 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2499 {
2500 	unsigned long flags;
2501 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2502 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2503 	struct he_tpd *tpd;
2504 #ifdef USE_SCATTERGATHER
2505 	int i, slot = 0;
2506 #endif
2507 
2508 #define HE_TPD_BUFSIZE 0xffff
2509 
2510 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2511 
2512 	if ((skb->len > HE_TPD_BUFSIZE) ||
2513 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2514 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2515 		if (vcc->pop)
2516 			vcc->pop(vcc, skb);
2517 		else
2518 			dev_kfree_skb_any(skb);
2519 		atomic_inc(&vcc->stats->tx_err);
2520 		return -EINVAL;
2521 	}
2522 
2523 #ifndef USE_SCATTERGATHER
2524 	if (skb_shinfo(skb)->nr_frags) {
2525 		hprintk("no scatter/gather support\n");
2526 		if (vcc->pop)
2527 			vcc->pop(vcc, skb);
2528 		else
2529 			dev_kfree_skb_any(skb);
2530 		atomic_inc(&vcc->stats->tx_err);
2531 		return -EINVAL;
2532 	}
2533 #endif
2534 	spin_lock_irqsave(&he_dev->global_lock, flags);
2535 
2536 	tpd = __alloc_tpd(he_dev);
2537 	if (tpd == NULL) {
2538 		if (vcc->pop)
2539 			vcc->pop(vcc, skb);
2540 		else
2541 			dev_kfree_skb_any(skb);
2542 		atomic_inc(&vcc->stats->tx_err);
2543 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2544 		return -ENOMEM;
2545 	}
2546 
2547 	if (vcc->qos.aal == ATM_AAL5)
2548 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2549 	else {
2550 		char *pti_clp = (void *) (skb->data + 3);
2551 		int clp, pti;
2552 
2553 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2554 		clp = (*pti_clp & ATM_HDR_CLP);
2555 		tpd->status |= TPD_CELLTYPE(pti);
2556 		if (clp)
2557 			tpd->status |= TPD_CLP;
2558 
2559 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2560 	}
2561 
2562 #ifdef USE_SCATTERGATHER
2563 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2564 				skb_headlen(skb), PCI_DMA_TODEVICE);
2565 	tpd->iovec[slot].len = skb_headlen(skb);
2566 	++slot;
2567 
2568 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2569 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2570 
2571 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2572 			tpd->vcc = vcc;
2573 			tpd->skb = NULL;	/* not the last fragment
2574 						   so don't ->push() yet */
2575 			wmb();
2576 
2577 			__enqueue_tpd(he_dev, tpd, cid);
2578 			tpd = __alloc_tpd(he_dev);
2579 			if (tpd == NULL) {
2580 				if (vcc->pop)
2581 					vcc->pop(vcc, skb);
2582 				else
2583 					dev_kfree_skb_any(skb);
2584 				atomic_inc(&vcc->stats->tx_err);
2585 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2586 				return -ENOMEM;
2587 			}
2588 			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2589 			slot = 0;
2590 		}
2591 
2592 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2593 			(void *) page_address(frag->page) + frag->page_offset,
2594 				frag->size, PCI_DMA_TODEVICE);
2595 		tpd->iovec[slot].len = frag->size;
2596 		++slot;
2597 
2598 	}
2599 
2600 	tpd->iovec[slot - 1].len |= TPD_LST;
2601 #else
2602 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2603 	tpd->length0 = skb->len | TPD_LST;
2604 #endif
2605 	tpd->status |= TPD_INT;
2606 
2607 	tpd->vcc = vcc;
2608 	tpd->skb = skb;
2609 	wmb();
2610 	ATM_SKB(skb)->vcc = vcc;
2611 
2612 	__enqueue_tpd(he_dev, tpd, cid);
2613 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2614 
2615 	atomic_inc(&vcc->stats->tx);
2616 
2617 	return 0;
2618 }
2619 
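/*
 * he_ioctl -- HE_GET_REG is a privileged debugging hook that reads one
 * register from the PCI, RCM, TCM or mailbox address spaces; anything
 * else is forwarded to the PHY driver when one is attached.
 */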
2620 static int
2621 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2622 {
2623 	unsigned long flags;
2624 	struct he_dev *he_dev = HE_DEV(atm_dev);
2625 	struct he_ioctl_reg reg;
2626 	int err = 0;
2627 
2628 	switch (cmd) {
2629 		case HE_GET_REG:
2630 			if (!capable(CAP_NET_ADMIN))
2631 				return -EPERM;
2632 
2633 			if (copy_from_user(&reg, arg,
2634 					   sizeof(struct he_ioctl_reg)))
2635 				return -EFAULT;
2636 
2637 			spin_lock_irqsave(&he_dev->global_lock, flags);
2638 			switch (reg.type) {
2639 				case HE_REGTYPE_PCI:
2640 					if (reg.addr >= HE_REGMAP_SIZE) {
2641 						err = -EINVAL;
2642 						break;
2643 					}
2644 
2645 					reg.val = he_readl(he_dev, reg.addr);
2646 					break;
2647 				case HE_REGTYPE_RCM:
2648 					reg.val =
2649 						he_readl_rcm(he_dev, reg.addr);
2650 					break;
2651 				case HE_REGTYPE_TCM:
2652 					reg.val =
2653 						he_readl_tcm(he_dev, reg.addr);
2654 					break;
2655 				case HE_REGTYPE_MBOX:
2656 					reg.val =
2657 						he_readl_mbox(he_dev, reg.addr);
2658 					break;
2659 				default:
2660 					err = -EINVAL;
2661 					break;
2662 			}
2663 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2664 			if (err == 0)
2665 				if (copy_to_user(arg, &reg,
2666 							sizeof(struct he_ioctl_reg)))
2667 					return -EFAULT;
2668 			break;
2669 		default:
2670 #ifdef CONFIG_ATM_HE_USE_SUNI
2671 			if (atm_dev->phy && atm_dev->phy->ioctl)
2672 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2673 #else /* CONFIG_ATM_HE_USE_SUNI */
2674 			err = -EINVAL;
2675 #endif /* CONFIG_ATM_HE_USE_SUNI */
2676 			break;
2677 	}
2678 
2679 	return err;
2680 }
2681 
2682 static void
2683 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2684 {
2685 	unsigned long flags;
2686 	struct he_dev *he_dev = HE_DEV(atm_dev);
2687 
2688 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2689 
2690 	spin_lock_irqsave(&he_dev->global_lock, flags);
2691 	he_writel(he_dev, val, FRAMER + (addr*4));
2692 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2693 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2694 }
2695 
2696 
2697 static unsigned char
2698 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2699 {
2700 	unsigned long flags;
2701 	struct he_dev *he_dev = HE_DEV(atm_dev);
2702 	unsigned reg;
2703 
2704 	spin_lock_irqsave(&he_dev->global_lock, flags);
2705 	reg = he_readl(he_dev, FRAMER + (addr*4));
2706 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2707 
2708 	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2709 	return reg;
2710 }
2711 
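/*
 * he_proc_read -- emits one line of /proc output per call, selected by
 * *pos: adapter identification, cumulative cell-error counters, queue
 * size/peak statistics, and the CBR rate-controller table.
 */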
2712 static int
2713 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2714 {
2715 	unsigned long flags;
2716 	struct he_dev *he_dev = HE_DEV(dev);
2717 	int left, i;
2718 #ifdef notdef
2719 	struct he_rbrq *rbrq_tail;
2720 	struct he_tpdrq *tpdrq_head;
2721 	int rbpl_head, rbpl_tail, inuse;
2722 #endif
2723 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2724 
2725 
2726 	left = *pos;
2727 	if (!left--)
2728 		return sprintf(page, "ATM he driver\n");
2729 
2730 	if (!left--)
2731 		return sprintf(page, "%s%s\n\n",
2732 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2733 
2734 	if (!left--)
2735 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2736 
2737 	spin_lock_irqsave(&he_dev->global_lock, flags);
2738 	mcc += he_readl(he_dev, MCC);
2739 	oec += he_readl(he_dev, OEC);
2740 	dcc += he_readl(he_dev, DCC);
2741 	cec += he_readl(he_dev, CEC);
2742 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2743 
2744 	if (!left--)
2745 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2746 							mcc, oec, dcc, cec);
2747 
2748 	if (!left--)
2749 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2750 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2751 
2752 	if (!left--)
2753 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2754 						CONFIG_TPDRQ_SIZE);
2755 
2756 	if (!left--)
2757 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2758 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2759 
2760 	if (!left--)
2761 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2762 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2763 
2764 
2765 #ifdef notdef
2766 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2767 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2768 
2769 	inuse = rbpl_head - rbpl_tail;
2770 	if (inuse < 0)
2771 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2772 	inuse /= sizeof(struct he_rbp);
2773 
2774 	if (!left--)
2775 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2776 						CONFIG_RBPL_SIZE, inuse);
2777 #endif
2778 
2779 	if (!left--)
2780 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2781 
2782 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2783 		if (!left--)
2784 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2785 						he_dev->cs_stper[i].pcr,
2786 						he_dev->cs_stper[i].inuse);
2787 
2788 	if (!left--)
2789 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2790 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2791 
2792 	return 0;
2793 }
2794 
2795 /* eeprom routines  -- see 4.7 */
2796 
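/*
 * Bit-banged read of one byte from the serial EEPROM through HOST_CNTL:
 * raise the write-enable bit, clock out the READ opcode from readtab[]
 * and the eight address bits msb-first, then clock the eight data bits
 * back in through ID_DOUT.
 */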
2797 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2798 {
2799 	u32 val = 0, tmp_read = 0;
2800 	int i, j = 0;
2801 	u8 byte_read = 0;
2802 
2803 	val = readl(he_dev->membase + HOST_CNTL);
2804 	val &= 0xFFFFE0FF;
2805 
2806 	/* Turn on write enable */
2807 	val |= 0x800;
2808 	he_writel(he_dev, val, HOST_CNTL);
2809 
2810 	/* Send READ instruction */
2811 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2812 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2813 		udelay(EEPROM_DELAY);
2814 	}
2815 
2816 	/* Next, we need to send the byte address to read from */
2817 	for (i = 7; i >= 0; i--) {
2818 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2819 		udelay(EEPROM_DELAY);
2820 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2821 		udelay(EEPROM_DELAY);
2822 	}
2823 
2824 	j = 0;
2825 
2826 	val &= 0xFFFFF7FF;      /* Turn off write enable */
2827 	he_writel(he_dev, val, HOST_CNTL);
2828 
2829 	/* Now, we can read data from the EEPROM by clocking it in */
2830 	for (i = 7; i >= 0; i--) {
2831 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2832 		udelay(EEPROM_DELAY);
2833 		tmp_read = he_readl(he_dev, HOST_CNTL);
2834 		byte_read |= (unsigned char)
2835 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2836 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2837 		udelay(EEPROM_DELAY);
2838 	}
2839 
2840 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2841 	udelay(EEPROM_DELAY);
2842 
2843 	return byte_read;
2844 }
2845 
2846 MODULE_LICENSE("GPL");
2847 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2848 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2849 module_param(disable64, bool, 0);
2850 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2851 module_param(nvpibits, short, 0);
2852 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2853 module_param(nvcibits, short, 0);
2854 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2855 module_param(rx_skb_reserve, short, 0);
2856 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2857 module_param(irq_coalesce, bool, 0);
2858 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2859 module_param(sdh, bool, 0);
2860 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2861 
2862 static struct pci_device_id he_pci_tbl[] = {
2863 	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2864 	{ 0, }
2865 };
2866 
2867 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2868 
2869 static struct pci_driver he_driver = {
2870 	.name =		"he",
2871 	.probe =	he_init_one,
2872 	.remove =	he_remove_one,
2873 	.id_table =	he_pci_tbl,
2874 };
2875 
2876 static int __init he_init(void)
2877 {
2878 	return pci_register_driver(&he_driver);
2879 }
2880 
2881 static void __exit he_cleanup(void)
2882 {
2883 	pci_unregister_driver(&he_driver);
2884 }
2885 
2886 module_init(he_init);
2887 module_exit(he_cleanup);
2888