/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = true;
static bool sdh = false;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
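
/* editor's note (an inference from the table above, not from the
 * manual): each opcode bit takes two table entries -- the bit is
 * presented on SI while CLK is low, then latched by the EEPROM on
 * the rising edge when the following CLK_HIGH entry is written */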

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
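
/* editor's note: the dummy read of CON_DAT above forces the posted
 * write out to the bus before CON_CTL kicks off the transfer; the
 * busy-wait then spins until the connection memory access completes */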

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
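
/* editor's note, a worked example (the bit split is illustrative):
 * with vcibits = 10, vpi = 1 and vci = 32 give
 * cid = ((1 << 10) | 32) & 0x1fff = 0x420 */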

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int he_init_one(struct pci_dev *pci_dev,
		       const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
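
/* editor's note, a worked example of the encoding above: for the OC-3
 * cell rate, rate = ATM_OC3_PCR = 353207 cps.  353207 << 9 = 180841984,
 * which takes 18 right shifts to drop to 689 (<= 0x3ff), so exp = 18
 * and the 9-bit mantissa is 689 & 0x1ff = 0xb1.  The result is
 * 0x4000 | (18 << 9) | 0xb1 = 0x64b1; decoding it back as
 * (512 + 0xb1) << (18 - 9) gives 352768 cps, within 0.2% of the
 * original rate */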

static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are the timer period in clock cycles
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
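
	/* editor's note, a worked example (not from the manual): on an
	 * OC-3 card the first entry is clock / rate = 50000000 / 353207,
	 * i.e. a reload value of roughly 141 clock cycles */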

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}

static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
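
	/* editor's note: with delta = rate / 32, row 0 spans the link
	 * rate down to 17/32 of it; each later row halves the row above
	 * (row 15 quarters it), so the grid covers roughly link_rate
	 * down to link_rate / 2^17 */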

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;
		/*
		 * instead of '/ 512', use '>> 9' to avoid a call to the
		 * 64-bit division helper (__udivdi3) on 32-bit x86
		 */
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits); there are two table entries
		 * in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids __udivdi3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
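
	/* editor's note: idx and phys round-trip through the hardware --
	 * the index comes back in RBRQ_ADDR (see he_service_rbrq) and is
	 * used to look the he_buff up again in rbpl_virt[], while phys
	 * skips the he_buff header so the card dmas straight into data[] */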
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			sizeof(struct he_rbrq), he_dev->rbrq_base,
			he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			sizeof(struct he_rbp), he_dev->rbpl_base,
			he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}

static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
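	/* editor's note: plugging the worst-case numbers above into the
	 * formula gives 1 + 16 + 192 = 209, hence the value below */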
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	msleep(16);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;
	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */
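
	/* editor's note: the row geometry above feeds the lbuf counts
	 * computed below; e.g. on the HE155, r0_numbuffs = 512 rows *
	 * 20 cells per row / 4 cells per lbuf = 2560 (exactly the cap) */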

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits + nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs),		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
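
/* editor's note: the aal5 cpcs trailer occupies the last 8 bytes of
 * the reassembled pdu -- UU (1 byte), CPI (1 byte), length (2 bytes,
 * big endian), crc-32 (4 bytes) -- so bytes len-6 and len-5 above
 * hold the pdu length */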
1638 
1639 /* 2.10.1.2 receive
1640  *
1641  * aal5 packets can optionally return the tcp checksum in the lower
1642  * 16 bits of the crc (RSR0_TCP_CKSUM)
1643  */
1644 
1645 #define TCP_CKSUM(buf,len) 						\
1646 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1647 				(((unsigned char *)(buf))[(len-1)]))
1648 
1649 static int
1650 he_service_rbrq(struct he_dev *he_dev, int group)
1651 {
1652 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1653 				((unsigned long)he_dev->rbrq_base |
1654 					he_dev->hsp->group[group].rbrq_tail);
1655 	unsigned cid, lastcid = -1;
1656 	struct sk_buff *skb;
1657 	struct atm_vcc *vcc = NULL;
1658 	struct he_vcc *he_vcc;
1659 	struct he_buff *heb, *next;
1660 	int i;
1661 	int pdus_assembled = 0;
1662 	int updated = 0;
1663 
1664 	read_lock(&vcc_sklist_lock);
1665 	while (he_dev->rbrq_head != rbrq_tail) {
1666 		++updated;
1667 
1668 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1669 			he_dev->rbrq_head, group,
1670 			RBRQ_ADDR(he_dev->rbrq_head),
1671 			RBRQ_BUFLEN(he_dev->rbrq_head),
1672 			RBRQ_CID(he_dev->rbrq_head),
1673 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1674 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1675 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1676 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1677 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1678 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1679 
1680 		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1681 		heb = he_dev->rbpl_virt[i];
1682 
1683 		cid = RBRQ_CID(he_dev->rbrq_head);
1684 		if (cid != lastcid)
1685 			vcc = __find_vcc(he_dev, cid);
1686 		lastcid = cid;
1687 
1688 		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1689 			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1690 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1691 				clear_bit(i, he_dev->rbpl_table);
1692 				list_del(&heb->entry);
1693 				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1694 			}
1695 
1696 			goto next_rbrq_entry;
1697 		}
1698 
1699 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1700 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1701 				atomic_inc(&vcc->stats->rx_drop);
1702 			goto return_host_buffers;
1703 		}
1704 
1705 		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1706 		clear_bit(i, he_dev->rbpl_table);
1707 		list_move_tail(&heb->entry, &he_vcc->buffers);
1708 		he_vcc->pdu_len += heb->len;
1709 
1710 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1711 			lastcid = -1;
1712 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1713 			wake_up(&he_vcc->rx_waitq);
1714 			goto return_host_buffers;
1715 		}
1716 
1717 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1718 			goto next_rbrq_entry;
1719 
1720 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1721 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1722 			HPRINTK("%s%s (%d.%d)\n",
1723 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1724 							? "CRC_ERR " : "",
1725 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1726 							? "LEN_ERR" : "",
1727 							vcc->vpi, vcc->vci);
1728 			atomic_inc(&vcc->stats->rx_err);
1729 			goto return_host_buffers;
1730 		}
1731 
1732 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1733 							GFP_ATOMIC);
1734 		if (!skb) {
1735 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1736 			goto return_host_buffers;
1737 		}
1738 
1739 		if (rx_skb_reserve > 0)
1740 			skb_reserve(skb, rx_skb_reserve);
1741 
1742 		__net_timestamp(skb);
1743 
1744 		list_for_each_entry(heb, &he_vcc->buffers, entry)
1745 			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1746 
1747 		switch (vcc->qos.aal) {
1748 			case ATM_AAL0:
1749 				/* 2.10.1.5 raw cell receive */
1750 				skb->len = ATM_AAL0_SDU;
1751 				skb_set_tail_pointer(skb, skb->len);
1752 				break;
1753 			case ATM_AAL5:
1754 				/* 2.10.1.2 aal5 receive */
1755 
1756 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1757 				skb_set_tail_pointer(skb, skb->len);
1758 #ifdef USE_CHECKSUM_HW
1759 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1760 					skb->ip_summed = CHECKSUM_COMPLETE;
1761 					skb->csum = TCP_CKSUM(skb->data,
1762 							he_vcc->pdu_len);
1763 				}
1764 #endif
1765 				break;
1766 		}
1767 
1768 #ifdef should_never_happen
1769 		if (skb->len > vcc->qos.rxtp.max_sdu)
1770 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1771 #endif
1772 
1773 #ifdef notdef
1774 		ATM_SKB(skb)->vcc = vcc;
1775 #endif
1776 		spin_unlock(&he_dev->global_lock);
1777 		vcc->push(vcc, skb);
1778 		spin_lock(&he_dev->global_lock);
1779 
1780 		atomic_inc(&vcc->stats->rx);
1781 
1782 return_host_buffers:
1783 		++pdus_assembled;
1784 
1785 		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1786 			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1787 		INIT_LIST_HEAD(&he_vcc->buffers);
1788 		he_vcc->pdu_len = 0;
1789 
1790 next_rbrq_entry:
1791 		he_dev->rbrq_head = (struct he_rbrq *)
1792 				((unsigned long) he_dev->rbrq_base |
1793 					RBRQ_MASK(he_dev->rbrq_head + 1));
1794 
1795 	}
1796 	read_unlock(&vcc_sklist_lock);
1797 
1798 	if (updated) {
1799 		if (updated > he_dev->rbrq_peak)
1800 			he_dev->rbrq_peak = updated;
1801 
1802 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1803 						G0_RBRQ_H + (group * 16));
1804 	}
1805 
1806 	return pdus_assembled;
1807 }
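
/*
 * Every queue walk in this driver advances its head/tail pointer with
 * the same wrap trick: the ring is a power-of-two sized, naturally
 * aligned array, so "next" is the ring base OR'ed with the masked byte
 * offset of (current + 1), with no modulo.  A minimal sketch of the
 * idiom with hypothetical names (RING_BYTES, struct entry); the real
 * masks are RBRQ_MASK/TBRQ_MASK/TPDRQ_MASK in he.h:
 */
#ifdef notdef
#define RING_BYTES	4096	/* power of two; base aligned to this */
#define RING_MASK(p)	((unsigned long)(p) & (RING_BYTES - 1))

struct entry { u32 word[2]; };	/* stand-in for he_rbrq et al. */

static struct entry *ring_next(struct entry *base, struct entry *cur)
{
	/* the byte offset of (cur + 1) wraps back into [0, RING_BYTES) */
	return (struct entry *)((unsigned long)base | RING_MASK(cur + 1));
}
#endif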
1808 
1809 static void
1810 he_service_tbrq(struct he_dev *he_dev, int group)
1811 {
1812 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1813 				((unsigned long)he_dev->tbrq_base |
1814 					he_dev->hsp->group[group].tbrq_tail);
1815 	struct he_tpd *tpd;
1816 	int slot, updated = 0;
1817 	struct he_tpd *__tpd;
1818 
1819 	/* 2.1.6 transmit buffer return queue */
1820 
1821 	while (he_dev->tbrq_head != tbrq_tail) {
1822 		++updated;
1823 
1824 		HPRINTK("tbrq%d 0x%x%s%s\n",
1825 			group,
1826 			TBRQ_TPD(he_dev->tbrq_head),
1827 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1828 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1829 		tpd = NULL;
1830 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1831 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1832 				tpd = __tpd;
1833 				list_del(&__tpd->entry);
1834 				break;
1835 			}
1836 		}
1837 
1838 		if (tpd == NULL) {
1839 			hprintk("unable to locate tpd for dma buffer %x\n",
1840 						TBRQ_TPD(he_dev->tbrq_head));
1841 			goto next_tbrq_entry;
1842 		}
1843 
1844 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1845 			if (tpd->vcc) {
1846 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1847 					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1848 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1849 			}
1850 			goto next_tbrq_entry;
1851 		}
1852 
1853 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1854 			if (tpd->iovec[slot].addr)
1855 				pci_unmap_single(he_dev->pci_dev,
1856 					tpd->iovec[slot].addr,
1857 					tpd->iovec[slot].len & TPD_LEN_MASK,
1858 							PCI_DMA_TODEVICE);
1859 			if (tpd->iovec[slot].len & TPD_LST)
1860 				break;
1861 
1862 		}
1863 
1864 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1865 			if (tpd->vcc && tpd->vcc->pop)
1866 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1867 			else
1868 				dev_kfree_skb_any(tpd->skb);
1869 		}
1870 
1871 next_tbrq_entry:
1872 		if (tpd)
1873 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1874 		he_dev->tbrq_head = (struct he_tbrq *)
1875 				((unsigned long) he_dev->tbrq_base |
1876 					TBRQ_MASK(he_dev->tbrq_head + 1));
1877 	}
1878 
1879 	if (updated) {
1880 		if (updated > he_dev->tbrq_peak)
1881 			he_dev->tbrq_peak = updated;
1882 
1883 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1884 						G0_TBRQ_H + (group * 16));
1885 	}
1886 }
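
/*
 * The adapter only reports the bus address of a completed TPD, so the
 * driver keeps every in-flight TPD on outstanding_tpds and matches the
 * returned address against TPD_ADDR(tpd->status).  The loop above
 * open-codes this lookup-and-unlink idiom; factored out it would look
 * roughly like (a sketch, not a helper this driver defines):
 */
#ifdef notdef
static struct he_tpd *find_outstanding_tpd(struct list_head *head, u32 addr)
{
	struct he_tpd *tpd;

	list_for_each_entry(tpd, head, entry) {
		if (TPD_ADDR(tpd->status) == addr) {
			list_del(&tpd->entry);	/* unlink once claimed */
			return tpd;
		}
	}
	return NULL;
}
#endif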
1887 
1888 static void
1889 he_service_rbpl(struct he_dev *he_dev, int group)
1890 {
1891 	struct he_rbp *new_tail;
1892 	struct he_rbp *rbpl_head;
1893 	struct he_buff *heb;
1894 	dma_addr_t mapping;
1895 	int i;
1896 	int moved = 0;
1897 
1898 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1899 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1900 
1901 	for (;;) {
1902 		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1903 						RBPL_MASK(he_dev->rbpl_tail+1));
1904 
1905 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1906 		if (new_tail == rbpl_head)
1907 			break;
1908 
1909 		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1910 		if (i > (RBPL_TABLE_SIZE - 1)) {
1911 			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1912 			if (i > (RBPL_TABLE_SIZE - 1))
1913 				break;
1914 		}
1915 		he_dev->rbpl_hint = i + 1;
1916 
1917 		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1918 		if (!heb)
1919 			break;
1920 		heb->mapping = mapping;
1921 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1922 		he_dev->rbpl_virt[i] = heb;
1923 		set_bit(i, he_dev->rbpl_table);
1924 		new_tail->idx = i << RBP_IDX_OFFSET;
1925 		new_tail->phys = mapping + offsetof(struct he_buff, data);
1926 
1927 		he_dev->rbpl_tail = new_tail;
1928 		++moved;
1929 	}
1930 
1931 	if (moved)
1932 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1933 }
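
/*
 * he_service_rbpl() picks buffer slots with a hinted bitmap search:
 * scan forward from rbpl_hint, and only fall back to a full scan from
 * bit 0 when the tail of the table is exhausted.  The allocation side
 * of that scheme in isolation (a sketch; table/size/hint here are
 * illustrative parameters, not the driver's fields):
 */
#ifdef notdef
static int bitmap_alloc_hinted(unsigned long *table, int size, int *hint)
{
	int i;

	i = find_next_zero_bit(table, size, *hint);
	if (i >= size) {		/* ran off the end: rescan from 0 */
		i = find_first_zero_bit(table, size);
		if (i >= size)
			return -1;	/* no free slot at all */
	}
	set_bit(i, table);
	*hint = i + 1;			/* start the next search here */
	return i;
}
#endif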
1934 
1935 static void
1936 he_tasklet(unsigned long data)
1937 {
1938 	unsigned long flags;
1939 	struct he_dev *he_dev = (struct he_dev *) data;
1940 	int group, type;
1941 	int updated = 0;
1942 
1943 	HPRINTK("tasklet (0x%lx)\n", data);
1944 	spin_lock_irqsave(&he_dev->global_lock, flags);
1945 
1946 	while (he_dev->irq_head != he_dev->irq_tail) {
1947 		++updated;
1948 
1949 		type = ITYPE_TYPE(he_dev->irq_head->isw);
1950 		group = ITYPE_GROUP(he_dev->irq_head->isw);
1951 
1952 		switch (type) {
1953 			case ITYPE_RBRQ_THRESH:
1954 				HPRINTK("rbrq%d threshold\n", group);
1955 				/* fall through */
1956 			case ITYPE_RBRQ_TIMER:
1957 				if (he_service_rbrq(he_dev, group))
1958 					he_service_rbpl(he_dev, group);
1959 				break;
1960 			case ITYPE_TBRQ_THRESH:
1961 				HPRINTK("tbrq%d threshold\n", group);
1962 				/* fall through */
1963 			case ITYPE_TPD_COMPLETE:
1964 				he_service_tbrq(he_dev, group);
1965 				break;
1966 			case ITYPE_RBPL_THRESH:
1967 				he_service_rbpl(he_dev, group);
1968 				break;
1969 			case ITYPE_RBPS_THRESH:
1970 				/* shouldn't happen unless small buffers enabled */
1971 				break;
1972 			case ITYPE_PHY:
1973 				HPRINTK("phy interrupt\n");
1974 #ifdef CONFIG_ATM_HE_USE_SUNI
1975 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1976 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1977 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1978 				spin_lock_irqsave(&he_dev->global_lock, flags);
1979 #endif
1980 				break;
1981 			case ITYPE_OTHER:
1982 				switch (type|group) {
1983 					case ITYPE_PARITY:
1984 						hprintk("parity error\n");
1985 						break;
1986 					case ITYPE_ABORT:
1987 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1988 						break;
1989 				}
1990 				break;
1991 			case ITYPE_TYPE(ITYPE_INVALID):
1992 				/* see 8.1.1 -- check all queues */
1993 
1994 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1995 
1996 				he_service_rbrq(he_dev, 0);
1997 				he_service_rbpl(he_dev, 0);
1998 				he_service_tbrq(he_dev, 0);
1999 				break;
2000 			default:
2001 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2002 		}
2003 
2004 		he_dev->irq_head->isw = ITYPE_INVALID;
2005 
2006 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2007 	}
2008 
2009 	if (updated) {
2010 		if (updated > he_dev->irq_peak)
2011 			he_dev->irq_peak = updated;
2012 
2013 		he_writel(he_dev,
2014 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2015 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2016 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2017 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2018 	}
2019 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2020 }
2021 
2022 static irqreturn_t
2023 he_irq_handler(int irq, void *dev_id)
2024 {
2025 	unsigned long flags;
2026 	struct he_dev *he_dev = (struct he_dev * )dev_id;
2027 	int handled = 0;
2028 
2029 	if (he_dev == NULL)
2030 		return IRQ_NONE;
2031 
2032 	spin_lock_irqsave(&he_dev->global_lock, flags);
2033 
2034 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2035 						(*he_dev->irq_tailoffset << 2));
2036 
2037 	if (he_dev->irq_tail == he_dev->irq_head) {
2038 		HPRINTK("tailoffset not updated?\n");
2039 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2040 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2041 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2042 	}
2043 
2044 #ifdef DEBUG
2045 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2046 		hprintk("spurious (or shared) interrupt?\n");
2047 #endif
2048 
2049 	if (he_dev->irq_head != he_dev->irq_tail) {
2050 		handled = 1;
2051 		tasklet_schedule(&he_dev->tasklet);
2052 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2053 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2054 	}
2055 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2056 	return IRQ_RETVAL(handled);
2057 
2058 }
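
/*
 * Note the top-half/bottom-half split above: the hard interrupt
 * handler only snapshots the queue tail, acks the device and defers
 * all queue servicing to he_tasklet().  A minimal sketch of that
 * shape, with hypothetical device hooks (device_raised_irq/ack_device
 * are stand-ins, not real functions):
 */
#ifdef notdef
static void my_bottom_half(unsigned long data)
{
	/* drain the device's event queue outside hard-irq context */
}

DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);

static irqreturn_t my_irq(int irq, void *dev_id)
{
	if (!device_raised_irq(dev_id))	/* hypothetical: shared line? */
		return IRQ_NONE;
	ack_device(dev_id);		/* hypothetical ack + flush */
	tasklet_schedule(&my_tasklet);
	return IRQ_HANDLED;
}
#endif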
2059 
2060 static __inline__ void
2061 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2062 {
2063 	struct he_tpdrq *new_tail;
2064 
2065 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2066 					tpd, cid, he_dev->tpdrq_tail);
2067 
2068 	/* new_tail = he_dev->tpdrq_tail; */
2069 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2070 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2071 
2072 	/*
2073 	 * check to see if we are about to set the tail == head
2074 	 * if true, update the head pointer from the adapter
2075 	 * to see if this is really the case (reading the queue
2076 	 * head for every enqueue would be unnecessarily slow)
2077 	 */
2078 
2079 	if (new_tail == he_dev->tpdrq_head) {
2080 		he_dev->tpdrq_head = (struct he_tpdrq *)
2081 			(((unsigned long)he_dev->tpdrq_base) |
2082 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2083 
2084 		if (new_tail == he_dev->tpdrq_head) {
2085 			int slot;
2086 
2087 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2088 			/*
2089 			 * FIXME
2090 			 * push tpd onto a transmit backlog queue
2091 			 * after service_tbrq, service the backlog
2092 			 * for now, we just drop the pdu
2093 			 */
2094 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2095 				if (tpd->iovec[slot].addr)
2096 					pci_unmap_single(he_dev->pci_dev,
2097 						tpd->iovec[slot].addr,
2098 						tpd->iovec[slot].len & TPD_LEN_MASK,
2099 								PCI_DMA_TODEVICE);
2100 			}
2101 			if (tpd->skb) {
2102 				if (tpd->vcc->pop)
2103 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2104 				else
2105 					dev_kfree_skb_any(tpd->skb);
2106 				atomic_inc(&tpd->vcc->stats->tx_err);
2107 			}
2108 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2109 			return;
2110 		}
2111 	}
2112 
2113 	/* 2.1.5 transmit packet descriptor ready queue */
2114 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2115 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2116 	he_dev->tpdrq_tail->cid = cid;
2117 	wmb();
2118 
2119 	he_dev->tpdrq_tail = new_tail;
2120 
2121 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2122 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2123 }
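
/*
 * The head check above is a lazy-refresh optimization: keep a cached
 * copy of the queue head and re-read the real head register only when
 * the tail is about to collide with the cached value.  The same test,
 * index-based for clarity (struct ring and read_head_reg() are
 * hypothetical):
 */
#ifdef notdef
struct ring {
	unsigned tail, size, cached_head;
};

static unsigned read_head_reg(struct ring *r);	/* hypothetical MMIO read */

static int ring_full(struct ring *r)
{
	unsigned next = (r->tail + 1) & (r->size - 1);

	if (next != r->cached_head)
		return 0;			/* definitely room */
	r->cached_head = read_head_reg(r);	/* refresh and retest */
	return next == r->cached_head;
}
#endif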
2124 
2125 static int
2126 he_open(struct atm_vcc *vcc)
2127 {
2128 	unsigned long flags;
2129 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2130 	struct he_vcc *he_vcc;
2131 	int err = 0;
2132 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2133 	short vpi = vcc->vpi;
2134 	int vci = vcc->vci;
2135 
2136 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2137 		return 0;
2138 
2139 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2140 
2141 	set_bit(ATM_VF_ADDR, &vcc->flags);
2142 
2143 	cid = he_mkcid(he_dev, vpi, vci);
2144 
2145 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2146 	if (he_vcc == NULL) {
2147 		hprintk("unable to allocate he_vcc during open\n");
2148 		return -ENOMEM;
2149 	}
2150 
2151 	INIT_LIST_HEAD(&he_vcc->buffers);
2152 	he_vcc->pdu_len = 0;
2153 	he_vcc->rc_index = -1;
2154 
2155 	init_waitqueue_head(&he_vcc->rx_waitq);
2156 	init_waitqueue_head(&he_vcc->tx_waitq);
2157 
2158 	vcc->dev_data = he_vcc;
2159 
2160 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2161 		int pcr_goal;
2162 
2163 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2164 		if (pcr_goal == 0)
2165 			pcr_goal = he_dev->atm_dev->link_rate;
2166 		if (pcr_goal < 0)	/* means round down, technically */
2167 			pcr_goal = -pcr_goal;
2168 
2169 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2170 
2171 		switch (vcc->qos.aal) {
2172 			case ATM_AAL5:
2173 				tsr0_aal = TSR0_AAL5;
2174 				tsr4 = TSR4_AAL5;
2175 				break;
2176 			case ATM_AAL0:
2177 				tsr0_aal = TSR0_AAL0_SDU;
2178 				tsr4 = TSR4_AAL0_SDU;
2179 				break;
2180 			default:
2181 				err = -EINVAL;
2182 				goto open_failed;
2183 		}
2184 
2185 		spin_lock_irqsave(&he_dev->global_lock, flags);
2186 		tsr0 = he_readl_tsr0(he_dev, cid);
2187 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2188 
2189 		if (TSR0_CONN_STATE(tsr0) != 0) {
2190 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2191 			err = -EBUSY;
2192 			goto open_failed;
2193 		}
2194 
2195 		switch (vcc->qos.txtp.traffic_class) {
2196 			case ATM_UBR:
2197 				/* 2.3.3.1 open connection ubr */
2198 
2199 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2200 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2201 				break;
2202 
2203 			case ATM_CBR:
2204 				/* 2.3.3.2 open connection cbr */
2205 
2206 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2207 				if ((he_dev->total_bw + pcr_goal)
2208 					> (he_dev->atm_dev->link_rate * 9 / 10))
2209 				{
2210 					err = -EBUSY;
2211 					goto open_failed;
2212 				}
2213 
2214 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2215 
2216 				/* find an unused cs_stper register */
2217 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2218 					if (he_dev->cs_stper[reg].inuse == 0 ||
2219 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2220 						break;
2221 
2222 				if (reg == HE_NUM_CS_STPER) {
2223 					err = -EBUSY;
2224 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2225 					goto open_failed;
2226 				}
2227 
2228 				he_dev->total_bw += pcr_goal;
2229 
2230 				he_vcc->rc_index = reg;
2231 				++he_dev->cs_stper[reg].inuse;
2232 				he_dev->cs_stper[reg].pcr = pcr_goal;
2233 
2234 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2235 				period = clock / pcr_goal;
2236 
2237 				HPRINTK("rc_index = %d period = %d\n",
2238 								reg, period);
2239 
2240 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2241 							CS_STPER0 + reg);
2242 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2243 
2244 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2245 							TSR0_RC_INDEX(reg);
2246 
2247 				break;
2248 			default:
2249 				err = -EINVAL;
2250 				goto open_failed;
2251 		}
2252 
2253 		spin_lock_irqsave(&he_dev->global_lock, flags);
2254 
2255 		he_writel_tsr0(he_dev, tsr0, cid);
2256 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2257 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2258 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2259 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2260 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2261 
2262 		he_writel_tsr3(he_dev, 0x0, cid);
2263 		he_writel_tsr5(he_dev, 0x0, cid);
2264 		he_writel_tsr6(he_dev, 0x0, cid);
2265 		he_writel_tsr7(he_dev, 0x0, cid);
2266 		he_writel_tsr8(he_dev, 0x0, cid);
2267 		he_writel_tsr10(he_dev, 0x0, cid);
2268 		he_writel_tsr11(he_dev, 0x0, cid);
2269 		he_writel_tsr12(he_dev, 0x0, cid);
2270 		he_writel_tsr13(he_dev, 0x0, cid);
2271 		he_writel_tsr14(he_dev, 0x0, cid);
2272 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2273 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2274 	}
2275 
2276 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2277 		unsigned aal;
2278 
2279 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2280 			&HE_VCC(vcc)->rx_waitq);
2281 
2282 		switch (vcc->qos.aal) {
2283 			case ATM_AAL5:
2284 				aal = RSR0_AAL5;
2285 				break;
2286 			case ATM_AAL0:
2287 				aal = RSR0_RAWCELL;
2288 				break;
2289 			default:
2290 				err = -EINVAL;
2291 				goto open_failed;
2292 		}
2293 
2294 		spin_lock_irqsave(&he_dev->global_lock, flags);
2295 
2296 		rsr0 = he_readl_rsr0(he_dev, cid);
2297 		if (rsr0 & RSR0_OPEN_CONN) {
2298 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2299 
2300 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2301 			err = -EBUSY;
2302 			goto open_failed;
2303 		}
2304 
2305 		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2306 		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2307 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2308 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2309 
2310 #ifdef USE_CHECKSUM_HW
2311 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2312 			rsr0 |= RSR0_TCP_CKSUM;
2313 #endif
2314 
2315 		he_writel_rsr4(he_dev, rsr4, cid);
2316 		he_writel_rsr1(he_dev, rsr1, cid);
2317 		/* 5.1.11 last parameter initialized should be
2318 			  the open/closed indication in rsr0 */
2319 		he_writel_rsr0(he_dev,
2320 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2321 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2322 
2323 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2324 	}
2325 
2326 open_failed:
2327 
2328 	if (err) {
2329 		kfree(he_vcc);
2330 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2331 	}
2332 	else
2333 		set_bit(ATM_VF_READY, &vcc->flags);
2334 
2335 	return err;
2336 }
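
/*
 * Worked example for the CBR setup above: on a non-622 (155 Mb/s)
 * card the scheduler clock is taken as 50000000, so pcr_goal = 100000
 * cells/s gives period = 50000000 / 100000 = 500 clocks per cell, and
 * rate_to_atmf(period/2) = rate_to_atmf(250) is what is written to
 * CS_STPER0 + reg.  The computation, pulled out as a sketch:
 */
#ifdef notdef
static unsigned cbr_period(struct he_dev *he_dev, int pcr_goal)
{
	unsigned clock = he_is622(he_dev) ? 66667000 : 50000000;

	return clock / pcr_goal;	/* scheduler clocks per cell */
}
#endif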
2337 
2338 static void
2339 he_close(struct atm_vcc *vcc)
2340 {
2341 	unsigned long flags;
2342 	DECLARE_WAITQUEUE(wait, current);
2343 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2344 	struct he_tpd *tpd;
2345 	unsigned cid;
2346 	struct he_vcc *he_vcc = HE_VCC(vcc);
2347 #define MAX_RETRY 30
2348 	int retry = 0, sleep = 1, tx_inuse;
2349 
2350 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2351 
2352 	clear_bit(ATM_VF_READY, &vcc->flags);
2353 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2354 
2355 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2356 		int timeout;
2357 
2358 		HPRINTK("close rx cid 0x%x\n", cid);
2359 
2360 		/* 2.7.2.2 close receive operation */
2361 
2362 		/* wait for previous close (if any) to finish */
2363 
2364 		spin_lock_irqsave(&he_dev->global_lock, flags);
2365 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2366 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2367 			udelay(250);
2368 		}
2369 
2370 		set_current_state(TASK_UNINTERRUPTIBLE);
2371 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2372 
2373 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2374 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2375 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2376 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2377 
2378 		timeout = schedule_timeout(30*HZ);
2379 
2380 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2381 		set_current_state(TASK_RUNNING);
2382 
2383 		if (timeout == 0)
2384 			hprintk("close rx timeout cid 0x%x\n", cid);
2385 
2386 		HPRINTK("close rx cid 0x%x complete\n", cid);
2387 
2388 	}
2389 
2390 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2391 		volatile unsigned tsr4, tsr0;
2392 		int timeout;
2393 
2394 		HPRINTK("close tx cid 0x%x\n", cid);
2395 
2396 		/* 2.1.2
2397 		 *
2398 		 * ... the host must first stop queueing packets to the TPDRQ
2399 		 * on the connection to be closed, then wait for all outstanding
2400 		 * packets to be transmitted and their buffers returned to the
2401 		 * TBRQ. When the last packet on the connection arrives in the
2402 		 * TBRQ, the host issues the close command to the adapter.
2403 		 */
2404 
2405 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2406 		       (retry < MAX_RETRY)) {
2407 			msleep(sleep);
2408 			if (sleep < 250)
2409 				sleep = sleep * 2;
2410 
2411 			++retry;
2412 		}
2413 
2414 		if (tx_inuse > 1)
2415 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2416 
2417 		/* 2.3.1.1 generic close operations with flush */
2418 
2419 		spin_lock_irqsave(&he_dev->global_lock, flags);
2420 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2421 					/* also clears TSR4_SESSION_ENDED */
2422 
2423 		switch (vcc->qos.txtp.traffic_class) {
2424 			case ATM_UBR:
2425 				he_writel_tsr1(he_dev,
2426 					TSR1_MCR(rate_to_atmf(200000))
2427 					| TSR1_PCR(0), cid);
2428 				break;
2429 			case ATM_CBR:
2430 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2431 				break;
2432 		}
2433 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2434 
2435 		tpd = __alloc_tpd(he_dev);
2436 		if (tpd == NULL) {
2437 			hprintk("close tx __alloc_tpd failed cid 0x%x\n", cid);
2438 			goto close_tx_incomplete;
2439 		}
2440 		tpd->status |= TPD_EOS | TPD_INT;
2441 		tpd->skb = NULL;
2442 		tpd->vcc = vcc;
2443 		wmb();
2444 
2445 		set_current_state(TASK_UNINTERRUPTIBLE);
2446 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2447 		__enqueue_tpd(he_dev, tpd, cid);
2448 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2449 
2450 		timeout = schedule_timeout(30*HZ);
2451 
2452 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2453 		set_current_state(TASK_RUNNING);
2454 
2455 		spin_lock_irqsave(&he_dev->global_lock, flags);
2456 
2457 		if (timeout == 0) {
2458 			hprintk("close tx timeout cid 0x%x\n", cid);
2459 			goto close_tx_incomplete;
2460 		}
2461 
2462 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2463 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2464 			udelay(250);
2465 		}
2466 
2467 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2468 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2469 			udelay(250);
2470 		}
2471 
2472 close_tx_incomplete:
2473 
2474 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2475 			int reg = he_vcc->rc_index;
2476 
2477 			HPRINTK("cs_stper reg = %d\n", reg);
2478 
2479 			if (he_dev->cs_stper[reg].inuse == 0)
2480 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2481 			else
2482 				--he_dev->cs_stper[reg].inuse;
2483 
2484 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2485 		}
2486 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2487 
2488 		HPRINTK("close tx cid 0x%x complete\n", cid);
2489 	}
2490 
2491 	kfree(he_vcc);
2492 
2493 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2494 }
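
/*
 * The tx drain loop above is a bounded exponential backoff: poll
 * sk_wmem_alloc with a doubling sleep (it stops doubling once it
 * reaches 250ms) for at most MAX_RETRY rounds.  The skeleton in
 * isolation (condition_met() is a stand-in for the tx_inuse test):
 */
#ifdef notdef
static int condition_met(void);		/* hypothetical predicate */

static int wait_with_backoff(int max_retry)
{
	int retry = 0, sleep = 1;

	while (!condition_met() && retry < max_retry) {
		msleep(sleep);
		if (sleep < 250)
			sleep *= 2;	/* 1, 2, 4, ... ms */
		++retry;
	}
	return condition_met();
}
#endif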
2495 
2496 static int
2497 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2498 {
2499 	unsigned long flags;
2500 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2501 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2502 	struct he_tpd *tpd;
2503 #ifdef USE_SCATTERGATHER
2504 	int i, slot = 0;
2505 #endif
2506 
2507 #define HE_TPD_BUFSIZE 0xffff
2508 
2509 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2510 
2511 	if ((skb->len > HE_TPD_BUFSIZE) ||
2512 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2513 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2514 		if (vcc->pop)
2515 			vcc->pop(vcc, skb);
2516 		else
2517 			dev_kfree_skb_any(skb);
2518 		atomic_inc(&vcc->stats->tx_err);
2519 		return -EINVAL;
2520 	}
2521 
2522 #ifndef USE_SCATTERGATHER
2523 	if (skb_shinfo(skb)->nr_frags) {
2524 		hprintk("no scatter/gather support\n");
2525 		if (vcc->pop)
2526 			vcc->pop(vcc, skb);
2527 		else
2528 			dev_kfree_skb_any(skb);
2529 		atomic_inc(&vcc->stats->tx_err);
2530 		return -EINVAL;
2531 	}
2532 #endif
2533 	spin_lock_irqsave(&he_dev->global_lock, flags);
2534 
2535 	tpd = __alloc_tpd(he_dev);
2536 	if (tpd == NULL) {
2537 		if (vcc->pop)
2538 			vcc->pop(vcc, skb);
2539 		else
2540 			dev_kfree_skb_any(skb);
2541 		atomic_inc(&vcc->stats->tx_err);
2542 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2543 		return -ENOMEM;
2544 	}
2545 
2546 	if (vcc->qos.aal == ATM_AAL5)
2547 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2548 	else {
2549 		char *pti_clp = (void *) (skb->data + 3);
2550 		int clp, pti;
2551 
2552 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2553 		clp = (*pti_clp & ATM_HDR_CLP);
2554 		tpd->status |= TPD_CELLTYPE(pti);
2555 		if (clp)
2556 			tpd->status |= TPD_CLP;
2557 
2558 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2559 	}
2560 
2561 #ifdef USE_SCATTERGATHER
2562 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2563 				skb_headlen(skb), PCI_DMA_TODEVICE);
2564 	tpd->iovec[slot].len = skb_headlen(skb);
2565 	++slot;
2566 
2567 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2568 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2569 
2570 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2571 			tpd->vcc = vcc;
2572 			tpd->skb = NULL;	/* not the last fragment
2573 						   so don't ->push() yet */
2574 			wmb();
2575 
2576 			__enqueue_tpd(he_dev, tpd, cid);
2577 			tpd = __alloc_tpd(he_dev);
2578 			if (tpd == NULL) {
2579 				if (vcc->pop)
2580 					vcc->pop(vcc, skb);
2581 				else
2582 					dev_kfree_skb_any(skb);
2583 				atomic_inc(&vcc->stats->tx_err);
2584 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2585 				return -ENOMEM;
2586 			}
2587 			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2588 			slot = 0;
2589 		}
2590 
2591 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2592 			(void *) page_address(frag->page) + frag->page_offset,
2593 				frag->size, PCI_DMA_TODEVICE);
2594 		tpd->iovec[slot].len = frag->size;
2595 		++slot;
2596 
2597 	}
2598 
2599 	tpd->iovec[slot - 1].len |= TPD_LST;
2600 #else
2601 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2602 	tpd->length0 = skb->len | TPD_LST;
2603 #endif
2604 	tpd->status |= TPD_INT;
2605 
2606 	tpd->vcc = vcc;
2607 	tpd->skb = skb;
2608 	wmb();
2609 	ATM_SKB(skb)->vcc = vcc;
2610 
2611 	__enqueue_tpd(he_dev, tpd, cid);
2612 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2613 
2614 	atomic_inc(&vcc->stats->tx);
2615 
2616 	return 0;
2617 }
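
/*
 * For AAL0 the skb handed to he_send() is a 52-byte cell (4 header
 * bytes, no HEC, plus 48 payload bytes), and the PTI/CLP bits live in
 * the last header byte -- exactly what the skb->data + 3 arithmetic
 * above extracts before the header is pulled off.  The decode as a
 * standalone sketch:
 */
#ifdef notdef
static void decode_pti_clp(const unsigned char *cell, int *pti, int *clp)
{
	/* header byte 3: bits 3..1 carry PTI, bit 0 carries CLP */
	*pti = (cell[3] & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
	*clp = cell[3] & ATM_HDR_CLP;
}
#endif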
2618 
2619 static int
2620 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2621 {
2622 	unsigned long flags;
2623 	struct he_dev *he_dev = HE_DEV(atm_dev);
2624 	struct he_ioctl_reg reg;
2625 	int err = 0;
2626 
2627 	switch (cmd) {
2628 		case HE_GET_REG:
2629 			if (!capable(CAP_NET_ADMIN))
2630 				return -EPERM;
2631 
2632 			if (copy_from_user(&reg, arg,
2633 					   sizeof(struct he_ioctl_reg)))
2634 				return -EFAULT;
2635 
2636 			spin_lock_irqsave(&he_dev->global_lock, flags);
2637 			switch (reg.type) {
2638 				case HE_REGTYPE_PCI:
2639 					if (reg.addr >= HE_REGMAP_SIZE) {
2640 						err = -EINVAL;
2641 						break;
2642 					}
2643 
2644 					reg.val = he_readl(he_dev, reg.addr);
2645 					break;
2646 				case HE_REGTYPE_RCM:
2647 					reg.val =
2648 						he_readl_rcm(he_dev, reg.addr);
2649 					break;
2650 				case HE_REGTYPE_TCM:
2651 					reg.val =
2652 						he_readl_tcm(he_dev, reg.addr);
2653 					break;
2654 				case HE_REGTYPE_MBOX:
2655 					reg.val =
2656 						he_readl_mbox(he_dev, reg.addr);
2657 					break;
2658 				default:
2659 					err = -EINVAL;
2660 					break;
2661 			}
2662 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2663 			if (err == 0)
2664 				if (copy_to_user(arg, &reg,
2665 							sizeof(struct he_ioctl_reg)))
2666 					return -EFAULT;
2667 			break;
2668 		default:
2669 #ifdef CONFIG_ATM_HE_USE_SUNI
2670 			if (atm_dev->phy && atm_dev->phy->ioctl)
2671 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2672 #else /* CONFIG_ATM_HE_USE_SUNI */
2673 			err = -EINVAL;
2674 #endif /* CONFIG_ATM_HE_USE_SUNI */
2675 			break;
2676 	}
2677 
2678 	return err;
2679 }
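
/*
 * HE_GET_REG above follows the usual privileged register-peek ioctl
 * shape: capability check, copy the request in, read under the lock,
 * copy the result back.  The shape distilled (struct my_reg and
 * read_reg() are stand-ins for he_ioctl_reg and the locked readl):
 */
#ifdef notdef
struct my_reg { unsigned addr, val; };

static unsigned read_reg(unsigned addr);	/* hypothetical locked read */

static int reg_peek_ioctl(void __user *arg)
{
	struct my_reg reg;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	reg.val = read_reg(reg.addr);
	if (copy_to_user(arg, &reg, sizeof(reg)))
		return -EFAULT;
	return 0;
}
#endif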
2680 
2681 static void
2682 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2683 {
2684 	unsigned long flags;
2685 	struct he_dev *he_dev = HE_DEV(atm_dev);
2686 
2687 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2688 
2689 	spin_lock_irqsave(&he_dev->global_lock, flags);
2690 	he_writel(he_dev, val, FRAMER + (addr*4));
2691 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2692 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2693 }
2694 
2695 
2696 static unsigned char
2697 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2698 {
2699 	unsigned long flags;
2700 	struct he_dev *he_dev = HE_DEV(atm_dev);
2701 	unsigned reg;
2702 
2703 	spin_lock_irqsave(&he_dev->global_lock, flags);
2704 	reg = he_readl(he_dev, FRAMER + (addr*4));
2705 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2706 
2707 	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2708 	return reg;
2709 }
2710 
2711 static int
2712 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2713 {
2714 	unsigned long flags;
2715 	struct he_dev *he_dev = HE_DEV(dev);
2716 	int left, i;
2717 #ifdef notdef
2718 	struct he_rbrq *rbrq_tail;
2719 	struct he_tpdrq *tpdrq_head;
2720 	int rbpl_head, rbpl_tail, inuse;
2721 #endif
2722 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2723 
2724 
2725 	left = *pos;
2726 	if (!left--)
2727 		return sprintf(page, "ATM he driver\n");
2728 
2729 	if (!left--)
2730 		return sprintf(page, "%s%s\n\n",
2731 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2732 
2733 	if (!left--)
2734 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2735 
2736 	spin_lock_irqsave(&he_dev->global_lock, flags);
2737 	mcc += he_readl(he_dev, MCC);
2738 	oec += he_readl(he_dev, OEC);
2739 	dcc += he_readl(he_dev, DCC);
2740 	cec += he_readl(he_dev, CEC);
2741 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2742 
2743 	if (!left--)
2744 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2745 							mcc, oec, dcc, cec);
2746 
2747 	if (!left--)
2748 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2749 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2750 
2751 	if (!left--)
2752 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2753 						CONFIG_TPDRQ_SIZE);
2754 
2755 	if (!left--)
2756 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2757 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2758 
2759 	if (!left--)
2760 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2761 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2762 
2763 
2764 #ifdef notdef
2765 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2766 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2767 
2768 	inuse = rbpl_head - rbpl_tail;
2769 	if (inuse < 0)
2770 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2771 	inuse /= sizeof(struct he_rbp);
2772 
2773 	if (!left--)
2774 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2775 						CONFIG_RBPL_SIZE, inuse);
2776 #endif
2777 
2778 	if (!left--)
2779 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2780 
2781 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2782 		if (!left--)
2783 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2784 						he_dev->cs_stper[i].pcr,
2785 						he_dev->cs_stper[i].inuse);
2786 
2787 	if (!left--)
2788 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2789 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2790 
2791 	return 0;
2792 }
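
/*
 * he_proc_read() uses the classic one-line-per-call proc_read style
 * of the ATM layer: *pos names the line to emit, each "if (!left--)"
 * counts down to it, and returning 0 signals EOF.  The bare pattern:
 */
#ifdef notdef
static int my_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	int left = *pos;

	if (!left--)
		return sprintf(page, "first line\n");
	if (!left--)
		return sprintf(page, "second line\n");
	return 0;			/* past the last line: EOF */
}
#endif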
2793 
2794 /* eeprom routines  -- see 4.7 */
2795 
2796 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2797 {
2798 	u32 val = 0, tmp_read = 0;
2799 	int i, j = 0;
2800 	u8 byte_read = 0;
2801 
2802 	val = readl(he_dev->membase + HOST_CNTL);
2803 	val &= 0xFFFFE0FF;
2804 
2805 	/* Turn on write enable */
2806 	val |= 0x800;
2807 	he_writel(he_dev, val, HOST_CNTL);
2808 
2809 	/* Send READ instruction */
2810 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2811 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2812 		udelay(EEPROM_DELAY);
2813 	}
2814 
2815 	/* Next, we need to send the byte address to read from */
2816 	for (i = 7; i >= 0; i--) {
2817 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2818 		udelay(EEPROM_DELAY);
2819 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2820 		udelay(EEPROM_DELAY);
2821 	}
2822 
2823 	j = 0;
2824 
2825 	val &= 0xFFFFF7FF;      /* Turn off write enable */
2826 	he_writel(he_dev, val, HOST_CNTL);
2827 
2828 	/* Now, we can read data from the EEPROM by clocking it in */
2829 	for (i = 7; i >= 0; i--) {
2830 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2831 		udelay(EEPROM_DELAY);
2832 		tmp_read = he_readl(he_dev, HOST_CNTL);
2833 		byte_read |= (unsigned char)
2834 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2835 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2836 		udelay(EEPROM_DELAY);
2837 	}
2838 
2839 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2840 	udelay(EEPROM_DELAY);
2841 
2842 	return byte_read;
2843 }
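
/*
 * read_prom_byte() bit-bangs the serial EEPROM through HOST_CNTL:
 * each address bit is presented across two clock phases, then eight
 * data bits are clocked back and reassembled MSB first.  The
 * reassembly step in isolation (get_dout_bit() is a hypothetical
 * stand-in for sampling ID_DOUT between clock edges):
 */
#ifdef notdef
static int get_dout_bit(void);		/* hypothetical: sample ID_DOUT */

static u8 clock_in_byte(void)
{
	u8 byte = 0;
	int i;

	for (i = 7; i >= 0; i--)	/* most significant bit first */
		byte |= get_dout_bit() << i;
	return byte;
}
#endif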
2844 
2845 MODULE_LICENSE("GPL");
2846 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2847 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2848 module_param(disable64, bool, 0);
2849 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2850 module_param(nvpibits, short, 0);
2851 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2852 module_param(nvcibits, short, 0);
2853 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2854 module_param(rx_skb_reserve, short, 0);
2855 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2856 module_param(irq_coalesce, bool, 0);
2857 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2858 module_param(sdh, bool, 0);
2859 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2860 
2861 static struct pci_device_id he_pci_tbl[] = {
2862 	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2863 	{ 0, }
2864 };
2865 
2866 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2867 
2868 static struct pci_driver he_driver = {
2869 	.name =		"he",
2870 	.probe =	he_init_one,
2871 	.remove =	he_remove_one,
2872 	.id_table =	he_pci_tbl,
2873 };
2874 
2875 static int __init he_init(void)
2876 {
2877 	return pci_register_driver(&he_driver);
2878 }
2879 
2880 static void __exit he_cleanup(void)
2881 {
2882 	pci_unregister_driver(&he_driver);
2883 }
2884 
2885 module_init(he_init);
2886 module_exit(he_cleanup);
2887