xref: /openbmc/linux/drivers/atm/he.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
1 /* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
2 
3 /*
4 
5   he.c
6 
7   ForeRunnerHE ATM Adapter driver for ATM on Linux
8   Copyright (C) 1999-2001  Naval Research Laboratory
9 
10   This library is free software; you can redistribute it and/or
11   modify it under the terms of the GNU Lesser General Public
12   License as published by the Free Software Foundation; either
13   version 2.1 of the License, or (at your option) any later version.
14 
15   This library is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   Lesser General Public License for more details.
19 
20   You should have received a copy of the GNU Lesser General Public
21   License along with this library; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23 
24 */
25 
26 /*
27 
28   he.c
29 
30   ForeRunnerHE ATM Adapter driver for ATM on Linux
31   Copyright (C) 1999-2001  Naval Research Laboratory
32 
33   Permission to use, copy, modify and distribute this software and its
34   documentation is hereby granted, provided that both the copyright
35   notice and this permission notice appear in all copies of the software,
36   derivative works or modified versions, and any portions thereof, and
37   that both notices appear in supporting documentation.
38 
39   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
40   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
41   RESULTING FROM THE USE OF THIS SOFTWARE.
42 
43   This driver was written using the "Programmer's Reference Manual for
44   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45 
46   AUTHORS:
47 	chas williams <chas@cmf.nrl.navy.mil>
48 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
49 
50   NOTES:
51 	4096 supported 'connections'
52 	group 0 is used for all traffic
53 	interrupt queue 0 is used for all interrupts
54 	aal0 support (based on work from ulrich.u.muller@nokia.com)
55 
56  */
57 
58 #include <linux/module.h>
59 #include <linux/kernel.h>
60 #include <linux/skbuff.h>
61 #include <linux/pci.h>
62 #include <linux/errno.h>
63 #include <linux/types.h>
64 #include <linux/string.h>
65 #include <linux/delay.h>
66 #include <linux/init.h>
67 #include <linux/mm.h>
68 #include <linux/sched.h>
69 #include <linux/timer.h>
70 #include <linux/interrupt.h>
71 #include <linux/dma-mapping.h>
72 #include <asm/io.h>
73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h>
75 
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
79 
80 #define USE_TASKLET
81 #undef USE_SCATTERGATHER
82 #undef USE_CHECKSUM_HW			/* still confused about this */
83 #define USE_RBPS
84 #undef USE_RBPS_POOL			/* if memory is tight try this */
85 #undef USE_RBPL_POOL			/* if memory is tight try this */
86 #define USE_TPD_POOL
87 /* #undef CONFIG_ATM_HE_USE_SUNI */
88 /* #undef HE_DEBUG */
89 
90 #include "he.h"
91 #include "suni.h"
92 #include <linux/atm_he.h>
93 
94 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
95 
96 #ifdef HE_DEBUG
97 #define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
98 #else /* !HE_DEBUG */
99 #define HPRINTK(fmt,args...)	do { } while (0)
100 #endif /* HE_DEBUG */
101 
102 /* version definition */
103 
104 static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
105 
106 /* declarations */
107 
108 static int he_open(struct atm_vcc *vcc);
109 static void he_close(struct atm_vcc *vcc);
110 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
111 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
112 static irqreturn_t he_irq_handler(int irq, void *dev_id);
113 static void he_tasklet(unsigned long data);
114 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
115 static int he_start(struct atm_dev *dev);
116 static void he_stop(struct he_dev *dev);
117 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
118 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
119 
120 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
121 
122 /* globals */
123 
124 static struct he_dev *he_devs;
125 static int disable64;
126 static short nvpibits = -1;
127 static short nvcibits = -1;
128 static short rx_skb_reserve = 16;
129 static int irq_coalesce = 1;
130 static int sdh = 0;
131 
132 /* Read from EEPROM = 0000 0011b */
133 static unsigned int readtab[] = {
134 	CS_HIGH | CLK_HIGH,
135 	CS_LOW | CLK_LOW,
136 	CLK_HIGH,               /* 0 */
137 	CLK_LOW,
138 	CLK_HIGH,               /* 0 */
139 	CLK_LOW,
140 	CLK_HIGH,               /* 0 */
141 	CLK_LOW,
142 	CLK_HIGH,               /* 0 */
143 	CLK_LOW,
144 	CLK_HIGH,               /* 0 */
145 	CLK_LOW,
146 	CLK_HIGH,               /* 0 */
147 	CLK_LOW | SI_HIGH,
148 	CLK_HIGH | SI_HIGH,     /* 1 */
149 	CLK_LOW | SI_HIGH,
150 	CLK_HIGH | SI_HIGH      /* 1 */
151 };
152 
153 /* Clock to read from/write to the EEPROM */
154 static unsigned int clocktab[] = {
155 	CLK_LOW,
156 	CLK_HIGH,
157 	CLK_LOW,
158 	CLK_HIGH,
159 	CLK_LOW,
160 	CLK_HIGH,
161 	CLK_LOW,
162 	CLK_HIGH,
163 	CLK_LOW,
164 	CLK_HIGH,
165 	CLK_LOW,
166 	CLK_HIGH,
167 	CLK_LOW,
168 	CLK_HIGH,
169 	CLK_LOW,
170 	CLK_HIGH,
171 	CLK_LOW
172 };
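/*
 * A sketch of how these tables are presumably used (inferred from the
 * patterns themselves, not from the manual): read_prom_byte() writes
 * each entry in turn to the PROM control register, bit-banging the
 * serial EEPROM -- readtab clocks out the READ opcode (0000 0011b,
 * MSB first, on SI) while clocktab merely toggles CLK to shift the
 * address and data bits through.
 */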
173 
174 static struct atmdev_ops he_ops =
175 {
176 	.open =		he_open,
177 	.close =	he_close,
178 	.ioctl =	he_ioctl,
179 	.send =		he_send,
180 	.phy_put =	he_phy_put,
181 	.phy_get =	he_phy_get,
182 	.proc_read =	he_proc_read,
183 	.owner =	THIS_MODULE
184 };
185 
186 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
187 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
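/*
 * Note: the wmb() in he_writel() only orders CPU stores; where a posted
 * PCI write must actually reach the card before continuing, the code
 * reads a register back explicitly (see the CON_DAT readback in
 * he_writel_internal() below).
 */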
188 
189 /* section 2.12 connection memory access */
190 
191 static __inline__ void
192 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
193 								unsigned flags)
194 {
195 	he_writel(he_dev, val, CON_DAT);
196 	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
197 	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
198 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
199 }
200 
201 #define he_writel_rcm(dev, val, reg) 				\
202 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
203 
204 #define he_writel_tcm(dev, val, reg) 				\
205 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
206 
207 #define he_writel_mbox(dev, val, reg) 				\
208 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
209 
210 static unsigned
211 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
212 {
213 	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
214 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
215 	return he_readl(he_dev, CON_DAT);
216 }
217 
218 #define he_readl_rcm(dev, reg) \
219 			he_readl_internal(dev, reg, CON_CTL_RCM)
220 
221 #define he_readl_tcm(dev, reg) \
222 			he_readl_internal(dev, reg, CON_CTL_TCM)
223 
224 #define he_readl_mbox(dev, reg) \
225 			he_readl_internal(dev, reg, CON_CTL_MBOX)
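/*
 * Usage sketch: the TCM/RCM/mailbox windows are reached through these
 * wrappers, e.g. reading transmit state register 0 of connection `cid':
 *
 *	unsigned tsr0 = he_readl_tcm(he_dev, CONFIG_TSRA | (cid << 3));
 *
 * which is exactly what the he_readl_tsr0() macro below expands to.
 */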
226 
227 
228 /* figure 2.2 connection id */
229 
230 #define he_mkcid(dev, vpi, vci)	((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
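/*
 * Illustrative example (the vpi/vci split is tunable via the nvpibits
 * and nvcibits module parameters): with a 2-bit vpi and 10-bit vci
 * field, vpi 1 / vci 32 packs to (1 << 10) | 32 = 0x420.  The 0x1fff
 * mask bounds the cid to the connection memory index range.
 */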
231 
232 /* 2.5.1 per connection transmit state registers */
233 
234 #define he_writel_tsr0(dev, val, cid) \
235 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
236 #define he_readl_tsr0(dev, cid) \
237 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
238 
239 #define he_writel_tsr1(dev, val, cid) \
240 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
241 
242 #define he_writel_tsr2(dev, val, cid) \
243 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
244 
245 #define he_writel_tsr3(dev, val, cid) \
246 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
247 
248 #define he_writel_tsr4(dev, val, cid) \
249 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
250 
251 	/* from page 2-20
252 	 *
253 	 * NOTE While the transmit connection is active, bits 23 through 0
254 	 *      of this register must not be written by the host.  Byte
255 	 *      enables should be used during normal operation when writing
256 	 *      the most significant byte.
257 	 */
258 
259 #define he_writel_tsr4_upper(dev, val, cid) \
260 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
261 							CON_CTL_TCM \
262 							| CON_BYTE_DISABLE_2 \
263 							| CON_BYTE_DISABLE_1 \
264 							| CON_BYTE_DISABLE_0)
265 
266 #define he_readl_tsr4(dev, cid) \
267 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
268 
269 #define he_writel_tsr5(dev, val, cid) \
270 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
271 
272 #define he_writel_tsr6(dev, val, cid) \
273 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
274 
275 #define he_writel_tsr7(dev, val, cid) \
276 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
277 
278 
279 #define he_writel_tsr8(dev, val, cid) \
280 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
281 
282 #define he_writel_tsr9(dev, val, cid) \
283 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
284 
285 #define he_writel_tsr10(dev, val, cid) \
286 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
287 
288 #define he_writel_tsr11(dev, val, cid) \
289 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
290 
291 
292 #define he_writel_tsr12(dev, val, cid) \
293 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
294 
295 #define he_writel_tsr13(dev, val, cid) \
296 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
297 
298 
299 #define he_writel_tsr14(dev, val, cid) \
300 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
301 
302 #define he_writel_tsr14_upper(dev, val, cid) \
303 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
304 							CON_CTL_TCM \
305 							| CON_BYTE_DISABLE_2 \
306 							| CON_BYTE_DISABLE_1 \
307 							| CON_BYTE_DISABLE_0)
308 
309 /* 2.7.1 per connection receive state registers */
310 
311 #define he_writel_rsr0(dev, val, cid) \
312 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
313 #define he_readl_rsr0(dev, cid) \
314 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
315 
316 #define he_writel_rsr1(dev, val, cid) \
317 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
318 
319 #define he_writel_rsr2(dev, val, cid) \
320 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
321 
322 #define he_writel_rsr3(dev, val, cid) \
323 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
324 
325 #define he_writel_rsr4(dev, val, cid) \
326 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
327 
328 #define he_writel_rsr5(dev, val, cid) \
329 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
330 
331 #define he_writel_rsr6(dev, val, cid) \
332 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
333 
334 #define he_writel_rsr7(dev, val, cid) \
335 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
336 
337 static __inline__ struct atm_vcc*
338 __find_vcc(struct he_dev *he_dev, unsigned cid)
339 {
340 	struct hlist_head *head;
341 	struct atm_vcc *vcc;
342 	struct hlist_node *node;
343 	struct sock *s;
344 	short vpi;
345 	int vci;
346 
347 	vpi = cid >> he_dev->vcibits;
348 	vci = cid & ((1 << he_dev->vcibits) - 1);
349 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
350 
351 	sk_for_each(s, node, head) {
352 		vcc = atm_sk(s);
353 		if (vcc->dev == he_dev->atm_dev &&
354 		    vcc->vci == vci && vcc->vpi == vpi &&
355 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
356 				return vcc;
357 		}
358 	}
359 	return NULL;
360 }
361 
362 static int __devinit
363 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
364 {
365 	struct atm_dev *atm_dev = NULL;
366 	struct he_dev *he_dev = NULL;
367 	int err = 0;
368 
369 	printk(KERN_INFO "he: %s\n", version);
370 
371 	if (pci_enable_device(pci_dev))
372 		return -EIO;
373 	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
374 		printk(KERN_WARNING "he: no suitable dma available\n");
375 		err = -EIO;
376 		goto init_one_failure;
377 	}
378 
379 	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
380 	if (!atm_dev) {
381 		err = -ENODEV;
382 		goto init_one_failure;
383 	}
384 	pci_set_drvdata(pci_dev, atm_dev);
385 
386 	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
388 	if (!he_dev) {
389 		err = -ENOMEM;
390 		goto init_one_failure;
391 	}
392 	he_dev->pci_dev = pci_dev;
393 	he_dev->atm_dev = atm_dev;
394 	atm_dev->dev_data = he_dev;	/* one assignment suffices; he_dev->atm_dev is atm_dev */
396 	he_dev->number = atm_dev->number;
397 	if (he_start(atm_dev)) {
398 		he_stop(he_dev);
399 		err = -ENODEV;
400 		goto init_one_failure;
401 	}
402 	he_dev->next = NULL;
403 	if (he_devs)
404 		he_dev->next = he_devs;
405 	he_devs = he_dev;
406 	return 0;
407 
408 init_one_failure:
409 	if (atm_dev)
410 		atm_dev_deregister(atm_dev);
411 	kfree(he_dev);
412 	pci_disable_device(pci_dev);
413 	return err;
414 }
415 
416 static void __devexit
417 he_remove_one(struct pci_dev *pci_dev)
418 {
419 	struct atm_dev *atm_dev;
420 	struct he_dev *he_dev;
421 
422 	atm_dev = pci_get_drvdata(pci_dev);
423 	he_dev = HE_DEV(atm_dev);
424 
425 	/* remove this device from the he_devs list */
	if (he_devs == he_dev)
		he_devs = he_dev->next;
	else {
		struct he_dev *walk;

		for (walk = he_devs; walk && walk->next != he_dev; walk = walk->next)
			;
		if (walk)
			walk->next = he_dev->next;
	}
426 
427 	he_stop(he_dev);
428 	atm_dev_deregister(atm_dev);
429 	kfree(he_dev);
430 
431 	pci_set_drvdata(pci_dev, NULL);
432 	pci_disable_device(pci_dev);
433 }
434 
435 
436 static unsigned
437 rate_to_atmf(unsigned rate)		/* cps to atm forum format */
438 {
439 #define NONZERO (1 << 14)
440 
441 	unsigned exp = 0;
442 
443 	if (rate == 0)
444 		return 0;
445 
446 	rate <<= 9;
447 	while (rate > 0x3ff) {
448 		++exp;
449 		rate >>= 1;
450 	}
451 
452 	return (NONZERO | (exp << 9) | (rate & 0x1ff));
453 }
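/*
 * Worked example: rate_to_atmf(10) shifts 10 << 9 = 5120, halves three
 * times down to 640, giving exp = 3 and a mantissa of 640 - 512 = 128;
 * the result is 0x4000 | (3 << 9) | 128 = 0x4680.  Decoding recovers
 * (1 + 128/512) * 2^3 = 10 cells per second.
 */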
454 
455 static void __devinit
456 he_init_rx_lbfp0(struct he_dev *he_dev)
457 {
458 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
459 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
460 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
461 	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
462 
463 	lbufd_index = 0;
464 	lbm_offset = he_readl(he_dev, RCMLBM_BA);
465 
466 	he_writel(he_dev, lbufd_index, RLBF0_H);
467 
468 	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
469 		lbufd_index += 2;
470 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
471 
472 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
473 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
474 
475 		if (++lbuf_count == lbufs_per_row) {
476 			lbuf_count = 0;
477 			row_offset += he_dev->bytes_per_row;
478 		}
479 		lbm_offset += 4;
480 	}
481 
482 	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
483 	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
484 }
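/*
 * Sizing example, using the constants set up in he_start() below: with
 * 4 cells per lbuf, lbuf_bufsize is 4 * 48 = 192 bytes, a HE155 row of
 * 20 cells holds 5 lbufs, and lbuf_addr advances in 192/32 = 6 units
 * of 32-byte local buffer memory per buffer.
 */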
485 
486 static void __devinit
487 he_init_rx_lbfp1(struct he_dev *he_dev)
488 {
489 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
490 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
491 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
492 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
493 
494 	lbufd_index = 1;
495 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
496 
497 	he_writel(he_dev, lbufd_index, RLBF1_H);
498 
499 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
500 		lbufd_index += 2;
501 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
502 
503 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
504 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
505 
506 		if (++lbuf_count == lbufs_per_row) {
507 			lbuf_count = 0;
508 			row_offset += he_dev->bytes_per_row;
509 		}
510 		lbm_offset += 4;
511 	}
512 
513 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
514 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
515 }
516 
517 static void __devinit
518 he_init_tx_lbfp(struct he_dev *he_dev)
519 {
520 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
521 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
522 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
523 	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
524 
525 	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
526 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
527 
528 	he_writel(he_dev, lbufd_index, TLBF_H);
529 
530 	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
531 		lbufd_index += 1;
532 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
533 
534 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
535 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
536 
537 		if (++lbuf_count == lbufs_per_row) {
538 			lbuf_count = 0;
539 			row_offset += he_dev->bytes_per_row;
540 		}
541 		lbm_offset += 2;
542 	}
543 
544 	he_writel(he_dev, lbufd_index - 1, TLBF_T);
545 }
546 
547 static int __devinit
548 he_init_tpdrq(struct he_dev *he_dev)
549 {
550 	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
551 		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
552 	if (he_dev->tpdrq_base == NULL) {
553 		hprintk("failed to alloc tpdrq\n");
554 		return -ENOMEM;
555 	}
556 	memset(he_dev->tpdrq_base, 0,
557 				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
558 
559 	he_dev->tpdrq_tail = he_dev->tpdrq_base;
560 	he_dev->tpdrq_head = he_dev->tpdrq_base;
561 
562 	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
563 	he_writel(he_dev, 0, TPDRQ_T);
564 	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
565 
566 	return 0;
567 }
568 
569 static void __devinit
570 he_init_cs_block(struct he_dev *he_dev)
571 {
572 	unsigned clock, rate, delta;
573 	int reg;
574 
575 	/* 5.1.7 cs block initialization */
576 
577 	for (reg = 0; reg < 0x20; ++reg)
578 		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
579 
580 	/* rate grid timer reload values */
581 
582 	clock = he_is622(he_dev) ? 66667000 : 50000000;
583 	rate = he_dev->atm_dev->link_rate;
584 	delta = rate / 16 / 2;
585 
586 	for (reg = 0; reg < 0x10; ++reg) {
587 		/* 2.4 internal transmit function
588 		 *
589 	 	 * we initialize the first row in the rate grid.
590 		 * values are period (in clock cycles) of timer
591 		 */
592 		unsigned period = clock / rate;
593 
594 		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
595 		rate -= delta;
596 	}
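	/*
	 * Example (illustrative): on a HE155, clock = 50000000 and rate
	 * starts at ATM_OC3_PCR (~353207 cps), so the first reload value
	 * is about 141 cycles; each step lowers rate by link_rate/32,
	 * ending near half the link rate at the last of the 16 entries.
	 */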
597 
598 	if (he_is622(he_dev)) {
599 		/* table 5.2 (4 cells per lbuf) */
600 		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
601 		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
602 		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
603 		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
604 		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
605 
606 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
607 		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
608 		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
609 		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
610 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
611 		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
612 		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
613 
614 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
615 
616 		/* table 5.8 */
617 		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
618 		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
619 		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
620 		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
621 		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
622 		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
623 
624 		/* table 5.9 */
625 		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
626 		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
627 	} else {
628 		/* table 5.1 (4 cells per lbuf) */
629 		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
630 		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
631 		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
632 		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
633 		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
634 
635 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
636 		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
637 		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
638 		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
639 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
640 		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
641 		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
642 
643 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
644 
645 		/* table 5.8 */
646 		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
647 		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
648 		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
649 		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
650 		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
651 		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
652 
653 		/* table 5.9 */
654 		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
655 		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
656 	}
657 
658 	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
659 
660 	for (reg = 0; reg < 0x8; ++reg)
661 		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
662 
663 }
664 
665 static int __devinit
666 he_init_cs_block_rcm(struct he_dev *he_dev)
667 {
668 	unsigned (*rategrid)[16][16];
669 	unsigned rate, delta;
670 	int i, j, reg;
671 
672 	unsigned rate_atmf, exp, man;
673 	unsigned long long rate_cps;
674 	int mult, buf, buf_limit = 4;
675 
676 	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
677 	if (!rategrid)
678 		return -ENOMEM;
679 
680 	/* initialize rate grid group table */
681 
682 	for (reg = 0x0; reg < 0xff; ++reg)
683 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
684 
685 	/* initialize rate controller groups */
686 
687 	for (reg = 0x100; reg < 0x1ff; ++reg)
688 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
689 
690 	/* initialize tNrm lookup table */
691 
692 	/* the manual makes reference to a routine in a sample driver
693 	   for proper configuration; fortunately, we only need this
694 	   in order to support abr connections */
695 
696 	/* initialize rate to group table */
697 
698 	rate = he_dev->atm_dev->link_rate;
699 	delta = rate / 32;
700 
701 	/*
702 	 * 2.4 transmit internal functions
703 	 *
704 	 * we construct a copy of the rate grid used by the scheduler
705 	 * in order to construct the rate to group table below
706 	 */
707 
708 	for (j = 0; j < 16; j++) {
709 		(*rategrid)[0][j] = rate;
710 		rate -= delta;
711 	}
712 
713 	for (i = 1; i < 16; i++)
714 		for (j = 0; j < 16; j++)
715 			if (i > 14)
716 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
717 			else
718 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
719 
720 	/*
721 	 * 2.4 transmit internal function
722 	 *
723 	 * this table maps the upper 5 bits of exponent and mantissa
724 	 * of the atm forum representation of the rate into an index
725 	 * on rate grid
726 	 */
727 
728 	rate_atmf = 0;
729 	while (rate_atmf < 0x400) {
730 		man = (rate_atmf & 0x1f) << 4;
731 		exp = rate_atmf >> 5;
732 
733 		/*
734 			instead of '/ 512', use '>> 9' to prevent a call
735 			to __udivdi3 (the libgcc 64-bit divide) on x86 platforms
736 		*/
737 		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
738 
739 		if (rate_cps < 10)
740 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
741 
742 		for (i = 255; i > 0; i--)
743 			if ((*rategrid)[i/16][i%16] >= rate_cps)
744 				break;	 /* pick nearest rate instead? */
745 
746 		/*
747 		 * each table entry is 16 bits: (rate grid index (8 bits)
748 		 * and a buffer limit (8 bits)
749 		 * there are two table entries in each 32-bit register
750 		 */
751 
752 #ifdef notdef
753 		buf = rate_cps * he_dev->tx_numbuffs /
754 				(he_dev->atm_dev->link_rate * 2);
755 #else
756 		/* cruder than the exact version above, but avoids __udivdi3 and is mostly correct */
757 		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
758 		if (rate_cps > (272 * mult))
759 			buf = 4;
760 		else if (rate_cps > (204 * mult))
761 			buf = 3;
762 		else if (rate_cps > (136 * mult))
763 			buf = 2;
764 		else if (rate_cps > (68 * mult))
765 			buf = 1;
766 		else
767 			buf = 0;
768 #endif
769 		if (buf > buf_limit)
770 			buf = buf_limit;
771 		reg = (reg << 16) | ((i << 8) | buf);
772 
773 #define RTGTBL_OFFSET 0x400
774 
775 		if (rate_atmf & 0x1)
776 			he_writel_rcm(he_dev, reg,
777 				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
778 
779 		++rate_atmf;
780 	}
781 
782 	kfree(rategrid);
783 	return 0;
784 }
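/*
 * Shape of the grid built above (illustrative): row 0 steps down from
 * link_rate by link_rate/32 per column, rows 1-14 each halve the row
 * above, and row 15 quarters row 14, so the 256 entries span the full
 * link rate down to a few cells per second.  The rate-to-group table
 * then packs two 16-bit ((grid index << 8) | buffer limit) entries per
 * 32-bit RCM word starting at RTGTBL_OFFSET.
 */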
785 
786 static int __devinit
787 he_init_group(struct he_dev *he_dev, int group)
788 {
789 	int i;
790 
791 #ifdef USE_RBPS
792 	/* small buffer pool */
793 #ifdef USE_RBPS_POOL
794 	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
795 			CONFIG_RBPS_BUFSIZE, 8, 0);
796 	if (he_dev->rbps_pool == NULL) {
797 		hprintk("unable to create rbps pages\n");
798 		return -ENOMEM;
799 	}
800 #else /* !USE_RBPS_POOL */
801 	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
802 		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
803 	if (he_dev->rbps_pages == NULL) {
804 		hprintk("unable to create rbps page pool\n");
805 		return -ENOMEM;
806 	}
807 #endif /* USE_RBPS_POOL */
808 
809 	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
810 		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
811 	if (he_dev->rbps_base == NULL) {
812 		hprintk("failed to alloc rbps\n");
813 		return -ENOMEM;
814 	}
815 	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
816 	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}
817 
818 	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
819 		dma_addr_t dma_handle;
820 		void *cpuaddr;
821 
822 #ifdef USE_RBPS_POOL
823 		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
824 		if (cpuaddr == NULL)
825 			return -ENOMEM;
826 #else
827 		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
828 		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
829 #endif
830 
831 		he_dev->rbps_virt[i].virt = cpuaddr;
832 		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
833 		he_dev->rbps_base[i].phys = dma_handle;
834 
835 	}
836 	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
837 
838 	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
839 	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
840 						G0_RBPS_T + (group * 32));
841 	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
842 						G0_RBPS_BS + (group * 32));
843 	he_writel(he_dev,
844 			RBP_THRESH(CONFIG_RBPS_THRESH) |
845 			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
846 			RBP_INT_ENB,
847 						G0_RBPS_QI + (group * 32));
848 #else /* !USE_RBPS */
849 	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
850 	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
851 	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
852 	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
853 						G0_RBPS_BS + (group * 32));
854 #endif /* USE_RBPS */
855 
856 	/* large buffer pool */
857 #ifdef USE_RBPL_POOL
858 	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
859 			CONFIG_RBPL_BUFSIZE, 8, 0);
860 	if (he_dev->rbpl_pool == NULL) {
861 		hprintk("unable to create rbpl pool\n");
862 		return -ENOMEM;
863 	}
864 #else /* !USE_RBPL_POOL */
865 	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
866 		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
867 	if (he_dev->rbpl_pages == NULL) {
868 		hprintk("unable to create rbpl pages\n");
869 		return -ENOMEM;
870 	}
871 #endif /* USE_RBPL_POOL */
872 
873 	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
874 		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
875 	if (he_dev->rbpl_base == NULL) {
876 		hprintk("failed to alloc rbpl\n");
877 		return -ENOMEM;
878 	}
879 	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
880 	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}
881 
882 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
883 		dma_addr_t dma_handle;
884 		void *cpuaddr;
885 
886 #ifdef USE_RBPL_POOL
887 		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
888 		if (cpuaddr == NULL)
889 			return -ENOMEM;
890 #else
891 		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
892 		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
893 #endif
894 
895 		he_dev->rbpl_virt[i].virt = cpuaddr;
896 		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
897 		he_dev->rbpl_base[i].phys = dma_handle;
898 	}
899 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
900 
901 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
902 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
903 						G0_RBPL_T + (group * 32));
904 	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
905 						G0_RBPL_BS + (group * 32));
906 	he_writel(he_dev,
907 			RBP_THRESH(CONFIG_RBPL_THRESH) |
908 			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
909 			RBP_INT_ENB,
910 						G0_RBPL_QI + (group * 32));
911 
912 	/* rx buffer ready queue */
913 
914 	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
915 		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
916 	if (he_dev->rbrq_base == NULL) {
917 		hprintk("failed to allocate rbrq\n");
918 		return -ENOMEM;
919 	}
920 	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
921 
922 	he_dev->rbrq_head = he_dev->rbrq_base;
923 	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
924 	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
925 	he_writel(he_dev,
926 		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
927 						G0_RBRQ_Q + (group * 16));
928 	if (irq_coalesce) {
929 		hprintk("coalescing interrupts\n");
930 		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
931 						G0_RBRQ_I + (group * 16));
932 	} else
933 		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
934 						G0_RBRQ_I + (group * 16));
935 
936 	/* tx buffer ready queue */
937 
938 	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
939 		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
940 	if (he_dev->tbrq_base == NULL) {
941 		hprintk("failed to allocate tbrq\n");
942 		return -ENOMEM;
943 	}
944 	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
945 
946 	he_dev->tbrq_head = he_dev->tbrq_base;
947 
948 	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
949 	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
950 	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
951 	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
952 
953 	return 0;
954 }
955 
956 static int __devinit
957 he_init_irq(struct he_dev *he_dev)
958 {
959 	int i;
960 
961 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
962 		    end of the interrupt queue */
963 
964 	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
965 			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
966 	if (he_dev->irq_base == NULL) {
967 		hprintk("failed to allocate irq\n");
968 		return -ENOMEM;
969 	}
970 	he_dev->irq_tailoffset = (unsigned *)
971 					&he_dev->irq_base[CONFIG_IRQ_SIZE];
972 	*he_dev->irq_tailoffset = 0;
973 	he_dev->irq_head = he_dev->irq_base;
974 	he_dev->irq_tail = he_dev->irq_base;
975 
976 	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
977 		he_dev->irq_base[i].isw = ITYPE_INVALID;
978 
979 	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
980 	he_writel(he_dev,
981 		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
982 								IRQ0_HEAD);
983 	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
984 	he_writel(he_dev, 0x0, IRQ0_DATA);
985 
986 	he_writel(he_dev, 0x0, IRQ1_BASE);
987 	he_writel(he_dev, 0x0, IRQ1_HEAD);
988 	he_writel(he_dev, 0x0, IRQ1_CNTL);
989 	he_writel(he_dev, 0x0, IRQ1_DATA);
990 
991 	he_writel(he_dev, 0x0, IRQ2_BASE);
992 	he_writel(he_dev, 0x0, IRQ2_HEAD);
993 	he_writel(he_dev, 0x0, IRQ2_CNTL);
994 	he_writel(he_dev, 0x0, IRQ2_DATA);
995 
996 	he_writel(he_dev, 0x0, IRQ3_BASE);
997 	he_writel(he_dev, 0x0, IRQ3_HEAD);
998 	he_writel(he_dev, 0x0, IRQ3_CNTL);
999 	he_writel(he_dev, 0x0, IRQ3_DATA);
1000 
1001 	/* 2.9.3.2 interrupt queue mapping registers */
1002 
1003 	he_writel(he_dev, 0x0, GRP_10_MAP);
1004 	he_writel(he_dev, 0x0, GRP_32_MAP);
1005 	he_writel(he_dev, 0x0, GRP_54_MAP);
1006 	he_writel(he_dev, 0x0, GRP_76_MAP);
1007 
1008 	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
1009 		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
1010 		return -EINVAL;
1011 	}
1012 
1013 	he_dev->irq = he_dev->pci_dev->irq;
1014 
1015 	return 0;
1016 }
1017 
1018 static int __devinit
1019 he_start(struct atm_dev *dev)
1020 {
1021 	struct he_dev *he_dev;
1022 	struct pci_dev *pci_dev;
1023 	unsigned long membase;
1024 
1025 	u16 command;
1026 	u32 gen_cntl_0, host_cntl, lb_swap;
1027 	u8 cache_size, timer;
1028 
1029 	unsigned err;
1030 	unsigned int status, reg;
1031 	int i, group;
1032 
1033 	he_dev = HE_DEV(dev);
1034 	pci_dev = he_dev->pci_dev;
1035 
1036 	membase = pci_resource_start(pci_dev, 0);
1037 	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
1038 
1039 	/*
1040 	 * pci bus controller initialization
1041 	 */
1042 
1043 	/* 4.3 pci bus controller-specific initialization */
1044 	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1045 		hprintk("can't read GEN_CNTL_0\n");
1046 		return -EINVAL;
1047 	}
1048 	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1049 	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1050 		hprintk("can't write GEN_CNTL_0.\n");
1051 		return -EINVAL;
1052 	}
1053 
1054 	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1055 		hprintk("can't read PCI_COMMAND.\n");
1056 		return -EINVAL;
1057 	}
1058 
1059 	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1060 	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1061 		hprintk("can't enable memory.\n");
1062 		return -EINVAL;
1063 	}
1064 
1065 	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1066 		hprintk("can't read cache line size?\n");
1067 		return -EINVAL;
1068 	}
1069 
1070 	if (cache_size < 16) {
1071 		cache_size = 16;
1072 		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1073 			hprintk("can't set cache line size to %d\n", cache_size);
1074 	}
1075 
1076 	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1077 		hprintk("can't read latency timer?\n");
1078 		return -EINVAL;
1079 	}
1080 
1081 	/* from table 3.9
1082 	 *
1083 	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1084 	 *
1085 	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1086 	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1087 	 *
1088 	 */
1089 #define LAT_TIMER 209
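	/* apparently 1 + 16 + 1536/8 = 209, the worst (622 Mbit, 64-bit bus) case */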
1090 	if (timer < LAT_TIMER) {
1091 		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1092 		timer = LAT_TIMER;
1093 		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1094 			hprintk("can't set latency timer to %d\n", timer);
1095 	}
1096 
1097 	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1098 		hprintk("can't set up page mapping\n");
1099 		return -EINVAL;
1100 	}
1101 
1102 	/* 4.4 card reset */
1103 	he_writel(he_dev, 0x0, RESET_CNTL);
1104 	he_writel(he_dev, 0xff, RESET_CNTL);
1105 
1106 	mdelay(16);		/* 16 ms; udelay() can overflow with arguments this large */
1107 	status = he_readl(he_dev, RESET_CNTL);
1108 	if ((status & BOARD_RST_STATUS) == 0) {
1109 		hprintk("reset failed\n");
1110 		return -EINVAL;
1111 	}
1112 
1113 	/* 4.5 set bus width */
1114 	host_cntl = he_readl(he_dev, HOST_CNTL);
1115 	if (host_cntl & PCI_BUS_SIZE64)
1116 		gen_cntl_0 |= ENBL_64;
1117 	else
1118 		gen_cntl_0 &= ~ENBL_64;
1119 
1120 	if (disable64 == 1) {
1121 		hprintk("disabling 64-bit pci bus transfers\n");
1122 		gen_cntl_0 &= ~ENBL_64;
1123 	}
1124 
1125 	if (gen_cntl_0 & ENBL_64)
1126 		hprintk("64-bit transfers enabled\n");
1127 
1128 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1129 
1130 	/* 4.7 read prom contents */
1131 	for (i = 0; i < PROD_ID_LEN; ++i)
1132 		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1133 
1134 	he_dev->media = read_prom_byte(he_dev, MEDIA);
1135 
1136 	for (i = 0; i < 6; ++i)
1137 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1138 
1139 	hprintk("%s%s, %02x:%02x:%02x:%02x:%02x:%02x\n",
1140 				he_dev->prod_id,
1141 					he_dev->media & 0x40 ? "SM" : "MM",
1142 						dev->esi[0],
1143 						dev->esi[1],
1144 						dev->esi[2],
1145 						dev->esi[3],
1146 						dev->esi[4],
1147 						dev->esi[5]);
1148 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1149 						ATM_OC12_PCR : ATM_OC3_PCR;
1150 
1151 	/* 4.6 set host endianness */
1152 	lb_swap = he_readl(he_dev, LB_SWAP);
1153 	if (he_is622(he_dev))
1154 		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1155 	else
1156 		lb_swap |= XFER_SIZE;		/* 8 cells */
1157 #ifdef __BIG_ENDIAN
1158 	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1159 #else
1160 	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1161 			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1162 #endif /* __BIG_ENDIAN */
1163 	he_writel(he_dev, lb_swap, LB_SWAP);
1164 
1165 	/* 4.8 sdram controller initialization */
1166 	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1167 
1168 	/* 4.9 initialize rnum value */
1169 	lb_swap |= SWAP_RNUM_MAX(0xf);
1170 	he_writel(he_dev, lb_swap, LB_SWAP);
1171 
1172 	/* 4.10 initialize the interrupt queues */
1173 	if ((err = he_init_irq(he_dev)) != 0)
1174 		return err;
1175 
1176 #ifdef USE_TASKLET
1177 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
1178 #endif
1179 	spin_lock_init(&he_dev->global_lock);
1180 
1181 	/* 4.11 enable pci bus controller state machines */
1182 	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1183 				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1184 	he_writel(he_dev, host_cntl, HOST_CNTL);
1185 
1186 	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1187 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1188 
1189 	/*
1190 	 * atm network controller initialization
1191 	 */
1192 
1193 	/* 5.1.1 generic configuration state */
1194 
1195 	/*
1196 	 *		local (cell) buffer memory map
1197 	 *
1198 	 *             HE155                          HE622
1199 	 *
1200 	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1201 	 *         |            |            |                   |   |
1202 	 *         |  utility   |            |        rx0        |   |
1203 	 *        5|____________|         255|___________________| u |
1204 	 *        6|            |         256|                   | t |
1205 	 *         |            |            |                   | i |
1206 	 *         |    rx0     |     row    |        tx         | l |
1207 	 *         |            |            |                   | i |
1208 	 *         |            |         767|___________________| t |
1209 	 *      517|____________|         768|                   | y |
1210 	 * row  518|            |            |        rx1        |   |
1211 	 *         |            |        1023|___________________|___|
1212 	 *         |            |
1213 	 *         |    tx      |
1214 	 *         |            |
1215 	 *         |            |
1216 	 *     1535|____________|
1217 	 *     1536|            |
1218 	 *         |    rx1     |
1219 	 *     2047|____________|
1220 	 *
1221 	 */
1222 
1223 	/* total 4096 connections */
1224 	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1225 	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1226 
1227 	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1228 		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1229 		return -ENODEV;
1230 	}
1231 
1232 	if (nvpibits != -1) {
1233 		he_dev->vpibits = nvpibits;
1234 		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1235 	}
1236 
1237 	if (nvcibits != -1) {
1238 		he_dev->vcibits = nvcibits;
1239 		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1240 	}
1241 
1242 
1243 	if (he_is622(he_dev)) {
1244 		he_dev->cells_per_row = 40;
1245 		he_dev->bytes_per_row = 2048;
1246 		he_dev->r0_numrows = 256;
1247 		he_dev->tx_numrows = 512;
1248 		he_dev->r1_numrows = 256;
1249 		he_dev->r0_startrow = 0;
1250 		he_dev->tx_startrow = 256;
1251 		he_dev->r1_startrow = 768;
1252 	} else {
1253 		he_dev->cells_per_row = 20;
1254 		he_dev->bytes_per_row = 1024;
1255 		he_dev->r0_numrows = 512;
1256 		he_dev->tx_numrows = 1018;
1257 		he_dev->r1_numrows = 512;
1258 		he_dev->r0_startrow = 6;
1259 		he_dev->tx_startrow = 518;
1260 		he_dev->r1_startrow = 1536;
1261 	}
1262 
1263 	he_dev->cells_per_lbuf = 4;
1264 	he_dev->buffer_limit = 4;
1265 	he_dev->r0_numbuffs = he_dev->r0_numrows *
1266 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1267 	if (he_dev->r0_numbuffs > 2560)
1268 		he_dev->r0_numbuffs = 2560;
1269 
1270 	he_dev->r1_numbuffs = he_dev->r1_numrows *
1271 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1272 	if (he_dev->r1_numbuffs > 2560)
1273 		he_dev->r1_numbuffs = 2560;
1274 
1275 	he_dev->tx_numbuffs = he_dev->tx_numrows *
1276 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1277 	if (he_dev->tx_numbuffs > 5120)
1278 		he_dev->tx_numbuffs = 5120;
1279 
1280 	/* 5.1.2 configure hardware dependent registers */
1281 
1282 	he_writel(he_dev,
1283 		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1284 		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1285 		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1286 		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1287 								LBARB);
1288 
1289 	he_writel(he_dev, BANK_ON |
1290 		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1291 								SDRAMCON);
1292 
1293 	he_writel(he_dev,
1294 		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1295 						RM_RW_WAIT(1), RCMCONFIG);
1296 	he_writel(he_dev,
1297 		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1298 						TM_RW_WAIT(1), TCMCONFIG);
1299 
1300 	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1301 
1302 	he_writel(he_dev,
1303 		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1304 		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1305 		RX_VALVP(he_dev->vpibits) |
1306 		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1307 
1308 	he_writel(he_dev, DRF_THRESH(0x20) |
1309 		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1310 		TX_VCI_MASK(he_dev->vcibits) |
1311 		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1312 
1313 	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1314 
1315 	he_writel(he_dev, PHY_INT_ENB |
1316 		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1317 								RH_CONFIG);
1318 
1319 	/* 5.1.3 initialize connection memory */
1320 
1321 	for (i = 0; i < TCM_MEM_SIZE; ++i)
1322 		he_writel_tcm(he_dev, 0, i);
1323 
1324 	for (i = 0; i < RCM_MEM_SIZE; ++i)
1325 		he_writel_rcm(he_dev, 0, i);
1326 
1327 	/*
1328 	 *	transmit connection memory map
1329 	 *
1330 	 *                  tx memory
1331 	 *          0x0 ___________________
1332 	 *             |                   |
1333 	 *             |                   |
1334 	 *             |       TSRa        |
1335 	 *             |                   |
1336 	 *             |                   |
1337 	 *       0x8000|___________________|
1338 	 *             |                   |
1339 	 *             |       TSRb        |
1340 	 *       0xc000|___________________|
1341 	 *             |                   |
1342 	 *             |       TSRc        |
1343 	 *       0xe000|___________________|
1344 	 *             |       TSRd        |
1345 	 *       0xf000|___________________|
1346 	 *             |       tmABR       |
1347 	 *      0x10000|___________________|
1348 	 *             |                   |
1349 	 *             |       tmTPD       |
1350 	 *             |___________________|
1351 	 *             |                   |
1352 	 *                      ....
1353 	 *      0x1ffff|___________________|
1354 	 *
1355 	 *
1356 	 */
1357 
1358 	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1359 	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1360 	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1361 	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1362 	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1363 
1364 
1365 	/*
1366 	 *	receive connection memory map
1367 	 *
1368 	 *          0x0 ___________________
1369 	 *             |                   |
1370 	 *             |                   |
1371 	 *             |       RSRa        |
1372 	 *             |                   |
1373 	 *             |                   |
1374 	 *       0x8000|___________________|
1375 	 *             |                   |
1376 	 *             |             rx0/1 |
1377 	 *             |       LBM         |   link lists of local
1378 	 *             |             tx    |   buffer memory
1379 	 *             |                   |
1380 	 *       0xd000|___________________|
1381 	 *             |                   |
1382 	 *             |      rmABR        |
1383 	 *       0xe000|___________________|
1384 	 *             |                   |
1385 	 *             |       RSRb        |
1386 	 *             |___________________|
1387 	 *             |                   |
1388 	 *                      ....
1389 	 *       0xffff|___________________|
1390 	 */
1391 
1392 	he_writel(he_dev, 0x08000, RCMLBM_BA);
1393 	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1394 	he_writel(he_dev, 0x0d800, RCMABR_BA);
1395 
1396 	/* 5.1.4 initialize local buffer free pools linked lists */
1397 
1398 	he_init_rx_lbfp0(he_dev);
1399 	he_init_rx_lbfp1(he_dev);
1400 
1401 	he_writel(he_dev, 0x0, RLBC_H);
1402 	he_writel(he_dev, 0x0, RLBC_T);
1403 	he_writel(he_dev, 0x0, RLBC_H2);
1404 
1405 	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1406 	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1407 
1408 	he_init_tx_lbfp(he_dev);
1409 
1410 	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1411 
1412 	/* 5.1.5 initialize intermediate receive queues */
1413 
1414 	if (he_is622(he_dev)) {
1415 		he_writel(he_dev, 0x000f, G0_INMQ_S);
1416 		he_writel(he_dev, 0x200f, G0_INMQ_L);
1417 
1418 		he_writel(he_dev, 0x001f, G1_INMQ_S);
1419 		he_writel(he_dev, 0x201f, G1_INMQ_L);
1420 
1421 		he_writel(he_dev, 0x002f, G2_INMQ_S);
1422 		he_writel(he_dev, 0x202f, G2_INMQ_L);
1423 
1424 		he_writel(he_dev, 0x003f, G3_INMQ_S);
1425 		he_writel(he_dev, 0x203f, G3_INMQ_L);
1426 
1427 		he_writel(he_dev, 0x004f, G4_INMQ_S);
1428 		he_writel(he_dev, 0x204f, G4_INMQ_L);
1429 
1430 		he_writel(he_dev, 0x005f, G5_INMQ_S);
1431 		he_writel(he_dev, 0x205f, G5_INMQ_L);
1432 
1433 		he_writel(he_dev, 0x006f, G6_INMQ_S);
1434 		he_writel(he_dev, 0x206f, G6_INMQ_L);
1435 
1436 		he_writel(he_dev, 0x007f, G7_INMQ_S);
1437 		he_writel(he_dev, 0x207f, G7_INMQ_L);
1438 	} else {
1439 		he_writel(he_dev, 0x0000, G0_INMQ_S);
1440 		he_writel(he_dev, 0x0008, G0_INMQ_L);
1441 
1442 		he_writel(he_dev, 0x0001, G1_INMQ_S);
1443 		he_writel(he_dev, 0x0009, G1_INMQ_L);
1444 
1445 		he_writel(he_dev, 0x0002, G2_INMQ_S);
1446 		he_writel(he_dev, 0x000a, G2_INMQ_L);
1447 
1448 		he_writel(he_dev, 0x0003, G3_INMQ_S);
1449 		he_writel(he_dev, 0x000b, G3_INMQ_L);
1450 
1451 		he_writel(he_dev, 0x0004, G4_INMQ_S);
1452 		he_writel(he_dev, 0x000c, G4_INMQ_L);
1453 
1454 		he_writel(he_dev, 0x0005, G5_INMQ_S);
1455 		he_writel(he_dev, 0x000d, G5_INMQ_L);
1456 
1457 		he_writel(he_dev, 0x0006, G6_INMQ_S);
1458 		he_writel(he_dev, 0x000e, G6_INMQ_L);
1459 
1460 		he_writel(he_dev, 0x0007, G7_INMQ_S);
1461 		he_writel(he_dev, 0x000f, G7_INMQ_L);
1462 	}
1463 
1464 	/* 5.1.6 application tunable parameters */
1465 
1466 	he_writel(he_dev, 0x0, MCC);
1467 	he_writel(he_dev, 0x0, OEC);
1468 	he_writel(he_dev, 0x0, DCC);
1469 	he_writel(he_dev, 0x0, CEC);
1470 
1471 	/* 5.1.7 cs block initialization */
1472 
1473 	he_init_cs_block(he_dev);
1474 
1475 	/* 5.1.8 cs block connection memory initialization */
1476 
1477 	if (he_init_cs_block_rcm(he_dev) < 0)
1478 		return -ENOMEM;
1479 
1480 	/* 5.1.10 initialize host structures */
1481 
1482 	he_init_tpdrq(he_dev);
1483 
1484 #ifdef USE_TPD_POOL
1485 	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1486 		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1487 	if (he_dev->tpd_pool == NULL) {
1488 		hprintk("unable to create tpd pci_pool\n");
1489 		return -ENOMEM;
1490 	}
1491 
1492 	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1493 #else
1494 	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1495 			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1496 	if (!he_dev->tpd_base)
1497 		return -ENOMEM;
1498 
1499 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1500 		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1501 		he_dev->tpd_base[i].inuse = 0;
1502 	}
1503 
1504 	he_dev->tpd_head = he_dev->tpd_base;
1505 	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1506 #endif
1507 
1508 	if (he_init_group(he_dev, 0) != 0)
1509 		return -ENOMEM;
1510 
1511 	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1512 		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1513 		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1514 		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1515 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1516 						G0_RBPS_BS + (group * 32));
1517 
1518 		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1519 		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1520 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1521 						G0_RBPL_QI + (group * 32));
1522 		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1523 
1524 		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1525 		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1526 		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1527 						G0_RBRQ_Q + (group * 16));
1528 		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1529 
1530 		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1531 		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1532 		he_writel(he_dev, TBRQ_THRESH(0x1),
1533 						G0_TBRQ_THRESH + (group * 16));
1534 		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1535 	}
1536 
1537 	/* host status page */
1538 
1539 	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1540 				sizeof(struct he_hsp), &he_dev->hsp_phys);
1541 	if (he_dev->hsp == NULL) {
1542 		hprintk("failed to allocate host status page\n");
1543 		return -ENOMEM;
1544 	}
1545 	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1546 	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1547 
1548 	/* initialize framer */
1549 
1550 #ifdef CONFIG_ATM_HE_USE_SUNI
1551 	suni_init(he_dev->atm_dev);
1552 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1553 		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1554 #endif /* CONFIG_ATM_HE_USE_SUNI */
1555 
1556 	if (sdh) {
1557 		/* this really should be in suni.c but for now... */
1558 		int val;
1559 
1560 		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1561 		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1562 		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1563 	}
1564 
1565 	/* 5.1.12 enable transmit and receive */
1566 
1567 	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1568 	reg |= TX_ENABLE|ER_ENABLE;
1569 	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1570 
1571 	reg = he_readl(he_dev, RC_CONFIG);
1572 	reg |= RX_ENABLE;
1573 	he_writel(he_dev, reg, RC_CONFIG);
1574 
1575 	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1576 		he_dev->cs_stper[i].inuse = 0;
1577 		he_dev->cs_stper[i].pcr = -1;
1578 	}
1579 	he_dev->total_bw = 0;
1580 
1581 
1582 	/* atm linux initialization */
1583 
1584 	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1585 	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1586 
1587 	he_dev->irq_peak = 0;
1588 	he_dev->rbrq_peak = 0;
1589 	he_dev->rbpl_peak = 0;
1590 	he_dev->tbrq_peak = 0;
1591 
1592 	HPRINTK("hell bent for leather!\n");
1593 
1594 	return 0;
1595 }
1596 
1597 static void
1598 he_stop(struct he_dev *he_dev)
1599 {
1600 	u16 command;
1601 	u32 gen_cntl_0, reg;
1602 	struct pci_dev *pci_dev;
1603 
1604 	pci_dev = he_dev->pci_dev;
1605 
1606 	/* disable interrupts */
1607 
1608 	if (he_dev->membase) {
1609 		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1610 		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1611 		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1612 
1613 #ifdef USE_TASKLET
1614 		tasklet_disable(&he_dev->tasklet);
1615 #endif
1616 
1617 		/* disable recv and transmit */
1618 
1619 		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1620 		reg &= ~(TX_ENABLE|ER_ENABLE);
1621 		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1622 
1623 		reg = he_readl(he_dev, RC_CONFIG);
1624 		reg &= ~(RX_ENABLE);
1625 		he_writel(he_dev, reg, RC_CONFIG);
1626 	}
1627 
1628 #ifdef CONFIG_ATM_HE_USE_SUNI
1629 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1630 		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1631 #endif /* CONFIG_ATM_HE_USE_SUNI */
1632 
1633 	if (he_dev->irq)
1634 		free_irq(he_dev->irq, he_dev);
1635 
1636 	if (he_dev->irq_base)
1637 		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1638 			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1639 
1640 	if (he_dev->hsp)
1641 		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1642 						he_dev->hsp, he_dev->hsp_phys);
1643 
1644 	if (he_dev->rbpl_base) {
1645 #ifdef USE_RBPL_POOL
1646 		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1647 			void *cpuaddr = he_dev->rbpl_virt[i].virt;
1648 			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1649 
1650 			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1651 		}
1652 #else
1653 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1654 			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1655 #endif
1656 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1657 			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1658 	}
1659 
1660 #ifdef USE_RBPL_POOL
1661 	if (he_dev->rbpl_pool)
1662 		pci_pool_destroy(he_dev->rbpl_pool);
1663 #endif
1664 
1665 #ifdef USE_RBPS
1666 	if (he_dev->rbps_base) {
1667 #ifdef USE_RBPS_POOL
1668 		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1669 			void *cpuaddr = he_dev->rbps_virt[i].virt;
1670 			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1671 
1672 			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1673 		}
1674 #else
1675 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1676 			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1677 #endif
1678 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1679 			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1680 	}
1681 
1682 #ifdef USE_RBPS_POOL
1683 	if (he_dev->rbps_pool)
1684 		pci_pool_destroy(he_dev->rbps_pool);
1685 #endif
1686 
1687 #endif /* USE_RBPS */
1688 
1689 	if (he_dev->rbrq_base)
1690 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1691 							he_dev->rbrq_base, he_dev->rbrq_phys);
1692 
1693 	if (he_dev->tbrq_base)
1694 		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1695 							he_dev->tbrq_base, he_dev->tbrq_phys);
1696 
1697 	if (he_dev->tpdrq_base)
1698 		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1699 							he_dev->tpdrq_base, he_dev->tpdrq_phys);
1700 
1701 #ifdef USE_TPD_POOL
1702 	if (he_dev->tpd_pool)
1703 		pci_pool_destroy(he_dev->tpd_pool);
1704 #else
1705 	if (he_dev->tpd_base)
1706 		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1707 							he_dev->tpd_base, he_dev->tpd_base_phys);
1708 #endif
1709 
1710 	if (he_dev->pci_dev) {
1711 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1712 		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1713 		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1714 	}
1715 
1716 	if (he_dev->membase)
1717 		iounmap(he_dev->membase);
1718 }
1719 
1720 static struct he_tpd *
1721 __alloc_tpd(struct he_dev *he_dev)
1722 {
1723 #ifdef USE_TPD_POOL
1724 	struct he_tpd *tpd;
1725 	dma_addr_t dma_handle;
1726 
1727 	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
1728 	if (tpd == NULL)
1729 		return NULL;
1730 
1731 	tpd->status = TPD_ADDR(dma_handle);
1732 	tpd->reserved = 0;
1733 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1734 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1735 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1736 
1737 	return tpd;
1738 #else
1739 	int i;
1740 
1741 	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1742 		++he_dev->tpd_head;
1743 		if (he_dev->tpd_head > he_dev->tpd_end) {
1744 			he_dev->tpd_head = he_dev->tpd_base;
1745 		}
1746 
1747 		if (!he_dev->tpd_head->inuse) {
1748 			he_dev->tpd_head->inuse = 1;
1749 			he_dev->tpd_head->status &= TPD_MASK;
1750 			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1751 			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1752 			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1753 			return he_dev->tpd_head;
1754 		}
1755 	}
1756 	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1757 	return NULL;
1758 #endif
1759 }
1760 
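/*
 * the aal5 cpcs trailer occupies the last 8 bytes of the reassembled
 * pdu: uu (1), cpi (1), length (2 bytes at len-6/len-5) and crc-32 (4)
 */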
1761 #define AAL5_LEN(buf,len) 						\
1762 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1763 				(((unsigned char *)(buf))[(len)-5]))
1764 
1765 /* 2.10.1.2 receive
1766  *
1767  * aal5 packets can optionally return the tcp checksum in the lower
1768  * 16 bits of the crc (RSR0_TCP_CKSUM)
1769  */
1770 
1771 #define TCP_CKSUM(buf,len) 						\
1772 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1773 				(((unsigned char *)(buf))[(len)-1]))
1774 
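/*
 * drain the receive buffer return queue for this group: collect the
 * buffers of each pdu into the per-vcc iovec list, and on END_PDU
 * copy them into a freshly charged skb and push it up the vcc.
 * buffers are handed back to the adapter by clearing RBP_LOANED.
 * returns the number of pdus assembled.
 */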
1775 static int
1776 he_service_rbrq(struct he_dev *he_dev, int group)
1777 {
1778 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1779 				((unsigned long)he_dev->rbrq_base |
1780 					he_dev->hsp->group[group].rbrq_tail);
1781 	struct he_rbp *rbp = NULL;
1782 	unsigned cid, lastcid = -1;
1783 	unsigned buf_len = 0;
1784 	struct sk_buff *skb;
1785 	struct atm_vcc *vcc = NULL;
1786 	struct he_vcc *he_vcc;
1787 	struct he_iovec *iov;
1788 	int pdus_assembled = 0;
1789 	int updated = 0;
1790 
1791 	read_lock(&vcc_sklist_lock);
1792 	while (he_dev->rbrq_head != rbrq_tail) {
1793 		++updated;
1794 
1795 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1796 			he_dev->rbrq_head, group,
1797 			RBRQ_ADDR(he_dev->rbrq_head),
1798 			RBRQ_BUFLEN(he_dev->rbrq_head),
1799 			RBRQ_CID(he_dev->rbrq_head),
1800 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1801 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1802 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1803 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1804 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1805 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1806 
1807 #ifdef USE_RBPS
1808 		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1809 			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1810 		else
1811 #endif
1812 			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1813 
1814 		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1815 		cid = RBRQ_CID(he_dev->rbrq_head);
1816 
1817 		if (cid != lastcid)
1818 			vcc = __find_vcc(he_dev, cid);
1819 		lastcid = cid;
1820 
1821 		if (vcc == NULL) {
1822 			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
1823 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1824 				rbp->status &= ~RBP_LOANED;
1825 
1826 			goto next_rbrq_entry;
1827 		}
1828 
1829 		he_vcc = HE_VCC(vcc);
1830 		if (he_vcc == NULL) {
1831 			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
1832 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1833 				rbp->status &= ~RBP_LOANED;
1834 			goto next_rbrq_entry;
1835 		}
1836 
1837 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1838 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1839 			atomic_inc(&vcc->stats->rx_drop);
1840 			goto return_host_buffers;
1841 		}
1842 
1843 		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1844 		he_vcc->iov_tail->iov_len = buf_len;
1845 		he_vcc->pdu_len += buf_len;
1846 		++he_vcc->iov_tail;
1847 
1848 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1849 			lastcid = -1;
1850 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1851 			wake_up(&he_vcc->rx_waitq);
1852 			goto return_host_buffers;
1853 		}
1854 
1855 #ifdef notdef
1856 		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1857 			hprintk("iovec full!  cid 0x%x\n", cid);
1858 			goto return_host_buffers;
1859 		}
1860 #endif
1861 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1862 			goto next_rbrq_entry;
1863 
1864 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1865 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1866 			HPRINTK("%s%s (%d.%d)\n",
1867 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1868 							? "CRC_ERR " : "",
1869 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1870 							? "LEN_ERR" : "",
1871 							vcc->vpi, vcc->vci);
1872 			atomic_inc(&vcc->stats->rx_err);
1873 			goto return_host_buffers;
1874 		}
1875 
1876 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1877 							GFP_ATOMIC);
1878 		if (!skb) {
1879 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1880 			goto return_host_buffers;
1881 		}
1882 
1883 		if (rx_skb_reserve > 0)
1884 			skb_reserve(skb, rx_skb_reserve);
1885 
1886 		__net_timestamp(skb);
1887 
1888 		for (iov = he_vcc->iov_head;
1889 				iov < he_vcc->iov_tail; ++iov) {
1890 #ifdef USE_RBPS
1891 			if (iov->iov_base & RBP_SMALLBUF)
1892 				memcpy(skb_put(skb, iov->iov_len),
1893 					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1894 			else
1895 #endif
1896 				memcpy(skb_put(skb, iov->iov_len),
1897 					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1898 		}
1899 
1900 		switch (vcc->qos.aal) {
1901 			case ATM_AAL0:
1902 				/* 2.10.1.5 raw cell receive */
1903 				skb->len = ATM_AAL0_SDU;
1904 				skb_set_tail_pointer(skb, skb->len);
1905 				break;
1906 			case ATM_AAL5:
1907 				/* 2.10.1.2 aal5 receive */
1908 
1909 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1910 				skb_set_tail_pointer(skb, skb->len);
1911 #ifdef USE_CHECKSUM_HW
1912 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1913 					skb->ip_summed = CHECKSUM_COMPLETE;
1914 					skb->csum = TCP_CKSUM(skb->data,
1915 							he_vcc->pdu_len);
1916 				}
1917 #endif
1918 				break;
1919 		}
1920 
1921 #ifdef should_never_happen
1922 		if (skb->len > vcc->qos.rxtp.max_sdu)
1923 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1924 #endif
1925 
1926 #ifdef notdef
1927 		ATM_SKB(skb)->vcc = vcc;
1928 #endif
1929 		spin_unlock(&he_dev->global_lock);
1930 		vcc->push(vcc, skb);
1931 		spin_lock(&he_dev->global_lock);
1932 
1933 		atomic_inc(&vcc->stats->rx);
1934 
1935 return_host_buffers:
1936 		++pdus_assembled;
1937 
1938 		for (iov = he_vcc->iov_head;
1939 				iov < he_vcc->iov_tail; ++iov) {
1940 #ifdef USE_RBPS
1941 			if (iov->iov_base & RBP_SMALLBUF)
1942 				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1943 			else
1944 #endif
1945 				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1946 
1947 			rbp->status &= ~RBP_LOANED;
1948 		}
1949 
1950 		he_vcc->iov_tail = he_vcc->iov_head;
1951 		he_vcc->pdu_len = 0;
1952 
1953 next_rbrq_entry:
1954 		he_dev->rbrq_head = (struct he_rbrq *)
1955 				((unsigned long) he_dev->rbrq_base |
1956 					RBRQ_MASK(++he_dev->rbrq_head));
1957 
1958 	}
1959 	read_unlock(&vcc_sklist_lock);
1960 
1961 	if (updated) {
1962 		if (updated > he_dev->rbrq_peak)
1963 			he_dev->rbrq_peak = updated;
1964 
1965 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1966 						G0_RBRQ_H + (group * 16));
1967 	}
1968 
1969 	return pdus_assembled;
1970 }
1971 
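/*
 * drain the transmit buffer return queue for this group: find the
 * tpd the adapter has finished with, unmap its dma buffers, pop or
 * free the skb, and recycle the tpd
 */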
1972 static void
1973 he_service_tbrq(struct he_dev *he_dev, int group)
1974 {
1975 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1976 				((unsigned long)he_dev->tbrq_base |
1977 					he_dev->hsp->group[group].tbrq_tail);
1978 	struct he_tpd *tpd;
1979 	int slot, updated = 0;
1980 #ifdef USE_TPD_POOL
1981 	struct he_tpd *__tpd;
1982 #endif
1983 
1984 	/* 2.1.6 transmit buffer return queue */
1985 
1986 	while (he_dev->tbrq_head != tbrq_tail) {
1987 		++updated;
1988 
1989 		HPRINTK("tbrq%d 0x%x%s%s\n",
1990 			group,
1991 			TBRQ_TPD(he_dev->tbrq_head),
1992 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1993 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1994 #ifdef USE_TPD_POOL
1995 		tpd = NULL;
1996 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1997 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1998 				tpd = __tpd;
1999 				list_del(&__tpd->entry);
2000 				break;
2001 			}
2002 		}
2003 
2004 		if (tpd == NULL) {
2005 			hprintk("unable to locate tpd for dma buffer %x\n",
2006 						TBRQ_TPD(he_dev->tbrq_head));
2007 			goto next_tbrq_entry;
2008 		}
2009 #else
2010 		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2011 #endif
2012 
2013 		if (TBRQ_EOS(he_dev->tbrq_head)) {
2014 			if (tpd->vcc) {
2015 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2016 					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2017 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2018 			}
2019 			goto next_tbrq_entry;
2020 		}
2021 
2022 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2023 			if (tpd->iovec[slot].addr)
2024 				pci_unmap_single(he_dev->pci_dev,
2025 					tpd->iovec[slot].addr,
2026 					tpd->iovec[slot].len & TPD_LEN_MASK,
2027 							PCI_DMA_TODEVICE);
2028 			if (tpd->iovec[slot].len & TPD_LST)
2029 				break;
2030 
2031 		}
2032 
2033 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2034 			if (tpd->vcc && tpd->vcc->pop)
2035 				tpd->vcc->pop(tpd->vcc, tpd->skb);
2036 			else
2037 				dev_kfree_skb_any(tpd->skb);
2038 		}
2039 
2040 next_tbrq_entry:
2041 #ifdef USE_TPD_POOL
2042 		if (tpd)
2043 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2044 #else
2045 		tpd->inuse = 0;
2046 #endif
2047 		he_dev->tbrq_head = (struct he_tbrq *)
2048 				((unsigned long) he_dev->tbrq_base |
2049 					TBRQ_MASK(++he_dev->tbrq_head));
2050 	}
2051 
2052 	if (updated) {
2053 		if (updated > he_dev->tbrq_peak)
2054 			he_dev->tbrq_peak = updated;
2055 
2056 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2057 						G0_TBRQ_H + (group * 16));
2058 	}
2059 }
2060 
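/*
 * replenish the large receive buffer pool: advance the tail over
 * buffers the adapter has returned (RBP_LOANED clear), loaning them
 * out again, but never advance the tail onto the head (table 3.42)
 */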
2062 static void
2063 he_service_rbpl(struct he_dev *he_dev, int group)
2064 {
2065 	struct he_rbp *newtail;
2066 	struct he_rbp *rbpl_head;
2067 	int moved = 0;
2068 
2069 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2070 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2071 
2072 	for (;;) {
2073 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2074 						RBPL_MASK(he_dev->rbpl_tail+1));
2075 
2076 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2077 		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2078 			break;
2079 
2080 		newtail->status |= RBP_LOANED;
2081 		he_dev->rbpl_tail = newtail;
2082 		++moved;
2083 	}
2084 
2085 	if (moved)
2086 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2087 }
2088 
2089 #ifdef USE_RBPS
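/* same as he_service_rbpl, but for the small buffer pool */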
2090 static void
2091 he_service_rbps(struct he_dev *he_dev, int group)
2092 {
2093 	struct he_rbp *newtail;
2094 	struct he_rbp *rbps_head;
2095 	int moved = 0;
2096 
2097 	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2098 					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2099 
2100 	for (;;) {
2101 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2102 						RBPS_MASK(he_dev->rbps_tail+1));
2103 
2104 		/* table 3.42 -- rbps_tail should never be set to rbps_head */
2105 		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2106 			break;
2107 
2108 		newtail->status |= RBP_LOANED;
2109 		he_dev->rbps_tail = newtail;
2110 		++moved;
2111 	}
2112 
2113 	if (moved)
2114 		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2115 }
2116 #endif /* USE_RBPS */
2117 
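/*
 * bottom half: walk the interrupt event queue and dispatch on the
 * event type -- service the receive/transmit return queues, refill
 * the buffer pools and hand phy events to the framer driver.  once
 * the queue is drained, tell the adapter where the new tail is.
 */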
2118 static void
2119 he_tasklet(unsigned long data)
2120 {
2121 	unsigned long flags;
2122 	struct he_dev *he_dev = (struct he_dev *) data;
2123 	int group, type;
2124 	int updated = 0;
2125 
2126 	HPRINTK("tasklet (0x%lx)\n", data);
2127 #ifdef USE_TASKLET
2128 	spin_lock_irqsave(&he_dev->global_lock, flags);
2129 #endif
2130 
2131 	while (he_dev->irq_head != he_dev->irq_tail) {
2132 		++updated;
2133 
2134 		type = ITYPE_TYPE(he_dev->irq_head->isw);
2135 		group = ITYPE_GROUP(he_dev->irq_head->isw);
2136 
2137 		switch (type) {
2138 			case ITYPE_RBRQ_THRESH:
2139 				HPRINTK("rbrq%d threshold\n", group);
2140 				/* fall through */
2141 			case ITYPE_RBRQ_TIMER:
2142 				if (he_service_rbrq(he_dev, group)) {
2143 					he_service_rbpl(he_dev, group);
2144 #ifdef USE_RBPS
2145 					he_service_rbps(he_dev, group);
2146 #endif /* USE_RBPS */
2147 				}
2148 				break;
2149 			case ITYPE_TBRQ_THRESH:
2150 				HPRINTK("tbrq%d threshold\n", group);
2151 				/* fall through */
2152 			case ITYPE_TPD_COMPLETE:
2153 				he_service_tbrq(he_dev, group);
2154 				break;
2155 			case ITYPE_RBPL_THRESH:
2156 				he_service_rbpl(he_dev, group);
2157 				break;
2158 			case ITYPE_RBPS_THRESH:
2159 #ifdef USE_RBPS
2160 				he_service_rbps(he_dev, group);
2161 #endif /* USE_RBPS */
2162 				break;
2163 			case ITYPE_PHY:
2164 				HPRINTK("phy interrupt\n");
2165 #ifdef CONFIG_ATM_HE_USE_SUNI
2166 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2167 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2168 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2169 				spin_lock_irqsave(&he_dev->global_lock, flags);
2170 #endif
2171 				break;
2172 			case ITYPE_OTHER:
2173 				switch (type|group) {
2174 					case ITYPE_PARITY:
2175 						hprintk("parity error\n");
2176 						break;
2177 					case ITYPE_ABORT:
2178 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2179 						break;
2180 				}
2181 				break;
2182 			case ITYPE_TYPE(ITYPE_INVALID):
2183 				/* see 8.1.1 -- check all queues */
2184 
2185 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2186 
2187 				he_service_rbrq(he_dev, 0);
2188 				he_service_rbpl(he_dev, 0);
2189 #ifdef USE_RBPS
2190 				he_service_rbps(he_dev, 0);
2191 #endif /* USE_RBPS */
2192 				he_service_tbrq(he_dev, 0);
2193 				break;
2194 			default:
2195 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2196 		}
2197 
2198 		he_dev->irq_head->isw = ITYPE_INVALID;
2199 
2200 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2201 	}
2202 
2203 	if (updated) {
2204 		if (updated > he_dev->irq_peak)
2205 			he_dev->irq_peak = updated;
2206 
2207 		he_writel(he_dev,
2208 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2209 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2210 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2211 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2212 	}
2213 #ifdef USE_TASKLET
2214 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2215 #endif
2216 }
2217 
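/*
 * top half: pick up the queue tail the adapter dma'd to host memory,
 * then either schedule the tasklet or service the queue directly
 */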
2218 static irqreturn_t
2219 he_irq_handler(int irq, void *dev_id)
2220 {
2221 	unsigned long flags;
2222 	struct he_dev *he_dev = (struct he_dev * )dev_id;
2223 	int handled = 0;
2224 
2225 	if (he_dev == NULL)
2226 		return IRQ_NONE;
2227 
2228 	spin_lock_irqsave(&he_dev->global_lock, flags);
2229 
2230 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2231 						(*he_dev->irq_tailoffset << 2));
2232 
2233 	if (he_dev->irq_tail == he_dev->irq_head) {
2234 		HPRINTK("tailoffset not updated?\n");
2235 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2236 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2237 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2238 	}
2239 
2240 #ifdef DEBUG
2241 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2242 		hprintk("spurious (or shared) interrupt?\n");
2243 #endif
2244 
2245 	if (he_dev->irq_head != he_dev->irq_tail) {
2246 		handled = 1;
2247 #ifdef USE_TASKLET
2248 		tasklet_schedule(&he_dev->tasklet);
2249 #else
2250 		he_tasklet((unsigned long) he_dev);
2251 #endif
2252 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2253 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2254 	}
2255 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2256 	return IRQ_RETVAL(handled);
2258 }
2259 
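/*
 * hand a tpd to the adapter by appending it to the transmit packet
 * descriptor ready queue.  if the queue looks full, re-read the head
 * from the adapter before deciding to drop (see comment below).
 */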
2260 static __inline__ void
2261 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2262 {
2263 	struct he_tpdrq *new_tail;
2264 
2265 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2266 					tpd, cid, he_dev->tpdrq_tail);
2267 
2268 	/* new_tail = he_dev->tpdrq_tail; */
2269 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2270 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2271 
2272 	/*
2273 	 * check to see if we are about to set the tail == head
2274 	 * if true, update the head pointer from the adapter
2275 	 * to see if this is really the case (reading the queue
2276 	 * head for every enqueue would be unnecessarily slow)
2277 	 */
2278 
2279 	if (new_tail == he_dev->tpdrq_head) {
2280 		he_dev->tpdrq_head = (struct he_tpdrq *)
2281 			(((unsigned long)he_dev->tpdrq_base) |
2282 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2283 
2284 		if (new_tail == he_dev->tpdrq_head) {
2285 			int slot;
2286 
2287 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2288 			/*
2289 			 * FIXME
2290 			 * push tpd onto a transmit backlog queue
2291 			 * after service_tbrq, service the backlog
2292 			 * for now, we just drop the pdu
2293 			 */
2294 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2295 				if (tpd->iovec[slot].addr)
2296 					pci_unmap_single(he_dev->pci_dev,
2297 						tpd->iovec[slot].addr,
2298 						tpd->iovec[slot].len & TPD_LEN_MASK,
2299 								PCI_DMA_TODEVICE);
2300 			}
2301 			if (tpd->skb) {
2302 				if (tpd->vcc->pop)
2303 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2304 				else
2305 					dev_kfree_skb_any(tpd->skb);
2306 				atomic_inc(&tpd->vcc->stats->tx_err);
2307 			}
2308 #ifdef USE_TPD_POOL
2309 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2310 #else
2311 			tpd->inuse = 0;
2312 #endif
2313 			return;
2314 		}
2315 	}
2316 
2317 	/* 2.1.5 transmit packet descriptor ready queue */
2318 #ifdef USE_TPD_POOL
2319 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2320 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2321 #else
2322 	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2323 				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2324 #endif
2325 	he_dev->tpdrq_tail->cid = cid;
2326 	wmb();
2327 
2328 	he_dev->tpdrq_tail = new_tail;
2329 
2330 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2331 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2332 }
2333 
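/*
 * open a vcc: build the transmit (tsr) and receive (rsr) connection
 * state for the cid.  cbr connections additionally claim one of the
 * shared cs_stper rate registers and their share of the link bandwidth.
 */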
2334 static int
2335 he_open(struct atm_vcc *vcc)
2336 {
2337 	unsigned long flags;
2338 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2339 	struct he_vcc *he_vcc;
2340 	int err = 0;
2341 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2342 	short vpi = vcc->vpi;
2343 	int vci = vcc->vci;
2344 
2345 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2346 		return 0;
2347 
2348 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2349 
2350 	set_bit(ATM_VF_ADDR, &vcc->flags);
2351 
2352 	cid = he_mkcid(he_dev, vpi, vci);
2353 
2354 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2355 	if (he_vcc == NULL) {
2356 		hprintk("unable to allocate he_vcc during open\n");
2357 		return -ENOMEM;
2358 	}
2359 
2360 	he_vcc->iov_tail = he_vcc->iov_head;
2361 	he_vcc->pdu_len = 0;
2362 	he_vcc->rc_index = -1;
2363 
2364 	init_waitqueue_head(&he_vcc->rx_waitq);
2365 	init_waitqueue_head(&he_vcc->tx_waitq);
2366 
2367 	vcc->dev_data = he_vcc;
2368 
2369 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2370 		int pcr_goal;
2371 
2372 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2373 		if (pcr_goal == 0)
2374 			pcr_goal = he_dev->atm_dev->link_rate;
2375 		if (pcr_goal < 0)	/* means round down, technically */
2376 			pcr_goal = -pcr_goal;
2377 
2378 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2379 
2380 		switch (vcc->qos.aal) {
2381 			case ATM_AAL5:
2382 				tsr0_aal = TSR0_AAL5;
2383 				tsr4 = TSR4_AAL5;
2384 				break;
2385 			case ATM_AAL0:
2386 				tsr0_aal = TSR0_AAL0_SDU;
2387 				tsr4 = TSR4_AAL0_SDU;
2388 				break;
2389 			default:
2390 				err = -EINVAL;
2391 				goto open_failed;
2392 		}
2393 
2394 		spin_lock_irqsave(&he_dev->global_lock, flags);
2395 		tsr0 = he_readl_tsr0(he_dev, cid);
2396 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2397 
2398 		if (TSR0_CONN_STATE(tsr0) != 0) {
2399 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2400 			err = -EBUSY;
2401 			goto open_failed;
2402 		}
2403 
2404 		switch (vcc->qos.txtp.traffic_class) {
2405 			case ATM_UBR:
2406 				/* 2.3.3.1 open connection ubr */
2407 
2408 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2409 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2410 				break;
2411 
2412 			case ATM_CBR:
2413 				/* 2.3.3.2 open connection cbr */
2414 
2415 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2416 				if ((he_dev->total_bw + pcr_goal)
2417 					> (he_dev->atm_dev->link_rate * 9 / 10))
2418 				{
2419 					err = -EBUSY;
2420 					goto open_failed;
2421 				}
2422 
2423 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2424 
2425 				/* find an unused cs_stper register */
2426 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2427 					if (he_dev->cs_stper[reg].inuse == 0 ||
2428 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2429 							break;
2430 
2431 				if (reg == HE_NUM_CS_STPER) {
2432 					err = -EBUSY;
2433 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2434 					goto open_failed;
2435 				}
2436 
2437 				he_dev->total_bw += pcr_goal;
2438 
2439 				he_vcc->rc_index = reg;
2440 				++he_dev->cs_stper[reg].inuse;
2441 				he_dev->cs_stper[reg].pcr = pcr_goal;
2442 
2443 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2444 				period = clock / pcr_goal;
2445 
2446 				HPRINTK("rc_index = %d period = %d\n",
2447 								reg, period);
2448 
2449 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2450 							CS_STPER0 + reg);
2451 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2452 
2453 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2454 							TSR0_RC_INDEX(reg);
2455 
2456 				break;
2457 			default:
2458 				err = -EINVAL;
2459 				goto open_failed;
2460 		}
2461 
2462 		spin_lock_irqsave(&he_dev->global_lock, flags);
2463 
2464 		he_writel_tsr0(he_dev, tsr0, cid);
2465 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2466 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2467 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2468 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2469 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2470 
2471 		he_writel_tsr3(he_dev, 0x0, cid);
2472 		he_writel_tsr5(he_dev, 0x0, cid);
2473 		he_writel_tsr6(he_dev, 0x0, cid);
2474 		he_writel_tsr7(he_dev, 0x0, cid);
2475 		he_writel_tsr8(he_dev, 0x0, cid);
2476 		he_writel_tsr10(he_dev, 0x0, cid);
2477 		he_writel_tsr11(he_dev, 0x0, cid);
2478 		he_writel_tsr12(he_dev, 0x0, cid);
2479 		he_writel_tsr13(he_dev, 0x0, cid);
2480 		he_writel_tsr14(he_dev, 0x0, cid);
2481 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2482 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2483 	}
2484 
2485 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2486 		unsigned aal;
2487 
2488 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2489 					&HE_VCC(vcc)->rx_waitq);
2490 
2491 		switch (vcc->qos.aal) {
2492 			case ATM_AAL5:
2493 				aal = RSR0_AAL5;
2494 				break;
2495 			case ATM_AAL0:
2496 				aal = RSR0_RAWCELL;
2497 				break;
2498 			default:
2499 				err = -EINVAL;
2500 				goto open_failed;
2501 		}
2502 
2503 		spin_lock_irqsave(&he_dev->global_lock, flags);
2504 
2505 		rsr0 = he_readl_rsr0(he_dev, cid);
2506 		if (rsr0 & RSR0_OPEN_CONN) {
2507 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2508 
2509 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2510 			err = -EBUSY;
2511 			goto open_failed;
2512 		}
2513 
2514 #ifdef USE_RBPS
2515 		rsr1 = RSR1_GROUP(0);
2516 		rsr4 = RSR4_GROUP(0);
2517 #else /* !USE_RBPS */
2518 		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2519 		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2520 #endif /* USE_RBPS */
2521 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2522 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2523 
2524 #ifdef USE_CHECKSUM_HW
2525 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2526 			rsr0 |= RSR0_TCP_CKSUM;
2527 #endif
2528 
2529 		he_writel_rsr4(he_dev, rsr4, cid);
2530 		he_writel_rsr1(he_dev, rsr1, cid);
2531 		/* 5.1.11 last parameter initialized should be
2532 			  the open/closed indication in rsr0 */
2533 		he_writel_rsr0(he_dev,
2534 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2535 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2536 
2537 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2538 	}
2539 
2540 open_failed:
2541 
2542 	if (err) {
2543 		kfree(he_vcc);
2544 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2545 	}
2546 	else
2547 		set_bit(ATM_VF_READY, &vcc->flags);
2548 
2549 	return err;
2550 }
2551 
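/*
 * close a vcc: the rx side issues RSR0_CLOSE_CONN and waits for the
 * CON_CLOSED rbrq entry (2.7.2.2); the tx side drains outstanding
 * packets, then queues a TPD_EOS descriptor and waits for it to come
 * back through the tbrq (2.3.1.1 generic close with flush)
 */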
2552 static void
2553 he_close(struct atm_vcc *vcc)
2554 {
2555 	unsigned long flags;
2556 	DECLARE_WAITQUEUE(wait, current);
2557 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2558 	struct he_tpd *tpd;
2559 	unsigned cid;
2560 	struct he_vcc *he_vcc = HE_VCC(vcc);
2561 #define MAX_RETRY 30
2562 	int retry = 0, sleep = 1, tx_inuse;
2563 
2564 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2565 
2566 	clear_bit(ATM_VF_READY, &vcc->flags);
2567 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2568 
2569 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2570 		int timeout;
2571 
2572 		HPRINTK("close rx cid 0x%x\n", cid);
2573 
2574 		/* 2.7.2.2 close receive operation */
2575 
2576 		/* wait for previous close (if any) to finish */
2577 
2578 		spin_lock_irqsave(&he_dev->global_lock, flags);
2579 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2580 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2581 			udelay(250);
2582 		}
2583 
2584 		set_current_state(TASK_UNINTERRUPTIBLE);
2585 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2586 
2587 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2588 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2589 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2590 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2591 
2592 		timeout = schedule_timeout(30*HZ);
2593 
2594 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2595 		set_current_state(TASK_RUNNING);
2596 
2597 		if (timeout == 0)
2598 			hprintk("close rx timeout cid 0x%x\n", cid);
2599 
2600 		HPRINTK("close rx cid 0x%x complete\n", cid);
2601 
2602 	}
2603 
2604 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2605 		volatile unsigned tsr4, tsr0;
2606 		int timeout;
2607 
2608 		HPRINTK("close tx cid 0x%x\n", cid);
2609 
2610 		/* 2.1.2
2611 		 *
2612 		 * ... the host must first stop queueing packets to the TPDRQ
2613 		 * on the connection to be closed, then wait for all outstanding
2614 		 * packets to be transmitted and their buffers returned to the
2615 		 * TBRQ. When the last packet on the connection arrives in the
2616 		 * TBRQ, the host issues the close command to the adapter.
2617 		 */
2618 
2619 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2620 		       (retry < MAX_RETRY)) {
2621 			msleep(sleep);
2622 			if (sleep < 250)
2623 				sleep = sleep * 2;
2624 
2625 			++retry;
2626 		}
2627 
2628 		if (tx_inuse)
2629 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2630 
2631 		/* 2.3.1.1 generic close operations with flush */
2632 
2633 		spin_lock_irqsave(&he_dev->global_lock, flags);
2634 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2635 					/* also clears TSR4_SESSION_ENDED */
2636 
2637 		switch (vcc->qos.txtp.traffic_class) {
2638 			case ATM_UBR:
2639 				he_writel_tsr1(he_dev,
2640 					TSR1_MCR(rate_to_atmf(200000))
2641 					| TSR1_PCR(0), cid);
2642 				break;
2643 			case ATM_CBR:
2644 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2645 				break;
2646 		}
2647 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2648 
2649 		tpd = __alloc_tpd(he_dev);
2650 		if (tpd == NULL) {
2651 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2652 			goto close_tx_incomplete;
2653 		}
2654 		tpd->status |= TPD_EOS | TPD_INT;
2655 		tpd->skb = NULL;
2656 		tpd->vcc = vcc;
2657 		wmb();
2658 
2659 		set_current_state(TASK_UNINTERRUPTIBLE);
2660 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2661 		__enqueue_tpd(he_dev, tpd, cid);
2662 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2663 
2664 		timeout = schedule_timeout(30*HZ);
2665 
2666 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2667 		set_current_state(TASK_RUNNING);
2668 
2669 		spin_lock_irqsave(&he_dev->global_lock, flags);
2670 
2671 		if (timeout == 0) {
2672 			hprintk("close tx timeout cid 0x%x\n", cid);
2673 			goto close_tx_incomplete;
2674 		}
2675 
2676 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2677 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2678 			udelay(250);
2679 		}
2680 
2681 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2682 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2683 			udelay(250);
2684 		}
2685 
2686 close_tx_incomplete:
2687 
2688 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2689 			int reg = he_vcc->rc_index;
2690 
2691 			HPRINTK("cs_stper reg = %d\n", reg);
2692 
2693 			if (he_dev->cs_stper[reg].inuse == 0)
2694 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2695 			else
2696 				--he_dev->cs_stper[reg].inuse;
2697 
2698 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2699 		}
2700 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2701 
2702 		HPRINTK("close tx cid 0x%x complete\n", cid);
2703 	}
2704 
2705 	kfree(he_vcc);
2706 
2707 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2708 }
2709 
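/*
 * transmit a pdu: build a tpd (mapping the skb for dma), mark it
 * TPD_INT so its return generates an interrupt, and enqueue it on
 * the tpdrq.  aal0 cells carry their pti/clp bits in the atm header.
 */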
2710 static int
2711 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2712 {
2713 	unsigned long flags;
2714 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2715 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2716 	struct he_tpd *tpd;
2717 #ifdef USE_SCATTERGATHER
2718 	int i, slot = 0;
2719 #endif
2720 
2721 #define HE_TPD_BUFSIZE 0xffff
2722 
2723 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2724 
2725 	if ((skb->len > HE_TPD_BUFSIZE) ||
2726 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2727 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2728 		if (vcc->pop)
2729 			vcc->pop(vcc, skb);
2730 		else
2731 			dev_kfree_skb_any(skb);
2732 		atomic_inc(&vcc->stats->tx_err);
2733 		return -EINVAL;
2734 	}
2735 
2736 #ifndef USE_SCATTERGATHER
2737 	if (skb_shinfo(skb)->nr_frags) {
2738 		hprintk("no scatter/gather support\n");
2739 		if (vcc->pop)
2740 			vcc->pop(vcc, skb);
2741 		else
2742 			dev_kfree_skb_any(skb);
2743 		atomic_inc(&vcc->stats->tx_err);
2744 		return -EINVAL;
2745 	}
2746 #endif
2747 	spin_lock_irqsave(&he_dev->global_lock, flags);
2748 
2749 	tpd = __alloc_tpd(he_dev);
2750 	if (tpd == NULL) {
2751 		if (vcc->pop)
2752 			vcc->pop(vcc, skb);
2753 		else
2754 			dev_kfree_skb_any(skb);
2755 		atomic_inc(&vcc->stats->tx_err);
2756 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2757 		return -ENOMEM;
2758 	}
2759 
2760 	if (vcc->qos.aal == ATM_AAL5)
2761 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2762 	else {
2763 		char *pti_clp = (void *) (skb->data + 3);
2764 		int clp, pti;
2765 
2766 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2767 		clp = (*pti_clp & ATM_HDR_CLP);
2768 		tpd->status |= TPD_CELLTYPE(pti);
2769 		if (clp)
2770 			tpd->status |= TPD_CLP;
2771 
2772 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2773 	}
2774 
2775 #ifdef USE_SCATTERGATHER
2776 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2777 				skb->len - skb->data_len, PCI_DMA_TODEVICE);
2778 	tpd->iovec[slot].len = skb->len - skb->data_len;
2779 	++slot;
2780 
2781 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2782 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2783 
2784 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2785 			tpd->vcc = vcc;
2786 			tpd->skb = NULL;	/* not the last fragment
2787 						   so don't ->push() yet */
2788 			wmb();
2789 
2790 			__enqueue_tpd(he_dev, tpd, cid);
2791 			tpd = __alloc_tpd(he_dev);
2792 			if (tpd == NULL) {
2793 				if (vcc->pop)
2794 					vcc->pop(vcc, skb);
2795 				else
2796 					dev_kfree_skb_any(skb);
2797 				atomic_inc(&vcc->stats->tx_err);
2798 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2799 				return -ENOMEM;
2800 			}
2801 			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2802 			slot = 0;
2803 		}
2804 
2805 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2806 			(void *) page_address(frag->page) + frag->page_offset,
2807 				frag->size, PCI_DMA_TODEVICE);
2808 		tpd->iovec[slot].len = frag->size;
2809 		++slot;
2810 
2811 	}
2812 
2813 	tpd->iovec[slot - 1].len |= TPD_LST;
2814 #else
2815 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2816 	tpd->length0 = skb->len | TPD_LST;
2817 #endif
2818 	tpd->status |= TPD_INT;
2819 
2820 	tpd->vcc = vcc;
2821 	tpd->skb = skb;
2822 	wmb();
2823 	ATM_SKB(skb)->vcc = vcc;
2824 
2825 	__enqueue_tpd(he_dev, tpd, cid);
2826 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2827 
2828 	atomic_inc(&vcc->stats->tx);
2829 
2830 	return 0;
2831 }
2832 
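/*
 * HE_GET_REG lets a privileged user peek at pci, rcm, tcm and mbox
 * registers; anything else is passed through to the phy driver
 */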
2833 static int
2834 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2835 {
2836 	unsigned long flags;
2837 	struct he_dev *he_dev = HE_DEV(atm_dev);
2838 	struct he_ioctl_reg reg;
2839 	int err = 0;
2840 
2841 	switch (cmd) {
2842 		case HE_GET_REG:
2843 			if (!capable(CAP_NET_ADMIN))
2844 				return -EPERM;
2845 
2846 			if (copy_from_user(&reg, arg,
2847 					   sizeof(struct he_ioctl_reg)))
2848 				return -EFAULT;
2849 
2850 			spin_lock_irqsave(&he_dev->global_lock, flags);
2851 			switch (reg.type) {
2852 				case HE_REGTYPE_PCI:
2853 					reg.val = he_readl(he_dev, reg.addr);
2854 					break;
2855 				case HE_REGTYPE_RCM:
2856 					reg.val =
2857 						he_readl_rcm(he_dev, reg.addr);
2858 					break;
2859 				case HE_REGTYPE_TCM:
2860 					reg.val =
2861 						he_readl_tcm(he_dev, reg.addr);
2862 					break;
2863 				case HE_REGTYPE_MBOX:
2864 					reg.val =
2865 						he_readl_mbox(he_dev, reg.addr);
2866 					break;
2867 				default:
2868 					err = -EINVAL;
2869 					break;
2870 			}
2871 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2872 			if (err == 0)
2873 				if (copy_to_user(arg, &reg,
2874 							sizeof(struct he_ioctl_reg)))
2875 					return -EFAULT;
2876 			break;
2877 		default:
2878 #ifdef CONFIG_ATM_HE_USE_SUNI
2879 			if (atm_dev->phy && atm_dev->phy->ioctl)
2880 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2881 #else /* CONFIG_ATM_HE_USE_SUNI */
2882 			err = -EINVAL;
2883 #endif /* CONFIG_ATM_HE_USE_SUNI */
2884 			break;
2885 	}
2886 
2887 	return err;
2888 }
2889 
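/* framer register accessors used by the phy (suni) driver */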
2890 static void
2891 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2892 {
2893 	unsigned long flags;
2894 	struct he_dev *he_dev = HE_DEV(atm_dev);
2895 
2896 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2897 
2898 	spin_lock_irqsave(&he_dev->global_lock, flags);
2899 	he_writel(he_dev, val, FRAMER + (addr*4));
2900 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2901 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2902 }
2903 
2905 static unsigned char
2906 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2907 {
2908 	unsigned long flags;
2909 	struct he_dev *he_dev = HE_DEV(atm_dev);
2910 	unsigned reg;
2911 
2912 	spin_lock_irqsave(&he_dev->global_lock, flags);
2913 	reg = he_readl(he_dev, FRAMER + (addr*4));
2914 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2915 
2916 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2917 	return reg;
2918 }
2919 
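/*
 * emit one line of /proc output per call, selected by *pos: driver
 * version, adapter id, error counters, queue sizes/peaks and the
 * cbr rate controller table
 */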
2920 static int
2921 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2922 {
2923 	unsigned long flags;
2924 	struct he_dev *he_dev = HE_DEV(dev);
2925 	int left, i;
2926 #ifdef notdef
2927 	struct he_rbrq *rbrq_tail;
2928 	struct he_tpdrq *tpdrq_head;
2929 	int rbpl_head, rbpl_tail;
2930 #endif
2931 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2932 
2934 	left = *pos;
2935 	if (!left--)
2936 		return sprintf(page, "%s\n", version);
2937 
2938 	if (!left--)
2939 		return sprintf(page, "%s%s\n\n",
2940 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2941 
2942 	if (!left--)
2943 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2944 
2945 	spin_lock_irqsave(&he_dev->global_lock, flags);
2946 	mcc += he_readl(he_dev, MCC);
2947 	oec += he_readl(he_dev, OEC);
2948 	dcc += he_readl(he_dev, DCC);
2949 	cec += he_readl(he_dev, CEC);
2950 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2951 
2952 	if (!left--)
2953 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2954 							mcc, oec, dcc, cec);
2955 
2956 	if (!left--)
2957 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2958 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2959 
2960 	if (!left--)
2961 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2962 						CONFIG_TPDRQ_SIZE);
2963 
2964 	if (!left--)
2965 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2966 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2967 
2968 	if (!left--)
2969 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2970 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2971 
2973 #ifdef notdef
2974 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2975 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2976 
2977 	inuse = rbpl_head - rbpl_tail;
2978 	if (inuse < 0)
2979 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2980 	inuse /= sizeof(struct he_rbp);
2981 
2982 	if (!left--)
2983 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2984 						CONFIG_RBPL_SIZE, inuse);
2985 #endif
2986 
2987 	if (!left--)
2988 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2989 
2990 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2991 		if (!left--)
2992 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2993 						he_dev->cs_stper[i].pcr,
2994 						he_dev->cs_stper[i].inuse);
2995 
2996 	if (!left--)
2997 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2998 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2999 
3000 	return 0;
3001 }
3002 
3003 /* eeprom routines  -- see 4.7 */
3004 
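/*
 * bit-bang a byte out of the serial eeprom: clock out the READ opcode
 * and eight address bits, then clock eight data bits back in
 */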
3005 u8
3006 read_prom_byte(struct he_dev *he_dev, int addr)
3007 {
3008 	u32 val = 0, tmp_read = 0;
3009 	int i, j = 0;
3010 	u8 byte_read = 0;
3011 
3012 	val = readl(he_dev->membase + HOST_CNTL);
3013 	val &= 0xFFFFE0FF;
3014 
3015 	/* Turn on write enable */
3016 	val |= 0x800;
3017 	he_writel(he_dev, val, HOST_CNTL);
3018 
3019 	/* Send READ instruction */
3020 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3021 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
3022 		udelay(EEPROM_DELAY);
3023 	}
3024 
3025 	/* Next, we need to send the byte address to read from */
3026 	for (i = 7; i >= 0; i--) {
3027 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3028 		udelay(EEPROM_DELAY);
3029 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3030 		udelay(EEPROM_DELAY);
3031 	}
3032 
3033 	j = 0;
3034 
3035 	val &= 0xFFFFF7FF;      /* Turn off write enable */
3036 	he_writel(he_dev, val, HOST_CNTL);
3037 
3038 	/* Now, we can read data from the EEPROM by clocking it in */
3039 	for (i = 7; i >= 0; i--) {
3040 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3041 		udelay(EEPROM_DELAY);
3042 		tmp_read = he_readl(he_dev, HOST_CNTL);
3043 		byte_read |= (unsigned char)
3044 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3045 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3046 		udelay(EEPROM_DELAY);
3047 	}
3048 
3049 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
3050 	udelay(EEPROM_DELAY);
3051 
3052 	return byte_read;
3053 }
3054 
3055 MODULE_LICENSE("GPL");
3056 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3057 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3058 module_param(disable64, bool, 0);
3059 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3060 module_param(nvpibits, short, 0);
3061 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3062 module_param(nvcibits, short, 0);
3063 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3064 module_param(rx_skb_reserve, short, 0);
3065 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3066 module_param(irq_coalesce, bool, 0);
3067 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3068 module_param(sdh, bool, 0);
3069 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3070 
3071 static struct pci_device_id he_pci_tbl[] = {
3072 	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3073 	  0, 0, 0 },
3074 	{ 0, }
3075 };
3076 
3077 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3078 
3079 static struct pci_driver he_driver = {
3080 	.name =		"he",
3081 	.probe =	he_init_one,
3082 	.remove =	__devexit_p(he_remove_one),
3083 	.id_table =	he_pci_tbl,
3084 };
3085 
3086 static int __init he_init(void)
3087 {
3088 	return pci_register_driver(&he_driver);
3089 }
3090 
3091 static void __exit he_cleanup(void)
3092 {
3093 	pci_unregister_driver(&he_driver);
3094 }
3095 
3096 module_init(he_init);
3097 module_exit(he_cleanup);
3098