/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
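/*
 * after the first two entries assert chip select, each CLK_HIGH entry
 * above clocks one opcode bit out on SI, MSB first: six 0s followed by
 * two 1s, i.e. the EEPROM READ command 0000 0011b named in the comment
 */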

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
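/*
 * e.g. with (dev)->vcibits == 10, vpi 2 / vci 5 yields
 * cid = (2 << 10) | 5 = 0x805
 */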

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
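	/*
	 * the three byte-disable flags mask off byte lanes 2..0, so only
	 * the most significant byte of TSR4 is actually written -- the
	 * byte-enable usage called for in the note above
	 */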

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* FIXME: the device is never unlinked from the he_devs list */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
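
/*
 * e.g. rate_to_atmf(100000): 100000 << 9 = 51200000, which must be
 * halved 16 times before fitting in 10 bits (51200000 >> 16 = 781),
 * giving NONZERO | (16 << 9) | (781 & 0x1ff); decoding that value as
 * (512 + 269) * 2^16 / 512 recovers 99968 cps
 */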

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
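	/*
	 * e.g. on a 155 Mbit card: clock = 50000000 and rate starts at
	 * ATM_OC3_PCR (roughly 353000 cps), so the first reload value is
	 * about 50000000 / 353000 ~= 141 clock cycles; each of the 16
	 * entries covers a rate 1/32 of the link rate lower
	 */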

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

684 	   for proper configuration; fortunately, we only need this
685 	   in order to support abr connection */
686 

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		 * instead of '/ 512', use '>> 9' to prevent a call
		 * to __udivdi3 on 32-bit x86 platforms
		 */
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
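		/*
		 * e.g. rate_atmf = 0x1a3 gives exp = 0xd, man = 0x30, so
		 * rate_cps = (1 << 13) * (0x30 + 512) >> 9 = 8960 cells/sec
		 */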

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits); there are two table
		 * entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is not pretty, but avoids __udivdi3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

	/* small buffer pool */
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pool\n");
		return -ENOMEM;
	}

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
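	/* i.e. 1 + 16 + 192 = 209 */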
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbps_base) {
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
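/*
 * the AAL5 CPCS-PDU trailer occupies the last 8 bytes of the reassembled
 * buffer: UU (1), CPI (1), length (2, big endian), CRC-32 (4); AAL5_LEN()
 * extracts the 16-bit length field from offsets len-6 and len-5
 */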

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
1842 		he_dev->rbrq_head = (struct he_rbrq *)
1843 				((unsigned long) he_dev->rbrq_base |
1844 					RBRQ_MASK(++he_dev->rbrq_head));
1845 
1846 	}
1847 	read_unlock(&vcc_sklist_lock);
1848 
1849 	if (updated) {
1850 		if (updated > he_dev->rbrq_peak)
1851 			he_dev->rbrq_peak = updated;
1852 
1853 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1854 						G0_RBRQ_H + (group * 16));
1855 	}
1856 
1857 	return pdus_assembled;
1858 }
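
/*
 * Neither the walk above nor the tail advance uses a modulo: RBRQ_MASK()
 * relies on each ring being a power-of-two number of entries, aligned
 * on its own size, so an entry pointer can be rebuilt by OR-ing the
 * ring base with the masked (wrapped) byte offset.  A minimal sketch
 * of the trick, with an illustrative "order" parameter standing in for
 * the CONFIG_RBRQ_SIZE constant baked into the real macro:
 */
#ifdef notdef
static inline struct he_rbrq *
rbrq_next(struct he_rbrq *base, struct he_rbrq *cur, int order)
{
	unsigned long ring_bytes = sizeof(struct he_rbrq) << order;

	/* cur + 1 may point one past the end; masking wraps it to 0 */
	return (struct he_rbrq *) ((unsigned long) base |
			((unsigned long) (cur + 1) & (ring_bytes - 1)));
}
#endif /* notdef */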
1859 
1860 static void
1861 he_service_tbrq(struct he_dev *he_dev, int group)
1862 {
1863 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1864 				((unsigned long)he_dev->tbrq_base |
1865 					he_dev->hsp->group[group].tbrq_tail);
1866 	struct he_tpd *tpd;
1867 	int slot, updated = 0;
1868 	struct he_tpd *__tpd;
1869 
1870 	/* 2.1.6 transmit buffer return queue */
1871 
1872 	while (he_dev->tbrq_head != tbrq_tail) {
1873 		++updated;
1874 
1875 		HPRINTK("tbrq%d 0x%x%s%s\n",
1876 			group,
1877 			TBRQ_TPD(he_dev->tbrq_head),
1878 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1879 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1880 		tpd = NULL;
1881 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1882 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1883 				tpd = __tpd;
1884 				list_del(&__tpd->entry);
1885 				break;
1886 			}
1887 		}
1888 
1889 		if (tpd == NULL) {
1890 			hprintk("unable to locate tpd for dma buffer %x\n",
1891 						TBRQ_TPD(he_dev->tbrq_head));
1892 			goto next_tbrq_entry;
1893 		}
1894 
1895 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1896 			HPRINTK("wake_up(tx_waitq) cid 0x%x\n", tpd->vcc ?	/* vcc may be NULL */
1897 				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci) : 0);
1898 			if (tpd->vcc)
1899 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1900 
1901 			goto next_tbrq_entry;
1902 		}
1903 
1904 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1905 			if (tpd->iovec[slot].addr)
1906 				pci_unmap_single(he_dev->pci_dev,
1907 					tpd->iovec[slot].addr,
1908 					tpd->iovec[slot].len & TPD_LEN_MASK,
1909 							PCI_DMA_TODEVICE);
1910 			if (tpd->iovec[slot].len & TPD_LST)
1911 				break;
1913 		}
1914 
1915 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1916 			if (tpd->vcc && tpd->vcc->pop)
1917 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1918 			else
1919 				dev_kfree_skb_any(tpd->skb);
1920 		}
1921 
1922 next_tbrq_entry:
1923 		if (tpd)
1924 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1925 		he_dev->tbrq_head = (struct he_tbrq *)
1926 				((unsigned long) he_dev->tbrq_base |
1927 					TBRQ_MASK(++he_dev->tbrq_head));
1928 	}
1929 
1930 	if (updated) {
1931 		if (updated > he_dev->tbrq_peak)
1932 			he_dev->tbrq_peak = updated;
1933 
1934 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1935 						G0_TBRQ_H + (group * 16));
1936 	}
1937 }
1938 
1939 
1940 static void
1941 he_service_rbpl(struct he_dev *he_dev, int group)
1942 {
1943 	struct he_rbp *newtail;
1944 	struct he_rbp *rbpl_head;
1945 	int moved = 0;
1946 
1947 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1948 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1949 
1950 	for (;;) {
1951 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1952 						RBPL_MASK(he_dev->rbpl_tail+1));
1953 
1954 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1955 		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
1956 			break;
1957 
1958 		newtail->status |= RBP_LOANED;
1959 		he_dev->rbpl_tail = newtail;
1960 		++moved;
1961 	}
1962 
1963 	if (moved)
1964 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1965 }
1966 
1967 static void
1968 he_service_rbps(struct he_dev *he_dev, int group)
1969 {
1970 	struct he_rbp *newtail;
1971 	struct he_rbp *rbps_head;
1972 	int moved = 0;
1973 
1974 	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
1975 					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
1976 
1977 	for (;;) {
1978 		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
1979 						RBPS_MASK(he_dev->rbps_tail+1));
1980 
1981 		/* table 3.42 -- rbps_tail should never be set to rbps_head */
1982 		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
1983 			break;
1984 
1985 		newtail->status |= RBP_LOANED;
1986 		he_dev->rbps_tail = newtail;
1987 		++moved;
1988 	}
1989 
1990 	if (moved)
1991 		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
1992 }
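
/*
 * he_service_rbpl() and he_service_rbps() above are the same loop over
 * different rings; only the base, tail pointer and head/tail registers
 * differ.  A sketch of how a shared helper could look (illustrative
 * only -- the driver keeps the two copies separate):
 */
#ifdef notdef
static void
__he_service_rbp(struct he_dev *he_dev, struct he_rbp *base,
		struct he_rbp **tail, unsigned long mask,
		unsigned head_reg, unsigned tail_reg)
{
	struct he_rbp *head, *newtail;
	int moved = 0;

	head = (struct he_rbp *) ((unsigned long) base |
				(he_readl(he_dev, head_reg) & mask));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long) base |
				((unsigned long) (*tail + 1) & mask));

		/* table 3.42 -- tail should never be set to head */
		if (newtail == head || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		*tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, (unsigned long) *tail & mask, tail_reg);
}
#endif /* notdef */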
1993 
1994 static void
1995 he_tasklet(unsigned long data)
1996 {
1997 	unsigned long flags;
1998 	struct he_dev *he_dev = (struct he_dev *) data;
1999 	int group, type;
2000 	int updated = 0;
2001 
2002 	HPRINTK("tasklet (0x%lx)\n", data);
2003 	spin_lock_irqsave(&he_dev->global_lock, flags);
2004 
2005 	while (he_dev->irq_head != he_dev->irq_tail) {
2006 		++updated;
2007 
2008 		type = ITYPE_TYPE(he_dev->irq_head->isw);
2009 		group = ITYPE_GROUP(he_dev->irq_head->isw);
2010 
2011 		switch (type) {
2012 			case ITYPE_RBRQ_THRESH:
2013 				HPRINTK("rbrq%d threshold\n", group);
2014 				/* fall through */
2015 			case ITYPE_RBRQ_TIMER:
2016 				if (he_service_rbrq(he_dev, group)) {
2017 					he_service_rbpl(he_dev, group);
2018 					he_service_rbps(he_dev, group);
2019 				}
2020 				break;
2021 			case ITYPE_TBRQ_THRESH:
2022 				HPRINTK("tbrq%d threshold\n", group);
2023 				/* fall through */
2024 			case ITYPE_TPD_COMPLETE:
2025 				he_service_tbrq(he_dev, group);
2026 				break;
2027 			case ITYPE_RBPL_THRESH:
2028 				he_service_rbpl(he_dev, group);
2029 				break;
2030 			case ITYPE_RBPS_THRESH:
2031 				he_service_rbps(he_dev, group);
2032 				break;
2033 			case ITYPE_PHY:
2034 				HPRINTK("phy interrupt\n");
2035 #ifdef CONFIG_ATM_HE_USE_SUNI
2036 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2037 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2038 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2039 				spin_lock_irqsave(&he_dev->global_lock, flags);
2040 #endif
2041 				break;
2042 			case ITYPE_OTHER:
2043 				switch (type|group) {
2044 					case ITYPE_PARITY:
2045 						hprintk("parity error\n");
2046 						break;
2047 					case ITYPE_ABORT:
2048 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2049 						break;
2050 				}
2051 				break;
2052 			case ITYPE_TYPE(ITYPE_INVALID):
2053 				/* see 8.1.1 -- check all queues */
2054 
2055 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2056 
2057 				he_service_rbrq(he_dev, 0);
2058 				he_service_rbpl(he_dev, 0);
2059 				he_service_rbps(he_dev, 0);
2060 				he_service_tbrq(he_dev, 0);
2061 				break;
2062 			default:
2063 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2064 		}
2065 
2066 		he_dev->irq_head->isw = ITYPE_INVALID;
2067 
2068 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2069 	}
2070 
2071 	if (updated) {
2072 		if (updated > he_dev->irq_peak)
2073 			he_dev->irq_peak = updated;
2074 
2075 		he_writel(he_dev,
2076 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2077 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2078 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2079 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2080 	}
2081 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2082 }
2083 
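/*
 * he_irq_handler() below is the hard half of the split: it latches the
 * adapter's interrupt queue tail, acks and flushes the interrupt, and
 * schedules he_tasklet() above to do the actual queue servicing.
 */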
2084 static irqreturn_t
2085 he_irq_handler(int irq, void *dev_id)
2086 {
2087 	unsigned long flags;
2088 	struct he_dev *he_dev = (struct he_dev *)dev_id;
2089 	int handled = 0;
2090 
2091 	if (he_dev == NULL)
2092 		return IRQ_NONE;
2093 
2094 	spin_lock_irqsave(&he_dev->global_lock, flags);
2095 
2096 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2097 						(*he_dev->irq_tailoffset << 2));
2098 
2099 	if (he_dev->irq_tail == he_dev->irq_head) {
2100 		HPRINTK("tailoffset not updated?\n");
2101 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2102 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2103 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2104 	}
2105 
2106 #ifdef DEBUG
2107 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2108 		hprintk("spurious (or shared) interrupt?\n");
2109 #endif
2110 
2111 	if (he_dev->irq_head != he_dev->irq_tail) {
2112 		handled = 1;
2113 		tasklet_schedule(&he_dev->tasklet);
2114 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2115 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2116 	}
2117 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2118 	return IRQ_RETVAL(handled);
2120 }
2121 
2122 static __inline__ void
2123 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2124 {
2125 	struct he_tpdrq *new_tail;
2126 
2127 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2128 					tpd, cid, he_dev->tpdrq_tail);
2129 
2130 	/* new_tail = he_dev->tpdrq_tail; */
2131 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2132 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2133 
2134 	/*
2135 	 * check to see if we are about to set the tail == head
2136 	 * if true, update the head pointer from the adapter
2137 	 * to see if this is really the case (reading the queue
2138 	 * head for every enqueue would be unnecessarily slow)
2139 	 */
2140 
2141 	if (new_tail == he_dev->tpdrq_head) {
2142 		he_dev->tpdrq_head = (struct he_tpdrq *)
2143 			(((unsigned long)he_dev->tpdrq_base) |
2144 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2145 
2146 		if (new_tail == he_dev->tpdrq_head) {
2147 			int slot;
2148 
2149 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2150 			/*
2151 			 * FIXME
2152 			 * push tpd onto a transmit backlog queue
2153 			 * after service_tbrq, service the backlog
2154 			 * for now, we just drop the pdu
2155 			 */
2156 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2157 				if (tpd->iovec[slot].addr)
2158 					pci_unmap_single(he_dev->pci_dev,
2159 						tpd->iovec[slot].addr,
2160 						tpd->iovec[slot].len & TPD_LEN_MASK,
2161 								PCI_DMA_TODEVICE);
2162 			}
2163 			if (tpd->skb) {
2164 				if (tpd->vcc->pop)
2165 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2166 				else
2167 					dev_kfree_skb_any(tpd->skb);
2168 				atomic_inc(&tpd->vcc->stats->tx_err);
2169 			}
2170 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2171 			return;
2172 		}
2173 	}
2174 
2175 	/* 2.1.5 transmit packet descriptor ready queue */
2176 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2177 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2178 	he_dev->tpdrq_tail->cid = cid;
2179 	wmb();
2180 
2181 	he_dev->tpdrq_tail = new_tail;
2182 
2183 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2184 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2185 }
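
/*
 * The overflow test above re-reads TPDRQ_B_H from the adapter only when
 * the cached head makes the queue look full, trading a rare extra PCI
 * read for one saved on every enqueue.  The pattern in isolation (a
 * sketch with illustrative names, not driver code):
 */
#ifdef notdef
struct ring {
	unsigned tail;		/* owned by the host */
	unsigned cached_head;	/* last head value read from the device */
	unsigned size;		/* power of two */
};

static int ring_may_enqueue(struct ring *r,
		unsigned (*read_head)(void *), void *dev)
{
	unsigned new_tail = (r->tail + 1) & (r->size - 1);

	if (new_tail == r->cached_head) {
		/* the slow device read happens only on apparent overflow */
		r->cached_head = read_head(dev) & (r->size - 1);
		if (new_tail == r->cached_head)
			return 0;	/* genuinely full -- caller drops */
	}
	r->tail = new_tail;
	return 1;
}
#endif /* notdef */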
2186 
2187 static int
2188 he_open(struct atm_vcc *vcc)
2189 {
2190 	unsigned long flags;
2191 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2192 	struct he_vcc *he_vcc;
2193 	int err = 0;
2194 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2195 	short vpi = vcc->vpi;
2196 	int vci = vcc->vci;
2197 
2198 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2199 		return 0;
2200 
2201 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2202 
2203 	set_bit(ATM_VF_ADDR, &vcc->flags);
2204 
2205 	cid = he_mkcid(he_dev, vpi, vci);
2206 
2207 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2208 	if (he_vcc == NULL) {
2209 		hprintk("unable to allocate he_vcc during open\n");
2210 		return -ENOMEM;
2211 	}
2212 
2213 	he_vcc->iov_tail = he_vcc->iov_head;
2214 	he_vcc->pdu_len = 0;
2215 	he_vcc->rc_index = -1;
2216 
2217 	init_waitqueue_head(&he_vcc->rx_waitq);
2218 	init_waitqueue_head(&he_vcc->tx_waitq);
2219 
2220 	vcc->dev_data = he_vcc;
2221 
2222 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2223 		int pcr_goal;
2224 
2225 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2226 		if (pcr_goal == 0)
2227 			pcr_goal = he_dev->atm_dev->link_rate;
2228 		if (pcr_goal < 0)	/* means round down, technically */
2229 			pcr_goal = -pcr_goal;
2230 
2231 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2232 
2233 		switch (vcc->qos.aal) {
2234 			case ATM_AAL5:
2235 				tsr0_aal = TSR0_AAL5;
2236 				tsr4 = TSR4_AAL5;
2237 				break;
2238 			case ATM_AAL0:
2239 				tsr0_aal = TSR0_AAL0_SDU;
2240 				tsr4 = TSR4_AAL0_SDU;
2241 				break;
2242 			default:
2243 				err = -EINVAL;
2244 				goto open_failed;
2245 		}
2246 
2247 		spin_lock_irqsave(&he_dev->global_lock, flags);
2248 		tsr0 = he_readl_tsr0(he_dev, cid);
2249 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2250 
2251 		if (TSR0_CONN_STATE(tsr0) != 0) {
2252 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2253 			err = -EBUSY;
2254 			goto open_failed;
2255 		}
2256 
2257 		switch (vcc->qos.txtp.traffic_class) {
2258 			case ATM_UBR:
2259 				/* 2.3.3.1 open connection ubr */
2260 
2261 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2262 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2263 				break;
2264 
2265 			case ATM_CBR:
2266 				/* 2.3.3.2 open connection cbr */
2267 
2268 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2269 				if ((he_dev->total_bw + pcr_goal)
2270 					> (he_dev->atm_dev->link_rate * 9 / 10))
2271 				{
2272 					err = -EBUSY;
2273 					goto open_failed;
2274 				}
2275 
2276 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2277 
2278 				/* find an unused cs_stper register */
2279 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2280 					if (he_dev->cs_stper[reg].inuse == 0 ||
2281 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2282 							break;
2283 						break;
2284 				if (reg == HE_NUM_CS_STPER) {
2285 					err = -EBUSY;
2286 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2287 					goto open_failed;
2288 				}
2289 
2290 				he_dev->total_bw += pcr_goal;
2291 
2292 				he_vcc->rc_index = reg;
2293 				++he_dev->cs_stper[reg].inuse;
2294 				he_dev->cs_stper[reg].pcr = pcr_goal;
2295 
2296 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2297 				period = clock / pcr_goal;
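				/*
				 * e.g. a 155 Mb/s card uses the 50 MHz
				 * value, so pcr_goal = 100000 cells/sec
				 * gives period = 50000000 / 100000 = 500
				 * clocks between cells; half that period,
				 * in atm-forum format, is programmed below.
				 */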
2298 
2299 				HPRINTK("rc_index = %d period = %d\n",
2300 								reg, period);
2301 
2302 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2303 							CS_STPER0 + reg);
2304 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2305 
2306 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2307 							TSR0_RC_INDEX(reg);
2308 
2309 				break;
2310 			default:
2311 				err = -EINVAL;
2312 				goto open_failed;
2313 		}
2314 
2315 		spin_lock_irqsave(&he_dev->global_lock, flags);
2316 
2317 		he_writel_tsr0(he_dev, tsr0, cid);
2318 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2319 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2320 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2321 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2322 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2323 
2324 		he_writel_tsr3(he_dev, 0x0, cid);
2325 		he_writel_tsr5(he_dev, 0x0, cid);
2326 		he_writel_tsr6(he_dev, 0x0, cid);
2327 		he_writel_tsr7(he_dev, 0x0, cid);
2328 		he_writel_tsr8(he_dev, 0x0, cid);
2329 		he_writel_tsr10(he_dev, 0x0, cid);
2330 		he_writel_tsr11(he_dev, 0x0, cid);
2331 		he_writel_tsr12(he_dev, 0x0, cid);
2332 		he_writel_tsr13(he_dev, 0x0, cid);
2333 		he_writel_tsr14(he_dev, 0x0, cid);
2334 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2335 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2336 	}
2337 
2338 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2339 		unsigned aal;
2340 
2341 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2342 					&HE_VCC(vcc)->rx_waitq);
2343 
2344 		switch (vcc->qos.aal) {
2345 			case ATM_AAL5:
2346 				aal = RSR0_AAL5;
2347 				break;
2348 			case ATM_AAL0:
2349 				aal = RSR0_RAWCELL;
2350 				break;
2351 			default:
2352 				err = -EINVAL;
2353 				goto open_failed;
2354 		}
2355 
2356 		spin_lock_irqsave(&he_dev->global_lock, flags);
2357 
2358 		rsr0 = he_readl_rsr0(he_dev, cid);
2359 		if (rsr0 & RSR0_OPEN_CONN) {
2360 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2361 
2362 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2363 			err = -EBUSY;
2364 			goto open_failed;
2365 		}
2366 
2367 		rsr1 = RSR1_GROUP(0);
2368 		rsr4 = RSR4_GROUP(0);
2369 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2370 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2371 
2372 #ifdef USE_CHECKSUM_HW
2373 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2374 			rsr0 |= RSR0_TCP_CKSUM;
2375 #endif
2376 
2377 		he_writel_rsr4(he_dev, rsr4, cid);
2378 		he_writel_rsr1(he_dev, rsr1, cid);
2379 		/* 5.1.11 last parameter initialized should be
2380 			  the open/closed indication in rsr0 */
2381 		he_writel_rsr0(he_dev,
2382 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2383 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2384 
2385 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2386 	}
2387 
2388 open_failed:
2389 
2390 	if (err) {
2391 		kfree(he_vcc);
2392 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2393 	} else
2395 		set_bit(ATM_VF_READY, &vcc->flags);
2396 
2397 	return err;
2398 }
2399 
2400 static void
2401 he_close(struct atm_vcc *vcc)
2402 {
2403 	unsigned long flags;
2404 	DECLARE_WAITQUEUE(wait, current);
2405 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2406 	struct he_tpd *tpd;
2407 	unsigned cid;
2408 	struct he_vcc *he_vcc = HE_VCC(vcc);
2409 #define MAX_RETRY 30
2410 	int retry = 0, sleep = 1, tx_inuse;
2411 
2412 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2413 
2414 	clear_bit(ATM_VF_READY, &vcc->flags);
2415 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2416 
2417 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2418 		int timeout;
2419 
2420 		HPRINTK("close rx cid 0x%x\n", cid);
2421 
2422 		/* 2.7.2.2 close receive operation */
2423 
2424 		/* wait for previous close (if any) to finish */
2425 
2426 		spin_lock_irqsave(&he_dev->global_lock, flags);
2427 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2428 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2429 			udelay(250);
2430 		}
2431 
2432 		set_current_state(TASK_UNINTERRUPTIBLE);
2433 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2434 
2435 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2436 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2437 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2438 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2439 
2440 		timeout = schedule_timeout(30*HZ);
2441 
2442 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2443 		set_current_state(TASK_RUNNING);
2444 
2445 		if (timeout == 0)
2446 			hprintk("close rx timeout cid 0x%x\n", cid);
2447 
2448 		HPRINTK("close rx cid 0x%x complete\n", cid);
2449 
2450 	}
2451 
2452 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2453 		volatile unsigned tsr4, tsr0;
2454 		int timeout;
2455 
2456 		HPRINTK("close tx cid 0x%x\n", cid);
2457 
2458 		/* 2.1.2
2459 		 *
2460 		 * ... the host must first stop queueing packets to the TPDRQ
2461 		 * on the connection to be closed, then wait for all outstanding
2462 		 * packets to be transmitted and their buffers returned to the
2463 		 * TBRQ. When the last packet on the connection arrives in the
2464 		 * TBRQ, the host issues the close command to the adapter.
2465 		 */
2466 
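		/* poll until the socket's transmit memory drains, backing
		   off exponentially from 1 ms to ~256 ms, for at most
		   MAX_RETRY polls */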
2467 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2468 		       (retry < MAX_RETRY)) {
2469 			msleep(sleep);
2470 			if (sleep < 250)
2471 				sleep = sleep * 2;
2472 
2473 			++retry;
2474 		}
2475 
2476 		if (tx_inuse)
2477 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2478 
2479 		/* 2.3.1.1 generic close operations with flush */
2480 
2481 		spin_lock_irqsave(&he_dev->global_lock, flags);
2482 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2483 					/* also clears TSR4_SESSION_ENDED */
2484 
2485 		switch (vcc->qos.txtp.traffic_class) {
2486 			case ATM_UBR:
2487 				he_writel_tsr1(he_dev,
2488 					TSR1_MCR(rate_to_atmf(200000))
2489 					| TSR1_PCR(0), cid);
2490 				break;
2491 			case ATM_CBR:
2492 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2493 				break;
2494 		}
2495 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2496 
2497 		tpd = __alloc_tpd(he_dev);
2498 		if (tpd == NULL) {
2499 			hprintk("close tx __alloc_tpd failed cid 0x%x\n", cid);
2500 			goto close_tx_incomplete;
2501 		}
2502 		tpd->status |= TPD_EOS | TPD_INT;
2503 		tpd->skb = NULL;
2504 		tpd->vcc = vcc;
2505 		wmb();
2506 
2507 		set_current_state(TASK_UNINTERRUPTIBLE);
2508 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2509 		__enqueue_tpd(he_dev, tpd, cid);
2510 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2511 
2512 		timeout = schedule_timeout(30*HZ);
2513 
2514 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2515 		set_current_state(TASK_RUNNING);
2516 
2517 		spin_lock_irqsave(&he_dev->global_lock, flags);
2518 
2519 		if (timeout == 0) {
2520 			hprintk("close tx timeout cid 0x%x\n", cid);
2521 			goto close_tx_incomplete;
2522 		}
2523 
2524 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2525 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2526 			udelay(250);
2527 		}
2528 
2529 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2530 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2531 			udelay(250);
2532 		}
2533 
2534 close_tx_incomplete:
2535 
2536 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2537 			int reg = he_vcc->rc_index;
2538 
2539 			HPRINTK("cs_stper reg = %d\n", reg);
2540 
2541 			if (he_dev->cs_stper[reg].inuse == 0)
2542 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2543 			else
2544 				--he_dev->cs_stper[reg].inuse;
2545 
2546 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2547 		}
2548 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2549 
2550 		HPRINTK("close tx cid 0x%x complete\n", cid);
2551 	}
2552 
2553 	kfree(he_vcc);
2554 
2555 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2556 }
2557 
2558 static int
2559 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2560 {
2561 	unsigned long flags;
2562 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2563 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2564 	struct he_tpd *tpd;
2565 #ifdef USE_SCATTERGATHER
2566 	int i, slot = 0;
2567 #endif
2568 
2569 #define HE_TPD_BUFSIZE 0xffff
2570 
2571 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2572 
2573 	if ((skb->len > HE_TPD_BUFSIZE) ||
2574 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2575 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2576 		if (vcc->pop)
2577 			vcc->pop(vcc, skb);
2578 		else
2579 			dev_kfree_skb_any(skb);
2580 		atomic_inc(&vcc->stats->tx_err);
2581 		return -EINVAL;
2582 	}
2583 
2584 #ifndef USE_SCATTERGATHER
2585 	if (skb_shinfo(skb)->nr_frags) {
2586 		hprintk("no scatter/gather support\n");
2587 		if (vcc->pop)
2588 			vcc->pop(vcc, skb);
2589 		else
2590 			dev_kfree_skb_any(skb);
2591 		atomic_inc(&vcc->stats->tx_err);
2592 		return -EINVAL;
2593 	}
2594 #endif
2595 	spin_lock_irqsave(&he_dev->global_lock, flags);
2596 
2597 	tpd = __alloc_tpd(he_dev);
2598 	if (tpd == NULL) {
2599 		if (vcc->pop)
2600 			vcc->pop(vcc, skb);
2601 		else
2602 			dev_kfree_skb_any(skb);
2603 		atomic_inc(&vcc->stats->tx_err);
2604 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2605 		return -ENOMEM;
2606 	}
2607 
2608 	if (vcc->qos.aal == ATM_AAL5)
2609 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2610 	else {
2611 		char *pti_clp = (void *) (skb->data + 3);
2612 		int clp, pti;
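		/* byte 3 of the 5-byte cell header is |VCI(3:0)|PTI|CLP|,
		   i.e. the 3-bit payload type and the cell loss priority */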
2613 
2614 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2615 		clp = (*pti_clp & ATM_HDR_CLP);
2616 		tpd->status |= TPD_CELLTYPE(pti);
2617 		if (clp)
2618 			tpd->status |= TPD_CLP;
2619 
2620 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2621 	}
2622 
2623 #ifdef USE_SCATTERGATHER
2624 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2625 				skb->len - skb->data_len, PCI_DMA_TODEVICE);
2626 	tpd->iovec[slot].len = skb->len - skb->data_len;
2627 	++slot;
2628 
2629 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2630 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2631 
2632 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2633 			tpd->vcc = vcc;
2634 			tpd->skb = NULL;	/* not the last fragment
2635 						   so don't ->push() yet */
2636 			wmb();
2637 
2638 			__enqueue_tpd(he_dev, tpd, cid);
2639 			tpd = __alloc_tpd(he_dev);
2640 			if (tpd == NULL) {
2641 				if (vcc->pop)
2642 					vcc->pop(vcc, skb);
2643 				else
2644 					dev_kfree_skb_any(skb);
2645 				atomic_inc(&vcc->stats->tx_err);
2646 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2647 				return -ENOMEM;
2648 			}
2649 			tpd->status |= TPD_USERCELL;
2650 			slot = 0;
2651 		}
2652 
2653 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2654 			(void *) page_address(frag->page) + frag->page_offset,
2655 				frag->size, PCI_DMA_TODEVICE);
2656 		tpd->iovec[slot].len = frag->size;
2657 		++slot;
2659 	}
2660 
2661 	tpd->iovec[slot - 1].len |= TPD_LST;
2662 #else
2663 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2664 	tpd->length0 = skb->len | TPD_LST;
2665 #endif
2666 	tpd->status |= TPD_INT;
2667 
2668 	tpd->vcc = vcc;
2669 	tpd->skb = skb;
2670 	wmb();
2671 	ATM_SKB(skb)->vcc = vcc;
2672 
2673 	__enqueue_tpd(he_dev, tpd, cid);
2674 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2675 
2676 	atomic_inc(&vcc->stats->tx);
2677 
2678 	return 0;
2679 }
2680 
2681 static int
2682 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2683 {
2684 	unsigned long flags;
2685 	struct he_dev *he_dev = HE_DEV(atm_dev);
2686 	struct he_ioctl_reg reg;
2687 	int err = 0;
2688 
2689 	switch (cmd) {
2690 		case HE_GET_REG:
2691 			if (!capable(CAP_NET_ADMIN))
2692 				return -EPERM;
2693 
2694 			if (copy_from_user(&reg, arg,
2695 					   sizeof(struct he_ioctl_reg)))
2696 				return -EFAULT;
2697 
2698 			spin_lock_irqsave(&he_dev->global_lock, flags);
2699 			switch (reg.type) {
2700 				case HE_REGTYPE_PCI:
2701 					if (reg.addr >= HE_REGMAP_SIZE) {	/* addr is unsigned */
2702 						err = -EINVAL;
2703 						break;
2704 					}
2705 
2706 					reg.val = he_readl(he_dev, reg.addr);
2707 					break;
2708 				case HE_REGTYPE_RCM:
2709 					reg.val =
2710 						he_readl_rcm(he_dev, reg.addr);
2711 					break;
2712 				case HE_REGTYPE_TCM:
2713 					reg.val =
2714 						he_readl_tcm(he_dev, reg.addr);
2715 					break;
2716 				case HE_REGTYPE_MBOX:
2717 					reg.val =
2718 						he_readl_mbox(he_dev, reg.addr);
2719 					break;
2720 				default:
2721 					err = -EINVAL;
2722 					break;
2723 			}
2724 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2725 			if (err == 0)
2726 				if (copy_to_user(arg, &reg,
2727 							sizeof(struct he_ioctl_reg)))
2728 					return -EFAULT;
2729 			break;
2730 		default:
2731 #ifdef CONFIG_ATM_HE_USE_SUNI
2732 			if (atm_dev->phy && atm_dev->phy->ioctl)
2733 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2734 #else /* CONFIG_ATM_HE_USE_SUNI */
2735 			err = -EINVAL;
2736 #endif /* CONFIG_ATM_HE_USE_SUNI */
2737 			break;
2738 	}
2739 
2740 	return err;
2741 }
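
/*
 * Userspace reaches HE_GET_REG through the generic atm device ioctl
 * path.  Roughly (a sketch, not kernel code: socket setup and error
 * handling are elided, and the register choice is illustrative):
 */
#ifdef notdef
int he_peek_reg(int sock)	/* sock: an open AF_ATMPVC socket */
{
	struct he_ioctl_reg reg = { .addr = 0x0, .type = HE_REGTYPE_PCI };
	struct atmif_sioc sioc = { .number = 0, .length = sizeof(reg),
				   .arg = &reg };

	if (ioctl(sock, HE_GET_REG, &sioc) < 0)
		return -1;
	return reg.val;
}
#endif /* notdef */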
2742 
2743 static void
2744 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2745 {
2746 	unsigned long flags;
2747 	struct he_dev *he_dev = HE_DEV(atm_dev);
2748 
2749 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2750 
2751 	spin_lock_irqsave(&he_dev->global_lock, flags);
2752 	he_writel(he_dev, val, FRAMER + (addr*4));
2753 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2754 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2755 }
2756 
2757 
2758 static unsigned char
2759 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2760 {
2761 	unsigned long flags;
2762 	struct he_dev *he_dev = HE_DEV(atm_dev);
2763 	unsigned reg;
2764 
2765 	spin_lock_irqsave(&he_dev->global_lock, flags);
2766 	reg = he_readl(he_dev, FRAMER + (addr*4));
2767 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2768 
2769 	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2770 	return reg;
2771 }
2772 
2773 static int
2774 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2775 {
2776 	unsigned long flags;
2777 	struct he_dev *he_dev = HE_DEV(dev);
2778 	int left, i;
2779 #ifdef notdef
2780 	struct he_rbrq *rbrq_tail;
2781 	struct he_tpdrq *tpdrq_head;
2782 	int rbpl_head, rbpl_tail, inuse;
2783 #endif
2784 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2785 
2787 	left = *pos;
2788 	if (!left--)
2789 		return sprintf(page, "ATM he driver\n");
2790 
2791 	if (!left--)
2792 		return sprintf(page, "%s%s\n\n",
2793 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2794 
2795 	if (!left--)
2796 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2797 
2798 	spin_lock_irqsave(&he_dev->global_lock, flags);
2799 	mcc += he_readl(he_dev, MCC);
2800 	oec += he_readl(he_dev, OEC);
2801 	dcc += he_readl(he_dev, DCC);
2802 	cec += he_readl(he_dev, CEC);
2803 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2804 
2805 	if (!left--)
2806 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2807 							mcc, oec, dcc, cec);
2808 
2809 	if (!left--)
2810 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2811 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2812 
2813 	if (!left--)
2814 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2815 						CONFIG_TPDRQ_SIZE);
2816 
2817 	if (!left--)
2818 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2819 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2820 
2821 	if (!left--)
2822 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2823 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2824 
2826 #ifdef notdef
2827 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2828 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2829 
2830 	inuse = rbpl_head - rbpl_tail;
2831 	if (inuse < 0)
2832 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2833 	inuse /= sizeof(struct he_rbp);
2834 
2835 	if (!left--)
2836 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2837 						CONFIG_RBPL_SIZE, inuse);
2838 #endif
2839 
2840 	if (!left--)
2841 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2842 
2843 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2844 		if (!left--)
2845 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2846 						he_dev->cs_stper[i].pcr,
2847 						he_dev->cs_stper[i].inuse);
2848 
2849 	if (!left--)
2850 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2851 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2852 
2853 	return 0;
2854 }
2855 
2856 /* eeprom routines  -- see 4.7 */
2857 
2858 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2859 {
2860 	u32 val = 0, tmp_read = 0;
2861 	int i, j = 0;
2862 	u8 byte_read = 0;
2863 
2864 	val = readl(he_dev->membase + HOST_CNTL);
2865 	val &= 0xFFFFE0FF;
2866 
2867 	/* Turn on write enable */
2868 	val |= 0x800;
2869 	he_writel(he_dev, val, HOST_CNTL);
2870 
2871 	/* Send READ instruction */
2872 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2873 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2874 		udelay(EEPROM_DELAY);
2875 	}
2876 
2877 	/* Next, we need to send the byte address to read from */
2878 	for (i = 7; i >= 0; i--) {
2879 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2880 		udelay(EEPROM_DELAY);
2881 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2882 		udelay(EEPROM_DELAY);
2883 	}
2884 
2885 	j = 0;
2886 
2887 	val &= 0xFFFFF7FF;      /* Turn off write enable */
2888 	he_writel(he_dev, val, HOST_CNTL);
2889 
2890 	/* Now, we can read data from the EEPROM by clocking it in */
2891 	for (i = 7; i >= 0; i--) {
2892 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2893 		udelay(EEPROM_DELAY);
2894 		tmp_read = he_readl(he_dev, HOST_CNTL);
2895 		byte_read |= (unsigned char)
2896 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2897 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2898 		udelay(EEPROM_DELAY);
2899 	}
2900 
2901 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2902 	udelay(EEPROM_DELAY);
2903 
2904 	return byte_read;
2905 }
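
/*
 * Callers clock whole fields out of the eeprom one byte at a time;
 * the probe path reads, for example, the 6-byte end system identifier
 * this way (a sketch -- MAC_ADDR_OFFSET is illustrative, the real
 * offset is defined elsewhere in the driver):
 */
#ifdef notdef
	for (i = 0; i < ESI_LEN; ++i)
		he_dev->atm_dev->esi[i] =
			read_prom_byte(he_dev, MAC_ADDR_OFFSET + i);
#endif /* notdef */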
2906 
2907 MODULE_LICENSE("GPL");
2908 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2909 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2910 module_param(disable64, bool, 0);
2911 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2912 module_param(nvpibits, short, 0);
2913 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2914 module_param(nvcibits, short, 0);
2915 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2916 module_param(rx_skb_reserve, short, 0);
2917 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2918 module_param(irq_coalesce, bool, 0);
2919 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2920 module_param(sdh, bool, 0);
2921 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2922 
2923 static struct pci_device_id he_pci_tbl[] = {
2924 	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
2925 	  0, 0, 0 },
2926 	{ 0, }
2927 };
2928 
2929 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2930 
2931 static struct pci_driver he_driver = {
2932 	.name =		"he",
2933 	.probe =	he_init_one,
2934 	.remove =	__devexit_p(he_remove_one),
2935 	.id_table =	he_pci_tbl,
2936 };
2937 
2938 static int __init he_init(void)
2939 {
2940 	return pci_register_driver(&he_driver);
2941 }
2942 
2943 static void __exit he_cleanup(void)
2944 {
2945 	pci_unregister_driver(&he_driver);
2946 }
2947 
2948 module_init(he_init);
2949 module_exit(he_cleanup);
2950