xref: /openbmc/linux/drivers/atm/he.c (revision 6774def6)
1 /*
2 
3   he.c
4 
5   ForeRunnerHE ATM Adapter driver for ATM on Linux
6   Copyright (C) 1999-2001  Naval Research Laboratory
7 
8   This library is free software; you can redistribute it and/or
9   modify it under the terms of the GNU Lesser General Public
10   License as published by the Free Software Foundation; either
11   version 2.1 of the License, or (at your option) any later version.
12 
13   This library is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   Lesser General Public License for more details.
17 
18   You should have received a copy of the GNU Lesser General Public
19   License along with this library; if not, write to the Free Software
20   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 
22 */
23 
24 /*
25 
26   he.c
27 
28   ForeRunnerHE ATM Adapter driver for ATM on Linux
29   Copyright (C) 1999-2001  Naval Research Laboratory
30 
31   Permission to use, copy, modify and distribute this software and its
32   documentation is hereby granted, provided that both the copyright
33   notice and this permission notice appear in all copies of the software,
34   derivative works or modified versions, and any portions thereof, and
35   that both notices appear in supporting documentation.
36 
37   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39   RESULTING FROM THE USE OF THIS SOFTWARE.
40 
41   This driver was written using the "Programmer's Reference Manual for
42   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43 
44   AUTHORS:
45 	chas williams <chas@cmf.nrl.navy.mil>
46 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
47 
48   NOTES:
49 	4096 supported 'connections'
50 	group 0 is used for all traffic
51 	interrupt queue 0 is used for all interrupts
52 	aal0 support (based on work from ulrich.u.muller@nokia.com)
53 
54  */
55 
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
72 #include <asm/io.h>
73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h>
75 
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
79 
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW			/* still confused about this */
82 /* #undef HE_DEBUG */
83 
84 #include "he.h"
85 #include "suni.h"
86 #include <linux/atm_he.h>
87 
88 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89 
90 #ifdef HE_DEBUG
91 #define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92 #else /* !HE_DEBUG */
93 #define HPRINTK(fmt,args...)	do { } while (0)
94 #endif /* HE_DEBUG */
95 
96 /* declarations */
97 
98 static int he_open(struct atm_vcc *vcc);
99 static void he_close(struct atm_vcc *vcc);
100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
103 static void he_tasklet(unsigned long data);
104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105 static int he_start(struct atm_dev *dev);
106 static void he_stop(struct he_dev *dev);
107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109 
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111 
112 /* globals */
113 
114 static struct he_dev *he_devs;
115 static bool disable64;
116 static short nvpibits = -1;
117 static short nvcibits = -1;
118 static short rx_skb_reserve = 16;
119 static bool irq_coalesce = true;
120 static bool sdh = false;
121 
122 /* Read from EEPROM = 0000 0011b */
123 static unsigned int readtab[] = {
124 	CS_HIGH | CLK_HIGH,
125 	CS_LOW | CLK_LOW,
126 	CLK_HIGH,               /* 0 */
127 	CLK_LOW,
128 	CLK_HIGH,               /* 0 */
129 	CLK_LOW,
130 	CLK_HIGH,               /* 0 */
131 	CLK_LOW,
132 	CLK_HIGH,               /* 0 */
133 	CLK_LOW,
134 	CLK_HIGH,               /* 0 */
135 	CLK_LOW,
136 	CLK_HIGH,               /* 0 */
137 	CLK_LOW | SI_HIGH,
138 	CLK_HIGH | SI_HIGH,     /* 1 */
139 	CLK_LOW | SI_HIGH,
140 	CLK_HIGH | SI_HIGH      /* 1 */
141 };
142 
143 /* Clock to read from/write to the EEPROM */
144 static unsigned int clocktab[] = {
145 	CLK_LOW,
146 	CLK_HIGH,
147 	CLK_LOW,
148 	CLK_HIGH,
149 	CLK_LOW,
150 	CLK_HIGH,
151 	CLK_LOW,
152 	CLK_HIGH,
153 	CLK_LOW,
154 	CLK_HIGH,
155 	CLK_LOW,
156 	CLK_HIGH,
157 	CLK_LOW,
158 	CLK_HIGH,
159 	CLK_LOW,
160 	CLK_HIGH,
161 	CLK_LOW
162 };
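
/*
 * Illustrative note (inferred from the tables above, not quoted from the
 * manual): the EEPROM is bit-banged through the PROM control bits of
 * HOST_CNTL.  readtab clocks out the serial READ opcode 0000 0011b, msb
 * first -- each data bit is held on SI while CLK toggles low, then high --
 * and clocktab supplies the bare clock edges used while the address is
 * shifted out and the data shifted back in.  A replay loop looks roughly
 * like this sketch (the real one is read_prom_byte(), declared above and
 * defined later in the driver; val preserves the other HOST_CNTL bits and
 * EEPROM_DELAY is assumed to be a settle-time constant from he.h):
 *
 *	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
 *		he_writel(he_dev, val | readtab[i], HOST_CNTL);
 *		udelay(EEPROM_DELAY);
 *	}
 */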
163 
164 static struct atmdev_ops he_ops =
165 {
166 	.open =		he_open,
167 	.close =	he_close,
168 	.ioctl =	he_ioctl,
169 	.send =		he_send,
170 	.phy_put =	he_phy_put,
171 	.phy_get =	he_phy_get,
172 	.proc_read =	he_proc_read,
173 	.owner =	THIS_MODULE
174 };
175 
176 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
178 
179 /* section 2.12 connection memory access */
180 
181 static __inline__ void
182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 								unsigned flags)
184 {
185 	he_writel(he_dev, val, CON_DAT);
186 	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
187 	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189 }
190 
191 #define he_writel_rcm(dev, val, reg) 				\
192 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
193 
194 #define he_writel_tcm(dev, val, reg) 				\
195 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
196 
197 #define he_writel_mbox(dev, val, reg) 				\
198 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199 
200 static unsigned
201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202 {
203 	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 	return he_readl(he_dev, CON_DAT);
206 }
207 
208 #define he_readl_rcm(dev, reg) \
209 			he_readl_internal(dev, reg, CON_CTL_RCM)
210 
211 #define he_readl_tcm(dev, reg) \
212 			he_readl_internal(dev, reg, CON_CTL_TCM)
213 
214 #define he_readl_mbox(dev, reg) \
215 			he_readl_internal(dev, reg, CON_CTL_MBOX)
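
/*
 * Hedged usage note: the TCM/RCM/mailbox spaces behind CON_CTL are
 * indirect.  A write posts the value to CON_DAT, kicks CON_CTL with the
 * address plus a space-select flag, and spins until the controller clears
 * CON_CTL_BUSY -- that is what the empty-bodied while loops in
 * he_writel_internal() and he_readl_internal() above are doing.  A
 * read-modify-write of a transmit state register, for example, is just:
 *
 *	val = he_readl_tsr0(he_dev, cid);
 *	he_writel_tsr0(he_dev, val | <some TSR0 flag from he.h>, cid);
 *
 * using the per-register accessors defined below.
 */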
216 
217 
218 /* figure 2.2 connection id */
219 
220 #define he_mkcid(dev, vpi, vci)		((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
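
/*
 * Worked example (hand-computed, assuming the default of 12 VCI bits set
 * from CONFIG_DEFAULT_VCIBITS in he_start() below): vpi=1, vci=0x20 gives
 * cid = (1 << 12) | 0x20 = 0x1020; the 0x1fff mask simply truncates the
 * result to the width of the connection-id field the hardware decodes.
 */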
221 
222 /* 2.5.1 per connection transmit state registers */
223 
224 #define he_writel_tsr0(dev, val, cid) \
225 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \
227 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228 
229 #define he_writel_tsr1(dev, val, cid) \
230 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231 
232 #define he_writel_tsr2(dev, val, cid) \
233 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234 
235 #define he_writel_tsr3(dev, val, cid) \
236 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237 
238 #define he_writel_tsr4(dev, val, cid) \
239 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240 
241 	/* from page 2-20
242 	 *
243 	 * NOTE While the transmit connection is active, bits 23 through 0
244 	 *      of this register must not be written by the host.  Byte
245 	 *      enables should be used during normal operation when writing
246 	 *      the most significant byte.
247 	 */
248 
249 #define he_writel_tsr4_upper(dev, val, cid) \
250 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 							CON_CTL_TCM \
252 							| CON_BYTE_DISABLE_2 \
253 							| CON_BYTE_DISABLE_1 \
254 							| CON_BYTE_DISABLE_0)
255 
256 #define he_readl_tsr4(dev, cid) \
257 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258 
259 #define he_writel_tsr5(dev, val, cid) \
260 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261 
262 #define he_writel_tsr6(dev, val, cid) \
263 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264 
265 #define he_writel_tsr7(dev, val, cid) \
266 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267 
268 
269 #define he_writel_tsr8(dev, val, cid) \
270 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271 
272 #define he_writel_tsr9(dev, val, cid) \
273 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274 
275 #define he_writel_tsr10(dev, val, cid) \
276 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277 
278 #define he_writel_tsr11(dev, val, cid) \
279 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280 
281 
282 #define he_writel_tsr12(dev, val, cid) \
283 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284 
285 #define he_writel_tsr13(dev, val, cid) \
286 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287 
288 
289 #define he_writel_tsr14(dev, val, cid) \
290 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291 
292 #define he_writel_tsr14_upper(dev, val, cid) \
293 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 							CON_CTL_TCM \
295 							| CON_BYTE_DISABLE_2 \
296 							| CON_BYTE_DISABLE_1 \
297 							| CON_BYTE_DISABLE_0)
298 
299 /* 2.7.1 per connection receive state registers */
300 
301 #define he_writel_rsr0(dev, val, cid) \
302 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \
304 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305 
306 #define he_writel_rsr1(dev, val, cid) \
307 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308 
309 #define he_writel_rsr2(dev, val, cid) \
310 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311 
312 #define he_writel_rsr3(dev, val, cid) \
313 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314 
315 #define he_writel_rsr4(dev, val, cid) \
316 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317 
318 #define he_writel_rsr5(dev, val, cid) \
319 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320 
321 #define he_writel_rsr6(dev, val, cid) \
322 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323 
324 #define he_writel_rsr7(dev, val, cid) \
325 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326 
327 static __inline__ struct atm_vcc*
328 __find_vcc(struct he_dev *he_dev, unsigned cid)
329 {
330 	struct hlist_head *head;
331 	struct atm_vcc *vcc;
332 	struct sock *s;
333 	short vpi;
334 	int vci;
335 
336 	vpi = cid >> he_dev->vcibits;
337 	vci = cid & ((1 << he_dev->vcibits) - 1);
338 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339 
340 	sk_for_each(s, head) {
341 		vcc = atm_sk(s);
342 		if (vcc->dev == he_dev->atm_dev &&
343 		    vcc->vci == vci && vcc->vpi == vpi &&
344 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 				return vcc;
346 		}
347 	}
348 	return NULL;
349 }
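
/*
 * __find_vcc() inverts he_mkcid(): it splits a cid back into (vpi, vci)
 * and walks the ATM core's vcc_hash bucket for that vci.  The lookup
 * itself takes no locks -- callers such as he_service_rbrq() below must
 * already hold vcc_sklist_lock for reading.
 */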
350 
351 static int he_init_one(struct pci_dev *pci_dev,
352 		       const struct pci_device_id *pci_ent)
353 {
354 	struct atm_dev *atm_dev = NULL;
355 	struct he_dev *he_dev = NULL;
356 	int err = 0;
357 
358 	printk(KERN_INFO "ATM he driver\n");
359 
360 	if (pci_enable_device(pci_dev))
361 		return -EIO;
362 	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
363 		printk(KERN_WARNING "he: no suitable dma available\n");
364 		err = -EIO;
365 		goto init_one_failure;
366 	}
367 
368 	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 	if (!atm_dev) {
370 		err = -ENODEV;
371 		goto init_one_failure;
372 	}
373 	pci_set_drvdata(pci_dev, atm_dev);
374 
375 	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
377 	if (!he_dev) {
378 		err = -ENOMEM;
379 		goto init_one_failure;
380 	}
381 	he_dev->pci_dev = pci_dev;
382 	he_dev->atm_dev = atm_dev;
383 	atm_dev->dev_data = he_dev;
385 	he_dev->number = atm_dev->number;
386 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 	spin_lock_init(&he_dev->global_lock);
388 
389 	if (he_start(atm_dev)) {
390 		he_stop(he_dev);
391 		err = -ENODEV;
392 		goto init_one_failure;
393 	}
394 	he_dev->next = NULL;
395 	if (he_devs)
396 		he_dev->next = he_devs;
397 	he_devs = he_dev;
398 	return 0;
399 
400 init_one_failure:
401 	if (atm_dev)
402 		atm_dev_deregister(atm_dev);
403 	kfree(he_dev);
404 	pci_disable_device(pci_dev);
405 	return err;
406 }
407 
408 static void he_remove_one(struct pci_dev *pci_dev)
409 {
410 	struct atm_dev *atm_dev;
411 	struct he_dev *he_dev;
412 
413 	atm_dev = pci_get_drvdata(pci_dev);
414 	he_dev = HE_DEV(atm_dev);
415 
416 	/* need to remove from he_devs */
417 
418 	he_stop(he_dev);
419 	atm_dev_deregister(atm_dev);
420 	kfree(he_dev);
421 
422 	pci_disable_device(pci_dev);
423 }
424 
425 
426 static unsigned
427 rate_to_atmf(unsigned rate)		/* cps to atm forum format */
428 {
429 #define NONZERO (1 << 14)
430 
431 	unsigned exp = 0;
432 
433 	if (rate == 0)
434 		return 0;
435 
436 	rate <<= 9;
437 	while (rate > 0x3ff) {
438 		++exp;
439 		rate >>= 1;
440 	}
441 
442 	return (NONZERO | (exp << 9) | (rate & 0x1ff));
443 }
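
/*
 * Worked example (hand-computed): rate_to_atmf(353207) -- the OC-3 cell
 * rate -- shifts 353207 << 9 = 180841984 right 18 times before it fits in
 * 10 bits, leaving 689.  So exp = 18, mantissa = 689 & 0x1ff = 177, and
 * the result is NONZERO | (18 << 9) | 177.  Decoding it with the formula
 * used in he_init_cs_block_rcm() below,
 * (1 << 18) * (177 + 512) >> 9 = 352768 cps, recovers the original rate
 * rounded down to the 9-bit mantissa.
 */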
444 
445 static void he_init_rx_lbfp0(struct he_dev *he_dev)
446 {
447 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
448 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
449 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
450 	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
451 
452 	lbufd_index = 0;
453 	lbm_offset = he_readl(he_dev, RCMLBM_BA);
454 
455 	he_writel(he_dev, lbufd_index, RLBF0_H);
456 
457 	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
458 		lbufd_index += 2;
459 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
460 
461 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
462 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
463 
464 		if (++lbuf_count == lbufs_per_row) {
465 			lbuf_count = 0;
466 			row_offset += he_dev->bytes_per_row;
467 		}
468 		lbm_offset += 4;
469 	}
470 
471 	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
472 	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
473 }
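
/*
 * Layout note (inferred from the code, not quoted from the manual):
 * RCMLBM_BA points at an array of two-word local-buffer descriptors,
 * { buffer address / 32, next index }, in receive connection memory.
 * The two receive pools interleave -- he_init_rx_lbfp0() links the even
 * descriptors and he_init_rx_lbfp1() below the odd ones, which is why
 * both advance lbm_offset by 4 words and lbufd_index by 2 -- while the
 * transmit pool in he_init_tx_lbfp() uses the consecutive descriptors
 * that follow (stride 2 words, index + 1).
 */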
474 
475 static void he_init_rx_lbfp1(struct he_dev *he_dev)
476 {
477 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
478 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
479 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
480 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
481 
482 	lbufd_index = 1;
483 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
484 
485 	he_writel(he_dev, lbufd_index, RLBF1_H);
486 
487 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
488 		lbufd_index += 2;
489 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
490 
491 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
492 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
493 
494 		if (++lbuf_count == lbufs_per_row) {
495 			lbuf_count = 0;
496 			row_offset += he_dev->bytes_per_row;
497 		}
498 		lbm_offset += 4;
499 	}
500 
501 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
502 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
503 }
504 
505 static void he_init_tx_lbfp(struct he_dev *he_dev)
506 {
507 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510 	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
511 
512 	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
514 
515 	he_writel(he_dev, lbufd_index, TLBF_H);
516 
517 	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
518 		lbufd_index += 1;
519 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
520 
521 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
523 
524 		if (++lbuf_count == lbufs_per_row) {
525 			lbuf_count = 0;
526 			row_offset += he_dev->bytes_per_row;
527 		}
528 		lbm_offset += 2;
529 	}
530 
531 	he_writel(he_dev, lbufd_index - 1, TLBF_T);
532 }
533 
534 static int he_init_tpdrq(struct he_dev *he_dev)
535 {
536 	he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
537 						   CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538 						   &he_dev->tpdrq_phys);
539 	if (he_dev->tpdrq_base == NULL) {
540 		hprintk("failed to alloc tpdrq\n");
541 		return -ENOMEM;
542 	}
543 
544 	he_dev->tpdrq_tail = he_dev->tpdrq_base;
545 	he_dev->tpdrq_head = he_dev->tpdrq_base;
546 
547 	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
548 	he_writel(he_dev, 0, TPDRQ_T);
549 	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
550 
551 	return 0;
552 }
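
/*
 * The TPDRQ is a plain producer/consumer ring: TPDRQ_B_H holds the dma
 * base, TPDRQ_S the size minus one, and head == tail (both start at entry
 * 0) means empty.  The transmit enqueue path later in the driver fills
 * tpdrq_tail, advances it modulo the ring size, and writes the new tail
 * back to the adapter to post the descriptor.
 */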
553 
554 static void he_init_cs_block(struct he_dev *he_dev)
555 {
556 	unsigned clock, rate, delta;
557 	int reg;
558 
559 	/* 5.1.7 cs block initialization */
560 
561 	for (reg = 0; reg < 0x20; ++reg)
562 		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
563 
564 	/* rate grid timer reload values */
565 
566 	clock = he_is622(he_dev) ? 66667000 : 50000000;
567 	rate = he_dev->atm_dev->link_rate;
568 	delta = rate / 16 / 2;
569 
570 	for (reg = 0; reg < 0x10; ++reg) {
571 		/* 2.4 internal transmit function
572 		 *
573 		 * we initialize the first row in the rate grid.
574 		 * values are period (in clock cycles) of timer
575 		 */
576 		unsigned period = clock / rate;
577 
578 		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
579 		rate -= delta;
580 	}
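
	/*
	 * worked example (hand-computed, OC-3 card): clock = 50000000 and
	 * rate = 353207, so the first reload value is 50000000 / 353207 = 141
	 * clock cycles; delta = 353207 / 16 / 2 = 11037, so the sixteenth
	 * entry is programmed for 353207 - 15 * 11037 = 187652 cps, a period
	 * of 266 clock cycles.
	 */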
581 
582 	if (he_is622(he_dev)) {
583 		/* table 5.2 (4 cells per lbuf) */
584 		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
585 		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
586 		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
587 		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
588 		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
589 
590 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
591 		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
592 		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
593 		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
594 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
595 		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
596 		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
597 
598 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
599 
600 		/* table 5.8 */
601 		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
602 		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
603 		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
604 		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
605 		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
606 		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
607 
608 		/* table 5.9 */
609 		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
610 		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
611 	} else {
612 		/* table 5.1 (4 cells per lbuf) */
613 		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
614 		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
615 		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
616 		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
617 		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
618 
619 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
620 		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
621 		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
622 		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
623 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
624 		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
625 		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
626 
627 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
628 
629 		/* table 5.8 */
630 		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
631 		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
632 		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
633 		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
634 		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
635 		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
636 
637 		/* table 5.9 */
638 		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
639 		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
640 	}
641 
642 	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
643 
644 	for (reg = 0; reg < 0x8; ++reg)
645 		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
646 
647 }
648 
649 static int he_init_cs_block_rcm(struct he_dev *he_dev)
650 {
651 	unsigned (*rategrid)[16][16];
652 	unsigned rate, delta;
653 	int i, j, reg;
654 
655 	unsigned rate_atmf, exp, man;
656 	unsigned long long rate_cps;
657 	int mult, buf, buf_limit = 4;
658 
659 	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
660 	if (!rategrid)
661 		return -ENOMEM;
662 
663 	/* initialize rate grid group table */
664 
665 	for (reg = 0x0; reg < 0xff; ++reg)
666 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
667 
668 	/* initialize rate controller groups */
669 
670 	for (reg = 0x100; reg < 0x1ff; ++reg)
671 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
672 
673 	/* initialize tNrm lookup table */
674 
675 	/* the manual makes reference to a routine in a sample driver
676 	   for proper configuration; fortunately, we only need this
677 	   in order to support abr connection */
678 
679 	/* initialize rate to group table */
680 
681 	rate = he_dev->atm_dev->link_rate;
682 	delta = rate / 32;
683 
684 	/*
685 	 * 2.4 transmit internal functions
686 	 *
687 	 * we construct a copy of the rate grid used by the scheduler
688 	 * in order to construct the rate to group table below
689 	 */
690 
691 	for (j = 0; j < 16; j++) {
692 		(*rategrid)[0][j] = rate;
693 		rate -= delta;
694 	}
695 
696 	for (i = 1; i < 16; i++)
697 		for (j = 0; j < 16; j++)
698 			if (i > 14)
699 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
700 			else
701 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
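
	/*
	 * so row 0 spans the link rate down to 17/32 of it in sixteen
	 * linear steps, each later row halves the one above (row 15
	 * quarters row 14), and the bottom-right corner of the grid ends
	 * up at roughly link_rate / 2^17 -- hand-computed, mirroring the
	 * scheduler's own grid from section 2.4.
	 */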
702 
703 	/*
704 	 * 2.4 transmit internal function
705 	 *
706 	 * this table maps the upper 5 bits of exponent and mantissa
707 	 * of the atm forum representation of the rate into an index
708 	 * on rate grid
709 	 */
710 
711 	rate_atmf = 0;
712 	while (rate_atmf < 0x400) {
713 		man = (rate_atmf & 0x1f) << 4;
714 		exp = rate_atmf >> 5;
715 
716 		/*
717 		 * instead of '/ 512', use '>> 9' to prevent a call to the
718 		 * 64-bit division helper (__udivdi3) on 32-bit x86 platforms
719 		 */
720 		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
721 
722 		if (rate_cps < 10)
723 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
724 
725 		for (i = 255; i > 0; i--)
726 			if ((*rategrid)[i/16][i%16] >= rate_cps)
727 				break;	 /* pick nearest rate instead? */
728 
729 		/*
730 		 * each table entry is 16 bits: (rate grid index (8 bits)
731 		 * and a buffer limit (8 bits)
732 		 * there are two table entries in each 32-bit register
733 		 */
734 
735 #ifdef notdef
736 		buf = rate_cps * he_dev->tx_numbuffs /
737 				(he_dev->atm_dev->link_rate * 2);
738 #else
739 		/* this isn't pretty, but it avoids __udivdi3 and is mostly correct */
740 		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
741 		if (rate_cps > (272 * mult))
742 			buf = 4;
743 		else if (rate_cps > (204 * mult))
744 			buf = 3;
745 		else if (rate_cps > (136 * mult))
746 			buf = 2;
747 		else if (rate_cps > (68 * mult))
748 			buf = 1;
749 		else
750 			buf = 0;
751 #endif
752 		if (buf > buf_limit)
753 			buf = buf_limit;
754 		reg = (reg << 16) | ((i << 8) | buf);
755 
756 #define RTGTBL_OFFSET 0x400
757 
758 		if (rate_atmf & 0x1)
759 			he_writel_rcm(he_dev, reg,
760 				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
761 
762 		++rate_atmf;
763 	}
764 
765 	kfree(rategrid);
766 	return 0;
767 }
768 
769 static int he_init_group(struct he_dev *he_dev, int group)
770 {
771 	struct he_buff *heb, *next;
772 	dma_addr_t mapping;
773 	int i;
774 
775 	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
776 	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
777 	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
778 	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
779 		  G0_RBPS_BS + (group * 32));
780 
781 	/* bitmap table */
782 	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
783 				     * sizeof(unsigned long), GFP_KERNEL);
784 	if (!he_dev->rbpl_table) {
785 		hprintk("unable to allocate rbpl bitmap table\n");
786 		return -ENOMEM;
787 	}
788 	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
789 
790 	/* rbpl_virt 64-bit pointers */
791 	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
792 				    * sizeof(struct he_buff *), GFP_KERNEL);
793 	if (!he_dev->rbpl_virt) {
794 		hprintk("unable to allocate rbpl virt table\n");
795 		goto out_free_rbpl_table;
796 	}
797 
798 	/* large buffer pool */
799 	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
800 					    CONFIG_RBPL_BUFSIZE, 64, 0);
801 	if (he_dev->rbpl_pool == NULL) {
802 		hprintk("unable to create rbpl pool\n");
803 		goto out_free_rbpl_virt;
804 	}
805 
806 	he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
807 						  CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
808 						  &he_dev->rbpl_phys);
809 	if (he_dev->rbpl_base == NULL) {
810 		hprintk("failed to alloc rbpl_base\n");
811 		goto out_destroy_rbpl_pool;
812 	}
813 
814 	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
815 
816 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
817 
818 		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
819 		if (!heb)
820 			goto out_free_rbpl;
821 		heb->mapping = mapping;
822 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
823 
824 		set_bit(i, he_dev->rbpl_table);
825 		he_dev->rbpl_virt[i] = heb;
826 		he_dev->rbpl_hint = i + 1;
827 		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
828 		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
829 	}
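
	/*
	 * recovery sketch: each rbp entry carries its pool index in idx
	 * (shifted by RBP_IDX_OFFSET) and rbpl_table/rbpl_virt[] record
	 * which indices are live, so the receive path can map a returned
	 * dma address straight back to its he_buff.  he_service_rbrq()
	 * below does exactly this:
	 *
	 *	i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
	 *	heb = he_dev->rbpl_virt[i];
	 */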
830 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
831 
832 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
833 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
834 						G0_RBPL_T + (group * 32));
835 	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
836 						G0_RBPL_BS + (group * 32));
837 	he_writel(he_dev,
838 			RBP_THRESH(CONFIG_RBPL_THRESH) |
839 			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
840 			RBP_INT_ENB,
841 						G0_RBPL_QI + (group * 32));
842 
843 	/* rx buffer ready queue */
844 
845 	he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
846 						  CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
847 						  &he_dev->rbrq_phys);
848 	if (he_dev->rbrq_base == NULL) {
849 		hprintk("failed to allocate rbrq\n");
850 		goto out_free_rbpl;
851 	}
852 
853 	he_dev->rbrq_head = he_dev->rbrq_base;
854 	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
855 	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
856 	he_writel(he_dev,
857 		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
858 						G0_RBRQ_Q + (group * 16));
859 	if (irq_coalesce) {
860 		hprintk("coalescing interrupts\n");
861 		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
862 						G0_RBRQ_I + (group * 16));
863 	} else
864 		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
865 						G0_RBRQ_I + (group * 16));
866 
867 	/* tx buffer ready queue */
868 
869 	he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
870 						  CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
871 						  &he_dev->tbrq_phys);
872 	if (he_dev->tbrq_base == NULL) {
873 		hprintk("failed to allocate tbrq\n");
874 		goto out_free_rbpq_base;
875 	}
876 
877 	he_dev->tbrq_head = he_dev->tbrq_base;
878 
879 	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
880 	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
881 	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
882 	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
883 
884 	return 0;
885 
886 out_free_rbpq_base:
887 	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
888 			sizeof(struct he_rbrq), he_dev->rbrq_base,
889 			he_dev->rbrq_phys);
890 out_free_rbpl:
891 	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
892 		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
893 
894 	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
895 			sizeof(struct he_rbp), he_dev->rbpl_base,
896 			he_dev->rbpl_phys);
897 out_destroy_rbpl_pool:
898 	pci_pool_destroy(he_dev->rbpl_pool);
899 out_free_rbpl_virt:
900 	kfree(he_dev->rbpl_virt);
901 out_free_rbpl_table:
902 	kfree(he_dev->rbpl_table);
903 
904 	return -ENOMEM;
905 }
906 
907 static int he_init_irq(struct he_dev *he_dev)
908 {
909 	int i;
910 
911 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
912 		    end of the interrupt queue */
913 
914 	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
915 			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
916 	if (he_dev->irq_base == NULL) {
917 		hprintk("failed to allocate irq\n");
918 		return -ENOMEM;
919 	}
920 	he_dev->irq_tailoffset = (unsigned *)
921 					&he_dev->irq_base[CONFIG_IRQ_SIZE];
922 	*he_dev->irq_tailoffset = 0;
923 	he_dev->irq_head = he_dev->irq_base;
924 	he_dev->irq_tail = he_dev->irq_base;
925 
926 	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
927 		he_dev->irq_base[i].isw = ITYPE_INVALID;
928 
929 	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
930 	he_writel(he_dev,
931 		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
932 								IRQ0_HEAD);
933 	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
934 	he_writel(he_dev, 0x0, IRQ0_DATA);
935 
936 	he_writel(he_dev, 0x0, IRQ1_BASE);
937 	he_writel(he_dev, 0x0, IRQ1_HEAD);
938 	he_writel(he_dev, 0x0, IRQ1_CNTL);
939 	he_writel(he_dev, 0x0, IRQ1_DATA);
940 
941 	he_writel(he_dev, 0x0, IRQ2_BASE);
942 	he_writel(he_dev, 0x0, IRQ2_HEAD);
943 	he_writel(he_dev, 0x0, IRQ2_CNTL);
944 	he_writel(he_dev, 0x0, IRQ2_DATA);
945 
946 	he_writel(he_dev, 0x0, IRQ3_BASE);
947 	he_writel(he_dev, 0x0, IRQ3_HEAD);
948 	he_writel(he_dev, 0x0, IRQ3_CNTL);
949 	he_writel(he_dev, 0x0, IRQ3_DATA);
950 
951 	/* 2.9.3.2 interrupt queue mapping registers */
952 
953 	he_writel(he_dev, 0x0, GRP_10_MAP);
954 	he_writel(he_dev, 0x0, GRP_32_MAP);
955 	he_writel(he_dev, 0x0, GRP_54_MAP);
956 	he_writel(he_dev, 0x0, GRP_76_MAP);
957 
958 	if (request_irq(he_dev->pci_dev->irq,
959 			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
960 		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
961 		return -EINVAL;
962 	}
963 
964 	he_dev->irq = he_dev->pci_dev->irq;
965 
966 	return 0;
967 }
968 
969 static int he_start(struct atm_dev *dev)
970 {
971 	struct he_dev *he_dev;
972 	struct pci_dev *pci_dev;
973 	unsigned long membase;
974 
975 	u16 command;
976 	u32 gen_cntl_0, host_cntl, lb_swap;
977 	u8 cache_size, timer;
978 
979 	int err;	/* he_init_irq() returns a negative errno */
980 	unsigned int status, reg;
981 	int i, group;
982 
983 	he_dev = HE_DEV(dev);
984 	pci_dev = he_dev->pci_dev;
985 
986 	membase = pci_resource_start(pci_dev, 0);
987 	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
988 
989 	/*
990 	 * pci bus controller initialization
991 	 */
992 
993 	/* 4.3 pci bus controller-specific initialization */
994 	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
995 		hprintk("can't read GEN_CNTL_0\n");
996 		return -EINVAL;
997 	}
998 	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
999 	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1000 		hprintk("can't write GEN_CNTL_0.\n");
1001 		return -EINVAL;
1002 	}
1003 
1004 	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1005 		hprintk("can't read PCI_COMMAND.\n");
1006 		return -EINVAL;
1007 	}
1008 
1009 	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1010 	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1011 		hprintk("can't enable memory.\n");
1012 		return -EINVAL;
1013 	}
1014 
1015 	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1016 		hprintk("can't read cache line size?\n");
1017 		return -EINVAL;
1018 	}
1019 
1020 	if (cache_size < 16) {
1021 		cache_size = 16;
1022 		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1023 			hprintk("can't set cache line size to %d\n", cache_size);
1024 	}
1025 
1026 	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1027 		hprintk("can't read latency timer?\n");
1028 		return -EINVAL;
1029 	}
1030 
1031 	/* from table 3.9
1032 	 *
1033 	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1034 	 *
1035 	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1036 	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1037 	 * worst case: 1 + 16 + 192 = 209 clock cycles, hence LAT_TIMER below
1038 	 */
1039 #define LAT_TIMER 209
1040 	if (timer < LAT_TIMER) {
1041 		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1042 		timer = LAT_TIMER;
1043 		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1044 			hprintk("can't set latency timer to %d\n", timer);
1045 	}
1046 
1047 	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1048 		hprintk("can't set up page mapping\n");
1049 		return -EINVAL;
1050 	}
1051 
1052 	/* 4.4 card reset */
1053 	he_writel(he_dev, 0x0, RESET_CNTL);
1054 	he_writel(he_dev, 0xff, RESET_CNTL);
1055 
1056 	msleep(16);	/* 16 ms */
1057 	status = he_readl(he_dev, RESET_CNTL);
1058 	if ((status & BOARD_RST_STATUS) == 0) {
1059 		hprintk("reset failed\n");
1060 		return -EINVAL;
1061 	}
1062 
1063 	/* 4.5 set bus width */
1064 	host_cntl = he_readl(he_dev, HOST_CNTL);
1065 	if (host_cntl & PCI_BUS_SIZE64)
1066 		gen_cntl_0 |= ENBL_64;
1067 	else
1068 		gen_cntl_0 &= ~ENBL_64;
1069 
1070 	if (disable64 == 1) {
1071 		hprintk("disabling 64-bit pci bus transfers\n");
1072 		gen_cntl_0 &= ~ENBL_64;
1073 	}
1074 
1075 	if (gen_cntl_0 & ENBL_64)
1076 		hprintk("64-bit transfers enabled\n");
1077 
1078 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1079 
1080 	/* 4.7 read prom contents */
1081 	for (i = 0; i < PROD_ID_LEN; ++i)
1082 		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1083 
1084 	he_dev->media = read_prom_byte(he_dev, MEDIA);
1085 
1086 	for (i = 0; i < 6; ++i)
1087 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1088 
1089 	hprintk("%s%s, %pM\n", he_dev->prod_id,
1090 		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1091 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1092 						ATM_OC12_PCR : ATM_OC3_PCR;
1093 
1094 	/* 4.6 set host endianness */
1095 	lb_swap = he_readl(he_dev, LB_SWAP);
1096 	if (he_is622(he_dev))
1097 		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1098 	else
1099 		lb_swap |= XFER_SIZE;		/* 8 cells */
1100 #ifdef __BIG_ENDIAN
1101 	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1102 #else
1103 	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1104 			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1105 #endif /* __BIG_ENDIAN */
1106 	he_writel(he_dev, lb_swap, LB_SWAP);
1107 
1108 	/* 4.8 sdram controller initialization */
1109 	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1110 
1111 	/* 4.9 initialize rnum value */
1112 	lb_swap |= SWAP_RNUM_MAX(0xf);
1113 	he_writel(he_dev, lb_swap, LB_SWAP);
1114 
1115 	/* 4.10 initialize the interrupt queues */
1116 	if ((err = he_init_irq(he_dev)) != 0)
1117 		return err;
1118 
1119 	/* 4.11 enable pci bus controller state machines */
1120 	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1121 				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1122 	he_writel(he_dev, host_cntl, HOST_CNTL);
1123 
1124 	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1125 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1126 
1127 	/*
1128 	 * atm network controller initialization
1129 	 */
1130 
1131 	/* 5.1.1 generic configuration state */
1132 
1133 	/*
1134 	 *		local (cell) buffer memory map
1135 	 *
1136 	 *             HE155                          HE622
1137 	 *
1138 	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1139 	 *         |            |            |                   |   |
1140 	 *         |  utility   |            |        rx0        |   |
1141 	 *        5|____________|         255|___________________| u |
1142 	 *        6|            |         256|                   | t |
1143 	 *         |            |            |                   | i |
1144 	 *         |    rx0     |     row    |        tx         | l |
1145 	 *         |            |            |                   | i |
1146 	 *         |            |         767|___________________| t |
1147 	 *      517|____________|         768|                   | y |
1148 	 * row  518|            |            |        rx1        |   |
1149 	 *         |            |        1023|___________________|___|
1150 	 *         |            |
1151 	 *         |    tx      |
1152 	 *         |            |
1153 	 *         |            |
1154 	 *     1535|____________|
1155 	 *     1536|            |
1156 	 *         |    rx1     |
1157 	 *     2047|____________|
1158 	 *
1159 	 */
1160 
1161 	/* total 4096 connections */
1162 	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1163 	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1164 
1165 	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1166 		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1167 		return -ENODEV;
1168 	}
1169 
1170 	if (nvpibits != -1) {
1171 		he_dev->vpibits = nvpibits;
1172 		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1173 	}
1174 
1175 	if (nvcibits != -1) {
1176 		he_dev->vcibits = nvcibits;
1177 		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1178 	}
1179 
1180 
1181 	if (he_is622(he_dev)) {
1182 		he_dev->cells_per_row = 40;
1183 		he_dev->bytes_per_row = 2048;
1184 		he_dev->r0_numrows = 256;
1185 		he_dev->tx_numrows = 512;
1186 		he_dev->r1_numrows = 256;
1187 		he_dev->r0_startrow = 0;
1188 		he_dev->tx_startrow = 256;
1189 		he_dev->r1_startrow = 768;
1190 	} else {
1191 		he_dev->cells_per_row = 20;
1192 		he_dev->bytes_per_row = 1024;
1193 		he_dev->r0_numrows = 512;
1194 		he_dev->tx_numrows = 1018;
1195 		he_dev->r1_numrows = 512;
1196 		he_dev->r0_startrow = 6;
1197 		he_dev->tx_startrow = 518;
1198 		he_dev->r1_startrow = 1536;
1199 	}
1200 
1201 	he_dev->cells_per_lbuf = 4;
1202 	he_dev->buffer_limit = 4;
1203 	he_dev->r0_numbuffs = he_dev->r0_numrows *
1204 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1205 	if (he_dev->r0_numbuffs > 2560)
1206 		he_dev->r0_numbuffs = 2560;
1207 
1208 	he_dev->r1_numbuffs = he_dev->r1_numrows *
1209 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1210 	if (he_dev->r1_numbuffs > 2560)
1211 		he_dev->r1_numbuffs = 2560;
1212 
1213 	he_dev->tx_numbuffs = he_dev->tx_numrows *
1214 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1215 	if (he_dev->tx_numbuffs > 5120)
1216 		he_dev->tx_numbuffs = 5120;
1217 
1218 	/* 5.1.2 configure hardware dependent registers */
1219 
1220 	he_writel(he_dev,
1221 		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1222 		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1223 		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1224 		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1225 								LBARB);
1226 
1227 	he_writel(he_dev, BANK_ON |
1228 		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1229 								SDRAMCON);
1230 
1231 	he_writel(he_dev,
1232 		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1233 						RM_RW_WAIT(1), RCMCONFIG);
1234 	he_writel(he_dev,
1235 		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1236 						TM_RW_WAIT(1), TCMCONFIG);
1237 
1238 	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1239 
1240 	he_writel(he_dev,
1241 		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1242 		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1243 		RX_VALVP(he_dev->vpibits) |
1244 		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1245 
1246 	he_writel(he_dev, DRF_THRESH(0x20) |
1247 		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1248 		TX_VCI_MASK(he_dev->vcibits) |
1249 		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1250 
1251 	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1252 
1253 	he_writel(he_dev, PHY_INT_ENB |
1254 		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1255 								RH_CONFIG);
1256 
1257 	/* 5.1.3 initialize connection memory */
1258 
1259 	for (i = 0; i < TCM_MEM_SIZE; ++i)
1260 		he_writel_tcm(he_dev, 0, i);
1261 
1262 	for (i = 0; i < RCM_MEM_SIZE; ++i)
1263 		he_writel_rcm(he_dev, 0, i);
1264 
1265 	/*
1266 	 *	transmit connection memory map
1267 	 *
1268 	 *                  tx memory
1269 	 *          0x0 ___________________
1270 	 *             |                   |
1271 	 *             |                   |
1272 	 *             |       TSRa        |
1273 	 *             |                   |
1274 	 *             |                   |
1275 	 *       0x8000|___________________|
1276 	 *             |                   |
1277 	 *             |       TSRb        |
1278 	 *       0xc000|___________________|
1279 	 *             |                   |
1280 	 *             |       TSRc        |
1281 	 *       0xe000|___________________|
1282 	 *             |       TSRd        |
1283 	 *       0xf000|___________________|
1284 	 *             |       tmABR       |
1285 	 *      0x10000|___________________|
1286 	 *             |                   |
1287 	 *             |       tmTPD       |
1288 	 *             |___________________|
1289 	 *             |                   |
1290 	 *                      ....
1291 	 *      0x1ffff|___________________|
1292 	 *
1293 	 *
1294 	 */
1295 
1296 	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1297 	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1298 	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1299 	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1300 	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1301 
1302 
1303 	/*
1304 	 *	receive connection memory map
1305 	 *
1306 	 *          0x0 ___________________
1307 	 *             |                   |
1308 	 *             |                   |
1309 	 *             |       RSRa        |
1310 	 *             |                   |
1311 	 *             |                   |
1312 	 *       0x8000|___________________|
1313 	 *             |                   |
1314 	 *             |             rx0/1 |
1315 	 *             |       LBM         |   link lists of local
1316 	 *             |             tx    |   buffer memory
1317 	 *             |                   |
1318 	 *       0xd000|___________________|
1319 	 *             |                   |
1320 	 *             |      rmABR        |
1321 	 *       0xe000|___________________|
1322 	 *             |                   |
1323 	 *             |       RSRb        |
1324 	 *             |___________________|
1325 	 *             |                   |
1326 	 *                      ....
1327 	 *       0xffff|___________________|
1328 	 */
1329 
1330 	he_writel(he_dev, 0x08000, RCMLBM_BA);
1331 	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1332 	he_writel(he_dev, 0x0d800, RCMABR_BA);
1333 
1334 	/* 5.1.4 initialize local buffer free pools linked lists */
1335 
1336 	he_init_rx_lbfp0(he_dev);
1337 	he_init_rx_lbfp1(he_dev);
1338 
1339 	he_writel(he_dev, 0x0, RLBC_H);
1340 	he_writel(he_dev, 0x0, RLBC_T);
1341 	he_writel(he_dev, 0x0, RLBC_H2);
1342 
1343 	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1344 	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1345 
1346 	he_init_tx_lbfp(he_dev);
1347 
1348 	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1349 
1350 	/* 5.1.5 initialize intermediate receive queues */
1351 
1352 	if (he_is622(he_dev)) {
1353 		he_writel(he_dev, 0x000f, G0_INMQ_S);
1354 		he_writel(he_dev, 0x200f, G0_INMQ_L);
1355 
1356 		he_writel(he_dev, 0x001f, G1_INMQ_S);
1357 		he_writel(he_dev, 0x201f, G1_INMQ_L);
1358 
1359 		he_writel(he_dev, 0x002f, G2_INMQ_S);
1360 		he_writel(he_dev, 0x202f, G2_INMQ_L);
1361 
1362 		he_writel(he_dev, 0x003f, G3_INMQ_S);
1363 		he_writel(he_dev, 0x203f, G3_INMQ_L);
1364 
1365 		he_writel(he_dev, 0x004f, G4_INMQ_S);
1366 		he_writel(he_dev, 0x204f, G4_INMQ_L);
1367 
1368 		he_writel(he_dev, 0x005f, G5_INMQ_S);
1369 		he_writel(he_dev, 0x205f, G5_INMQ_L);
1370 
1371 		he_writel(he_dev, 0x006f, G6_INMQ_S);
1372 		he_writel(he_dev, 0x206f, G6_INMQ_L);
1373 
1374 		he_writel(he_dev, 0x007f, G7_INMQ_S);
1375 		he_writel(he_dev, 0x207f, G7_INMQ_L);
1376 	} else {
1377 		he_writel(he_dev, 0x0000, G0_INMQ_S);
1378 		he_writel(he_dev, 0x0008, G0_INMQ_L);
1379 
1380 		he_writel(he_dev, 0x0001, G1_INMQ_S);
1381 		he_writel(he_dev, 0x0009, G1_INMQ_L);
1382 
1383 		he_writel(he_dev, 0x0002, G2_INMQ_S);
1384 		he_writel(he_dev, 0x000a, G2_INMQ_L);
1385 
1386 		he_writel(he_dev, 0x0003, G3_INMQ_S);
1387 		he_writel(he_dev, 0x000b, G3_INMQ_L);
1388 
1389 		he_writel(he_dev, 0x0004, G4_INMQ_S);
1390 		he_writel(he_dev, 0x000c, G4_INMQ_L);
1391 
1392 		he_writel(he_dev, 0x0005, G5_INMQ_S);
1393 		he_writel(he_dev, 0x000d, G5_INMQ_L);
1394 
1395 		he_writel(he_dev, 0x0006, G6_INMQ_S);
1396 		he_writel(he_dev, 0x000e, G6_INMQ_L);
1397 
1398 		he_writel(he_dev, 0x0007, G7_INMQ_S);
1399 		he_writel(he_dev, 0x000f, G7_INMQ_L);
1400 	}
1401 
1402 	/* 5.1.6 application tunable parameters */
1403 
1404 	he_writel(he_dev, 0x0, MCC);
1405 	he_writel(he_dev, 0x0, OEC);
1406 	he_writel(he_dev, 0x0, DCC);
1407 	he_writel(he_dev, 0x0, CEC);
1408 
1409 	/* 5.1.7 cs block initialization */
1410 
1411 	he_init_cs_block(he_dev);
1412 
1413 	/* 5.1.8 cs block connection memory initialization */
1414 
1415 	if (he_init_cs_block_rcm(he_dev) < 0)
1416 		return -ENOMEM;
1417 
1418 	/* 5.1.10 initialize host structures */
1419 
1420 	if (he_init_tpdrq(he_dev) != 0)
		return -ENOMEM;
1421 
1422 	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1423 		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1424 	if (he_dev->tpd_pool == NULL) {
1425 		hprintk("unable to create tpd pci_pool\n");
1426 		return -ENOMEM;
1427 	}
1428 
1429 	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1430 
1431 	if (he_init_group(he_dev, 0) != 0)
1432 		return -ENOMEM;
1433 
1434 	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1435 		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1436 		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1437 		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1438 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1439 						G0_RBPS_BS + (group * 32));
1440 
1441 		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1442 		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1443 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1444 						G0_RBPL_QI + (group * 32));
1445 		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1446 
1447 		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1448 		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1449 		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1450 						G0_RBRQ_Q + (group * 16));
1451 		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1452 
1453 		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1454 		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1455 		he_writel(he_dev, TBRQ_THRESH(0x1),
1456 						G0_TBRQ_THRESH + (group * 16));
1457 		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1458 	}
1459 
1460 	/* host status page */
1461 
1462 	he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
1463 					    sizeof(struct he_hsp),
1464 					    &he_dev->hsp_phys);
1465 	if (he_dev->hsp == NULL) {
1466 		hprintk("failed to allocate host status page\n");
1467 		return -ENOMEM;
1468 	}
1469 	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1470 
1471 	/* initialize framer */
1472 
1473 #ifdef CONFIG_ATM_HE_USE_SUNI
1474 	if (he_isMM(he_dev))
1475 		suni_init(he_dev->atm_dev);
1476 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1477 		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1478 #endif /* CONFIG_ATM_HE_USE_SUNI */
1479 
1480 	if (sdh) {
1481 		/* this really should be in suni.c but for now... */
1482 		int val;
1483 
1484 		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1485 		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1486 		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1487 		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1488 	}
1489 
1490 	/* 5.1.12 enable transmit and receive */
1491 
1492 	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1493 	reg |= TX_ENABLE|ER_ENABLE;
1494 	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1495 
1496 	reg = he_readl(he_dev, RC_CONFIG);
1497 	reg |= RX_ENABLE;
1498 	he_writel(he_dev, reg, RC_CONFIG);
1499 
1500 	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1501 		he_dev->cs_stper[i].inuse = 0;
1502 		he_dev->cs_stper[i].pcr = -1;
1503 	}
1504 	he_dev->total_bw = 0;
1505 
1506 
1507 	/* atm linux initialization */
1508 
1509 	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1510 	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1511 
1512 	he_dev->irq_peak = 0;
1513 	he_dev->rbrq_peak = 0;
1514 	he_dev->rbpl_peak = 0;
1515 	he_dev->tbrq_peak = 0;
1516 
1517 	HPRINTK("hell bent for leather!\n");
1518 
1519 	return 0;
1520 }
1521 
1522 static void
1523 he_stop(struct he_dev *he_dev)
1524 {
1525 	struct he_buff *heb, *next;
1526 	struct pci_dev *pci_dev;
1527 	u32 gen_cntl_0, reg;
1528 	u16 command;
1529 
1530 	pci_dev = he_dev->pci_dev;
1531 
1532 	/* disable interrupts */
1533 
1534 	if (he_dev->membase) {
1535 		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1536 		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1537 		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1538 
1539 		tasklet_disable(&he_dev->tasklet);
1540 
1541 		/* disable recv and transmit */
1542 
1543 		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1544 		reg &= ~(TX_ENABLE|ER_ENABLE);
1545 		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1546 
1547 		reg = he_readl(he_dev, RC_CONFIG);
1548 		reg &= ~(RX_ENABLE);
1549 		he_writel(he_dev, reg, RC_CONFIG);
1550 	}
1551 
1552 #ifdef CONFIG_ATM_HE_USE_SUNI
1553 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1554 		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1555 #endif /* CONFIG_ATM_HE_USE_SUNI */
1556 
1557 	if (he_dev->irq)
1558 		free_irq(he_dev->irq, he_dev);
1559 
1560 	if (he_dev->irq_base)
1561 		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1562 			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1563 
1564 	if (he_dev->hsp)
1565 		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1566 						he_dev->hsp, he_dev->hsp_phys);
1567 
1568 	if (he_dev->rbpl_base) {
1569 		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1570 			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1571 
1572 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1573 			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1574 	}
1575 
1576 	kfree(he_dev->rbpl_virt);
1577 	kfree(he_dev->rbpl_table);
1578 
1579 	if (he_dev->rbpl_pool)
1580 		pci_pool_destroy(he_dev->rbpl_pool);
1581 
1582 	if (he_dev->rbrq_base)
1583 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1584 							he_dev->rbrq_base, he_dev->rbrq_phys);
1585 
1586 	if (he_dev->tbrq_base)
1587 		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1588 							he_dev->tbrq_base, he_dev->tbrq_phys);
1589 
1590 	if (he_dev->tpdrq_base)
1591 		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1592 							he_dev->tpdrq_base, he_dev->tpdrq_phys);
1593 
1594 	if (he_dev->tpd_pool)
1595 		pci_pool_destroy(he_dev->tpd_pool);
1596 
1597 	if (he_dev->pci_dev) {
1598 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1599 		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1600 		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1601 	}
1602 
1603 	if (he_dev->membase)
1604 		iounmap(he_dev->membase);
1605 }
1606 
1607 static struct he_tpd *
1608 __alloc_tpd(struct he_dev *he_dev)
1609 {
1610 	struct he_tpd *tpd;
1611 	dma_addr_t mapping;
1612 
1613 	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1614 	if (tpd == NULL)
1615 		return NULL;
1616 
1617 	tpd->status = TPD_ADDR(mapping);
1618 	tpd->reserved = 0;
1619 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1620 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1621 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1622 
1623 	return tpd;
1624 }
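
/*
 * Note (inferred from the pool setup in he_start()): TPDs come from a
 * pci_pool created with TPD_ALIGNMENT, so the low bits of the dma handle
 * are zero and TPD_ADDR() can stash the handle in the status word
 * alongside the flag bits.  he_service_tbrq() below recovers it with
 * TPD_ADDR(tpd->status) both to match returned descriptors and to free
 * them back to the pool.
 */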
1625 
1626 #define AAL5_LEN(buf,len) 						\
1627 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1628 				(((unsigned char *)(buf))[(len)-5]))
1629 
1630 /* 2.10.1.2 receive
1631  *
1632  * aal5 packets can optionally return the tcp checksum in the lower
1633  * 16 bits of the crc (RSR0_TCP_CKSUM)
1634  */
1635 
1636 #define TCP_CKSUM(buf,len) 						\
1637 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1638 				(((unsigned char *)(buf))[(len-1)]))
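
/*
 * Layout reminder (standard AAL5 framing, not driver-specific): the
 * CPCS-PDU trailer is the final 8 bytes -- UU(1), CPI(1), Length(2),
 * CRC-32(4) -- so AAL5_LEN() reads the big-endian length field from bytes
 * len-6 and len-5, and TCP_CKSUM() reads the last two bytes, where the
 * adapter may substitute the TCP checksum for the low half of the CRC
 * (see RSR0_TCP_CKSUM above).
 */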
1639 
1640 static int
1641 he_service_rbrq(struct he_dev *he_dev, int group)
1642 {
1643 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1644 				((unsigned long)he_dev->rbrq_base |
1645 					he_dev->hsp->group[group].rbrq_tail);
1646 	unsigned cid, lastcid = -1;
1647 	struct sk_buff *skb;
1648 	struct atm_vcc *vcc = NULL;
1649 	struct he_vcc *he_vcc;
1650 	struct he_buff *heb, *next;
1651 	int i;
1652 	int pdus_assembled = 0;
1653 	int updated = 0;
1654 
1655 	read_lock(&vcc_sklist_lock);
1656 	while (he_dev->rbrq_head != rbrq_tail) {
1657 		++updated;
1658 
1659 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1660 			he_dev->rbrq_head, group,
1661 			RBRQ_ADDR(he_dev->rbrq_head),
1662 			RBRQ_BUFLEN(he_dev->rbrq_head),
1663 			RBRQ_CID(he_dev->rbrq_head),
1664 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1665 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1666 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1667 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1668 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1669 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1670 
1671 		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1672 		heb = he_dev->rbpl_virt[i];
1673 
1674 		cid = RBRQ_CID(he_dev->rbrq_head);
1675 		if (cid != lastcid)
1676 			vcc = __find_vcc(he_dev, cid);
1677 		lastcid = cid;
1678 
1679 		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1680 			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1681 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1682 				clear_bit(i, he_dev->rbpl_table);
1683 				list_del(&heb->entry);
1684 				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1685 			}
1686 
1687 			goto next_rbrq_entry;
1688 		}
1689 
1690 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1691 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1692 			atomic_inc(&vcc->stats->rx_drop);
1693 			goto return_host_buffers;
1694 		}
1695 
1696 		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1697 		clear_bit(i, he_dev->rbpl_table);
1698 		list_move_tail(&heb->entry, &he_vcc->buffers);
1699 		he_vcc->pdu_len += heb->len;
1700 
1701 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1702 			lastcid = -1;
1703 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1704 			wake_up(&he_vcc->rx_waitq);
1705 			goto return_host_buffers;
1706 		}
1707 
1708 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1709 			goto next_rbrq_entry;
1710 
1711 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1712 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1713 			HPRINTK("%s%s (%d.%d)\n",
1714 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1715 							? "CRC_ERR " : "",
1716 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1717 							? "LEN_ERR" : "",
1718 							vcc->vpi, vcc->vci);
1719 			atomic_inc(&vcc->stats->rx_err);
1720 			goto return_host_buffers;
1721 		}
1722 
1723 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1724 							GFP_ATOMIC);
1725 		if (!skb) {
1726 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1727 			goto return_host_buffers;
1728 		}
1729 
1730 		if (rx_skb_reserve > 0)
1731 			skb_reserve(skb, rx_skb_reserve);
1732 
1733 		__net_timestamp(skb);
1734 
1735 		list_for_each_entry(heb, &he_vcc->buffers, entry)
1736 			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1737 
1738 		switch (vcc->qos.aal) {
1739 		case ATM_AAL0:
1740 			/* 2.10.1.5 raw cell receive */
1741 			skb->len = ATM_AAL0_SDU;
1742 			skb_set_tail_pointer(skb, skb->len);
1743 			break;
1744 		case ATM_AAL5:
1745 			/* 2.10.1.2 aal5 receive */
1746 
1747 			skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1748 			skb_set_tail_pointer(skb, skb->len);
1749 #ifdef USE_CHECKSUM_HW
1750 			if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1751 				skb->ip_summed = CHECKSUM_COMPLETE;
1752 				skb->csum = TCP_CKSUM(skb->data,
1753 						he_vcc->pdu_len);
1754 			}
1755 #endif
1756 			break;
1757 		}
1758 
1759 #ifdef should_never_happen
1760 		if (skb->len > vcc->qos.rxtp.max_sdu)
1761 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1762 #endif
1763 
1764 #ifdef notdef
1765 		ATM_SKB(skb)->vcc = vcc;
1766 #endif
1767 		spin_unlock(&he_dev->global_lock);
1768 		vcc->push(vcc, skb);
1769 		spin_lock(&he_dev->global_lock);
1770 
1771 		atomic_inc(&vcc->stats->rx);
1772 
1773 return_host_buffers:
1774 		++pdus_assembled;
1775 
1776 		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1777 			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1778 		INIT_LIST_HEAD(&he_vcc->buffers);
1779 		he_vcc->pdu_len = 0;
1780 
1781 next_rbrq_entry:
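		/*
		 * Ring-walk idiom used throughout this driver: each queue
		 * occupies a naturally aligned power-of-two region, so
		 * RBRQ_MASK() keeps only the offset bits of (head + 1) and
		 * OR-ing them back into the base pointer wraps to the start
		 * of the ring for free after the last entry.
		 */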
1782 		he_dev->rbrq_head = (struct he_rbrq *)
1783 				((unsigned long) he_dev->rbrq_base |
1784 					RBRQ_MASK(he_dev->rbrq_head + 1));
1785 
1786 	}
1787 	read_unlock(&vcc_sklist_lock);
1788 
1789 	if (updated) {
1790 		if (updated > he_dev->rbrq_peak)
1791 			he_dev->rbrq_peak = updated;
1792 
1793 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1794 						G0_RBRQ_H + (group * 16));
1795 	}
1796 
1797 	return pdus_assembled;
1798 }
1799 
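/*
 * he_service_tbrq -- drain the transmit buffer return queue (2.1.6).
 *
 * Each TBRQ entry names a TPD the adapter has finished with; it is
 * matched against the outstanding_tpds list, its iovec DMA mappings
 * are torn down, the skb is popped (or freed) and the TPD returned
 * to its pool.  An EOS entry is the end-of-session marker queued by
 * he_close(), so the closer is simply woken up.
 */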
1800 static void
1801 he_service_tbrq(struct he_dev *he_dev, int group)
1802 {
1803 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1804 				((unsigned long)he_dev->tbrq_base |
1805 					he_dev->hsp->group[group].tbrq_tail);
1806 	struct he_tpd *tpd;
1807 	int slot, updated = 0;
1808 	struct he_tpd *__tpd;
1809 
1810 	/* 2.1.6 transmit buffer return queue */
1811 
1812 	while (he_dev->tbrq_head != tbrq_tail) {
1813 		++updated;
1814 
1815 		HPRINTK("tbrq%d 0x%x%s%s\n",
1816 			group,
1817 			TBRQ_TPD(he_dev->tbrq_head),
1818 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1819 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1820 		tpd = NULL;
1821 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1822 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1823 				tpd = __tpd;
1824 				list_del(&__tpd->entry);
1825 				break;
1826 			}
1827 		}
1828 
1829 		if (tpd == NULL) {
1830 			hprintk("unable to locate tpd for dma buffer %x\n",
1831 						TBRQ_TPD(he_dev->tbrq_head));
1832 			goto next_tbrq_entry;
1833 		}
1834 
1835 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1836 			if (tpd->vcc) {
1837 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1838 					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1839 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1840 			}
1841 			goto next_tbrq_entry;
1842 		}
1843 
1844 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1845 			if (tpd->iovec[slot].addr)
1846 				pci_unmap_single(he_dev->pci_dev,
1847 					tpd->iovec[slot].addr,
1848 					tpd->iovec[slot].len & TPD_LEN_MASK,
1849 							PCI_DMA_TODEVICE);
1850 			if (tpd->iovec[slot].len & TPD_LST)
1851 				break;
1853 		}
1854 
1855 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1856 			if (tpd->vcc && tpd->vcc->pop)
1857 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1858 			else
1859 				dev_kfree_skb_any(tpd->skb);
1860 		}
1861 
1862 next_tbrq_entry:
1863 		if (tpd)
1864 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1865 		he_dev->tbrq_head = (struct he_tbrq *)
1866 				((unsigned long) he_dev->tbrq_base |
1867 					TBRQ_MASK(he_dev->tbrq_head + 1));
1868 	}
1869 
1870 	if (updated) {
1871 		if (updated > he_dev->tbrq_peak)
1872 			he_dev->tbrq_peak = updated;
1873 
1874 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1875 						G0_TBRQ_H + (group * 16));
1876 	}
1877 }
1878 
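/*
 * he_service_rbpl -- replenish the large receive buffer pool ring.
 *
 * For each free ring slot -- stopping one short of the hardware head,
 * per table 3.42 -- a free index is taken from rbpl_table, a he_buff
 * is allocated from the pci pool, and <index, dma address> is
 * published in the ring entry.  The index is stored shifted by
 * RBP_IDX_OFFSET so that he_service_rbrq() can recover the slot (and
 * hence the he_buff) from the address the adapter hands back.
 */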
1879 static void
1880 he_service_rbpl(struct he_dev *he_dev, int group)
1881 {
1882 	struct he_rbp *new_tail;
1883 	struct he_rbp *rbpl_head;
1884 	struct he_buff *heb;
1885 	dma_addr_t mapping;
1886 	int i;
1887 	int moved = 0;
1888 
1889 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1890 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1891 
1892 	for (;;) {
1893 		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1894 						RBPL_MASK(he_dev->rbpl_tail+1));
1895 
1896 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1897 		if (new_tail == rbpl_head)
1898 			break;
1899 
1900 		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1901 		if (i > (RBPL_TABLE_SIZE - 1)) {
1902 			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1903 			if (i > (RBPL_TABLE_SIZE - 1))
1904 				break;
1905 		}
1906 		he_dev->rbpl_hint = i + 1;
1907 
1908 		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1909 		if (!heb)
1910 			break;
1911 		heb->mapping = mapping;
1912 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1913 		he_dev->rbpl_virt[i] = heb;
1914 		set_bit(i, he_dev->rbpl_table);
1915 		new_tail->idx = i << RBP_IDX_OFFSET;
1916 		new_tail->phys = mapping + offsetof(struct he_buff, data);
1917 
1918 		he_dev->rbpl_tail = new_tail;
1919 		++moved;
1920 	}
1921 
1922 	if (moved)
1923 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1924 }
1925 
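/*
 * he_tasklet -- bottom half for the interrupt queue.
 *
 * Each interrupt status word encodes a type and a group; threshold
 * interrupts fall through to the same service routine as their
 * timer/completion counterparts.  An ITYPE_INVALID entry means the
 * adapter has not updated the word yet (see 8.1.1), in which case all
 * queues are polled.  Consumed entries are reset to ITYPE_INVALID so
 * that stale entries can be recognized on the next pass.
 */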
1926 static void
1927 he_tasklet(unsigned long data)
1928 {
1929 	unsigned long flags;
1930 	struct he_dev *he_dev = (struct he_dev *) data;
1931 	int group, type;
1932 	int updated = 0;
1933 
1934 	HPRINTK("tasklet (0x%lx)\n", data);
1935 	spin_lock_irqsave(&he_dev->global_lock, flags);
1936 
1937 	while (he_dev->irq_head != he_dev->irq_tail) {
1938 		++updated;
1939 
1940 		type = ITYPE_TYPE(he_dev->irq_head->isw);
1941 		group = ITYPE_GROUP(he_dev->irq_head->isw);
1942 
1943 		switch (type) {
1944 			case ITYPE_RBRQ_THRESH:
1945 				HPRINTK("rbrq%d threshold\n", group);
1946 				/* fall through */
1947 			case ITYPE_RBRQ_TIMER:
1948 				if (he_service_rbrq(he_dev, group))
1949 					he_service_rbpl(he_dev, group);
1950 				break;
1951 			case ITYPE_TBRQ_THRESH:
1952 				HPRINTK("tbrq%d threshold\n", group);
1953 				/* fall through */
1954 			case ITYPE_TPD_COMPLETE:
1955 				he_service_tbrq(he_dev, group);
1956 				break;
1957 			case ITYPE_RBPL_THRESH:
1958 				he_service_rbpl(he_dev, group);
1959 				break;
1960 			case ITYPE_RBPS_THRESH:
1961 				/* shouldn't happen unless small buffers enabled */
1962 				break;
1963 			case ITYPE_PHY:
1964 				HPRINTK("phy interrupt\n");
1965 #ifdef CONFIG_ATM_HE_USE_SUNI
1966 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1967 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1968 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1969 				spin_lock_irqsave(&he_dev->global_lock, flags);
1970 #endif
1971 				break;
1972 			case ITYPE_OTHER:
1973 				switch (type|group) {
1974 					case ITYPE_PARITY:
1975 						hprintk("parity error\n");
1976 						break;
1977 					case ITYPE_ABORT:
1978 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1979 						break;
1980 				}
1981 				break;
1982 			case ITYPE_TYPE(ITYPE_INVALID):
1983 				/* see 8.1.1 -- check all queues */
1984 
1985 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1986 
1987 				he_service_rbrq(he_dev, 0);
1988 				he_service_rbpl(he_dev, 0);
1989 				he_service_tbrq(he_dev, 0);
1990 				break;
1991 			default:
1992 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1993 		}
1994 
1995 		he_dev->irq_head->isw = ITYPE_INVALID;
1996 
1997 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
1998 	}
1999 
2000 	if (updated) {
2001 		if (updated > he_dev->irq_peak)
2002 			he_dev->irq_peak = updated;
2003 
2004 		he_writel(he_dev,
2005 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2006 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2007 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2008 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2009 	}
2010 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2011 }
2012 
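/*
 * he_irq_handler -- hard interrupt handler, kept minimal: snapshot
 * the tail offset the adapter DMAs into host memory (re-reading
 * IRQ0_BASE if the offset looks stale), ack the interrupt, and defer
 * the actual queue processing to the tasklet.
 */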
2013 static irqreturn_t
2014 he_irq_handler(int irq, void *dev_id)
2015 {
2016 	unsigned long flags;
2017 	struct he_dev *he_dev = (struct he_dev *)dev_id;
2018 	int handled = 0;
2019 
2020 	if (he_dev == NULL)
2021 		return IRQ_NONE;
2022 
2023 	spin_lock_irqsave(&he_dev->global_lock, flags);
2024 
2025 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2026 						(*he_dev->irq_tailoffset << 2));
2027 
2028 	if (he_dev->irq_tail == he_dev->irq_head) {
2029 		HPRINTK("tailoffset not updated?\n");
2030 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2031 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2032 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2033 	}
2034 
2035 #ifdef DEBUG
2036 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2037 		hprintk("spurious (or shared) interrupt?\n");
2038 #endif
2039 
2040 	if (he_dev->irq_head != he_dev->irq_tail) {
2041 		handled = 1;
2042 		tasklet_schedule(&he_dev->tasklet);
2043 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2044 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2045 	}
2046 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2047 	return IRQ_RETVAL(handled);
2049 }
2050 
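/*
 * __enqueue_tpd -- post a TPD on the transmit packet descriptor ready
 * queue (2.1.5).  Called with he_dev->global_lock held.
 *
 * Ring-full detection is lazy: only when tail+1 appears to hit the
 * cached head is the real head re-read from TPDRQ_B_H, since reading
 * the adapter on every enqueue would be needlessly slow.  If the ring
 * really is full, the PDU is unmapped and dropped (a proper transmit
 * backlog remains a FIXME, as noted below).
 */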
2051 static __inline__ void
2052 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2053 {
2054 	struct he_tpdrq *new_tail;
2055 
2056 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2057 					tpd, cid, he_dev->tpdrq_tail);
2058 
2059 	/* new_tail = he_dev->tpdrq_tail; */
2060 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2061 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2062 
2063 	/*
2064 	 * check to see if we are about to set the tail == head
2065 	 * if true, update the head pointer from the adapter
2066 	 * to see if this is really the case (reading the queue
2067 	 * head for every enqueue would be unnecessarily slow)
2068 	 */
2069 
2070 	if (new_tail == he_dev->tpdrq_head) {
2071 		he_dev->tpdrq_head = (struct he_tpdrq *)
2072 			(((unsigned long)he_dev->tpdrq_base) |
2073 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2074 
2075 		if (new_tail == he_dev->tpdrq_head) {
2076 			int slot;
2077 
2078 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2079 			/*
2080 			 * FIXME
2081 			 * push tpd onto a transmit backlog queue
2082 			 * after service_tbrq, service the backlog
2083 			 * for now, we just drop the pdu
2084 			 */
2085 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2086 				if (tpd->iovec[slot].addr)
2087 					pci_unmap_single(he_dev->pci_dev,
2088 						tpd->iovec[slot].addr,
2089 						tpd->iovec[slot].len & TPD_LEN_MASK,
2090 								PCI_DMA_TODEVICE);
2091 			}
2092 			if (tpd->skb) {
2093 				if (tpd->vcc->pop)
2094 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2095 				else
2096 					dev_kfree_skb_any(tpd->skb);
2097 				atomic_inc(&tpd->vcc->stats->tx_err);
2098 			}
2099 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2100 			return;
2101 		}
2102 	}
2103 
2104 	/* 2.1.5 transmit packet descriptor ready queue */
2105 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2106 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2107 	he_dev->tpdrq_tail->cid = cid;
2108 	wmb();
2109 
2110 	he_dev->tpdrq_tail = new_tail;
2111 
2112 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2113 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2114 }
2115 
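/*
 * he_open -- open a vcc.  The connection id packs vpi/vci via
 * he_mkcid().  On the transmit side, the traffic class selects the
 * TSR0 template (UBR uses the weighted-minimum/GER-update bits; CBR
 * claims a CS_STPER rate-controller register) before TSR0..TSR14 are
 * programmed.  On the receive side, RSR1/RSR4 pin the connection to
 * group 0 and the large buffer pool, and per 5.1.11 the open/closed
 * indication in RSR0 is written last.
 */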
2116 static int
2117 he_open(struct atm_vcc *vcc)
2118 {
2119 	unsigned long flags;
2120 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2121 	struct he_vcc *he_vcc;
2122 	int err = 0;
2123 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2124 	short vpi = vcc->vpi;
2125 	int vci = vcc->vci;
2126 
2127 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2128 		return 0;
2129 
2130 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2131 
2132 	set_bit(ATM_VF_ADDR, &vcc->flags);
2133 
2134 	cid = he_mkcid(he_dev, vpi, vci);
2135 
2136 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2137 	if (he_vcc == NULL) {
2138 		hprintk("unable to allocate he_vcc during open\n");
2139 		return -ENOMEM;
2140 	}
2141 
2142 	INIT_LIST_HEAD(&he_vcc->buffers);
2143 	he_vcc->pdu_len = 0;
2144 	he_vcc->rc_index = -1;
2145 
2146 	init_waitqueue_head(&he_vcc->rx_waitq);
2147 	init_waitqueue_head(&he_vcc->tx_waitq);
2148 
2149 	vcc->dev_data = he_vcc;
2150 
2151 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2152 		int pcr_goal;
2153 
2154 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2155 		if (pcr_goal == 0)
2156 			pcr_goal = he_dev->atm_dev->link_rate;
2157 		if (pcr_goal < 0)	/* means round down, technically */
2158 			pcr_goal = -pcr_goal;
2159 
2160 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2161 
2162 		switch (vcc->qos.aal) {
2163 			case ATM_AAL5:
2164 				tsr0_aal = TSR0_AAL5;
2165 				tsr4 = TSR4_AAL5;
2166 				break;
2167 			case ATM_AAL0:
2168 				tsr0_aal = TSR0_AAL0_SDU;
2169 				tsr4 = TSR4_AAL0_SDU;
2170 				break;
2171 			default:
2172 				err = -EINVAL;
2173 				goto open_failed;
2174 		}
2175 
2176 		spin_lock_irqsave(&he_dev->global_lock, flags);
2177 		tsr0 = he_readl_tsr0(he_dev, cid);
2178 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2179 
2180 		if (TSR0_CONN_STATE(tsr0) != 0) {
2181 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2182 			err = -EBUSY;
2183 			goto open_failed;
2184 		}
2185 
2186 		switch (vcc->qos.txtp.traffic_class) {
2187 			case ATM_UBR:
2188 				/* 2.3.3.1 open connection ubr */
2189 
2190 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2191 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2192 				break;
2193 
2194 			case ATM_CBR:
2195 				/* 2.3.3.2 open connection cbr */
2196 
2197 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2198 				if ((he_dev->total_bw + pcr_goal)
2199 					> (he_dev->atm_dev->link_rate * 9 / 10))
2200 				{
2201 					err = -EBUSY;
2202 					goto open_failed;
2203 				}
2204 
2205 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2206 
2207 				/* find an unused cs_stper register */
2208 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2209 					if (he_dev->cs_stper[reg].inuse == 0 ||
2210 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2211 						break;
2212 
2213 				if (reg == HE_NUM_CS_STPER) {
2214 					err = -EBUSY;
2215 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2216 					goto open_failed;
2217 				}
2218 
2219 				he_dev->total_bw += pcr_goal;
2220 
2221 				he_vcc->rc_index = reg;
2222 				++he_dev->cs_stper[reg].inuse;
2223 				he_dev->cs_stper[reg].pcr = pcr_goal;
2224 
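				/*
				 * Worked example (illustrative numbers
				 * only): a 155 Mb/s card runs the cell
				 * scheduler at 50 MHz, so a pcr_goal of
				 * 100000 cells/sec yields period = 500
				 * clocks per cell slot; half the period,
				 * converted to the adapter's rate format,
				 * is written to CS_STPER<reg> below.
				 */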
2225 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2226 				period = clock / pcr_goal;
2227 
2228 				HPRINTK("rc_index = %d period = %d\n",
2229 								reg, period);
2230 
2231 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2232 							CS_STPER0 + reg);
2233 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2234 
2235 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2236 							TSR0_RC_INDEX(reg);
2237 
2238 				break;
2239 			default:
2240 				err = -EINVAL;
2241 				goto open_failed;
2242 		}
2243 
2244 		spin_lock_irqsave(&he_dev->global_lock, flags);
2245 
2246 		he_writel_tsr0(he_dev, tsr0, cid);
2247 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2248 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2249 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2250 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2251 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2252 
2253 		he_writel_tsr3(he_dev, 0x0, cid);
2254 		he_writel_tsr5(he_dev, 0x0, cid);
2255 		he_writel_tsr6(he_dev, 0x0, cid);
2256 		he_writel_tsr7(he_dev, 0x0, cid);
2257 		he_writel_tsr8(he_dev, 0x0, cid);
2258 		he_writel_tsr10(he_dev, 0x0, cid);
2259 		he_writel_tsr11(he_dev, 0x0, cid);
2260 		he_writel_tsr12(he_dev, 0x0, cid);
2261 		he_writel_tsr13(he_dev, 0x0, cid);
2262 		he_writel_tsr14(he_dev, 0x0, cid);
2263 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2264 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2265 	}
2266 
2267 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2268 		unsigned aal;
2269 
2270 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2271 				&HE_VCC(vcc)->rx_waitq);
2272 
2273 		switch (vcc->qos.aal) {
2274 			case ATM_AAL5:
2275 				aal = RSR0_AAL5;
2276 				break;
2277 			case ATM_AAL0:
2278 				aal = RSR0_RAWCELL;
2279 				break;
2280 			default:
2281 				err = -EINVAL;
2282 				goto open_failed;
2283 		}
2284 
2285 		spin_lock_irqsave(&he_dev->global_lock, flags);
2286 
2287 		rsr0 = he_readl_rsr0(he_dev, cid);
2288 		if (rsr0 & RSR0_OPEN_CONN) {
2289 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2290 
2291 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2292 			err = -EBUSY;
2293 			goto open_failed;
2294 		}
2295 
2296 		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2297 		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2298 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2299 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2300 
2301 #ifdef USE_CHECKSUM_HW
2302 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2303 			rsr0 |= RSR0_TCP_CKSUM;
2304 #endif
2305 
2306 		he_writel_rsr4(he_dev, rsr4, cid);
2307 		he_writel_rsr1(he_dev, rsr1, cid);
2308 		/* 5.1.11 last parameter initialized should be
2309 			  the open/closed indication in rsr0 */
2310 		he_writel_rsr0(he_dev,
2311 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2312 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2313 
2314 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2315 	}
2316 
2317 open_failed:
2318 
2319 	if (err) {
2320 		kfree(he_vcc);
2321 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2322 	} else
2323 		set_bit(ATM_VF_READY, &vcc->flags);
2325 
2326 	return err;
2327 }
2328 
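/*
 * he_close -- close a vcc.
 *
 * Receive close (2.7.2.2): wait out any close already in progress
 * (RCC_BUSY), write RSR0_CLOSE_CONN, kick the mailbox, then sleep on
 * rx_waitq until he_service_rbrq() sees the CON_CLOSED completion.
 * Transmit close (2.1.2, 2.3.1.1): wait for queued packets to drain,
 * flush the connection, then queue an end-of-session TPD and sleep on
 * tx_waitq until it returns through the TBRQ.
 */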
2329 static void
2330 he_close(struct atm_vcc *vcc)
2331 {
2332 	unsigned long flags;
2333 	DECLARE_WAITQUEUE(wait, current);
2334 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2335 	struct he_tpd *tpd;
2336 	unsigned cid;
2337 	struct he_vcc *he_vcc = HE_VCC(vcc);
2338 #define MAX_RETRY 30
2339 	int retry = 0, sleep = 1, tx_inuse;
2340 
2341 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2342 
2343 	clear_bit(ATM_VF_READY, &vcc->flags);
2344 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2345 
2346 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2347 		int timeout;
2348 
2349 		HPRINTK("close rx cid 0x%x\n", cid);
2350 
2351 		/* 2.7.2.2 close receive operation */
2352 
2353 		/* wait for previous close (if any) to finish */
2354 
2355 		spin_lock_irqsave(&he_dev->global_lock, flags);
2356 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2357 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2358 			udelay(250);
2359 		}
2360 
2361 		set_current_state(TASK_UNINTERRUPTIBLE);
2362 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2363 
2364 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2365 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2366 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2367 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2368 
2369 		timeout = schedule_timeout(30*HZ);
2370 
2371 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2372 		set_current_state(TASK_RUNNING);
2373 
2374 		if (timeout == 0)
2375 			hprintk("close rx timeout cid 0x%x\n", cid);
2376 
2377 		HPRINTK("close rx cid 0x%x complete\n", cid);
2378 
2379 	}
2380 
2381 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2382 		volatile unsigned tsr4, tsr0;
2383 		int timeout;
2384 
2385 		HPRINTK("close tx cid 0x%x\n", cid);
2386 
2387 		/* 2.1.2
2388 		 *
2389 		 * ... the host must first stop queueing packets to the TPDRQ
2390 		 * on the connection to be closed, then wait for all outstanding
2391 		 * packets to be transmitted and their buffers returned to the
2392 		 * TBRQ. When the last packet on the connection arrives in the
2393 		 * TBRQ, the host issues the close command to the adapter.
2394 		 */
2395 
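		/*
		 * Exponential backoff while the socket still holds write
		 * memory: 1, 2, 4, ... ms per step, capped near 250 ms,
		 * for at most MAX_RETRY (30) steps -- roughly six seconds
		 * worst case before giving up and logging tx_inuse.
		 */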
2396 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2397 		       (retry < MAX_RETRY)) {
2398 			msleep(sleep);
2399 			if (sleep < 250)
2400 				sleep = sleep * 2;
2401 
2402 			++retry;
2403 		}
2404 
2405 		if (tx_inuse > 1)
2406 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2407 
2408 		/* 2.3.1.1 generic close operations with flush */
2409 
2410 		spin_lock_irqsave(&he_dev->global_lock, flags);
2411 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2412 					/* also clears TSR4_SESSION_ENDED */
2413 
2414 		switch (vcc->qos.txtp.traffic_class) {
2415 			case ATM_UBR:
2416 				he_writel_tsr1(he_dev,
2417 					TSR1_MCR(rate_to_atmf(200000))
2418 					| TSR1_PCR(0), cid);
2419 				break;
2420 			case ATM_CBR:
2421 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2422 				break;
2423 		}
2424 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2425 
2426 		tpd = __alloc_tpd(he_dev);
2427 		if (tpd == NULL) {
2428 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2429 			goto close_tx_incomplete;
2430 		}
2431 		tpd->status |= TPD_EOS | TPD_INT;
2432 		tpd->skb = NULL;
2433 		tpd->vcc = vcc;
2434 		wmb();
2435 
2436 		set_current_state(TASK_UNINTERRUPTIBLE);
2437 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2438 		__enqueue_tpd(he_dev, tpd, cid);
2439 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2440 
2441 		timeout = schedule_timeout(30*HZ);
2442 
2443 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2444 		set_current_state(TASK_RUNNING);
2445 
2446 		spin_lock_irqsave(&he_dev->global_lock, flags);
2447 
2448 		if (timeout == 0) {
2449 			hprintk("close tx timeout cid 0x%x\n", cid);
2450 			goto close_tx_incomplete;
2451 		}
2452 
2453 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2454 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2455 			udelay(250);
2456 		}
2457 
2458 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2459 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2460 			udelay(250);
2461 		}
2462 
2463 close_tx_incomplete:
2464 
2465 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2466 			int reg = he_vcc->rc_index;
2467 
2468 			HPRINTK("cs_stper reg = %d\n", reg);
2469 
2470 			if (he_dev->cs_stper[reg].inuse == 0)
2471 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2472 			else
2473 				--he_dev->cs_stper[reg].inuse;
2474 
2475 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2476 		}
2477 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2478 
2479 		HPRINTK("close tx cid 0x%x complete\n", cid);
2480 	}
2481 
2482 	kfree(he_vcc);
2483 
2484 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2485 }
2486 
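/*
 * he_send -- transmit one PDU.
 *
 * AAL5 PDUs are sent as user cells.  AAL0 PDUs must be exactly one
 * raw cell (ATM_AAL0_SDU bytes); the PTI/CLP bits are lifted from the
 * 4-byte header, which is then pulled off so only the 48-byte payload
 * is queued.  Without USE_SCATTERGATHER the skb must be linear and is
 * mapped as a single DMA buffer.
 */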
2487 static int
2488 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2489 {
2490 	unsigned long flags;
2491 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2492 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2493 	struct he_tpd *tpd;
2494 #ifdef USE_SCATTERGATHER
2495 	int i, slot = 0;
2496 #endif
2497 
2498 #define HE_TPD_BUFSIZE 0xffff
2499 
2500 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2501 
2502 	if ((skb->len > HE_TPD_BUFSIZE) ||
2503 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2504 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2505 		if (vcc->pop)
2506 			vcc->pop(vcc, skb);
2507 		else
2508 			dev_kfree_skb_any(skb);
2509 		atomic_inc(&vcc->stats->tx_err);
2510 		return -EINVAL;
2511 	}
2512 
2513 #ifndef USE_SCATTERGATHER
2514 	if (skb_shinfo(skb)->nr_frags) {
2515 		hprintk("no scatter/gather support\n");
2516 		if (vcc->pop)
2517 			vcc->pop(vcc, skb);
2518 		else
2519 			dev_kfree_skb_any(skb);
2520 		atomic_inc(&vcc->stats->tx_err);
2521 		return -EINVAL;
2522 	}
2523 #endif
2524 	spin_lock_irqsave(&he_dev->global_lock, flags);
2525 
2526 	tpd = __alloc_tpd(he_dev);
2527 	if (tpd == NULL) {
2528 		if (vcc->pop)
2529 			vcc->pop(vcc, skb);
2530 		else
2531 			dev_kfree_skb_any(skb);
2532 		atomic_inc(&vcc->stats->tx_err);
2533 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2534 		return -ENOMEM;
2535 	}
2536 
2537 	if (vcc->qos.aal == ATM_AAL5)
2538 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2539 	else {
2540 		char *pti_clp = (void *) (skb->data + 3);
2541 		int clp, pti;
2542 
2543 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2544 		clp = (*pti_clp & ATM_HDR_CLP);
2545 		tpd->status |= TPD_CELLTYPE(pti);
2546 		if (clp)
2547 			tpd->status |= TPD_CLP;
2548 
2549 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2550 	}
2551 
2552 #ifdef USE_SCATTERGATHER
2553 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2554 				skb_headlen(skb), PCI_DMA_TODEVICE);
2555 	tpd->iovec[slot].len = skb_headlen(skb);
2556 	++slot;
2557 
2558 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2559 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2560 
2561 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2562 			tpd->vcc = vcc;
2563 			tpd->skb = NULL;	/* not the last fragment,
2564 						   so don't ->push() yet */
2565 			wmb();
2566 
2567 			__enqueue_tpd(he_dev, tpd, cid);
2568 			tpd = __alloc_tpd(he_dev);
2569 			if (tpd == NULL) {
2570 				if (vcc->pop)
2571 					vcc->pop(vcc, skb);
2572 				else
2573 					dev_kfree_skb_any(skb);
2574 				atomic_inc(&vcc->stats->tx_err);
2575 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2576 				return -ENOMEM;
2577 			}
2578 			tpd->status |= TPD_USERCELL;
2579 			slot = 0;
2580 		}
2581 
2582 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2583 			(void *) page_address(frag->page) + frag->page_offset,
2584 				frag->size, PCI_DMA_TODEVICE);
2585 		tpd->iovec[slot].len = frag->size;
2586 		++slot;
2587 
2588 	}
2589 
2590 	tpd->iovec[slot - 1].len |= TPD_LST;
2591 #else
2592 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2593 	tpd->length0 = skb->len | TPD_LST;
2594 #endif
2595 	tpd->status |= TPD_INT;
2596 
2597 	tpd->vcc = vcc;
2598 	tpd->skb = skb;
2599 	wmb();
2600 	ATM_SKB(skb)->vcc = vcc;
2601 
2602 	__enqueue_tpd(he_dev, tpd, cid);
2603 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2604 
2605 	atomic_inc(&vcc->stats->tx);
2606 
2607 	return 0;
2608 }
2609 
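/*
 * he_ioctl -- HE_GET_REG lets a CAP_NET_ADMIN user read PCI, RCM, TCM
 * and mailbox registers (useful for diagnostics); everything else is
 * punted to the PHY layer when SUNI support is compiled in.
 */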
2610 static int
2611 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2612 {
2613 	unsigned long flags;
2614 	struct he_dev *he_dev = HE_DEV(atm_dev);
2615 	struct he_ioctl_reg reg;
2616 	int err = 0;
2617 
2618 	switch (cmd) {
2619 		case HE_GET_REG:
2620 			if (!capable(CAP_NET_ADMIN))
2621 				return -EPERM;
2622 
2623 			if (copy_from_user(&reg, arg,
2624 					   sizeof(struct he_ioctl_reg)))
2625 				return -EFAULT;
2626 
2627 			spin_lock_irqsave(&he_dev->global_lock, flags);
2628 			switch (reg.type) {
2629 				case HE_REGTYPE_PCI:
2630 					if (reg.addr >= HE_REGMAP_SIZE) {
2631 						err = -EINVAL;
2632 						break;
2633 					}
2634 
2635 					reg.val = he_readl(he_dev, reg.addr);
2636 					break;
2637 				case HE_REGTYPE_RCM:
2638 					reg.val =
2639 						he_readl_rcm(he_dev, reg.addr);
2640 					break;
2641 				case HE_REGTYPE_TCM:
2642 					reg.val =
2643 						he_readl_tcm(he_dev, reg.addr);
2644 					break;
2645 				case HE_REGTYPE_MBOX:
2646 					reg.val =
2647 						he_readl_mbox(he_dev, reg.addr);
2648 					break;
2649 				default:
2650 					err = -EINVAL;
2651 					break;
2652 			}
2653 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2654 			if (err == 0)
2655 				if (copy_to_user(arg, &reg,
2656 							sizeof(struct he_ioctl_reg)))
2657 					return -EFAULT;
2658 			break;
2659 		default:
2660 #ifdef CONFIG_ATM_HE_USE_SUNI
2661 			if (atm_dev->phy && atm_dev->phy->ioctl)
2662 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2663 #else /* CONFIG_ATM_HE_USE_SUNI */
2664 			err = -EINVAL;
2665 #endif /* CONFIG_ATM_HE_USE_SUNI */
2666 			break;
2667 	}
2668 
2669 	return err;
2670 }
2671 
2672 static void
2673 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2674 {
2675 	unsigned long flags;
2676 	struct he_dev *he_dev = HE_DEV(atm_dev);
2677 
2678 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2679 
2680 	spin_lock_irqsave(&he_dev->global_lock, flags);
2681 	he_writel(he_dev, val, FRAMER + (addr*4));
2682 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2683 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2684 }
2685 
2687 static unsigned char
2688 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2689 {
2690 	unsigned long flags;
2691 	struct he_dev *he_dev = HE_DEV(atm_dev);
2692 	unsigned reg;
2693 
2694 	spin_lock_irqsave(&he_dev->global_lock, flags);
2695 	reg = he_readl(he_dev, FRAMER + (addr*4));
2696 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2697 
2698 	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2699 	return reg;
2700 }
2701 
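/*
 * he_proc_read -- emit one line of /proc output per call, selected by
 * *pos.  Note the cell counters (mcc/oec/dcc/cec) accumulate in
 * statics, so they are cumulative across reads and shared between
 * adapters.
 */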
2702 static int
2703 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2704 {
2705 	unsigned long flags;
2706 	struct he_dev *he_dev = HE_DEV(dev);
2707 	int left, i;
2708 #ifdef notdef
2709 	struct he_rbrq *rbrq_tail;
2710 	struct he_tpdrq *tpdrq_head;
2711 	int rbpl_head, rbpl_tail;
2712 #endif
2713 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2714 
2716 	left = *pos;
2717 	if (!left--)
2718 		return sprintf(page, "ATM he driver\n");
2719 
2720 	if (!left--)
2721 		return sprintf(page, "%s%s\n\n",
2722 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2723 
2724 	if (!left--)
2725 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2726 
2727 	spin_lock_irqsave(&he_dev->global_lock, flags);
2728 	mcc += he_readl(he_dev, MCC);
2729 	oec += he_readl(he_dev, OEC);
2730 	dcc += he_readl(he_dev, DCC);
2731 	cec += he_readl(he_dev, CEC);
2732 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2733 
2734 	if (!left--)
2735 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2736 							mcc, oec, dcc, cec);
2737 
2738 	if (!left--)
2739 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2740 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2741 
2742 	if (!left--)
2743 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2744 						CONFIG_TPDRQ_SIZE);
2745 
2746 	if (!left--)
2747 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2748 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2749 
2750 	if (!left--)
2751 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2752 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2753 
2755 #ifdef notdef
2756 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2757 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2758 
2759 	inuse = rbpl_head - rbpl_tail;
2760 	if (inuse < 0)
2761 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2762 	inuse /= sizeof(struct he_rbp);
2763 
2764 	if (!left--)
2765 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2766 						CONFIG_RBPL_SIZE, inuse);
2767 #endif
2768 
2769 	if (!left--)
2770 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2771 
2772 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2773 		if (!left--)
2774 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2775 						he_dev->cs_stper[i].pcr,
2776 						he_dev->cs_stper[i].inuse);
2777 
2778 	if (!left--)
2779 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2780 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2781 
2782 	return 0;
2783 }
2784 
2785 /* eeprom routines  -- see 4.7 */
2786 
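/*
 * read_prom_byte -- bit-bang one byte out of the serial EEPROM.
 *
 * With the write-enable bit (0x800) set in HOST_CNTL, the READ opcode
 * in readtab[] is clocked out, followed by the 8-bit address MSB
 * first (two clocktab[] phases per bit, the address bit driven on
 * bit 9).  Write enable is then dropped and the data byte is clocked
 * back in one bit per clock pair, sampled from ID_DOUT and shifted
 * into place, MSB first.
 */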
2787 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2788 {
2789 	u32 val = 0, tmp_read = 0;
2790 	int i, j = 0;
2791 	u8 byte_read = 0;
2792 
2793 	val = readl(he_dev->membase + HOST_CNTL);
2794 	val &= 0xFFFFE0FF;
2795 
2796 	/* Turn on write enable */
2797 	val |= 0x800;
2798 	he_writel(he_dev, val, HOST_CNTL);
2799 
2800 	/* Send READ instruction */
2801 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2802 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2803 		udelay(EEPROM_DELAY);
2804 	}
2805 
2806 	/* Next, we need to send the byte address to read from */
2807 	for (i = 7; i >= 0; i--) {
2808 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2809 		udelay(EEPROM_DELAY);
2810 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2811 		udelay(EEPROM_DELAY);
2812 	}
2813 
2814 	j = 0;
2815 
2816 	val &= 0xFFFFF7FF;      /* Turn off write enable */
2817 	he_writel(he_dev, val, HOST_CNTL);
2818 
2819 	/* Now, we can read data from the EEPROM by clocking it in */
2820 	for (i = 7; i >= 0; i--) {
2821 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2822 		udelay(EEPROM_DELAY);
2823 		tmp_read = he_readl(he_dev, HOST_CNTL);
2824 		byte_read |= (unsigned char)
2825 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2826 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2827 		udelay(EEPROM_DELAY);
2828 	}
2829 
2830 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2831 	udelay(EEPROM_DELAY);
2832 
2833 	return byte_read;
2834 }
2835 
2836 MODULE_LICENSE("GPL");
2837 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2838 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2839 module_param(disable64, bool, 0);
2840 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2841 module_param(nvpibits, short, 0);
2842 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2843 module_param(nvcibits, short, 0);
2844 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2845 module_param(rx_skb_reserve, short, 0);
2846 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2847 module_param(irq_coalesce, bool, 0);
2848 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2849 module_param(sdh, bool, 0);
2850 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
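/*
 * Load-time usage sketch (hypothetical values):
 *
 *	modprobe he nvpibits=2 nvcibits=10 sdh=1
 *
 * The vpi and vci widths together define the connection-id space, so
 * they should only be changed in concert.
 */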
2851 
2852 static struct pci_device_id he_pci_tbl[] = {
2853 	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2854 	{ 0, }
2855 };
2856 
2857 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2858 
2859 static struct pci_driver he_driver = {
2860 	.name =		"he",
2861 	.probe =	he_init_one,
2862 	.remove =	he_remove_one,
2863 	.id_table =	he_pci_tbl,
2864 };
2865 
2866 module_pci_driver(he_driver);
2867