1 /*
2 
3   he.c
4 
5   ForeRunnerHE ATM Adapter driver for ATM on Linux
6   Copyright (C) 1999-2001  Naval Research Laboratory
7 
8   This library is free software; you can redistribute it and/or
9   modify it under the terms of the GNU Lesser General Public
10   License as published by the Free Software Foundation; either
11   version 2.1 of the License, or (at your option) any later version.
12 
13   This library is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16   Lesser General Public License for more details.
17 
18   You should have received a copy of the GNU Lesser General Public
19   License along with this library; if not, write to the Free Software
20   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 
22 */
23 
24 /*
25 
26   he.c
27 
28   ForeRunnerHE ATM Adapter driver for ATM on Linux
29   Copyright (C) 1999-2001  Naval Research Laboratory
30 
31   Permission to use, copy, modify and distribute this software and its
32   documentation is hereby granted, provided that both the copyright
33   notice and this permission notice appear in all copies of the software,
34   derivative works or modified versions, and any portions thereof, and
35   that both notices appear in supporting documentation.
36 
37   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39   RESULTING FROM THE USE OF THIS SOFTWARE.
40 
41   This driver was written using the "Programmer's Reference Manual for
42   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43 
44   AUTHORS:
45 	chas williams <chas@cmf.nrl.navy.mil>
46 	eric kinzie <ekinzie@cmf.nrl.navy.mil>
47 
48   NOTES:
49 	4096 supported 'connections'
50 	group 0 is used for all traffic
51 	interrupt queue 0 is used for all interrupts
52 	aal0 support (based on work from ulrich.u.muller@nokia.com)
53 
54  */
55 
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
65 #include <linux/mm.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
72 #include <asm/io.h>
73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h>
75 
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
79 
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW			/* still confused about this */
82 /* #undef HE_DEBUG */
83 
84 #include "he.h"
85 #include "suni.h"
86 #include <linux/atm_he.h>
87 
88 #define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89 
90 #ifdef HE_DEBUG
91 #define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92 #else /* !HE_DEBUG */
93 #define HPRINTK(fmt,args...)	do { } while (0)
94 #endif /* HE_DEBUG */
95 
96 /* declarations */
97 
98 static int he_open(struct atm_vcc *vcc);
99 static void he_close(struct atm_vcc *vcc);
100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
103 static void he_tasklet(unsigned long data);
104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105 static int he_start(struct atm_dev *dev);
106 static void he_stop(struct he_dev *dev);
107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109 
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111 
112 /* globals */
113 
114 static struct he_dev *he_devs;
115 static bool disable64;
116 static short nvpibits = -1;
117 static short nvcibits = -1;
118 static short rx_skb_reserve = 16;
119 static bool irq_coalesce = true;
120 static bool sdh = false;
121 
122 /* Read from EEPROM = 0000 0011b */
123 static unsigned int readtab[] = {
124 	CS_HIGH | CLK_HIGH,
125 	CS_LOW | CLK_LOW,
126 	CLK_HIGH,               /* 0 */
127 	CLK_LOW,
128 	CLK_HIGH,               /* 0 */
129 	CLK_LOW,
130 	CLK_HIGH,               /* 0 */
131 	CLK_LOW,
132 	CLK_HIGH,               /* 0 */
133 	CLK_LOW,
134 	CLK_HIGH,               /* 0 */
135 	CLK_LOW,
136 	CLK_HIGH,               /* 0 */
137 	CLK_LOW | SI_HIGH,
138 	CLK_HIGH | SI_HIGH,     /* 1 */
139 	CLK_LOW | SI_HIGH,
140 	CLK_HIGH | SI_HIGH      /* 1 */
141 };
142 
143 /* Clock to read from/write to the EEPROM */
144 static unsigned int clocktab[] = {
145 	CLK_LOW,
146 	CLK_HIGH,
147 	CLK_LOW,
148 	CLK_HIGH,
149 	CLK_LOW,
150 	CLK_HIGH,
151 	CLK_LOW,
152 	CLK_HIGH,
153 	CLK_LOW,
154 	CLK_HIGH,
155 	CLK_LOW,
156 	CLK_HIGH,
157 	CLK_LOW,
158 	CLK_HIGH,
159 	CLK_LOW,
160 	CLK_HIGH,
161 	CLK_LOW
162 };
163 
164 static struct atmdev_ops he_ops =
165 {
166 	.open =		he_open,
167 	.close =	he_close,
168 	.ioctl =	he_ioctl,
169 	.send =		he_send,
170 	.phy_put =	he_phy_put,
171 	.phy_get =	he_phy_get,
172 	.proc_read =	he_proc_read,
173 	.owner =	THIS_MODULE
174 };
175 
176 #define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg)		readl((dev)->membase + (reg))
178 
179 /* section 2.12 connection memory access */
180 
181 static __inline__ void
182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 								unsigned flags)
184 {
185 	he_writel(he_dev, val, CON_DAT);
186 	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
187 	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189 }
190 
191 #define he_writel_rcm(dev, val, reg) 				\
192 			he_writel_internal(dev, val, reg, CON_CTL_RCM)
193 
194 #define he_writel_tcm(dev, val, reg) 				\
195 			he_writel_internal(dev, val, reg, CON_CTL_TCM)
196 
197 #define he_writel_mbox(dev, val, reg) 				\
198 			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199 
200 static unsigned
201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202 {
203 	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 	return he_readl(he_dev, CON_DAT);
206 }
207 
208 #define he_readl_rcm(dev, reg) \
209 			he_readl_internal(dev, reg, CON_CTL_RCM)
210 
211 #define he_readl_tcm(dev, reg) \
212 			he_readl_internal(dev, reg, CON_CTL_TCM)
213 
214 #define he_readl_mbox(dev, reg) \
215 			he_readl_internal(dev, reg, CON_CTL_MBOX)
216 
217 
218 /* figure 2.2 connection id */
219 
220 #define he_mkcid(dev, vpi, vci)		((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
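/*
 * Worked example (illustrative): with vcibits = 10, vpi = 1 and
 * vci = 100 give cid = (1 << 10) | 100 = 0x464; the 0x1fff mask bounds
 * the result to the connection memory index range.
 */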
221 
222 /* 2.5.1 per connection transmit state registers */
223 
224 #define he_writel_tsr0(dev, val, cid) \
225 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \
227 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228 
229 #define he_writel_tsr1(dev, val, cid) \
230 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231 
232 #define he_writel_tsr2(dev, val, cid) \
233 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234 
235 #define he_writel_tsr3(dev, val, cid) \
236 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237 
238 #define he_writel_tsr4(dev, val, cid) \
239 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240 
241 	/* from page 2-20
242 	 *
243 	 * NOTE While the transmit connection is active, bits 23 through 0
244 	 *      of this register must not be written by the host.  Byte
245 	 *      enables should be used during normal operation when writing
246 	 *      the most significant byte.
247 	 */
248 
249 #define he_writel_tsr4_upper(dev, val, cid) \
250 		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 							CON_CTL_TCM \
252 							| CON_BYTE_DISABLE_2 \
253 							| CON_BYTE_DISABLE_1 \
254 							| CON_BYTE_DISABLE_0)
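/*
 * Note: with byte enables 0, 1 and 2 disabled, only the most
 * significant byte (bits 31:24) of TSR4 is written, which is how this
 * macro honors the note above while a transmit connection is active.
 */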
255 
256 #define he_readl_tsr4(dev, cid) \
257 		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258 
259 #define he_writel_tsr5(dev, val, cid) \
260 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261 
262 #define he_writel_tsr6(dev, val, cid) \
263 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264 
265 #define he_writel_tsr7(dev, val, cid) \
266 		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267 
268 
269 #define he_writel_tsr8(dev, val, cid) \
270 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271 
272 #define he_writel_tsr9(dev, val, cid) \
273 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274 
275 #define he_writel_tsr10(dev, val, cid) \
276 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277 
278 #define he_writel_tsr11(dev, val, cid) \
279 		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280 
281 
282 #define he_writel_tsr12(dev, val, cid) \
283 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284 
285 #define he_writel_tsr13(dev, val, cid) \
286 		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287 
288 
289 #define he_writel_tsr14(dev, val, cid) \
290 		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291 
292 #define he_writel_tsr14_upper(dev, val, cid) \
293 		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 							CON_CTL_TCM \
295 							| CON_BYTE_DISABLE_2 \
296 							| CON_BYTE_DISABLE_1 \
297 							| CON_BYTE_DISABLE_0)
298 
299 /* 2.7.1 per connection receive state registers */
300 
301 #define he_writel_rsr0(dev, val, cid) \
302 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \
304 		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305 
306 #define he_writel_rsr1(dev, val, cid) \
307 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308 
309 #define he_writel_rsr2(dev, val, cid) \
310 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311 
312 #define he_writel_rsr3(dev, val, cid) \
313 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314 
315 #define he_writel_rsr4(dev, val, cid) \
316 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317 
318 #define he_writel_rsr5(dev, val, cid) \
319 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320 
321 #define he_writel_rsr6(dev, val, cid) \
322 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323 
324 #define he_writel_rsr7(dev, val, cid) \
325 		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326 
327 static __inline__ struct atm_vcc*
328 __find_vcc(struct he_dev *he_dev, unsigned cid)
329 {
330 	struct hlist_head *head;
331 	struct atm_vcc *vcc;
332 	struct sock *s;
333 	short vpi;
334 	int vci;
335 
336 	vpi = cid >> he_dev->vcibits;
337 	vci = cid & ((1 << he_dev->vcibits) - 1);
338 	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339 
340 	sk_for_each(s, head) {
341 		vcc = atm_sk(s);
342 		if (vcc->dev == he_dev->atm_dev &&
343 		    vcc->vci == vci && vcc->vpi == vpi &&
344 		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 			return vcc;
346 		}
347 	}
348 	return NULL;
349 }
350 
351 static int he_init_one(struct pci_dev *pci_dev,
352 		       const struct pci_device_id *pci_ent)
353 {
354 	struct atm_dev *atm_dev = NULL;
355 	struct he_dev *he_dev = NULL;
356 	int err = 0;
357 
358 	printk(KERN_INFO "ATM he driver\n");
359 
360 	if (pci_enable_device(pci_dev))
361 		return -EIO;
362 	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
363 		printk(KERN_WARNING "he: no suitable dma available\n");
364 		err = -EIO;
365 		goto init_one_failure;
366 	}
367 
368 	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 	if (!atm_dev) {
370 		err = -ENODEV;
371 		goto init_one_failure;
372 	}
373 	pci_set_drvdata(pci_dev, atm_dev);
374 
375 	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
377 	if (!he_dev) {
378 		err = -ENOMEM;
379 		goto init_one_failure;
380 	}
381 	he_dev->pci_dev = pci_dev;
382 	he_dev->atm_dev = atm_dev;
383 	he_dev->atm_dev->dev_data = he_dev;
385 	he_dev->number = atm_dev->number;
386 	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 	spin_lock_init(&he_dev->global_lock);
388 
389 	if (he_start(atm_dev)) {
390 		he_stop(he_dev);
391 		err = -ENODEV;
392 		goto init_one_failure;
393 	}
394 	he_dev->next = NULL;
395 	if (he_devs)
396 		he_dev->next = he_devs;
397 	he_devs = he_dev;
398 	return 0;
399 
400 init_one_failure:
401 	if (atm_dev)
402 		atm_dev_deregister(atm_dev);
403 	kfree(he_dev);
404 	pci_disable_device(pci_dev);
405 	return err;
406 }
407 
408 static void he_remove_one(struct pci_dev *pci_dev)
409 {
410 	struct atm_dev *atm_dev;
411 	struct he_dev *he_dev;
412 
413 	atm_dev = pci_get_drvdata(pci_dev);
414 	he_dev = HE_DEV(atm_dev);
415 
416 	/* need to remove from he_devs */
417 
418 	he_stop(he_dev);
419 	atm_dev_deregister(atm_dev);
420 	kfree(he_dev);
421 
422 	pci_set_drvdata(pci_dev, NULL);
423 	pci_disable_device(pci_dev);
424 }
425 
426 
427 static unsigned
428 rate_to_atmf(unsigned rate)		/* cps to atm forum format */
429 {
430 #define NONZERO (1 << 14)
431 
432 	unsigned exp = 0;
433 
434 	if (rate == 0)
435 		return 0;
436 
437 	rate <<= 9;
438 	while (rate > 0x3ff) {
439 		++exp;
440 		rate >>= 1;
441 	}
442 
443 	return (NONZERO | (exp << 9) | (rate & 0x1ff));
444 }
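/*
 * Worked example (illustrative): rate_to_atmf(ATM_OC3_PCR), i.e.
 * 353207 cps: 353207 << 9 = 180841984, shifted down 18 times to the
 * 10-bit mantissa 689 (0x2b1), giving NONZERO | (18 << 9) | (689 & 0x1ff)
 * = 0x64b1, the same bit pattern as the 155 Mb/s constants written from
 * table 5.8 in he_init_cs_block() below.
 */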
445 
446 static void he_init_rx_lbfp0(struct he_dev *he_dev)
447 {
448 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
449 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
450 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
451 	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
452 
453 	lbufd_index = 0;
454 	lbm_offset = he_readl(he_dev, RCMLBM_BA);
455 
456 	he_writel(he_dev, lbufd_index, RLBF0_H);
457 
458 	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
459 		lbufd_index += 2;
460 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
461 
462 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
463 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
464 
465 		if (++lbuf_count == lbufs_per_row) {
466 			lbuf_count = 0;
467 			row_offset += he_dev->bytes_per_row;
468 		}
469 		lbm_offset += 4;
470 	}
471 
472 	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
473 	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
474 }
475 
476 static void he_init_rx_lbfp1(struct he_dev *he_dev)
477 {
478 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
479 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
480 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
481 	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
482 
483 	lbufd_index = 1;
484 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
485 
486 	he_writel(he_dev, lbufd_index, RLBF1_H);
487 
488 	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
489 		lbufd_index += 2;
490 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
491 
492 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
493 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
494 
495 		if (++lbuf_count == lbufs_per_row) {
496 			lbuf_count = 0;
497 			row_offset += he_dev->bytes_per_row;
498 		}
499 		lbm_offset += 4;
500 	}
501 
502 	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
503 	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
504 }
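/*
 * Note: the two receive free pools interleave their lbuf descriptors;
 * lbfp0 above hands out even lbufd indices (0, 2, 4, ...) and lbfp1 odd
 * ones (1, 3, 5, ...).  Each descriptor is two RCM words (buffer address
 * and next index), so lbm_offset advances by 4 per buffer here but only
 * by 2 in he_init_tx_lbfp(), where the single tx pool is contiguous.
 */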
505 
506 static void he_init_tx_lbfp(struct he_dev *he_dev)
507 {
508 	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
509 	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
510 	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
511 	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
512 
513 	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
514 	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
515 
516 	he_writel(he_dev, lbufd_index, TLBF_H);
517 
518 	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
519 		lbufd_index += 1;
520 		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
521 
522 		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
523 		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
524 
525 		if (++lbuf_count == lbufs_per_row) {
526 			lbuf_count = 0;
527 			row_offset += he_dev->bytes_per_row;
528 		}
529 		lbm_offset += 2;
530 	}
531 
532 	he_writel(he_dev, lbufd_index - 1, TLBF_T);
533 }
534 
535 static int he_init_tpdrq(struct he_dev *he_dev)
536 {
537 	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
538 		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
539 	if (he_dev->tpdrq_base == NULL) {
540 		hprintk("failed to alloc tpdrq\n");
541 		return -ENOMEM;
542 	}
543 	memset(he_dev->tpdrq_base, 0,
544 				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
545 
546 	he_dev->tpdrq_tail = he_dev->tpdrq_base;
547 	he_dev->tpdrq_head = he_dev->tpdrq_base;
548 
549 	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
550 	he_writel(he_dev, 0, TPDRQ_T);
551 	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
552 
553 	return 0;
554 }
555 
556 static void he_init_cs_block(struct he_dev *he_dev)
557 {
558 	unsigned clock, rate, delta;
559 	int reg;
560 
561 	/* 5.1.7 cs block initialization */
562 
563 	for (reg = 0; reg < 0x20; ++reg)
564 		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
565 
566 	/* rate grid timer reload values */
567 
568 	clock = he_is622(he_dev) ? 66667000 : 50000000;
569 	rate = he_dev->atm_dev->link_rate;
570 	delta = rate / 16 / 2;
571 
572 	for (reg = 0; reg < 0x10; ++reg) {
573 		/* 2.4 internal transmit function
574 		 *
575 	 	 * we initialize the first row in the rate grid.
576 		 * values are period (in clock cycles) of timer
577 		 */
578 		unsigned period = clock / rate;
579 
580 		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
581 		rate -= delta;
582 	}
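	/*
	 * Worked example (illustrative): on a 155 Mb/s card, clock =
	 * 50000000 and rate starts at ATM_OC3_PCR = 353207 cps, so the
	 * first reload value is 50000000 / 353207 = 141 clock cycles;
	 * rate then steps down by delta = 353207 / 32 = 11037 cps for
	 * each of the 16 registers in the row.
	 */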
583 
584 	if (he_is622(he_dev)) {
585 		/* table 5.2 (4 cells per lbuf) */
586 		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
587 		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
588 		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
589 		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
590 		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
591 
592 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
593 		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
594 		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
595 		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
596 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
597 		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
598 		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
599 
600 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
601 
602 		/* table 5.8 */
603 		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
604 		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
605 		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
606 		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
607 		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
608 		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
609 
610 		/* table 5.9 */
611 		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
612 		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
613 	} else {
614 		/* table 5.1 (4 cells per lbuf) */
615 		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
616 		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
617 		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
618 		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
619 		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
620 
621 		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
622 		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
623 		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
624 		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
625 		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
626 		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
627 		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
628 
629 		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
630 
631 		/* table 5.8 */
632 		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
633 		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
634 		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
635 		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
636 		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
637 		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
638 
639 		/* table 5.9 */
640 		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
641 		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
642 	}
643 
644 	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
645 
646 	for (reg = 0; reg < 0x8; ++reg)
647 		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
648 
649 }
650 
651 static int he_init_cs_block_rcm(struct he_dev *he_dev)
652 {
653 	unsigned (*rategrid)[16][16];
654 	unsigned rate, delta;
655 	int i, j, reg;
656 
657 	unsigned rate_atmf, exp, man;
658 	unsigned long long rate_cps;
659 	int mult, buf, buf_limit = 4;
660 
661 	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
662 	if (!rategrid)
663 		return -ENOMEM;
664 
665 	/* initialize rate grid group table */
666 
667 	for (reg = 0x0; reg < 0xff; ++reg)
668 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
669 
670 	/* initialize rate controller groups */
671 
672 	for (reg = 0x100; reg < 0x1ff; ++reg)
673 		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
674 
675 	/* initialize tNrm lookup table */
676 
677 	/* the manual makes reference to a routine in a sample driver
678 	   for proper configuration; fortunately, we only need this
679 	   in order to support abr connections */
680 
681 	/* initialize rate to group table */
682 
683 	rate = he_dev->atm_dev->link_rate;
684 	delta = rate / 32;
685 
686 	/*
687 	 * 2.4 transmit internal functions
688 	 *
689 	 * we construct a copy of the rate grid used by the scheduler
690 	 * in order to construct the rate to group table below
691 	 */
692 
693 	for (j = 0; j < 16; j++) {
694 		(*rategrid)[0][j] = rate;
695 		rate -= delta;
696 	}
697 
698 	for (i = 1; i < 16; i++)
699 		for (j = 0; j < 16; j++)
700 			if (i > 14)
701 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
702 			else
703 				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
704 
705 	/*
706 	 * 2.4 transmit internal function
707 	 *
708 	 * this table maps the upper 5 bits of exponent and mantissa
709 	 * of the atm forum representation of the rate into an index
710 	 * on rate grid
711 	 */
712 
713 	rate_atmf = 0;
714 	while (rate_atmf < 0x400) {
715 		man = (rate_atmf & 0x1f) << 4;
716 		exp = rate_atmf >> 5;
717 
718 		/*
719 			instead of '/ 512', use '>> 9' to prevent a call
720 			to the 64-bit division helper (__udivdi3) on x86 platforms
721 		*/
722 		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
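		/*
		 * Worked example (illustrative): rate_atmf = 0x225 gives
		 * exp = 0x225 >> 5 = 17 and man = (0x225 & 0x1f) << 4 = 80,
		 * so rate_cps = (1 << 17) * (80 + 512) >> 9 = 151552 cells
		 * per second.
		 */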
723 
724 		if (rate_cps < 10)
725 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
726 
727 		for (i = 255; i > 0; i--)
728 			if ((*rategrid)[i/16][i%16] >= rate_cps)
729 				break;	 /* pick nearest rate instead? */
730 
731 		/*
732 		 * each table entry is 16 bits: (rate grid index (8 bits)
733 		 * and a buffer limit (8 bits)
734 		 * there are two table entries in each 32-bit register
735 		 */
736 
737 #ifdef notdef
738 		buf = rate_cps * he_dev->tx_numbuffs /
739 				(he_dev->atm_dev->link_rate * 2);
740 #else
741 		/* this is not pretty, but it avoids __udivdi3 and is mostly correct */
742 		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
743 		if (rate_cps > (272 * mult))
744 			buf = 4;
745 		else if (rate_cps > (204 * mult))
746 			buf = 3;
747 		else if (rate_cps > (136 * mult))
748 			buf = 2;
749 		else if (rate_cps > (68 * mult))
750 			buf = 1;
751 		else
752 			buf = 0;
753 #endif
754 		if (buf > buf_limit)
755 			buf = buf_limit;
756 		reg = (reg << 16) | ((i << 8) | buf);
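		/*
		 * after the shift, the previous (even rate_atmf) entry sits
		 * in bits 31:16 and the current odd entry in bits 15:0,
		 * which is why the write below only fires on odd values of
		 * rate_atmf: two packed entries per 32-bit register.
		 */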
757 
758 #define RTGTBL_OFFSET 0x400
759 
760 		if (rate_atmf & 0x1)
761 			he_writel_rcm(he_dev, reg,
762 				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
763 
764 		++rate_atmf;
765 	}
766 
767 	kfree(rategrid);
768 	return 0;
769 }
770 
771 static int he_init_group(struct he_dev *he_dev, int group)
772 {
773 	struct he_buff *heb, *next;
774 	dma_addr_t mapping;
775 	int i;
776 
777 	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
778 	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
779 	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
780 	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
781 		  G0_RBPS_BS + (group * 32));
782 
783 	/* bitmap table */
784 	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
785 				     * sizeof(unsigned long), GFP_KERNEL);
786 	if (!he_dev->rbpl_table) {
787 		hprintk("unable to allocate rbpl bitmap table\n");
788 		return -ENOMEM;
789 	}
790 	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
791 
792 	/* rbpl_virt 64-bit pointers */
793 	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
794 				    * sizeof(struct he_buff *), GFP_KERNEL);
795 	if (!he_dev->rbpl_virt) {
796 		hprintk("unable to allocate rbpl virt table\n");
797 		goto out_free_rbpl_table;
798 	}
799 
800 	/* large buffer pool */
801 	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
802 					    CONFIG_RBPL_BUFSIZE, 64, 0);
803 	if (he_dev->rbpl_pool == NULL) {
804 		hprintk("unable to create rbpl pool\n");
805 		goto out_free_rbpl_virt;
806 	}
807 
808 	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
809 		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
810 	if (he_dev->rbpl_base == NULL) {
811 		hprintk("failed to alloc rbpl_base\n");
812 		goto out_destroy_rbpl_pool;
813 	}
814 	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
815 
816 	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
817 
818 	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
819 
820 		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
821 		if (!heb)
822 			goto out_free_rbpl;
823 		heb->mapping = mapping;
824 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
825 
826 		set_bit(i, he_dev->rbpl_table);
827 		he_dev->rbpl_virt[i] = heb;
828 		he_dev->rbpl_hint = i + 1;
829 		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
830 		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
831 	}
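	/*
	 * The idx field stored above is what the hardware echoes back in
	 * RBRQ_ADDR() on receive; shifting it down by RBP_IDX_OFFSET
	 * recovers i, so rbpl_virt[i] maps a reported buffer back to its
	 * struct he_buff (see he_service_rbrq() below).
	 */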
832 	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
833 
834 	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
835 	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
836 						G0_RBPL_T + (group * 32));
837 	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
838 						G0_RBPL_BS + (group * 32));
839 	he_writel(he_dev,
840 			RBP_THRESH(CONFIG_RBPL_THRESH) |
841 			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
842 			RBP_INT_ENB,
843 						G0_RBPL_QI + (group * 32));
844 
845 	/* rx buffer ready queue */
846 
847 	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
848 		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
849 	if (he_dev->rbrq_base == NULL) {
850 		hprintk("failed to allocate rbrq\n");
851 		goto out_free_rbpl;
852 	}
853 	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
854 
855 	he_dev->rbrq_head = he_dev->rbrq_base;
856 	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
857 	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
858 	he_writel(he_dev,
859 		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
860 						G0_RBRQ_Q + (group * 16));
861 	if (irq_coalesce) {
862 		hprintk("coalescing interrupts\n");
863 		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
864 						G0_RBRQ_I + (group * 16));
865 	} else
866 		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
867 						G0_RBRQ_I + (group * 16));
868 
869 	/* tx buffer ready queue */
870 
871 	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
872 		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
873 	if (he_dev->tbrq_base == NULL) {
874 		hprintk("failed to allocate tbrq\n");
875 		goto out_free_rbpq_base;
876 	}
877 	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
878 
879 	he_dev->tbrq_head = he_dev->tbrq_base;
880 
881 	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
882 	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
883 	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
884 	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
885 
886 	return 0;
887 
888 out_free_rbpq_base:
889 	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
890 			sizeof(struct he_rbrq), he_dev->rbrq_base,
891 			he_dev->rbrq_phys);
892 out_free_rbpl:
893 	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
894 		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
895 
896 	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
897 			sizeof(struct he_rbp), he_dev->rbpl_base,
898 			he_dev->rbpl_phys);
899 out_destroy_rbpl_pool:
900 	pci_pool_destroy(he_dev->rbpl_pool);
901 out_free_rbpl_virt:
902 	kfree(he_dev->rbpl_virt);
903 out_free_rbpl_table:
904 	kfree(he_dev->rbpl_table);
905 
906 	return -ENOMEM;
907 }
908 
909 static int he_init_irq(struct he_dev *he_dev)
910 {
911 	int i;
912 
913 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
914 		    end of the interrupt queue */
915 
916 	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
917 			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
918 	if (he_dev->irq_base == NULL) {
919 		hprintk("failed to allocate irq\n");
920 		return -ENOMEM;
921 	}
922 	he_dev->irq_tailoffset = (unsigned *)
923 					&he_dev->irq_base[CONFIG_IRQ_SIZE];
924 	*he_dev->irq_tailoffset = 0;
925 	he_dev->irq_head = he_dev->irq_base;
926 	he_dev->irq_tail = he_dev->irq_base;
927 
928 	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
929 		he_dev->irq_base[i].isw = ITYPE_INVALID;
930 
931 	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
932 	he_writel(he_dev,
933 		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
934 								IRQ0_HEAD);
935 	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
936 	he_writel(he_dev, 0x0, IRQ0_DATA);
937 
938 	he_writel(he_dev, 0x0, IRQ1_BASE);
939 	he_writel(he_dev, 0x0, IRQ1_HEAD);
940 	he_writel(he_dev, 0x0, IRQ1_CNTL);
941 	he_writel(he_dev, 0x0, IRQ1_DATA);
942 
943 	he_writel(he_dev, 0x0, IRQ2_BASE);
944 	he_writel(he_dev, 0x0, IRQ2_HEAD);
945 	he_writel(he_dev, 0x0, IRQ2_CNTL);
946 	he_writel(he_dev, 0x0, IRQ2_DATA);
947 
948 	he_writel(he_dev, 0x0, IRQ3_BASE);
949 	he_writel(he_dev, 0x0, IRQ3_HEAD);
950 	he_writel(he_dev, 0x0, IRQ3_CNTL);
951 	he_writel(he_dev, 0x0, IRQ3_DATA);
952 
953 	/* 2.9.3.2 interrupt queue mapping registers */
954 
955 	he_writel(he_dev, 0x0, GRP_10_MAP);
956 	he_writel(he_dev, 0x0, GRP_32_MAP);
957 	he_writel(he_dev, 0x0, GRP_54_MAP);
958 	he_writel(he_dev, 0x0, GRP_76_MAP);
959 
960 	if (request_irq(he_dev->pci_dev->irq,
961 			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
962 		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
963 		return -EINVAL;
964 	}
965 
966 	he_dev->irq = he_dev->pci_dev->irq;
967 
968 	return 0;
969 }
970 
971 static int he_start(struct atm_dev *dev)
972 {
973 	struct he_dev *he_dev;
974 	struct pci_dev *pci_dev;
975 	unsigned long membase;
976 
977 	u16 command;
978 	u32 gen_cntl_0, host_cntl, lb_swap;
979 	u8 cache_size, timer;
980 
981 	unsigned err;
982 	unsigned int status, reg;
983 	int i, group;
984 
985 	he_dev = HE_DEV(dev);
986 	pci_dev = he_dev->pci_dev;
987 
988 	membase = pci_resource_start(pci_dev, 0);
989 	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
990 
991 	/*
992 	 * pci bus controller initialization
993 	 */
994 
995 	/* 4.3 pci bus controller-specific initialization */
996 	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
997 		hprintk("can't read GEN_CNTL_0\n");
998 		return -EINVAL;
999 	}
1000 	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1001 	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1002 		hprintk("can't write GEN_CNTL_0.\n");
1003 		return -EINVAL;
1004 	}
1005 
1006 	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1007 		hprintk("can't read PCI_COMMAND.\n");
1008 		return -EINVAL;
1009 	}
1010 
1011 	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1012 	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1013 		hprintk("can't enable memory.\n");
1014 		return -EINVAL;
1015 	}
1016 
1017 	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1018 		hprintk("can't read cache line size?\n");
1019 		return -EINVAL;
1020 	}
1021 
1022 	if (cache_size < 16) {
1023 		cache_size = 16;
1024 		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1025 			hprintk("can't set cache line size to %d\n", cache_size);
1026 	}
1027 
1028 	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1029 		hprintk("can't read latency timer?\n");
1030 		return -EINVAL;
1031 	}
1032 
1033 	/* from table 3.9
1034 	 *
1035 	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1036 	 *
1037 	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1038 	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1039 	 *
1040 	 */
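	/* worst case from the note above: 1 + 16 + 192 = 209 */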
1041 #define LAT_TIMER 209
1042 	if (timer < LAT_TIMER) {
1043 		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1044 		timer = LAT_TIMER;
1045 		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1046 			hprintk("can't set latency timer to %d\n", timer);
1047 	}
1048 
1049 	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1050 		hprintk("can't set up page mapping\n");
1051 		return -EINVAL;
1052 	}
1053 
1054 	/* 4.4 card reset */
1055 	he_writel(he_dev, 0x0, RESET_CNTL);
1056 	he_writel(he_dev, 0xff, RESET_CNTL);
1057 
1058 	msleep(16);	/* 16 ms */
1059 	status = he_readl(he_dev, RESET_CNTL);
1060 	if ((status & BOARD_RST_STATUS) == 0) {
1061 		hprintk("reset failed\n");
1062 		return -EINVAL;
1063 	}
1064 
1065 	/* 4.5 set bus width */
1066 	host_cntl = he_readl(he_dev, HOST_CNTL);
1067 	if (host_cntl & PCI_BUS_SIZE64)
1068 		gen_cntl_0 |= ENBL_64;
1069 	else
1070 		gen_cntl_0 &= ~ENBL_64;
1071 
1072 	if (disable64 == 1) {
1073 		hprintk("disabling 64-bit pci bus transfers\n");
1074 		gen_cntl_0 &= ~ENBL_64;
1075 	}
1076 
1077 	if (gen_cntl_0 & ENBL_64)
1078 		hprintk("64-bit transfers enabled\n");
1079 
1080 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1081 
1082 	/* 4.7 read prom contents */
1083 	for (i = 0; i < PROD_ID_LEN; ++i)
1084 		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1085 
1086 	he_dev->media = read_prom_byte(he_dev, MEDIA);
1087 
1088 	for (i = 0; i < 6; ++i)
1089 		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1090 
1091 	hprintk("%s%s, %pM\n", he_dev->prod_id,
1092 		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1093 	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1094 						ATM_OC12_PCR : ATM_OC3_PCR;
1095 
1096 	/* 4.6 set host endianness */
1097 	lb_swap = he_readl(he_dev, LB_SWAP);
1098 	if (he_is622(he_dev))
1099 		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1100 	else
1101 		lb_swap |= XFER_SIZE;		/* 8 cells */
1102 #ifdef __BIG_ENDIAN
1103 	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1104 #else
1105 	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1106 			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1107 #endif /* __BIG_ENDIAN */
1108 	he_writel(he_dev, lb_swap, LB_SWAP);
1109 
1110 	/* 4.8 sdram controller initialization */
1111 	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1112 
1113 	/* 4.9 initialize rnum value */
1114 	lb_swap |= SWAP_RNUM_MAX(0xf);
1115 	he_writel(he_dev, lb_swap, LB_SWAP);
1116 
1117 	/* 4.10 initialize the interrupt queues */
1118 	if ((err = he_init_irq(he_dev)) != 0)
1119 		return err;
1120 
1121 	/* 4.11 enable pci bus controller state machines */
1122 	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1123 				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1124 	he_writel(he_dev, host_cntl, HOST_CNTL);
1125 
1126 	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1127 	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1128 
1129 	/*
1130 	 * atm network controller initialization
1131 	 */
1132 
1133 	/* 5.1.1 generic configuration state */
1134 
1135 	/*
1136 	 *		local (cell) buffer memory map
1137 	 *
1138 	 *             HE155                          HE622
1139 	 *
1140 	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1141 	 *         |            |            |                   |   |
1142 	 *         |  utility   |            |        rx0        |   |
1143 	 *        5|____________|         255|___________________| u |
1144 	 *        6|            |         256|                   | t |
1145 	 *         |            |            |                   | i |
1146 	 *         |    rx0     |     row    |        tx         | l |
1147 	 *         |            |            |                   | i |
1148 	 *         |            |         767|___________________| t |
1149 	 *      517|____________|         768|                   | y |
1150 	 * row  518|            |            |        rx1        |   |
1151 	 *         |            |        1023|___________________|___|
1152 	 *         |            |
1153 	 *         |    tx      |
1154 	 *         |            |
1155 	 *         |            |
1156 	 *     1535|____________|
1157 	 *     1536|            |
1158 	 *         |    rx1     |
1159 	 *     2047|____________|
1160 	 *
1161 	 */
1162 
1163 	/* total 4096 connections */
1164 	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1165 	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1166 
1167 	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1168 		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1169 		return -ENODEV;
1170 	}
1171 
1172 	if (nvpibits != -1) {
1173 		he_dev->vpibits = nvpibits;
1174 		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1175 	}
1176 
1177 	if (nvcibits != -1) {
1178 		he_dev->vcibits = nvcibits;
1179 		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1180 	}
1181 
1182 
1183 	if (he_is622(he_dev)) {
1184 		he_dev->cells_per_row = 40;
1185 		he_dev->bytes_per_row = 2048;
1186 		he_dev->r0_numrows = 256;
1187 		he_dev->tx_numrows = 512;
1188 		he_dev->r1_numrows = 256;
1189 		he_dev->r0_startrow = 0;
1190 		he_dev->tx_startrow = 256;
1191 		he_dev->r1_startrow = 768;
1192 	} else {
1193 		he_dev->cells_per_row = 20;
1194 		he_dev->bytes_per_row = 1024;
1195 		he_dev->r0_numrows = 512;
1196 		he_dev->tx_numrows = 1018;
1197 		he_dev->r1_numrows = 512;
1198 		he_dev->r0_startrow = 6;
1199 		he_dev->tx_startrow = 518;
1200 		he_dev->r1_startrow = 1536;
1201 	}
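	/*
	 * These row assignments mirror the local buffer memory map drawn
	 * in 5.1.1 above; e.g. on the HE155, rx0 occupies rows 6-517, tx
	 * rows 518-1535 and rx1 rows 1536-2047.
	 */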
1202 
1203 	he_dev->cells_per_lbuf = 4;
1204 	he_dev->buffer_limit = 4;
1205 	he_dev->r0_numbuffs = he_dev->r0_numrows *
1206 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1207 	if (he_dev->r0_numbuffs > 2560)
1208 		he_dev->r0_numbuffs = 2560;
1209 
1210 	he_dev->r1_numbuffs = he_dev->r1_numrows *
1211 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1212 	if (he_dev->r1_numbuffs > 2560)
1213 		he_dev->r1_numbuffs = 2560;
1214 
1215 	he_dev->tx_numbuffs = he_dev->tx_numrows *
1216 				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1217 	if (he_dev->tx_numbuffs > 5120)
1218 		he_dev->tx_numbuffs = 5120;
1219 
1220 	/* 5.1.2 configure hardware dependent registers */
1221 
1222 	he_writel(he_dev,
1223 		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1224 		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1225 		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1226 		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1227 								LBARB);
1228 
1229 	he_writel(he_dev, BANK_ON |
1230 		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1231 								SDRAMCON);
1232 
1233 	he_writel(he_dev,
1234 		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1235 						RM_RW_WAIT(1), RCMCONFIG);
1236 	he_writel(he_dev,
1237 		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1238 						TM_RW_WAIT(1), TCMCONFIG);
1239 
1240 	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1241 
1242 	he_writel(he_dev,
1243 		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1244 		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1245 		RX_VALVP(he_dev->vpibits) |
1246 		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1247 
1248 	he_writel(he_dev, DRF_THRESH(0x20) |
1249 		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1250 		TX_VCI_MASK(he_dev->vcibits) |
1251 		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1252 
1253 	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1254 
1255 	he_writel(he_dev, PHY_INT_ENB |
1256 		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1257 								RH_CONFIG);
1258 
1259 	/* 5.1.3 initialize connection memory */
1260 
1261 	for (i = 0; i < TCM_MEM_SIZE; ++i)
1262 		he_writel_tcm(he_dev, 0, i);
1263 
1264 	for (i = 0; i < RCM_MEM_SIZE; ++i)
1265 		he_writel_rcm(he_dev, 0, i);
1266 
1267 	/*
1268 	 *	transmit connection memory map
1269 	 *
1270 	 *                  tx memory
1271 	 *          0x0 ___________________
1272 	 *             |                   |
1273 	 *             |                   |
1274 	 *             |       TSRa        |
1275 	 *             |                   |
1276 	 *             |                   |
1277 	 *       0x8000|___________________|
1278 	 *             |                   |
1279 	 *             |       TSRb        |
1280 	 *       0xc000|___________________|
1281 	 *             |                   |
1282 	 *             |       TSRc        |
1283 	 *       0xe000|___________________|
1284 	 *             |       TSRd        |
1285 	 *       0xf000|___________________|
1286 	 *             |       tmABR       |
1287 	 *      0x10000|___________________|
1288 	 *             |                   |
1289 	 *             |       tmTPD       |
1290 	 *             |___________________|
1291 	 *             |                   |
1292 	 *                      ....
1293 	 *      0x1ffff|___________________|
1294 	 *
1295 	 *
1296 	 */
1297 
1298 	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1299 	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1300 	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1301 	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1302 	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1303 
1304 
1305 	/*
1306 	 *	receive connection memory map
1307 	 *
1308 	 *          0x0 ___________________
1309 	 *             |                   |
1310 	 *             |                   |
1311 	 *             |       RSRa        |
1312 	 *             |                   |
1313 	 *             |                   |
1314 	 *       0x8000|___________________|
1315 	 *             |                   |
1316 	 *             |             rx0/1 |
1317 	 *             |       LBM         |   link lists of local
1318 	 *             |             tx    |   buffer memory
1319 	 *             |                   |
1320 	 *       0xd000|___________________|
1321 	 *             |                   |
1322 	 *             |      rmABR        |
1323 	 *       0xe000|___________________|
1324 	 *             |                   |
1325 	 *             |       RSRb        |
1326 	 *             |___________________|
1327 	 *             |                   |
1328 	 *                      ....
1329 	 *       0xffff|___________________|
1330 	 */
1331 
1332 	he_writel(he_dev, 0x08000, RCMLBM_BA);
1333 	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1334 	he_writel(he_dev, 0x0d800, RCMABR_BA);
1335 
1336 	/* 5.1.4 initialize local buffer free pools linked lists */
1337 
1338 	he_init_rx_lbfp0(he_dev);
1339 	he_init_rx_lbfp1(he_dev);
1340 
1341 	he_writel(he_dev, 0x0, RLBC_H);
1342 	he_writel(he_dev, 0x0, RLBC_T);
1343 	he_writel(he_dev, 0x0, RLBC_H2);
1344 
1345 	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1346 	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1347 
1348 	he_init_tx_lbfp(he_dev);
1349 
1350 	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1351 
1352 	/* 5.1.5 initialize intermediate receive queues */
1353 
1354 	if (he_is622(he_dev)) {
1355 		he_writel(he_dev, 0x000f, G0_INMQ_S);
1356 		he_writel(he_dev, 0x200f, G0_INMQ_L);
1357 
1358 		he_writel(he_dev, 0x001f, G1_INMQ_S);
1359 		he_writel(he_dev, 0x201f, G1_INMQ_L);
1360 
1361 		he_writel(he_dev, 0x002f, G2_INMQ_S);
1362 		he_writel(he_dev, 0x202f, G2_INMQ_L);
1363 
1364 		he_writel(he_dev, 0x003f, G3_INMQ_S);
1365 		he_writel(he_dev, 0x203f, G3_INMQ_L);
1366 
1367 		he_writel(he_dev, 0x004f, G4_INMQ_S);
1368 		he_writel(he_dev, 0x204f, G4_INMQ_L);
1369 
1370 		he_writel(he_dev, 0x005f, G5_INMQ_S);
1371 		he_writel(he_dev, 0x205f, G5_INMQ_L);
1372 
1373 		he_writel(he_dev, 0x006f, G6_INMQ_S);
1374 		he_writel(he_dev, 0x206f, G6_INMQ_L);
1375 
1376 		he_writel(he_dev, 0x007f, G7_INMQ_S);
1377 		he_writel(he_dev, 0x207f, G7_INMQ_L);
1378 	} else {
1379 		he_writel(he_dev, 0x0000, G0_INMQ_S);
1380 		he_writel(he_dev, 0x0008, G0_INMQ_L);
1381 
1382 		he_writel(he_dev, 0x0001, G1_INMQ_S);
1383 		he_writel(he_dev, 0x0009, G1_INMQ_L);
1384 
1385 		he_writel(he_dev, 0x0002, G2_INMQ_S);
1386 		he_writel(he_dev, 0x000a, G2_INMQ_L);
1387 
1388 		he_writel(he_dev, 0x0003, G3_INMQ_S);
1389 		he_writel(he_dev, 0x000b, G3_INMQ_L);
1390 
1391 		he_writel(he_dev, 0x0004, G4_INMQ_S);
1392 		he_writel(he_dev, 0x000c, G4_INMQ_L);
1393 
1394 		he_writel(he_dev, 0x0005, G5_INMQ_S);
1395 		he_writel(he_dev, 0x000d, G5_INMQ_L);
1396 
1397 		he_writel(he_dev, 0x0006, G6_INMQ_S);
1398 		he_writel(he_dev, 0x000e, G6_INMQ_L);
1399 
1400 		he_writel(he_dev, 0x0007, G7_INMQ_S);
1401 		he_writel(he_dev, 0x000f, G7_INMQ_L);
1402 	}
1403 
1404 	/* 5.1.6 application tunable parameters */
1405 
1406 	he_writel(he_dev, 0x0, MCC);
1407 	he_writel(he_dev, 0x0, OEC);
1408 	he_writel(he_dev, 0x0, DCC);
1409 	he_writel(he_dev, 0x0, CEC);
1410 
1411 	/* 5.1.7 cs block initialization */
1412 
1413 	he_init_cs_block(he_dev);
1414 
1415 	/* 5.1.8 cs block connection memory initialization */
1416 
1417 	if (he_init_cs_block_rcm(he_dev) < 0)
1418 		return -ENOMEM;
1419 
1420 	/* 5.1.10 initialize host structures */
1421 
1422 	if (he_init_tpdrq(he_dev) != 0)
		return -ENOMEM;
1423 
1424 	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1425 		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1426 	if (he_dev->tpd_pool == NULL) {
1427 		hprintk("unable to create tpd pci_pool\n");
1428 		return -ENOMEM;
1429 	}
1430 
1431 	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1432 
1433 	if (he_init_group(he_dev, 0) != 0)
1434 		return -ENOMEM;
1435 
1436 	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1437 		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1438 		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1439 		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1440 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1441 						G0_RBPS_BS + (group * 32));
1442 
1443 		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1444 		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1445 		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1446 						G0_RBPL_QI + (group * 32));
1447 		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1448 
1449 		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1450 		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1451 		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1452 						G0_RBRQ_Q + (group * 16));
1453 		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1454 
1455 		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1456 		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1457 		he_writel(he_dev, TBRQ_THRESH(0x1),
1458 						G0_TBRQ_THRESH + (group * 16));
1459 		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1460 	}
1461 
1462 	/* host status page */
1463 
1464 	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1465 				sizeof(struct he_hsp), &he_dev->hsp_phys);
1466 	if (he_dev->hsp == NULL) {
1467 		hprintk("failed to allocate host status page\n");
1468 		return -ENOMEM;
1469 	}
1470 	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1471 	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1472 
1473 	/* initialize framer */
1474 
1475 #ifdef CONFIG_ATM_HE_USE_SUNI
1476 	if (he_isMM(he_dev))
1477 		suni_init(he_dev->atm_dev);
1478 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1479 		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1480 #endif /* CONFIG_ATM_HE_USE_SUNI */
1481 
1482 	if (sdh) {
1483 		/* this really should be in suni.c but for now... */
1484 		int val;
1485 
1486 		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1487 		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1488 		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1489 		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1490 	}
1491 
1492 	/* 5.1.12 enable transmit and receive */
1493 
1494 	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1495 	reg |= TX_ENABLE|ER_ENABLE;
1496 	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1497 
1498 	reg = he_readl(he_dev, RC_CONFIG);
1499 	reg |= RX_ENABLE;
1500 	he_writel(he_dev, reg, RC_CONFIG);
1501 
1502 	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1503 		he_dev->cs_stper[i].inuse = 0;
1504 		he_dev->cs_stper[i].pcr = -1;
1505 	}
1506 	he_dev->total_bw = 0;
1507 
1508 
1509 	/* atm linux initialization */
1510 
1511 	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1512 	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1513 
1514 	he_dev->irq_peak = 0;
1515 	he_dev->rbrq_peak = 0;
1516 	he_dev->rbpl_peak = 0;
1517 	he_dev->tbrq_peak = 0;
1518 
1519 	HPRINTK("hell bent for leather!\n");
1520 
1521 	return 0;
1522 }
1523 
1524 static void
1525 he_stop(struct he_dev *he_dev)
1526 {
1527 	struct he_buff *heb, *next;
1528 	struct pci_dev *pci_dev;
1529 	u32 gen_cntl_0, reg;
1530 	u16 command;
1531 
1532 	pci_dev = he_dev->pci_dev;
1533 
1534 	/* disable interrupts */
1535 
1536 	if (he_dev->membase) {
1537 		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1538 		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1539 		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1540 
1541 		tasklet_disable(&he_dev->tasklet);
1542 
1543 		/* disable recv and transmit */
1544 
1545 		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1546 		reg &= ~(TX_ENABLE|ER_ENABLE);
1547 		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1548 
1549 		reg = he_readl(he_dev, RC_CONFIG);
1550 		reg &= ~(RX_ENABLE);
1551 		he_writel(he_dev, reg, RC_CONFIG);
1552 	}
1553 
1554 #ifdef CONFIG_ATM_HE_USE_SUNI
1555 	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1556 		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1557 #endif /* CONFIG_ATM_HE_USE_SUNI */
1558 
1559 	if (he_dev->irq)
1560 		free_irq(he_dev->irq, he_dev);
1561 
1562 	if (he_dev->irq_base)
1563 		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1564 			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1565 
1566 	if (he_dev->hsp)
1567 		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1568 						he_dev->hsp, he_dev->hsp_phys);
1569 
1570 	if (he_dev->rbpl_base) {
1571 		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1572 			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1573 
1574 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1575 			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1576 	}
1577 
1578 	kfree(he_dev->rbpl_virt);
1579 	kfree(he_dev->rbpl_table);
1580 
1581 	if (he_dev->rbpl_pool)
1582 		pci_pool_destroy(he_dev->rbpl_pool);
1583 
1584 	if (he_dev->rbrq_base)
1585 		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1586 							he_dev->rbrq_base, he_dev->rbrq_phys);
1587 
1588 	if (he_dev->tbrq_base)
1589 		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1590 							he_dev->tbrq_base, he_dev->tbrq_phys);
1591 
1592 	if (he_dev->tpdrq_base)
1593 		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1594 							he_dev->tpdrq_base, he_dev->tpdrq_phys);
1595 
1596 	if (he_dev->tpd_pool)
1597 		pci_pool_destroy(he_dev->tpd_pool);
1598 
1599 	if (he_dev->pci_dev) {
1600 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1601 		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1602 		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1603 	}
1604 
1605 	if (he_dev->membase)
1606 		iounmap(he_dev->membase);
1607 }
1608 
1609 static struct he_tpd *
1610 __alloc_tpd(struct he_dev *he_dev)
1611 {
1612 	struct he_tpd *tpd;
1613 	dma_addr_t mapping;
1614 
1615 	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1616 	if (tpd == NULL)
1617 		return NULL;
1618 
1619 	tpd->status = TPD_ADDR(mapping);
1620 	tpd->reserved = 0;
1621 	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1622 	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1623 	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1624 
1625 	return tpd;
1626 }
1627 
1628 #define AAL5_LEN(buf,len) 						\
1629 			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1630 				(((unsigned char *)(buf))[(len)-5]))
1631 
1632 /* 2.10.1.2 receive
1633  *
1634  * aal5 packets can optionally return the tcp checksum in the lower
1635  * 16 bits of the crc (RSR0_TCP_CKSUM)
1636  */
1637 
1638 #define TCP_CKSUM(buf,len) 						\
1639 			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1640 				(((unsigned char *)(buf))[(len)-1]))
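/*
 * Both macros index into the 8-byte AAL5 CPCS trailer that ends the
 * reassembled pdu: CPCS-UU, CPI, a 16-bit length field (bytes len-6 and
 * len-5, read by AAL5_LEN) and the 32-bit crc, whose low 16 bits (bytes
 * len-2 and len-1, read by TCP_CKSUM) carry the checksum when
 * RSR0_TCP_CKSUM is in use.
 */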
1641 
1642 static int
1643 he_service_rbrq(struct he_dev *he_dev, int group)
1644 {
1645 	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1646 				((unsigned long)he_dev->rbrq_base |
1647 					he_dev->hsp->group[group].rbrq_tail);
1648 	unsigned cid, lastcid = -1;
1649 	struct sk_buff *skb;
1650 	struct atm_vcc *vcc = NULL;
1651 	struct he_vcc *he_vcc;
1652 	struct he_buff *heb, *next;
1653 	int i;
1654 	int pdus_assembled = 0;
1655 	int updated = 0;
1656 
1657 	read_lock(&vcc_sklist_lock);
1658 	while (he_dev->rbrq_head != rbrq_tail) {
1659 		++updated;
1660 
1661 		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1662 			he_dev->rbrq_head, group,
1663 			RBRQ_ADDR(he_dev->rbrq_head),
1664 			RBRQ_BUFLEN(he_dev->rbrq_head),
1665 			RBRQ_CID(he_dev->rbrq_head),
1666 			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1667 			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1668 			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1669 			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1670 			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1671 			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1672 
1673 		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1674 		heb = he_dev->rbpl_virt[i];
1675 
1676 		cid = RBRQ_CID(he_dev->rbrq_head);
1677 		if (cid != lastcid)
1678 			vcc = __find_vcc(he_dev, cid);
1679 		lastcid = cid;
1680 
1681 		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1682 			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1683 			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1684 				clear_bit(i, he_dev->rbpl_table);
1685 				list_del(&heb->entry);
1686 				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1687 			}
1688 
1689 			goto next_rbrq_entry;
1690 		}
1691 
1692 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1693 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1694 			atomic_inc(&vcc->stats->rx_drop);
1695 			goto return_host_buffers;
1696 		}
1697 
1698 		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1699 		clear_bit(i, he_dev->rbpl_table);
1700 		list_move_tail(&heb->entry, &he_vcc->buffers);
1701 		he_vcc->pdu_len += heb->len;
1702 
1703 		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1704 			lastcid = -1;
1705 			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1706 			wake_up(&he_vcc->rx_waitq);
1707 			goto return_host_buffers;
1708 		}
1709 
1710 		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1711 			goto next_rbrq_entry;
1712 
1713 		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1714 				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1715 			HPRINTK("%s%s (%d.%d)\n",
1716 				RBRQ_CRC_ERR(he_dev->rbrq_head)
1717 							? "CRC_ERR " : "",
1718 				RBRQ_LEN_ERR(he_dev->rbrq_head)
1719 							? "LEN_ERR" : "",
1720 							vcc->vpi, vcc->vci);
1721 			atomic_inc(&vcc->stats->rx_err);
1722 			goto return_host_buffers;
1723 		}
1724 
1725 		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1726 							GFP_ATOMIC);
1727 		if (!skb) {
1728 			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1729 			goto return_host_buffers;
1730 		}
1731 
1732 		if (rx_skb_reserve > 0)
1733 			skb_reserve(skb, rx_skb_reserve);
1734 
1735 		__net_timestamp(skb);
1736 
1737 		list_for_each_entry(heb, &he_vcc->buffers, entry)
1738 			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1739 
1740 		switch (vcc->qos.aal) {
1741 			case ATM_AAL0:
1742 				/* 2.10.1.5 raw cell receive */
1743 				skb->len = ATM_AAL0_SDU;
1744 				skb_set_tail_pointer(skb, skb->len);
1745 				break;
1746 			case ATM_AAL5:
1747 				/* 2.10.1.2 aal5 receive */
1748 
1749 				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1750 				skb_set_tail_pointer(skb, skb->len);
1751 #ifdef USE_CHECKSUM_HW
1752 				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1753 					skb->ip_summed = CHECKSUM_COMPLETE;
1754 					skb->csum = TCP_CKSUM(skb->data,
1755 							he_vcc->pdu_len);
1756 				}
1757 #endif
1758 				break;
1759 		}
1760 
1761 #ifdef should_never_happen
1762 		if (skb->len > vcc->qos.rxtp.max_sdu)
1763 			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1764 #endif
1765 
1766 #ifdef notdef
1767 		ATM_SKB(skb)->vcc = vcc;
1768 #endif
1769 		spin_unlock(&he_dev->global_lock);
1770 		vcc->push(vcc, skb);
1771 		spin_lock(&he_dev->global_lock);
1772 
1773 		atomic_inc(&vcc->stats->rx);
1774 
1775 return_host_buffers:
1776 		++pdus_assembled;
1777 
1778 		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1779 			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1780 		INIT_LIST_HEAD(&he_vcc->buffers);
1781 		he_vcc->pdu_len = 0;
1782 
1783 next_rbrq_entry:
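		/*
		 * advance the head pointer.  each queue is a power-of-two
		 * number of entries and size-aligned, so wrapping is just a
		 * matter of masking the byte offset and OR'ing it back into
		 * the ring base -- the idiom used for every queue here:
		 *
		 *	next = (type *)((unsigned long)base | MASK(cur + 1));
		 */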
1784 		he_dev->rbrq_head = (struct he_rbrq *)
1785 				((unsigned long) he_dev->rbrq_base |
1786 					RBRQ_MASK(he_dev->rbrq_head + 1));
1787 
1788 	}
1789 	read_unlock(&vcc_sklist_lock);
1790 
1791 	if (updated) {
1792 		if (updated > he_dev->rbrq_peak)
1793 			he_dev->rbrq_peak = updated;
1794 
1795 		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1796 						G0_RBRQ_H + (group * 16));
1797 	}
1798 
1799 	return pdus_assembled;
1800 }
1801 
1802 static void
1803 he_service_tbrq(struct he_dev *he_dev, int group)
1804 {
1805 	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1806 				((unsigned long)he_dev->tbrq_base |
1807 					he_dev->hsp->group[group].tbrq_tail);
1808 	struct he_tpd *tpd;
1809 	int slot, updated = 0;
1810 	struct he_tpd *__tpd;
1811 
1812 	/* 2.1.6 transmit buffer return queue */
1813 
1814 	while (he_dev->tbrq_head != tbrq_tail) {
1815 		++updated;
1816 
1817 		HPRINTK("tbrq%d 0x%x%s%s\n",
1818 			group,
1819 			TBRQ_TPD(he_dev->tbrq_head),
1820 			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1821 			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1822 		tpd = NULL;
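		/* the adapter returns only the tpd's dma address; match it
		   against the outstanding tpd list to recover the driver-side
		   state for this transmit */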
1823 		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1824 			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1825 				tpd = __tpd;
1826 				list_del(&__tpd->entry);
1827 				break;
1828 			}
1829 		}
1830 
1831 		if (tpd == NULL) {
1832 			hprintk("unable to locate tpd for dma buffer %x\n",
1833 						TBRQ_TPD(he_dev->tbrq_head));
1834 			goto next_tbrq_entry;
1835 		}
1836 
1837 		if (TBRQ_EOS(he_dev->tbrq_head)) {
1838 			if (tpd->vcc) {	/* vcc may already be gone; check before any dereference */
1839 				HPRINTK("wake_up(tx_waitq) cid 0x%x\n", he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1840 				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1841 			}
1842 
1843 			goto next_tbrq_entry;
1844 		}
1845 
1846 		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1847 			if (tpd->iovec[slot].addr)
1848 				pci_unmap_single(he_dev->pci_dev,
1849 					tpd->iovec[slot].addr,
1850 					tpd->iovec[slot].len & TPD_LEN_MASK,
1851 							PCI_DMA_TODEVICE);
1852 			if (tpd->iovec[slot].len & TPD_LST)
1853 				break;
1854 
1855 		}
1856 
1857 		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1858 			if (tpd->vcc && tpd->vcc->pop)
1859 				tpd->vcc->pop(tpd->vcc, tpd->skb);
1860 			else
1861 				dev_kfree_skb_any(tpd->skb);
1862 		}
1863 
1864 next_tbrq_entry:
1865 		if (tpd)
1866 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1867 		he_dev->tbrq_head = (struct he_tbrq *)
1868 				((unsigned long) he_dev->tbrq_base |
1869 					TBRQ_MASK(he_dev->tbrq_head + 1));
1870 	}
1871 
1872 	if (updated) {
1873 		if (updated > he_dev->tbrq_peak)
1874 			he_dev->tbrq_peak = updated;
1875 
1876 		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1877 						G0_TBRQ_H + (group * 16));
1878 	}
1879 }
1880 
1881 static void
1882 he_service_rbpl(struct he_dev *he_dev, int group)
1883 {
1884 	struct he_rbp *new_tail;
1885 	struct he_rbp *rbpl_head;
1886 	struct he_buff *heb;
1887 	dma_addr_t mapping;
1888 	int i;
1889 	int moved = 0;
1890 
1891 	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1892 					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1893 
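	/*
	 * refill the receive buffer pool: for every free slot in the
	 * rbpl_table bitmap, hand the adapter a freshly allocated he_buff.
	 * the slot index is encoded in the rbp entry so that the rbrq
	 * handler can map a returned buffer back to its he_buff through
	 * rbpl_virt[]
	 */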
1894 	for (;;) {
1895 		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1896 						RBPL_MASK(he_dev->rbpl_tail+1));
1897 
1898 		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1899 		if (new_tail == rbpl_head)
1900 			break;
1901 
1902 		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1903 		if (i >= RBPL_TABLE_SIZE) {
1904 			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1905 			if (i >= RBPL_TABLE_SIZE)
1906 				break;
1907 		}
1908 		he_dev->rbpl_hint = i + 1;
1909 
1910 		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1911 		if (!heb)
1912 			break;
1913 		heb->mapping = mapping;
1914 		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1915 		he_dev->rbpl_virt[i] = heb;
1916 		set_bit(i, he_dev->rbpl_table);
1917 		new_tail->idx = i << RBP_IDX_OFFSET;
1918 		new_tail->phys = mapping + offsetof(struct he_buff, data);
1919 
1920 		he_dev->rbpl_tail = new_tail;
1921 		++moved;
1922 	}
1923 
1924 	if (moved)
1925 		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1926 }
1927 
1928 static void
1929 he_tasklet(unsigned long data)
1930 {
1931 	unsigned long flags;
1932 	struct he_dev *he_dev = (struct he_dev *) data;
1933 	int group, type;
1934 	int updated = 0;
1935 
1936 	HPRINTK("tasklet (0x%lx)\n", data);
1937 	spin_lock_irqsave(&he_dev->global_lock, flags);
1938 
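	/* drain the interrupt event queue; each isw word encodes an event
	   type and group, dispatched below */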
1939 	while (he_dev->irq_head != he_dev->irq_tail) {
1940 		++updated;
1941 
1942 		type = ITYPE_TYPE(he_dev->irq_head->isw);
1943 		group = ITYPE_GROUP(he_dev->irq_head->isw);
1944 
1945 		switch (type) {
1946 			case ITYPE_RBRQ_THRESH:
1947 				HPRINTK("rbrq%d threshold\n", group);
1948 				/* fall through */
1949 			case ITYPE_RBRQ_TIMER:
1950 				if (he_service_rbrq(he_dev, group))
1951 					he_service_rbpl(he_dev, group);
1952 				break;
1953 			case ITYPE_TBRQ_THRESH:
1954 				HPRINTK("tbrq%d threshold\n", group);
1955 				/* fall through */
1956 			case ITYPE_TPD_COMPLETE:
1957 				he_service_tbrq(he_dev, group);
1958 				break;
1959 			case ITYPE_RBPL_THRESH:
1960 				he_service_rbpl(he_dev, group);
1961 				break;
1962 			case ITYPE_RBPS_THRESH:
1963 				/* shouldn't happen unless small buffers enabled */
1964 				break;
1965 			case ITYPE_PHY:
1966 				HPRINTK("phy interrupt\n");
1967 #ifdef CONFIG_ATM_HE_USE_SUNI
1968 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1969 				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1970 					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1971 				spin_lock_irqsave(&he_dev->global_lock, flags);
1972 #endif
1973 				break;
1974 			case ITYPE_OTHER:
1975 				switch (type|group) {
1976 					case ITYPE_PARITY:
1977 						hprintk("parity error\n");
1978 						break;
1979 					case ITYPE_ABORT:
1980 						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1981 						break;
1982 				}
1983 				break;
1984 			case ITYPE_TYPE(ITYPE_INVALID):
1985 				/* see 8.1.1 -- check all queues */
1986 
1987 				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1988 
1989 				he_service_rbrq(he_dev, 0);
1990 				he_service_rbpl(he_dev, 0);
1991 				he_service_tbrq(he_dev, 0);
1992 				break;
1993 			default:
1994 				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1995 		}
1996 
1997 		he_dev->irq_head->isw = ITYPE_INVALID;
1998 
1999 		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2000 	}
2001 
2002 	if (updated) {
2003 		if (updated > he_dev->irq_peak)
2004 			he_dev->irq_peak = updated;
2005 
2006 		he_writel(he_dev,
2007 			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2008 			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2009 			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2010 		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2011 	}
2012 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2013 }
2014 
2015 static irqreturn_t
2016 he_irq_handler(int irq, void *dev_id)
2017 {
2018 	unsigned long flags;
2019 	struct he_dev *he_dev = (struct he_dev * )dev_id;
2020 	int handled = 0;
2021 
2022 	if (he_dev == NULL)
2023 		return IRQ_NONE;
2024 
2025 	spin_lock_irqsave(&he_dev->global_lock, flags);
2026 
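	/* the adapter dma's its current queue tail offset into host memory
	   (irq_tailoffset); turn it back into a pointer into the ring */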
2027 	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2028 						(*he_dev->irq_tailoffset << 2));
2029 
2030 	if (he_dev->irq_tail == he_dev->irq_head) {
2031 		HPRINTK("tailoffset not updated?\n");
2032 		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2033 			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2034 		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2035 	}
2036 
2037 #ifdef DEBUG
2038 	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2039 		hprintk("spurious (or shared) interrupt?\n");
2040 #endif
2041 
2042 	if (he_dev->irq_head != he_dev->irq_tail) {
2043 		handled = 1;
2044 		tasklet_schedule(&he_dev->tasklet);
2045 		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2046 		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2047 	}
2048 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2049 	return IRQ_RETVAL(handled);
2050 
2051 }
2052 
2053 static __inline__ void
2054 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2055 {
2056 	struct he_tpdrq *new_tail;
2057 
2058 	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2059 					tpd, cid, he_dev->tpdrq_tail);
2060 
2061 	/* new_tail = he_dev->tpdrq_tail; */
2062 	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2063 					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2064 
2065 	/*
2066 	 * check to see if we are about to set the tail == head
2067 	 * if true, update the head pointer from the adapter
2068 	 * to see if this is really the case (reading the queue
2069 	 * head for every enqueue would be unnecessarily slow)
2070 	 */
2071 
2072 	if (new_tail == he_dev->tpdrq_head) {
2073 		he_dev->tpdrq_head = (struct he_tpdrq *)
2074 			(((unsigned long)he_dev->tpdrq_base) |
2075 				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2076 
2077 		if (new_tail == he_dev->tpdrq_head) {
2078 			int slot;
2079 
2080 			hprintk("tpdrq full (cid 0x%x)\n", cid);
2081 			/*
2082 			 * FIXME
2083 			 * push tpd onto a transmit backlog queue
2084 			 * after service_tbrq, service the backlog
2085 			 * for now, we just drop the pdu
2086 			 */
2087 			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2088 				if (tpd->iovec[slot].addr)
2089 					pci_unmap_single(he_dev->pci_dev,
2090 						tpd->iovec[slot].addr,
2091 						tpd->iovec[slot].len & TPD_LEN_MASK,
2092 								PCI_DMA_TODEVICE);
2093 			}
2094 			if (tpd->skb) {
2095 				if (tpd->vcc->pop)
2096 					tpd->vcc->pop(tpd->vcc, tpd->skb);
2097 				else
2098 					dev_kfree_skb_any(tpd->skb);
2099 				atomic_inc(&tpd->vcc->stats->tx_err);
2100 			}
2101 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2102 			return;
2103 		}
2104 	}
2105 
2106 	/* 2.1.5 transmit packet descriptor ready queue */
2107 	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2108 	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2109 	he_dev->tpdrq_tail->cid = cid;
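	/* make sure the tpd address and cid are in memory before the
	   adapter sees the new tail pointer below */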
2110 	wmb();
2111 
2112 	he_dev->tpdrq_tail = new_tail;
2113 
2114 	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2115 	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2116 }
2117 
2118 static int
2119 he_open(struct atm_vcc *vcc)
2120 {
2121 	unsigned long flags;
2122 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2123 	struct he_vcc *he_vcc;
2124 	int err = 0;
2125 	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2126 	short vpi = vcc->vpi;
2127 	int vci = vcc->vci;
2128 
2129 	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2130 		return 0;
2131 
2132 	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2133 
2134 	set_bit(ATM_VF_ADDR, &vcc->flags);
2135 
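	/* he_mkcid() packs vpi/vci into the connection id used to index the
	   adapter's per-connection (tsr/rsr) state */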
2136 	cid = he_mkcid(he_dev, vpi, vci);
2137 
2138 	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2139 	if (he_vcc == NULL) {
2140 		hprintk("unable to allocate he_vcc during open\n");
2141 		return -ENOMEM;
2142 	}
2143 
2144 	INIT_LIST_HEAD(&he_vcc->buffers);
2145 	he_vcc->pdu_len = 0;
2146 	he_vcc->rc_index = -1;
2147 
2148 	init_waitqueue_head(&he_vcc->rx_waitq);
2149 	init_waitqueue_head(&he_vcc->tx_waitq);
2150 
2151 	vcc->dev_data = he_vcc;
2152 
2153 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2154 		int pcr_goal;
2155 
2156 		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2157 		if (pcr_goal == 0)
2158 			pcr_goal = he_dev->atm_dev->link_rate;
2159 		if (pcr_goal < 0)	/* means round down, technically */
2160 			pcr_goal = -pcr_goal;
2161 
2162 		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2163 
2164 		switch (vcc->qos.aal) {
2165 			case ATM_AAL5:
2166 				tsr0_aal = TSR0_AAL5;
2167 				tsr4 = TSR4_AAL5;
2168 				break;
2169 			case ATM_AAL0:
2170 				tsr0_aal = TSR0_AAL0_SDU;
2171 				tsr4 = TSR4_AAL0_SDU;
2172 				break;
2173 			default:
2174 				err = -EINVAL;
2175 				goto open_failed;
2176 		}
2177 
2178 		spin_lock_irqsave(&he_dev->global_lock, flags);
2179 		tsr0 = he_readl_tsr0(he_dev, cid);
2180 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2181 
2182 		if (TSR0_CONN_STATE(tsr0) != 0) {
2183 			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2184 			err = -EBUSY;
2185 			goto open_failed;
2186 		}
2187 
2188 		switch (vcc->qos.txtp.traffic_class) {
2189 			case ATM_UBR:
2190 				/* 2.3.3.1 open connection ubr */
2191 
2192 				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2193 					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2194 				break;
2195 
2196 			case ATM_CBR:
2197 				/* 2.3.3.2 open connection cbr */
2198 
2199 				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2200 				if ((he_dev->total_bw + pcr_goal)
2201 					> (he_dev->atm_dev->link_rate * 9 / 10))
2202 				{
2203 					err = -EBUSY;
2204 					goto open_failed;
2205 				}
2206 
2207 				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2208 
2209 				/* find an unused cs_stper register */
2210 				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2211 					if (he_dev->cs_stper[reg].inuse == 0 ||
2212 					    he_dev->cs_stper[reg].pcr == pcr_goal)
2213 							break;
2214 
2215 				if (reg == HE_NUM_CS_STPER) {
2216 					err = -EBUSY;
2217 					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2218 					goto open_failed;
2219 				}
2220 
2221 				he_dev->total_bw += pcr_goal;
2222 
2223 				he_vcc->rc_index = reg;
2224 				++he_dev->cs_stper[reg].inuse;
2225 				he_dev->cs_stper[reg].pcr = pcr_goal;
2226 
2227 				clock = he_is622(he_dev) ? 66667000 : 50000000;
2228 				period = clock / pcr_goal;
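				/*
				 * worked example (illustrative numbers only):
				 * on a 155 Mb/s card the clock is 50 MHz, so
				 * a pcr_goal of 100000 cells/s gives
				 * period = 500, and CS_STPER below is
				 * programmed with rate_to_atmf(250)
				 */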
2229 
2230 				HPRINTK("rc_index = %d period = %d\n",
2231 								reg, period);
2232 
2233 				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2234 							CS_STPER0 + reg);
2235 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2236 
2237 				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2238 							TSR0_RC_INDEX(reg);
2239 
2240 				break;
2241 			default:
2242 				err = -EINVAL;
2243 				goto open_failed;
2244 		}
2245 
2246 		spin_lock_irqsave(&he_dev->global_lock, flags);
2247 
2248 		he_writel_tsr0(he_dev, tsr0, cid);
2249 		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2250 		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2251 					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2252 		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2253 		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2254 
2255 		he_writel_tsr3(he_dev, 0x0, cid);
2256 		he_writel_tsr5(he_dev, 0x0, cid);
2257 		he_writel_tsr6(he_dev, 0x0, cid);
2258 		he_writel_tsr7(he_dev, 0x0, cid);
2259 		he_writel_tsr8(he_dev, 0x0, cid);
2260 		he_writel_tsr10(he_dev, 0x0, cid);
2261 		he_writel_tsr11(he_dev, 0x0, cid);
2262 		he_writel_tsr12(he_dev, 0x0, cid);
2263 		he_writel_tsr13(he_dev, 0x0, cid);
2264 		he_writel_tsr14(he_dev, 0x0, cid);
2265 		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2266 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2267 	}
2268 
2269 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2270 		unsigned aal;
2271 
2272 		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2273 					&HE_VCC(vcc)->rx_waitq);
2274 
2275 		switch (vcc->qos.aal) {
2276 			case ATM_AAL5:
2277 				aal = RSR0_AAL5;
2278 				break;
2279 			case ATM_AAL0:
2280 				aal = RSR0_RAWCELL;
2281 				break;
2282 			default:
2283 				err = -EINVAL;
2284 				goto open_failed;
2285 		}
2286 
2287 		spin_lock_irqsave(&he_dev->global_lock, flags);
2288 
2289 		rsr0 = he_readl_rsr0(he_dev, cid);
2290 		if (rsr0 & RSR0_OPEN_CONN) {
2291 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2292 
2293 			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2294 			err = -EBUSY;
2295 			goto open_failed;
2296 		}
2297 
2298 		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2299 		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2300 		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2301 				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2302 
2303 #ifdef USE_CHECKSUM_HW
2304 		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2305 			rsr0 |= RSR0_TCP_CKSUM;
2306 #endif
2307 
2308 		he_writel_rsr4(he_dev, rsr4, cid);
2309 		he_writel_rsr1(he_dev, rsr1, cid);
2310 		/* 5.1.11 last parameter initialized should be
2311 			  the open/closed indication in rsr0 */
2312 		he_writel_rsr0(he_dev,
2313 			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2314 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2315 
2316 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2317 	}
2318 
2319 open_failed:
2320 
2321 	if (err) {
2322 		kfree(he_vcc);
2323 		clear_bit(ATM_VF_ADDR, &vcc->flags);
2324 	} else
2326 		set_bit(ATM_VF_READY, &vcc->flags);
2327 
2328 	return err;
2329 }
2330 
2331 static void
2332 he_close(struct atm_vcc *vcc)
2333 {
2334 	unsigned long flags;
2335 	DECLARE_WAITQUEUE(wait, current);
2336 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2337 	struct he_tpd *tpd;
2338 	unsigned cid;
2339 	struct he_vcc *he_vcc = HE_VCC(vcc);
2340 #define MAX_RETRY 30
2341 	int retry = 0, sleep = 1, tx_inuse;
2342 
2343 	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2344 
2345 	clear_bit(ATM_VF_READY, &vcc->flags);
2346 	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2347 
2348 	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2349 		int timeout;
2350 
2351 		HPRINTK("close rx cid 0x%x\n", cid);
2352 
2353 		/* 2.7.2.2 close receive operation */
2354 
2355 		/* wait for previous close (if any) to finish */
2356 
2357 		spin_lock_irqsave(&he_dev->global_lock, flags);
2358 		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2359 			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2360 			udelay(250);
2361 		}
2362 
2363 		set_current_state(TASK_UNINTERRUPTIBLE);
2364 		add_wait_queue(&he_vcc->rx_waitq, &wait);
2365 
2366 		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2367 		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2368 		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2369 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2370 
2371 		timeout = schedule_timeout(30*HZ);
2372 
2373 		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2374 		set_current_state(TASK_RUNNING);
2375 
2376 		if (timeout == 0)
2377 			hprintk("close rx timeout cid 0x%x\n", cid);
2378 
2379 		HPRINTK("close rx cid 0x%x complete\n", cid);
2380 
2381 	}
2382 
2383 	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2384 		unsigned tsr4, tsr0;
2385 		int timeout;
2386 
2387 		HPRINTK("close tx cid 0x%x\n", cid);
2388 
2389 		/* 2.1.2
2390 		 *
2391 		 * ... the host must first stop queueing packets to the TPDRQ
2392 		 * on the connection to be closed, then wait for all outstanding
2393 		 * packets to be transmitted and their buffers returned to the
2394 		 * TBRQ. When the last packet on the connection arrives in the
2395 		 * TBRQ, the host issues the close command to the adapter.
2396 		 */
2397 
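		/*
		 * poll sk_wmem_alloc (> 1 while tx skbs are still queued)
		 * with roughly exponential backoff -- 1ms, 2ms, ... capped
		 * near 250ms -- for at most MAX_RETRY iterations
		 */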
2398 		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2399 		       (retry < MAX_RETRY)) {
2400 			msleep(sleep);
2401 			if (sleep < 250)
2402 				sleep = sleep * 2;
2403 
2404 			++retry;
2405 		}
2406 
2407 		if (tx_inuse > 1)
2408 			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2409 
2410 		/* 2.3.1.1 generic close operations with flush */
2411 
2412 		spin_lock_irqsave(&he_dev->global_lock, flags);
2413 		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2414 					/* also clears TSR4_SESSION_ENDED */
2415 
2416 		switch (vcc->qos.txtp.traffic_class) {
2417 			case ATM_UBR:
2418 				he_writel_tsr1(he_dev,
2419 					TSR1_MCR(rate_to_atmf(200000))
2420 					| TSR1_PCR(0), cid);
2421 				break;
2422 			case ATM_CBR:
2423 				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2424 				break;
2425 		}
2426 		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2427 
2428 		tpd = __alloc_tpd(he_dev);
2429 		if (tpd == NULL) {
2430 			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2431 			goto close_tx_incomplete;
2432 		}
2433 		tpd->status |= TPD_EOS | TPD_INT;
2434 		tpd->skb = NULL;
2435 		tpd->vcc = vcc;
2436 		wmb();
2437 
2438 		set_current_state(TASK_UNINTERRUPTIBLE);
2439 		add_wait_queue(&he_vcc->tx_waitq, &wait);
2440 		__enqueue_tpd(he_dev, tpd, cid);
2441 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2442 
2443 		timeout = schedule_timeout(30*HZ);
2444 
2445 		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2446 		set_current_state(TASK_RUNNING);
2447 
2448 		spin_lock_irqsave(&he_dev->global_lock, flags);
2449 
2450 		if (timeout == 0) {
2451 			hprintk("close tx timeout cid 0x%x\n", cid);
2452 			goto close_tx_incomplete;
2453 		}
2454 
2455 		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2456 			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2457 			udelay(250);
2458 		}
2459 
2460 		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2461 			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2462 			udelay(250);
2463 		}
2464 
2465 close_tx_incomplete:
2466 
2467 		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2468 			int reg = he_vcc->rc_index;
2469 
2470 			HPRINTK("cs_stper reg = %d\n", reg);
2471 
2472 			if (he_dev->cs_stper[reg].inuse == 0)
2473 				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2474 			else
2475 				--he_dev->cs_stper[reg].inuse;
2476 
2477 			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2478 		}
2479 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2480 
2481 		HPRINTK("close tx cid 0x%x complete\n", cid);
2482 	}
2483 
2484 	kfree(he_vcc);
2485 
2486 	clear_bit(ATM_VF_ADDR, &vcc->flags);
2487 }
2488 
2489 static int
2490 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2491 {
2492 	unsigned long flags;
2493 	struct he_dev *he_dev = HE_DEV(vcc->dev);
2494 	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2495 	struct he_tpd *tpd;
2496 #ifdef USE_SCATTERGATHER
2497 	int i, slot = 0;
2498 #endif
2499 
2500 #define HE_TPD_BUFSIZE 0xffff
2501 
2502 	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2503 
2504 	if ((skb->len > HE_TPD_BUFSIZE) ||
2505 	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2506 		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2507 		if (vcc->pop)
2508 			vcc->pop(vcc, skb);
2509 		else
2510 			dev_kfree_skb_any(skb);
2511 		atomic_inc(&vcc->stats->tx_err);
2512 		return -EINVAL;
2513 	}
2514 
2515 #ifndef USE_SCATTERGATHER
2516 	if (skb_shinfo(skb)->nr_frags) {
2517 		hprintk("no scatter/gather support\n");
2518 		if (vcc->pop)
2519 			vcc->pop(vcc, skb);
2520 		else
2521 			dev_kfree_skb_any(skb);
2522 		atomic_inc(&vcc->stats->tx_err);
2523 		return -EINVAL;
2524 	}
2525 #endif
2526 	spin_lock_irqsave(&he_dev->global_lock, flags);
2527 
2528 	tpd = __alloc_tpd(he_dev);
2529 	if (tpd == NULL) {
2530 		if (vcc->pop)
2531 			vcc->pop(vcc, skb);
2532 		else
2533 			dev_kfree_skb_any(skb);
2534 		atomic_inc(&vcc->stats->tx_err);
2535 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2536 		return -ENOMEM;
2537 	}
2538 
2539 	if (vcc->qos.aal == ATM_AAL5)
2540 		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2541 	else {
2542 		char *pti_clp = (void *) (skb->data + 3);
2543 		int clp, pti;
2544 
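		/*
		 * raw (aal0) cells arrive from the stack as a 52-byte sdu:
		 * a 4-byte cell header (no hec) plus the 48-byte payload.
		 * pull pti/clp out of header byte 3, carry them in the tpd,
		 * and strip the header so only the payload is dma'd
		 */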
2545 		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2546 		clp = (*pti_clp & ATM_HDR_CLP);
2547 		tpd->status |= TPD_CELLTYPE(pti);
2548 		if (clp)
2549 			tpd->status |= TPD_CLP;
2550 
2551 		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2552 	}
2553 
2554 #ifdef USE_SCATTERGATHER
2555 	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2556 				skb_headlen(skb), PCI_DMA_TODEVICE);
2557 	tpd->iovec[slot].len = skb_headlen(skb);
2558 	++slot;
2559 
2560 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562 
2563 		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2564 			tpd->vcc = vcc;
2565 			tpd->skb = NULL;	/* not the last fragment
2566 						   so don't ->push() yet */
2567 			wmb();
2568 
2569 			__enqueue_tpd(he_dev, tpd, cid);
2570 			tpd = __alloc_tpd(he_dev);
2571 			if (tpd == NULL) {
2572 				if (vcc->pop)
2573 					vcc->pop(vcc, skb);
2574 				else
2575 					dev_kfree_skb_any(skb);
2576 				atomic_inc(&vcc->stats->tx_err);
2577 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2578 				return -ENOMEM;
2579 			}
2580 			tpd->status |= TPD_CELLTYPE(TPD_USERCELL);	/* match the celltype encoding used above */
2581 			slot = 0;
2582 		}
2583 
2584 		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2585 			skb_frag_address(frag),	/* the skb_frag accessors handle the frag page representation */
2586 				skb_frag_size(frag), PCI_DMA_TODEVICE);
2587 		tpd->iovec[slot].len = skb_frag_size(frag);
2588 		++slot;
2589 
2590 	}
2591 
2592 	tpd->iovec[slot - 1].len |= TPD_LST;
2593 #else
2594 	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2595 	tpd->length0 = skb->len | TPD_LST;
2596 #endif
2597 	tpd->status |= TPD_INT;
2598 
2599 	tpd->vcc = vcc;
2600 	tpd->skb = skb;
2601 	wmb();
2602 	ATM_SKB(skb)->vcc = vcc;
2603 
2604 	__enqueue_tpd(he_dev, tpd, cid);
2605 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2606 
2607 	atomic_inc(&vcc->stats->tx);
2608 
2609 	return 0;
2610 }
2611 
2612 static int
2613 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2614 {
2615 	unsigned long flags;
2616 	struct he_dev *he_dev = HE_DEV(atm_dev);
2617 	struct he_ioctl_reg reg;
2618 	int err = 0;
2619 
2620 	switch (cmd) {
2621 		case HE_GET_REG:
2622 			if (!capable(CAP_NET_ADMIN))
2623 				return -EPERM;
2624 
2625 			if (copy_from_user(&reg, arg,
2626 					   sizeof(struct he_ioctl_reg)))
2627 				return -EFAULT;
2628 
2629 			spin_lock_irqsave(&he_dev->global_lock, flags);
2630 			switch (reg.type) {
2631 				case HE_REGTYPE_PCI:
2632 					if (reg.addr >= HE_REGMAP_SIZE) {
2633 						err = -EINVAL;
2634 						break;
2635 					}
2636 
2637 					reg.val = he_readl(he_dev, reg.addr);
2638 					break;
2639 				case HE_REGTYPE_RCM:
2640 					reg.val =
2641 						he_readl_rcm(he_dev, reg.addr);
2642 					break;
2643 				case HE_REGTYPE_TCM:
2644 					reg.val =
2645 						he_readl_tcm(he_dev, reg.addr);
2646 					break;
2647 				case HE_REGTYPE_MBOX:
2648 					reg.val =
2649 						he_readl_mbox(he_dev, reg.addr);
2650 					break;
2651 				default:
2652 					err = -EINVAL;
2653 					break;
2654 			}
2655 			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2656 			if (err == 0)
2657 				if (copy_to_user(arg, &reg,
2658 							sizeof(struct he_ioctl_reg)))
2659 					return -EFAULT;
2660 			break;
2661 		default:
2662 #ifdef CONFIG_ATM_HE_USE_SUNI
2663 			if (atm_dev->phy && atm_dev->phy->ioctl)
2664 				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2665 #else /* CONFIG_ATM_HE_USE_SUNI */
2666 			err = -EINVAL;
2667 #endif /* CONFIG_ATM_HE_USE_SUNI */
2668 			break;
2669 	}
2670 
2671 	return err;
2672 }
2673 
2674 static void
2675 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2676 {
2677 	unsigned long flags;
2678 	struct he_dev *he_dev = HE_DEV(atm_dev);
2679 
2680 	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2681 
2682 	spin_lock_irqsave(&he_dev->global_lock, flags);
2683 	he_writel(he_dev, val, FRAMER + (addr*4));
2684 	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2685 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2686 }
2687 
2688 
2689 static unsigned char
2690 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2691 {
2692 	unsigned long flags;
2693 	struct he_dev *he_dev = HE_DEV(atm_dev);
2694 	unsigned reg;
2695 
2696 	spin_lock_irqsave(&he_dev->global_lock, flags);
2697 	reg = he_readl(he_dev, FRAMER + (addr*4));
2698 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2699 
2700 	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2701 	return reg;
2702 }
2703 
2704 static int
2705 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2706 {
2707 	unsigned long flags;
2708 	struct he_dev *he_dev = HE_DEV(dev);
2709 	int left, i;
2710 #ifdef notdef
2711 	struct he_rbrq *rbrq_tail;
2712 	struct he_tpdrq *tpdrq_head;
2713 	int rbpl_head, rbpl_tail, inuse;
2714 #endif
2715 	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2716 
2717 
2718 	left = *pos;
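	/* classic atm proc_read convention: emit one line per call, with
	   *pos selecting the line, and return 0 when there is no more */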
2719 	if (!left--)
2720 		return sprintf(page, "ATM he driver\n");
2721 
2722 	if (!left--)
2723 		return sprintf(page, "%s%s\n\n",
2724 			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2725 
2726 	if (!left--)
2727 		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2728 
2729 	spin_lock_irqsave(&he_dev->global_lock, flags);
2730 	mcc += he_readl(he_dev, MCC);
2731 	oec += he_readl(he_dev, OEC);
2732 	dcc += he_readl(he_dev, DCC);
2733 	cec += he_readl(he_dev, CEC);
2734 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2735 
2736 	if (!left--)
2737 		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2738 							mcc, oec, dcc, cec);
2739 
2740 	if (!left--)
2741 		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2742 				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2743 
2744 	if (!left--)
2745 		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2746 						CONFIG_TPDRQ_SIZE);
2747 
2748 	if (!left--)
2749 		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2750 				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2751 
2752 	if (!left--)
2753 		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2754 					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2755 
2756 
2757 #ifdef notdef
2758 	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2759 	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2760 
2761 	inuse = rbpl_head - rbpl_tail;
2762 	if (inuse < 0)
2763 		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2764 	inuse /= sizeof(struct he_rbp);
2765 
2766 	if (!left--)
2767 		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2768 						CONFIG_RBPL_SIZE, inuse);
2769 #endif
2770 
2771 	if (!left--)
2772 		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2773 
2774 	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2775 		if (!left--)
2776 			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2777 						he_dev->cs_stper[i].pcr,
2778 						he_dev->cs_stper[i].inuse);
2779 
2780 	if (!left--)
2781 		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2782 			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);	/* 90% cbr admission limit, see 8.2.3 in he_open() */
2783 
2784 	return 0;
2785 }
2786 
2787 /* eeprom routines  -- see 4.7 */
2788 
2789 static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2790 {
2791 	u32 val = 0, tmp_read = 0;
2792 	int i, j = 0;
2793 	u8 byte_read = 0;
2794 
2795 	val = readl(he_dev->membase + HOST_CNTL);
2796 	val &= 0xFFFFE0FF;
2797 
2798 	/* Turn on write enable */
2799 	val |= 0x800;
2800 	he_writel(he_dev, val, HOST_CNTL);
2801 
2802 	/* Send READ instruction */
2803 	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2804 		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2805 		udelay(EEPROM_DELAY);
2806 	}
2807 
2808 	/* Next, we need to send the byte address to read from */
2809 	for (i = 7; i >= 0; i--) {
2810 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2811 		udelay(EEPROM_DELAY);
2812 		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2813 		udelay(EEPROM_DELAY);
2814 	}
2815 
2816 	j = 0;
2817 
2818 	val &= 0xFFFFF7FF;      /* Turn off write enable */
2819 	he_writel(he_dev, val, HOST_CNTL);
2820 
2821 	/* Now, we can read data from the EEPROM by clocking it in */
2822 	for (i = 7; i >= 0; i--) {
2823 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2824 		udelay(EEPROM_DELAY);
2825 		tmp_read = he_readl(he_dev, HOST_CNTL);
2826 		byte_read |= (unsigned char)
2827 			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2828 		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2829 		udelay(EEPROM_DELAY);
2830 	}
2831 
2832 	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2833 	udelay(EEPROM_DELAY);
2834 
2835 	return byte_read;
2836 }
2837 
2838 MODULE_LICENSE("GPL");
2839 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2840 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2841 module_param(disable64, bool, 0);
2842 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2843 module_param(nvpibits, short, 0);
2844 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2845 module_param(nvcibits, short, 0);
2846 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2847 module_param(rx_skb_reserve, short, 0);
2848 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2849 module_param(irq_coalesce, bool, 0);
2850 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2851 module_param(sdh, bool, 0);
2852 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2853 
2854 static struct pci_device_id he_pci_tbl[] = {
2855 	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2856 	{ 0, }
2857 };
2858 
2859 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2860 
2861 static struct pci_driver he_driver = {
2862 	.name =		"he",
2863 	.probe =	he_init_one,
2864 	.remove =	he_remove_one,
2865 	.id_table =	he_pci_tbl,
2866 };
2867 
2868 module_pci_driver(he_driver);
2869