/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the Mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32-bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */
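
/* SWAP16()/SWAP32() used throughout below are supplied by the
 * board-specific file that includes this one.  For the word-swapped
 * 'big-endian' mode described above, a minimal sketch of what the
 * parisc wrapper (lasi_82596.c) is assumed to define:
 *
 *	#define SWAP32(x)	(((u32)(x) << 16) | ((u32)(x) >> 16))
 *	#define SWAP16(x)	(x)
 *
 * so 0x12345678 is stored as 0x56781234, and 16-bit quantities need
 * no conversion on a big-endian CPU.
 */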

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gfp.h>

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x, y)	if (i596_debug & (x)) { y; }


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
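
/* mpu_port() (board-specific, declared below) issues these commands.
 * A rough sketch of the assumed sequence on boards that need the word
 * swap: combine the command code with the physical address and write
 * the two 16-bit halves separately, most significant word first --
 *
 *	u32 v = (u32)cmd | (u32)addr;
 *
 * then write (v >> 16) to the port, followed by (v & 0xffff).
 */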

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define ISCP_BUSY	0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set when command completed */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define	 CUC_START	0x0100
#define	 CUC_RESUME	0x0200
#define	 CUC_SUSPEND    0x0300
#define	 CUC_ABORT	0x0400
#define	 RX_START	0x0010
#define	 RX_RESUME	0x0020
#define	 RX_SUSPEND	0x0030
#define	 RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32            ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	u32            next;
	u32            data;
	u32 cache_pad[5];		/* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPU's viewpoint */
	unsigned short status;
	unsigned short command;
	u32            b_next;	/* Address from i596 viewpoint */
};
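
/* For example, i596_add_cmd() below links a new command onto the tail
 * of the queue with:
 *
 *	lp->cmd_tail->v_next = cmd;
 *	lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
 *
 * i.e. b_next holds the bus address of the next command's status
 * field, not of the structure itself.
 */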

struct tx_cmd {
	struct i596_cmd cmd;
	u32            tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	u32            b_next;	/* Address from i596 viewpoint */
	u32            rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPU's viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

struct i596_rbd {
	/* hardware data */
	unsigned short count;
	unsigned short zero1;
	u32            b_next;
	u32            b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	u32            b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPU's viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
	u32 cache_pad[4];
#endif
};

/* These values are chosen so that struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16
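
/* Rough 32-bit arithmetic behind that claim: the four rings dominate,
 * rfds and rbds at 16 * 32 bytes each plus tx_cmds and tbds at
 * 32 * 32 bytes each, i.e. 2 * 512 + 2 * 1024 = 3072 bytes, leaving
 * about 1K for the scp/iscp/scb and the one-off command blocks --
 * comfortably inside a 4096-byte page (checked by the BUILD_BUG_ONs
 * in i82596_probe()).
 */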

struct i596_scb {
	unsigned short status;
	unsigned short command;
	u32           cmd;
	u32           rfd;
	u32           crc_err;
	u32           align_err;
	u32           resource_err;
	u32           over_err;
	u32           rcvdt_err;
	u32           short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	u32 stat;
	u32 scb;
};

struct i596_scp {
	u32 sysbus;
	u32 pad;
	u32 iscp;
};

struct i596_dma {
	struct i596_scp scp		        __attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
};

struct i596_private {
	struct i596_dma *dma;
	u32    stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	u32    last_cmd;
	int next_tx_cmd;
	int options;
	spinlock_t lock;       /* serialize access to chip */
	dma_addr_t dma_addr;
	void __iomem *mpu_port;
	void __iomem *ca;
};

static const char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /* multi IA */ };

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout(struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif

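/* DMA_WBACK(), DMA_INV() and DMA_WBACK_INV() are the cache maintenance
 * hooks supplied by the board file.  A minimal sketch of the assumed
 * parisc definitions (lasi_82596.c), in terms of dma_cache_sync():
 *
 *	#define DMA_WBACK(ndev, addr, len) \
 *		dma_cache_sync((ndev)->dev.parent, (void *)(addr), len, DMA_TO_DEVICE)
 *	#define DMA_INV(ndev, addr, len) \
 *		dma_cache_sync((ndev)->dev.parent, (void *)(addr), len, DMA_FROM_DEVICE)
 *	#define DMA_WBACK_INV(ndev, addr, len) \
 *		dma_cache_sync((ndev)->dev.parent, (void *)(addr), len, DMA_BIDIRECTIONAL)
 *
 * i.e. write back before the chip reads, invalidate before the CPU
 * reads what the chip wrote.
 */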

static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	while (--delcnt && dma->iscp.stat) {
		udelay(10);
		DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
		     dev->name, str, SWAP16(dma->iscp.stat));
		return -1;
	} else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	while (--delcnt && dma->scb.command) {
		udelay(10);
		DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str,
		       SWAP16(dma->scb.status),
		       SWAP16(dma->scb.command));
		return -1;
	} else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %08x, .rfd = %08x\n",
	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
		SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk(KERN_DEBUG
		       "cmd at %p, .status = %04x, .command = %04x,"
		       " .b_next = %08x\n",
		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
		       SWAP32(cmd->b_next));
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
	do {
		printk(KERN_DEBUG
		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
		       " count %04x\n",
		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
		       SWAP16(rfd->count));
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
	do {
		printk(KERN_DEBUG
		       "   %p .count %04x, b_next %08x, b_data %08x,"
		       " size %04x\n",
			rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
		       SWAP32(rbd->b_data), SWAP16(rbd->size));
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	DMA_INV(dev, dma, sizeof(struct i596_dma));
}


#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
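
/* Example: the bus address programmed into the chip for the SCB is
 * virt_to_dma(lp, &dma->scb), i.e. lp->dma_addr plus the offset of scb
 * within struct i596_dma.  Only valid for pointers into the single
 * block allocated in i82596_probe().
 */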

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
		if (skb == NULL)
			return -1;
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
	return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave(&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore(&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}


static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
				      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
				      rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb_ip_align(dev,
								   PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
			} else {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* Sync the buffer for CPU access
					 * before copying the frame out.
					 */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_put_data(skb, rbd->v_data,
						     pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore(&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		return -EAGAIN;
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

static void i596_tx_timeout(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, status resetting.\n",
			       dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset(dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
		DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca(dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb_any(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);

		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
		DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i596_poll_controller,
#endif
};

static int i82596_probe(struct net_device *dev)
{
	int i;
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma;

	/* This lot ensures things have been cache-line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
			      &lp->dma_addr, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!dma) {
		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
		return -ENOMEM;
	}

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	memset(dma, 0, sizeof(struct i596_dma));
	lp->dma = dma;

	dma->scb.command = 0;
	dma->scb.cmd = I596_NULL;
	dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

	i = register_netdev(dev);
	if (i) {
		dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
			       dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
		return i;
	}

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
			      dev->name, dev->base_addr, dev->dev_addr,
			      dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, dma, (int)sizeof(struct i596_dma),
			     &dma->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	struct i596_dma *dma;
	unsigned short status, ack_cmd = 0;

	lp = netdev_priv(dev);
	dma = lp->dma;

	spin_lock(&lp->lock);

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	status = SWAP16(dma->scb.status);

	DEB(DEB_INTS, printk(KERN_DEBUG
			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, dev->irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: interrupt with no events\n",
				       dev->name));
		spin_unlock(&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt completed command.\n",
				   dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt command unit inactive %x.\n",
				   dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,
			    printk(KERN_DEBUG
				   "cmd_head->status = %04x, ->command = %04x\n",
				   SWAP16(lp->cmd_head->status),
				   SWAP16(lp->cmd_head->command)));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch (SWAP16(ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if (ptr->status & SWAP16(STAT_OK)) {
					DEB(DEB_TXADDR,
					    print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if (ptr->status & SWAP16(0x0020))
						dev->stats.collisions++;
					if (!(ptr->status & SWAP16(0x0040)))
						dev->stats.tx_heartbeat_errors++;
					if (ptr->status & SWAP16(0x0400))
						dev->stats.tx_carrier_errors++;
					if (ptr->status & SWAP16(0x0800))
						dev->stats.collisions++;
					if (ptr->status & SWAP16(0x1000))
						dev->stats.tx_aborted_errors++;
				}
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

				if (status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/*
				 * Zap command so set_multicast_list() knows
				 * it is free
				 */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= SWAP16(0x1fff);
			ptr = ptr->v_next;
			DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
		}

		if (lp->cmd_head != NULL)
			ack_cmd |= CUC_START;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
		DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt received a frame.\n",
				   dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,
				    printk(KERN_DEBUG
					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
					   dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);
	DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	 * acknowledgement aside from acking the 82596 might be needed
	 * here...  but it's running acceptably without */

	ca(dev);

	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);

	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int config = 0, cnt;

	DEB(DEB_MULTI,
	    printk(KERN_DEBUG
		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		   dev->name, netdev_mc_count(dev),
		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) &&
	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) &&
	    (dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) &&
	    (dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) &&
	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (dma->cf_cmd.cmd.command)
			printk(KERN_INFO
			       "%s: config change request already queued\n",
			       dev->name);
		else {
			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
			DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &dma->cf_cmd.cmd);
		}
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
			dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &dma->mc_cmd;
		cmd->cmd.command = SWAP16(CmdMulticastList);
		cmd->mc_cnt = SWAP16(cnt * 6);	/* use the clamped count */
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,
				    printk(KERN_DEBUG
					   "%s: Adding address %pM\n",
					   dev->name, cp));
			cp += ETH_ALEN;
		}
		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}