1 // SPDX-License-Identifier: GPL-1.0+
2 /* lasi_82596.c -- driver for the Intel 82596 ethernet controller, as
3    munged into HPPA boxen.
4 
5    This driver is based upon 82596.c, original credits are below...
6    but there were too many hoops which HP wants jumped through to
7    keep this code in there in a sane manner.
8 
9    3 primary sources of the mess --
10    1) hppa needs *lots* of cacheline flushing to keep this kind of
11    MMIO running.
12 
13    2) The 82596 needs to see all of its pointers as their physical
14    address.  Thus virt_to_bus/bus_to_virt are *everywhere*.
15 
16    3) The implementation HP is using seems to be significantly pickier
17    about when and how the command and RX units are started.  Some
18    command ordering was changed.
19 
20    Examination of the mach driver leads one to believe that there
21    might be a saner way to pull this off...  anyone who feels like a
22    full rewrite can be my guest.
23 
24    Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
25 
26    02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
27    03/02/2000  changes for better/correct(?) cache-flushing (deller)
28 */
29 
30 /* 82596.c: A generic 82596 ethernet driver for linux. */
31 /*
32    Based on Apricot.c
33    Written 1994 by Mark Evans.
34    This driver is for the Apricot 82596 bus-master interface
35 
36    Modularised 12/94 Mark Evans
37 
38 
39    Modified to support the 82596 ethernet chips on 680x0 VME boards.
40    by Richard Hirst <richard@sleepie.demon.co.uk>
41    Renamed to be 82596.c
42 
43    980825:  Changed to receive directly in to sk_buffs which are
44    allocated at open() time.  Eliminates copy on incoming frames
45    (small ones are still copied).  Shared data now held in a
46    non-cached page, so we can run on 68060 in copyback mode.
47 
48    TBD:
49    * look at deferring rx frames rather than discarding (as per tulip)
50    * handle tx ring full as per tulip
51    * performance test to tune rx_copybreak
52 
53    Most of my modifications relate to the braindead big-endian
54    implementation by Intel.  When the i596 is operating in
55    'big-endian' mode, it thinks a 32 bit value of 0x12345678
56    should be stored as 0x56781234.  This is a real pain, when
57    you have linked lists which are shared by the 680x0 and the
58    i596.
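
   A note on SWAP16()/SWAP32(), which wrap every value shared with the
   chip in this file: they are supplied by the board-specific file that
   #includes this one.  On the big-endian PA-RISC boards they are
   (roughly - this is an assumption about the wrapper, not a definition
   made here) little-endian conversions along the lines of

	#define SWAP32(x)	cpu_to_le32((u32)(x))
	#define SWAP16(x)	cpu_to_le16((u16)(x))

   while a board whose byte order already matches the chip can define
   both as the identity.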
59 
60    Driver skeleton
61    Written 1993 by Donald Becker.
62    Copyright 1993 United States Government as represented by the Director,
63    National Security Agency.
64 
65    The author may be reached as becker@scyld.com, or C/O
66    Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
67 
68  */
69 
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/string.h>
73 #include <linux/errno.h>
74 #include <linux/ioport.h>
75 #include <linux/interrupt.h>
76 #include <linux/delay.h>
77 #include <linux/netdevice.h>
78 #include <linux/etherdevice.h>
79 #include <linux/skbuff.h>
80 #include <linux/types.h>
81 #include <linux/bitops.h>
82 #include <linux/dma-mapping.h>
83 #include <linux/io.h>
84 #include <linux/irq.h>
85 #include <linux/gfp.h>
86 
87 /* DEBUG flags
88  */
89 
90 #define DEB_INIT	0x0001
91 #define DEB_PROBE	0x0002
92 #define DEB_SERIOUS	0x0004
93 #define DEB_ERRORS	0x0008
94 #define DEB_MULTI	0x0010
95 #define DEB_TDR		0x0020
96 #define DEB_OPEN	0x0040
97 #define DEB_RESET	0x0080
98 #define DEB_ADDCMD	0x0100
99 #define DEB_STATUS	0x0200
100 #define DEB_STARTTX	0x0400
101 #define DEB_RXADDR	0x0800
102 #define DEB_TXADDR	0x1000
103 #define DEB_RXFRAME	0x2000
104 #define DEB_INTS	0x4000
105 #define DEB_STRUCT	0x8000
106 #define DEB_ANY		0xffff
107 
108 
109 #define DEB(x, y)	do { if (i596_debug & (x)) { y; } } while (0)
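/* Typical use, taken from init_i596_mem() below:
 *	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
 * The printk only runs when the corresponding bit is set in i596_debug.
 */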
110 
111 
112 /*
113  * The MPU_PORT command allows direct access to the 82596. With PORT access
114  * the following commands are available (p5-18). The 32-bit port command
115  * must be word-swapped with the most significant word written first.
116  * This only applies to VME boards.
117  */
118 #define PORT_RESET		0x00	/* reset 82596 */
119 #define PORT_SELFTEST		0x01	/* selftest */
120 #define PORT_ALTSCP		0x02	/* alternate SCB address */
121 #define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
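
/*
 * Illustrative sketch only - the real mpu_port() is provided by the board
 * file that #includes this one, and "write_port_half" is a made-up stand-in
 * for whatever register write that board uses.  A PORT access combines one
 * of the opcodes above with a 16-byte-aligned physical address and writes
 * the 32-bit result as two 16-bit halves:
 *
 *	u32 v = PORT_ALTSCP | (u32)scp_phys_addr;
 *	write_port_half(v & 0xffff);	// low half first, or the high half
 *	write_port_half(v >> 16);	// first on the VME boards noted above
 */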
122 
123 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
124 
125 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
126  * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
127  */
128 static int rx_copybreak = 100;
129 
130 #define PKT_BUF_SZ	1536
131 #define MAX_MC_CNT	64
132 
133 #define ISCP_BUSY	0x0001
134 
135 #define I596_NULL ((u32)0xffffffff)
136 
137 #define CMD_EOL		0x8000	/* The last command of the list, stop. */
138 #define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
139 #define CMD_INTR	0x2000	/* Interrupt after doing cmd. */
140 
141 #define CMD_FLEX	0x0008	/* Enable flexible memory model */
142 
143 enum commands {
144 	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
145 	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
146 };
147 
148 #define STAT_C		0x8000	/* Set when the command has completed */
149 #define STAT_B		0x4000	/* Command being executed */
150 #define STAT_OK		0x2000	/* Command executed ok */
151 #define STAT_A		0x1000	/* Command aborted */
152 
153 #define	 CUC_START	0x0100
154 #define	 CUC_RESUME	0x0200
155 #define	 CUC_SUSPEND    0x0300
156 #define	 CUC_ABORT	0x0400
157 #define	 RX_START	0x0010
158 #define	 RX_RESUME	0x0020
159 #define	 RX_SUSPEND	0x0030
160 #define	 RX_ABORT	0x0040
161 
162 #define TX_TIMEOUT	(HZ/20)
163 
164 
165 struct i596_reg {
166 	unsigned short porthi;
167 	unsigned short portlo;
168 	u32            ca;
169 };
170 
171 #define EOF		0x8000
172 #define SIZE_MASK	0x3fff
173 
174 struct i596_tbd {
175 	unsigned short size;
176 	unsigned short pad;
177 	u32            next;
178 	u32            data;
179 	u32 cache_pad[5];		/* Total 32 bytes... */
180 };
181 
182 /* The command structure has two 'next' pointers; v_next is the address of
183  * the next command as seen by the CPU, b_next is the address of the next
184  * command as seen by the 82596.  The b_next pointer, as used by the 82596
185  * always references the status field of the next command, rather than the
186  * v_next field, because the 82596 is unaware of v_next.  It may seem more
187  * logical to put v_next at the end of the structure, but we cannot do that
188  * because the 82596 expects other fields to be there, depending on command
189  * type.
190  */
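/*
 * For example, i596_add_cmd() below links a new command onto the tail of
 * the queue with (roughly):
 *
 *	lp->cmd_tail->v_next = cmd;                                  (CPU's view)
 *	lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status)); (chip's view)
 */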
191 
192 struct i596_cmd {
193 	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
194 	unsigned short status;
195 	unsigned short command;
196 	u32            b_next;	/* Address from i596 viewpoint */
197 };
198 
199 struct tx_cmd {
200 	struct i596_cmd cmd;
201 	u32            tbd;
202 	unsigned short size;
203 	unsigned short pad;
204 	struct sk_buff *skb;		/* So we can free it after tx */
205 	dma_addr_t dma_addr;
206 #ifdef __LP64__
207 	u32 cache_pad[6];		/* Total 64 bytes... */
208 #else
209 	u32 cache_pad[1];		/* Total 32 bytes... */
210 #endif
211 };
212 
213 struct tdr_cmd {
214 	struct i596_cmd cmd;
215 	unsigned short status;
216 	unsigned short pad;
217 };
218 
219 struct mc_cmd {
220 	struct i596_cmd cmd;
221 	short mc_cnt;
222 	char mc_addrs[MAX_MC_CNT*6];
223 };
224 
225 struct sa_cmd {
226 	struct i596_cmd cmd;
227 	char eth_addr[8];
228 };
229 
230 struct cf_cmd {
231 	struct i596_cmd cmd;
232 	char i596_config[16];
233 };
234 
235 struct i596_rfd {
236 	unsigned short stat;
237 	unsigned short cmd;
238 	u32            b_next;	/* Address from i596 viewpoint */
239 	u32            rbd;
240 	unsigned short count;
241 	unsigned short size;
242 	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
243 	struct i596_rfd *v_prev;
244 #ifndef __LP64__
245 	u32 cache_pad[2];		/* Total 32 bytes... */
246 #endif
247 };
248 
249 struct i596_rbd {
250 	/* hardware data */
251 	unsigned short count;
252 	unsigned short zero1;
253 	u32            b_next;
254 	u32            b_data;		/* Address from i596 viewpoint */
255 	unsigned short size;
256 	unsigned short zero2;
257 	/* driver data */
258 	struct sk_buff *skb;
259 	struct i596_rbd *v_next;
260 	u32            b_addr;		/* This rbd addr from i596 view */
261 	unsigned char *v_data;		/* Address from CPUs viewpoint */
262 					/* Total 32 bytes... */
263 #ifdef __LP64__
264 	u32 cache_pad[4];
265 #endif
266 };
267 
268 /* These values are chosen so struct i596_dma fits in one page... */
269 
270 #define TX_RING_SIZE 32
271 #define RX_RING_SIZE 16
272 
273 struct i596_scb {
274 	unsigned short status;
275 	unsigned short command;
276 	u32           cmd;
277 	u32           rfd;
278 	u32           crc_err;
279 	u32           align_err;
280 	u32           resource_err;
281 	u32           over_err;
282 	u32           rcvdt_err;
283 	u32           short_err;
284 	unsigned short t_on;
285 	unsigned short t_off;
286 };
287 
288 struct i596_iscp {
289 	u32 stat;
290 	u32 scb;
291 };
292 
293 struct i596_scp {
294 	u32 sysbus;
295 	u32 pad;
296 	u32 iscp;
297 };
298 
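/*
 * Everything the chip ever DMAs to or from is gathered into this single
 * structure, which the board file allocates as one DMA-able block
 * (lp->dma / lp->dma_addr).  Each member is 32-byte aligned and padded so
 * that a cacheline flush on one descriptor can never clobber its neighbour.
 */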
299 struct i596_dma {
300 	struct i596_scp scp		        __attribute__((aligned(32)));
301 	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
302 	volatile struct i596_scb scb		__attribute__((aligned(32)));
303 	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
304 	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
305 	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
306 	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
307 	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
308 	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
309 	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
310 	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
311 };
312 
313 struct i596_private {
314 	struct i596_dma *dma;
315 	u32    stat;
316 	int last_restart;
317 	struct i596_rfd *rfd_head;
318 	struct i596_rbd *rbd_head;
319 	struct i596_cmd *cmd_tail;
320 	struct i596_cmd *cmd_head;
321 	int cmd_backlog;
322 	u32    last_cmd;
323 	int next_tx_cmd;
324 	int options;
325 	spinlock_t lock;       /* serialize access to chip */
326 	dma_addr_t dma_addr;
327 	void __iomem *mpu_port;
328 	void __iomem *ca;
329 };
330 
331 static const char init_setup[] =
332 {
333 	0x8E,		/* length, prefetch on */
334 	0xC8,		/* fifo to 8, monitor off */
335 	0x80,		/* don't save bad frames */
336 	0x2E,		/* No source address insertion, 8 byte preamble */
337 	0x00,		/* priority and backoff defaults */
338 	0x60,		/* interframe spacing */
339 	0x00,		/* slot time LSB */
340 	0xf2,		/* slot time and retries */
341 	0x00,		/* promiscuous mode */
342 	0x00,		/* collision detect */
343 	0x40,		/* minimum frame length */
344 	0xff,
345 	0x00,
346 	0x7f		/* multi IA */ };
347 
348 static int i596_open(struct net_device *dev);
349 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
350 static irqreturn_t i596_interrupt(int irq, void *dev_id);
351 static int i596_close(struct net_device *dev);
352 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
353 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
354 static void print_eth(unsigned char *buf, char *str);
355 static void set_multicast_list(struct net_device *dev);
356 static inline void ca(struct net_device *dev);
357 static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
358 
359 static int rx_ring_size = RX_RING_SIZE;
360 static int ticks_limit = 100;
361 static int max_cmd_backlog = TX_RING_SIZE-1;
362 
363 #ifdef CONFIG_NET_POLL_CONTROLLER
364 static void i596_poll_controller(struct net_device *dev);
365 #endif
366 
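/*
 * Translate a CPU pointer into the shared i596_dma block into the bus
 * address the chip must be given for it: same offset, different base.
 */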
367 static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
368 {
369 	return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
370 }
371 
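/*
 * When the including board file defines NONCOHERENT_DMA, shared descriptors
 * have to be synced by hand: dma_sync_dev() after the CPU writes a
 * descriptor the chip is about to read, dma_sync_cpu() before the CPU reads
 * back something the chip may have written.  On coherent hardware both
 * helpers compile away to nothing.
 */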
372 #ifdef NONCOHERENT_DMA
373 static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
374 		size_t len)
375 {
376 	dma_sync_single_for_device(ndev->dev.parent,
377 			virt_to_dma(netdev_priv(ndev), addr), len,
378 			DMA_BIDIRECTIONAL);
379 }
380 
381 static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
382 		size_t len)
383 {
384 	dma_sync_single_for_cpu(ndev->dev.parent,
385 			virt_to_dma(netdev_priv(ndev), addr), len,
386 			DMA_BIDIRECTIONAL);
387 }
388 #else
389 static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
390 		size_t len)
391 {
392 }
393 static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
394 		size_t len)
395 {
396 }
397 #endif /* NONCOHERENT_DMA */
398 
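/* Poll (10us per step) until the chip clears iscp.stat, which it does once
 * it has read the ISCP during initialisation.
 */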
399 static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
400 {
401 	dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
402 	while (--delcnt && dma->iscp.stat) {
403 		udelay(10);
404 		dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
405 	}
406 	if (!delcnt) {
407 		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
408 		     dev->name, str, SWAP16(dma->iscp.stat));
409 		return -1;
410 	} else
411 		return 0;
412 }
413 
414 
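/* Poll until the chip has accepted the previous SCB command; it clears
 * scb.command once the command is latched, so this must precede writing a
 * new command and ringing the channel-attention doorbell.
 */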
415 static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
416 {
417 	dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
418 	while (--delcnt && dma->scb.command) {
419 		udelay(10);
420 		dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
421 	}
422 	if (!delcnt) {
423 		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
424 		       dev->name, str,
425 		       SWAP16(dma->scb.status),
426 		       SWAP16(dma->scb.command));
427 		return -1;
428 	} else
429 		return 0;
430 }
431 
432 
433 static void i596_display_data(struct net_device *dev)
434 {
435 	struct i596_private *lp = netdev_priv(dev);
436 	struct i596_dma *dma = lp->dma;
437 	struct i596_cmd *cmd;
438 	struct i596_rfd *rfd;
439 	struct i596_rbd *rbd;
440 
441 	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
442 	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
443 	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
444 	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
445 	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
446 		" .cmd = %08x, .rfd = %08x\n",
447 	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
448 		SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
449 	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
450 	       " over %x, rcvdt %x, short %x\n",
451 	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
452 	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
453 	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
454 	cmd = lp->cmd_head;
455 	while (cmd != NULL) {
456 		printk(KERN_DEBUG
457 		       "cmd at %p, .status = %04x, .command = %04x,"
458 		       " .b_next = %08x\n",
459 		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
460 		       SWAP32(cmd->b_next));
461 		cmd = cmd->v_next;
462 	}
463 	rfd = lp->rfd_head;
464 	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
465 	do {
466 		printk(KERN_DEBUG
467 		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
468 		       " count %04x\n",
469 		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
470 		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
471 		       SWAP16(rfd->count));
472 		rfd = rfd->v_next;
473 	} while (rfd != lp->rfd_head);
474 	rbd = lp->rbd_head;
475 	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
476 	do {
477 		printk(KERN_DEBUG
478 		       "   %p .count %04x, b_next %08x, b_data %08x,"
479 		       " size %04x\n",
480 			rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
481 		       SWAP32(rbd->b_data), SWAP16(rbd->size));
482 		rbd = rbd->v_next;
483 	} while (rbd != lp->rbd_head);
484 	dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
485 }
486 
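/* Build the receive rings: a circular list of buffer descriptors, each
 * owning a freshly mapped skb, and a circular list of frame descriptors in
 * flexible mode where only the head RFD points at the RBD ring and the
 * tail RFD carries CMD_EOL.
 */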
487 static inline int init_rx_bufs(struct net_device *dev)
488 {
489 	struct i596_private *lp = netdev_priv(dev);
490 	struct i596_dma *dma = lp->dma;
491 	int i;
492 	struct i596_rfd *rfd;
493 	struct i596_rbd *rbd;
494 
495 	/* First build the Receive Buffer Descriptor List */
496 
497 	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
498 		dma_addr_t dma_addr;
499 		struct sk_buff *skb;
500 
501 		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
502 		if (skb == NULL)
503 			return -1;
504 		dma_addr = dma_map_single(dev->dev.parent, skb->data,
505 					  PKT_BUF_SZ, DMA_FROM_DEVICE);
506 		rbd->v_next = rbd+1;
507 		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
508 		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
509 		rbd->skb = skb;
510 		rbd->v_data = skb->data;
511 		rbd->b_data = SWAP32(dma_addr);
512 		rbd->size = SWAP16(PKT_BUF_SZ);
513 	}
514 	lp->rbd_head = dma->rbds;
515 	rbd = dma->rbds + rx_ring_size - 1;
516 	rbd->v_next = dma->rbds;
517 	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
518 
519 	/* Now build the Receive Frame Descriptor List */
520 
521 	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
522 		rfd->rbd = I596_NULL;
523 		rfd->v_next = rfd+1;
524 		rfd->v_prev = rfd-1;
525 		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
526 		rfd->cmd = SWAP16(CMD_FLEX);
527 	}
528 	lp->rfd_head = dma->rfds;
529 	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
530 	rfd = dma->rfds;
531 	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
532 	rfd->v_prev = dma->rfds + rx_ring_size - 1;
533 	rfd = dma->rfds + rx_ring_size - 1;
534 	rfd->v_next = dma->rfds;
535 	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
536 	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
537 
538 	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
539 	return 0;
540 }
541 
542 static inline void remove_rx_bufs(struct net_device *dev)
543 {
544 	struct i596_private *lp = netdev_priv(dev);
545 	struct i596_rbd *rbd;
546 	int i;
547 
548 	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
549 		if (rbd->skb == NULL)
550 			break;
551 		dma_unmap_single(dev->dev.parent,
552 				 (dma_addr_t)SWAP32(rbd->b_data),
553 				 PKT_BUF_SZ, DMA_FROM_DEVICE);
554 		dev_kfree_skb(rbd->skb);
555 	}
556 }
557 
558 
559 static void rebuild_rx_bufs(struct net_device *dev)
560 {
561 	struct i596_private *lp = netdev_priv(dev);
562 	struct i596_dma *dma = lp->dma;
563 	int i;
564 
565 	/* Ensure rx frame/buffer descriptors are tidy */
566 
567 	for (i = 0; i < rx_ring_size; i++) {
568 		dma->rfds[i].rbd = I596_NULL;
569 		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
570 	}
571 	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
572 	lp->rfd_head = dma->rfds;
573 	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
574 	lp->rbd_head = dma->rbds;
575 	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
576 
577 	dma_sync_dev(dev, dma, sizeof(struct i596_dma));
578 }
579 
580 
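/* Full chip (re)initialisation: reset through the PORT interface, hand the
 * chip our SCP/ISCP/SCB, hook the IRQ, queue the Configure, IA-setup and
 * TDR commands, then start the receive unit.
 */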
581 static int init_i596_mem(struct net_device *dev)
582 {
583 	struct i596_private *lp = netdev_priv(dev);
584 	struct i596_dma *dma = lp->dma;
585 	unsigned long flags;
586 
587 	mpu_port(dev, PORT_RESET, 0);
588 	udelay(100);			/* Wait 100us - seems to help */
589 
590 	/* change the scp address */
591 
592 	lp->last_cmd = jiffies;
593 
594 	dma->scp.sysbus = SYSBUS;
595 	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
596 	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
597 	dma->iscp.stat = SWAP32(ISCP_BUSY);
598 	lp->cmd_backlog = 0;
599 
600 	lp->cmd_head = NULL;
601 	dma->scb.cmd = I596_NULL;
602 
603 	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
604 
605 	dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
606 	dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
607 	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
608 
609 	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
610 	ca(dev);
611 	if (wait_istat(dev, dma, 1000, "initialization timed out"))
612 		goto failed;
613 	DEB(DEB_INIT, printk(KERN_DEBUG
614 			     "%s: i82596 initialization successful\n",
615 			     dev->name));
616 
617 	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
618 		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
619 		goto failed;
620 	}
621 
622 	/* Ensure rx frame/buffer descriptors are tidy */
623 	rebuild_rx_bufs(dev);
624 
625 	dma->scb.command = 0;
626 	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
627 
628 	DEB(DEB_INIT, printk(KERN_DEBUG
629 			     "%s: queuing CmdConfigure\n", dev->name));
630 	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
631 	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
632 	dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
633 	i596_add_cmd(dev, &dma->cf_cmd.cmd);
634 
635 	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
636 	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
637 	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
638 	dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
639 	i596_add_cmd(dev, &dma->sa_cmd.cmd);
640 
641 	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
642 	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
643 	dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
644 	i596_add_cmd(dev, &dma->tdr_cmd.cmd);
645 
646 	spin_lock_irqsave (&lp->lock, flags);
647 
648 	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
649 		spin_unlock_irqrestore (&lp->lock, flags);
650 		goto failed_free_irq;
651 	}
652 	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
653 	dma->scb.command = SWAP16(RX_START);
654 	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
655 	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
656 
657 	ca(dev);
658 
659 	spin_unlock_irqrestore (&lp->lock, flags);
660 	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
661 		goto failed_free_irq;
662 	DEB(DEB_INIT, printk(KERN_DEBUG
663 			     "%s: Receive unit started OK\n", dev->name));
664 	return 0;
665 
666 failed_free_irq:
667 	free_irq(dev->irq, dev);
668 failed:
669 	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
670 	mpu_port(dev, PORT_RESET, 0);
671 	return -1;
672 }
673 
674 
675 static inline int i596_rx(struct net_device *dev)
676 {
677 	struct i596_private *lp = netdev_priv(dev);
678 	struct i596_rfd *rfd;
679 	struct i596_rbd *rbd;
680 	int frames = 0;
681 
682 	DEB(DEB_RXFRAME, printk(KERN_DEBUG
683 				"i596_rx(), rfd_head %p, rbd_head %p\n",
684 				lp->rfd_head, lp->rbd_head));
685 
686 
687 	rfd = lp->rfd_head;		/* Ref next frame to check */
688 
689 	dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
690 	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop over completed frames */
691 		if (rfd->rbd == I596_NULL)
692 			rbd = NULL;
693 		else if (rfd->rbd == lp->rbd_head->b_addr) {
694 			rbd = lp->rbd_head;
695 			dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
696 		} else {
697 			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
698 			/* XXX Now what? */
699 			rbd = NULL;
700 		}
701 		DEB(DEB_RXFRAME, printk(KERN_DEBUG
702 				      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
703 				      rfd, rfd->rbd, rfd->stat));
704 
705 		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
706 			/* a good frame */
707 			int pkt_len = SWAP16(rbd->count) & 0x3fff;
708 			struct sk_buff *skb = rbd->skb;
709 			int rx_in_place = 0;
710 
711 			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
712 			frames++;
713 
714 			/* Check if the packet is long enough to just accept
715 			 * without copying to a properly sized skbuff.
716 			 */
717 
718 			if (pkt_len > rx_copybreak) {
719 				struct sk_buff *newskb;
720 				dma_addr_t dma_addr;
721 
722 				dma_unmap_single(dev->dev.parent,
723 						 (dma_addr_t)SWAP32(rbd->b_data),
724 						 PKT_BUF_SZ, DMA_FROM_DEVICE);
725 				/* Get fresh skbuff to replace filled one. */
726 				newskb = netdev_alloc_skb_ip_align(dev,
727 								   PKT_BUF_SZ);
728 				if (newskb == NULL) {
729 					skb = NULL;	/* drop pkt */
730 					goto memory_squeeze;
731 				}
732 
733 				/* Pass up the skb already on the Rx ring. */
734 				skb_put(skb, pkt_len);
735 				rx_in_place = 1;
736 				rbd->skb = newskb;
737 				dma_addr = dma_map_single(dev->dev.parent,
738 							  newskb->data,
739 							  PKT_BUF_SZ,
740 							  DMA_FROM_DEVICE);
741 				rbd->v_data = newskb->data;
742 				rbd->b_data = SWAP32(dma_addr);
743 				dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
744 			} else {
745 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
746 			}
747 memory_squeeze:
748 			if (skb == NULL) {
749 				/* XXX tulip.c can defer packets here!! */
750 				dev->stats.rx_dropped++;
751 			} else {
752 				if (!rx_in_place) {
753 					/* Sync the buffer for the CPU before copying the frame out of it */
754 					dma_sync_single_for_cpu(dev->dev.parent,
755 								(dma_addr_t)SWAP32(rbd->b_data),
756 								PKT_BUF_SZ, DMA_FROM_DEVICE);
757 					skb_put_data(skb, rbd->v_data,
758 						     pkt_len);
759 					dma_sync_single_for_device(dev->dev.parent,
760 								   (dma_addr_t)SWAP32(rbd->b_data),
761 								   PKT_BUF_SZ, DMA_FROM_DEVICE);
762 				}
763 				skb->len = pkt_len;
764 				skb->protocol = eth_type_trans(skb, dev);
765 				netif_rx(skb);
766 				dev->stats.rx_packets++;
767 				dev->stats.rx_bytes += pkt_len;
768 			}
769 		} else {
770 			DEB(DEB_ERRORS, printk(KERN_DEBUG
771 					       "%s: Error, rfd.stat = 0x%04x\n",
772 					       dev->name, rfd->stat));
773 			dev->stats.rx_errors++;
774 			if (rfd->stat & SWAP16(0x0100))
775 				dev->stats.collisions++;
776 			if (rfd->stat & SWAP16(0x8000))
777 				dev->stats.rx_length_errors++;
778 			if (rfd->stat & SWAP16(0x0001))
779 				dev->stats.rx_over_errors++;
780 			if (rfd->stat & SWAP16(0x0002))
781 				dev->stats.rx_fifo_errors++;
782 			if (rfd->stat & SWAP16(0x0004))
783 				dev->stats.rx_frame_errors++;
784 			if (rfd->stat & SWAP16(0x0008))
785 				dev->stats.rx_crc_errors++;
786 			if (rfd->stat & SWAP16(0x0010))
787 				dev->stats.rx_length_errors++;
788 		}
789 
790 		/* Clear the buffer descriptor count and EOF + F flags */
791 
792 		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
793 			rbd->count = 0;
794 			lp->rbd_head = rbd->v_next;
795 			dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
796 		}
797 
798 		/* Tidy the frame descriptor, marking it as end of list */
799 
800 		rfd->rbd = I596_NULL;
801 		rfd->stat = 0;
802 		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
803 		rfd->count = 0;
804 
805 		/* Update record of next frame descriptor to process */
806 
807 		lp->dma->scb.rfd = rfd->b_next;
808 		lp->rfd_head = rfd->v_next;
809 		dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));
810 
811 		/* Remove end-of-list from old end descriptor */
812 
813 		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
814 		dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
815 		rfd = lp->rfd_head;
816 		dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
817 	}
818 
819 	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
820 
821 	return 0;
822 }
823 
824 
825 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
826 {
827 	struct i596_cmd *ptr;
828 
829 	while (lp->cmd_head != NULL) {
830 		ptr = lp->cmd_head;
831 		lp->cmd_head = ptr->v_next;
832 		lp->cmd_backlog--;
833 
834 		switch (SWAP16(ptr->command) & 0x7) {
835 		case CmdTx:
836 			{
837 				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
838 				struct sk_buff *skb = tx_cmd->skb;
839 				dma_unmap_single(dev->dev.parent,
840 						 tx_cmd->dma_addr,
841 						 skb->len, DMA_TO_DEVICE);
842 
843 				dev_kfree_skb(skb);
844 
845 				dev->stats.tx_errors++;
846 				dev->stats.tx_aborted_errors++;
847 
848 				ptr->v_next = NULL;
849 				ptr->b_next = I596_NULL;
850 				tx_cmd->cmd.command = 0;  /* Mark as free */
851 				break;
852 			}
853 		default:
854 			ptr->v_next = NULL;
855 			ptr->b_next = I596_NULL;
856 		}
857 		dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
858 	}
859 
860 	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
861 	lp->dma->scb.cmd = I596_NULL;
862 	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
863 }
864 
865 
866 static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
867 {
868 	unsigned long flags;
869 
870 	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
871 
872 	spin_lock_irqsave (&lp->lock, flags);
873 
874 	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
875 
876 	netif_stop_queue(dev);
877 
878 	/* FIXME: this command might cause an lpmc */
879 	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
880 	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
881 	ca(dev);
882 
883 	/* wait for shutdown */
884 	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
885 	spin_unlock_irqrestore (&lp->lock, flags);
886 
887 	i596_cleanup_cmd(dev, lp);
888 	i596_rx(dev);
889 
890 	netif_start_queue(dev);
891 	init_i596_mem(dev);
892 }
893 
894 
895 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
896 {
897 	struct i596_private *lp = netdev_priv(dev);
898 	struct i596_dma *dma = lp->dma;
899 	unsigned long flags;
900 
901 	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
902 			       lp->cmd_head));
903 
904 	cmd->status = 0;
905 	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
906 	cmd->v_next = NULL;
907 	cmd->b_next = I596_NULL;
908 	dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));
909 
910 	spin_lock_irqsave (&lp->lock, flags);
911 
912 	if (lp->cmd_head != NULL) {
913 		lp->cmd_tail->v_next = cmd;
914 		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
915 		dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
916 	} else {
917 		lp->cmd_head = cmd;
918 		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
919 		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
920 		dma->scb.command = SWAP16(CUC_START);
921 		dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
922 		ca(dev);
923 	}
924 	lp->cmd_tail = cmd;
925 	lp->cmd_backlog++;
926 
927 	spin_unlock_irqrestore (&lp->lock, flags);
928 
929 	if (lp->cmd_backlog > max_cmd_backlog) {
930 		unsigned long tickssofar = jiffies - lp->last_cmd;
931 
932 		if (tickssofar < ticks_limit)
933 			return;
934 
935 		printk(KERN_ERR
936 		       "%s: command unit timed out, status resetting.\n",
937 		       dev->name);
938 #if 1
939 		i596_reset(dev, lp);
940 #endif
941 	}
942 }
943 
944 static int i596_open(struct net_device *dev)
945 {
946 	DEB(DEB_OPEN, printk(KERN_DEBUG
947 			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));
948 
949 	if (init_rx_bufs(dev)) {
950 		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
951 		return -EAGAIN;
952 	}
953 	if (init_i596_mem(dev)) {
954 		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
955 		goto out_remove_rx_bufs;
956 	}
957 	netif_start_queue(dev);
958 
959 	return 0;
960 
961 out_remove_rx_bufs:
962 	remove_rx_bufs(dev);
963 	return -EAGAIN;
964 }
965 
966 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
967 {
968 	struct i596_private *lp = netdev_priv(dev);
969 
970 	/* Transmitter timeout, serious problems. */
971 	DEB(DEB_ERRORS, printk(KERN_DEBUG
972 			       "%s: transmit timed out, status resetting.\n",
973 			       dev->name));
974 
975 	dev->stats.tx_errors++;
976 
977 	/* Try to restart the adaptor */
978 	if (lp->last_restart == dev->stats.tx_packets) {
979 		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
980 		/* Shutdown and restart */
981 		i596_reset (dev, lp);
982 	} else {
983 		/* Issue a channel attention signal */
984 		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
985 		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
986 		dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
987 		ca (dev);
988 		lp->last_restart = dev->stats.tx_packets;
989 	}
990 
991 	netif_trans_update(dev); /* prevent tx timeout */
992 	netif_wake_queue (dev);
993 }
994 
995 
996 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
997 {
998 	struct i596_private *lp = netdev_priv(dev);
999 	struct tx_cmd *tx_cmd;
1000 	struct i596_tbd *tbd;
1001 	short length = skb->len;
1002 
1003 	DEB(DEB_STARTTX, printk(KERN_DEBUG
1004 				"%s: i596_start_xmit(%x,%p) called\n",
1005 				dev->name, skb->len, skb->data));
1006 
1007 	if (length < ETH_ZLEN) {
1008 		if (skb_padto(skb, ETH_ZLEN))
1009 			return NETDEV_TX_OK;
1010 		length = ETH_ZLEN;
1011 	}
1012 
1013 	netif_stop_queue(dev);
1014 
1015 	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
1016 	tbd = lp->dma->tbds + lp->next_tx_cmd;
1017 
1018 	if (tx_cmd->cmd.command) {
1019 		DEB(DEB_ERRORS, printk(KERN_DEBUG
1020 				       "%s: xmit ring full, dropping packet.\n",
1021 				       dev->name));
1022 		dev->stats.tx_dropped++;
1023 
1024 		dev_kfree_skb_any(skb);
1025 	} else {
1026 		if (++lp->next_tx_cmd == TX_RING_SIZE)
1027 			lp->next_tx_cmd = 0;
1028 		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
1029 		tbd->next = I596_NULL;
1030 
1031 		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
1032 		tx_cmd->skb = skb;
1033 
1034 		tx_cmd->pad = 0;
1035 		tx_cmd->size = 0;
1036 		tbd->pad = 0;
1037 		tbd->size = SWAP16(EOF | length);
1038 
1039 		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
1040 						  skb->len, DMA_TO_DEVICE);
1041 		tbd->data = SWAP32(tx_cmd->dma_addr);
1042 
1043 		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
1044 		dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
1045 		dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
1046 		i596_add_cmd(dev, &tx_cmd->cmd);
1047 
1048 		dev->stats.tx_packets++;
1049 		dev->stats.tx_bytes += length;
1050 	}
1051 
1052 	netif_start_queue(dev);
1053 
1054 	return NETDEV_TX_OK;
1055 }
1056 
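/* Dump an ethernet header: buffer address, source MAC --> destination MAC,
 * the two ethertype bytes and a caller-supplied label.
 */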
1057 static void print_eth(unsigned char *add, char *str)
1058 {
1059 	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1060 	       add, add + 6, add, add[12], add[13], str);
1061 }
1062 static const struct net_device_ops i596_netdev_ops = {
1063 	.ndo_open		= i596_open,
1064 	.ndo_stop		= i596_close,
1065 	.ndo_start_xmit		= i596_start_xmit,
1066 	.ndo_set_rx_mode	= set_multicast_list,
1067 	.ndo_tx_timeout		= i596_tx_timeout,
1068 	.ndo_validate_addr	= eth_validate_addr,
1069 	.ndo_set_mac_address	= eth_mac_addr,
1070 #ifdef CONFIG_NET_POLL_CONTROLLER
1071 	.ndo_poll_controller	= i596_poll_controller,
1072 #endif
1073 };
1074 
1075 static int i82596_probe(struct net_device *dev)
1076 {
1077 	struct i596_private *lp = netdev_priv(dev);
1078 	int ret;
1079 
1080 	/* This lot is to ensure things have been cache line aligned. */
1081 	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
1082 	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
1083 	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
1084 	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
1085 #ifndef __LP64__
1086 	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
1087 #endif
1088 
1089 	if (!dev->base_addr || !dev->irq)
1090 		return -ENODEV;
1091 
1092 	dev->netdev_ops = &i596_netdev_ops;
1093 	dev->watchdog_timeo = TX_TIMEOUT;
1094 
1095 	memset(lp->dma, 0, sizeof(struct i596_dma));
1096 	lp->dma->scb.command = 0;
1097 	lp->dma->scb.cmd = I596_NULL;
1098 	lp->dma->scb.rfd = I596_NULL;
1099 	spin_lock_init(&lp->lock);
1100 
1101 	dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
1102 
1103 	ret = register_netdev(dev);
1104 	if (ret)
1105 		return ret;
1106 
1107 	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1108 			      dev->name, dev->base_addr, dev->dev_addr,
1109 			      dev->irq));
1110 	DEB(DEB_INIT, printk(KERN_INFO
1111 			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1112 			     dev->name, lp->dma, (int)sizeof(struct i596_dma),
1113 			     &lp->dma->scb));
1114 
1115 	return 0;
1116 }
1117 
1118 #ifdef CONFIG_NET_POLL_CONTROLLER
1119 static void i596_poll_controller(struct net_device *dev)
1120 {
1121 	disable_irq(dev->irq);
1122 	i596_interrupt(dev->irq, dev);
1123 	enable_irq(dev->irq);
1124 }
1125 #endif
1126 
1127 static irqreturn_t i596_interrupt(int irq, void *dev_id)
1128 {
1129 	struct net_device *dev = dev_id;
1130 	struct i596_private *lp;
1131 	struct i596_dma *dma;
1132 	unsigned short status, ack_cmd = 0;
1133 
1134 	lp = netdev_priv(dev);
1135 	dma = lp->dma;
1136 
1137 	spin_lock (&lp->lock);
1138 
1139 	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1140 	status = SWAP16(dma->scb.status);
1141 
1142 	DEB(DEB_INTS, printk(KERN_DEBUG
1143 			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1144 			dev->name, dev->irq, status));
1145 
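	/* The four high bits of the SCB status are the events to acknowledge:
	 * 0x8000 CX (a command with its interrupt bit set completed),
	 * 0x4000 FR (frame received), 0x2000 CNA (command unit went
	 * not-active), 0x1000 RNR (receive unit left the ready state).
	 * They are acked by writing them back into scb.command at the end.
	 */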
1146 	ack_cmd = status & 0xf000;
1147 
1148 	if (!ack_cmd) {
1149 		DEB(DEB_ERRORS, printk(KERN_DEBUG
1150 				       "%s: interrupt with no events\n",
1151 				       dev->name));
1152 		spin_unlock (&lp->lock);
1153 		return IRQ_NONE;
1154 	}
1155 
1156 	if ((status & 0x8000) || (status & 0x2000)) {
1157 		struct i596_cmd *ptr;
1158 
1159 		if ((status & 0x8000))
1160 			DEB(DEB_INTS,
1161 			    printk(KERN_DEBUG
1162 				   "%s: i596 interrupt completed command.\n",
1163 				   dev->name));
1164 		if ((status & 0x2000))
1165 			DEB(DEB_INTS,
1166 			    printk(KERN_DEBUG
1167 				   "%s: i596 interrupt command unit inactive %x.\n",
1168 				   dev->name, status & 0x0700));
1169 
1170 		while (lp->cmd_head != NULL) {
1171 			dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
1172 			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
1173 				break;
1174 
1175 			ptr = lp->cmd_head;
1176 
1177 			DEB(DEB_STATUS,
1178 			    printk(KERN_DEBUG
1179 				   "cmd_head->status = %04x, ->command = %04x\n",
1180 				   SWAP16(lp->cmd_head->status),
1181 				   SWAP16(lp->cmd_head->command)));
1182 			lp->cmd_head = ptr->v_next;
1183 			lp->cmd_backlog--;
1184 
1185 			switch (SWAP16(ptr->command) & 0x7) {
1186 			case CmdTx:
1187 			    {
1188 				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1189 				struct sk_buff *skb = tx_cmd->skb;
1190 
1191 				if (ptr->status & SWAP16(STAT_OK)) {
1192 					DEB(DEB_TXADDR,
1193 					    print_eth(skb->data, "tx-done"));
1194 				} else {
1195 					dev->stats.tx_errors++;
1196 					if (ptr->status & SWAP16(0x0020))
1197 						dev->stats.collisions++;
1198 					if (!(ptr->status & SWAP16(0x0040)))
1199 						dev->stats.tx_heartbeat_errors++;
1200 					if (ptr->status & SWAP16(0x0400))
1201 						dev->stats.tx_carrier_errors++;
1202 					if (ptr->status & SWAP16(0x0800))
1203 						dev->stats.collisions++;
1204 					if (ptr->status & SWAP16(0x1000))
1205 						dev->stats.tx_aborted_errors++;
1206 				}
1207 				dma_unmap_single(dev->dev.parent,
1208 						 tx_cmd->dma_addr,
1209 						 skb->len, DMA_TO_DEVICE);
1210 				dev_consume_skb_irq(skb);
1211 
1212 				tx_cmd->cmd.command = 0; /* Mark free */
1213 				break;
1214 			    }
1215 			case CmdTDR:
1216 			    {
1217 				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
1218 
1219 				if (status & 0x8000) {
1220 					DEB(DEB_ANY,
1221 					    printk(KERN_DEBUG "%s: link ok.\n",
1222 						   dev->name));
1223 				} else {
1224 					if (status & 0x4000)
1225 						printk(KERN_ERR
1226 						       "%s: Transceiver problem.\n",
1227 						       dev->name);
1228 					if (status & 0x2000)
1229 						printk(KERN_ERR
1230 						       "%s: Termination problem.\n",
1231 						       dev->name);
1232 					if (status & 0x1000)
1233 						printk(KERN_ERR
1234 						       "%s: Short circuit.\n",
1235 						       dev->name);
1236 
1237 					DEB(DEB_TDR,
1238 					    printk(KERN_DEBUG "%s: Time %d.\n",
1239 						   dev->name, status & 0x07ff));
1240 				}
1241 				break;
1242 			    }
1243 			case CmdConfigure:
1244 				/*
1245 				 * Zap command so set_multicast_list() knows
1246 				 * it is free
1247 				 */
1248 				ptr->command = 0;
1249 				break;
1250 			}
1251 			ptr->v_next = NULL;
1252 			ptr->b_next = I596_NULL;
1253 			dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
1254 			lp->last_cmd = jiffies;
1255 		}
1256 
1257 		/* This mess is arranging that only the last of any outstanding
1258 		 * commands has the interrupt bit set.  Should probably really
1259 		 * only add to the cmd queue when the CU is stopped.
1260 		 */
1261 		ptr = lp->cmd_head;
1262 		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1263 			struct i596_cmd *prev = ptr;
1264 
1265 			ptr->command &= SWAP16(0x1fff);
1266 			ptr = ptr->v_next;
1267 			dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
1268 		}
1269 
1270 		if (lp->cmd_head != NULL)
1271 			ack_cmd |= CUC_START;
1272 		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
1273 		dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1274 	}
1275 	if ((status & 0x1000) || (status & 0x4000)) {
1276 		if ((status & 0x4000))
1277 			DEB(DEB_INTS,
1278 			    printk(KERN_DEBUG
1279 				   "%s: i596 interrupt received a frame.\n",
1280 				   dev->name));
1281 		i596_rx(dev);
1282 		/* Only RX_START if stopped - RGH 07-07-96 */
1283 		if (status & 0x1000) {
1284 			if (netif_running(dev)) {
1285 				DEB(DEB_ERRORS,
1286 				    printk(KERN_DEBUG
1287 					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
1288 					   dev->name, status));
1289 				ack_cmd |= RX_START;
1290 				dev->stats.rx_errors++;
1291 				dev->stats.rx_fifo_errors++;
1292 				rebuild_rx_bufs(dev);
1293 			}
1294 		}
1295 	}
1296 	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1297 	dma->scb.command = SWAP16(ack_cmd);
1298 	dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1299 
1300 	/* DANGER: I suspect that some kind of interrupt
1301 	 * acknowledgement aside from acking the 82596 might be needed
1302 	 * here...  but it's running acceptably without */
1303 
1304 	ca(dev);
1305 
1306 	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
1307 	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1308 
1309 	spin_unlock (&lp->lock);
1310 	return IRQ_HANDLED;
1311 }
1312 
1313 static int i596_close(struct net_device *dev)
1314 {
1315 	struct i596_private *lp = netdev_priv(dev);
1316 	unsigned long flags;
1317 
1318 	netif_stop_queue(dev);
1319 
1320 	DEB(DEB_INIT,
1321 	    printk(KERN_DEBUG
1322 		   "%s: Shutting down ethercard, status was %4.4x.\n",
1323 		   dev->name, SWAP16(lp->dma->scb.status)));
1324 
1325 	spin_lock_irqsave(&lp->lock, flags);
1326 
1327 	wait_cmd(dev, lp->dma, 100, "close1 timed out");
1328 	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
1329 	dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
1330 
1331 	ca(dev);
1332 
1333 	wait_cmd(dev, lp->dma, 100, "close2 timed out");
1334 	spin_unlock_irqrestore(&lp->lock, flags);
1335 	DEB(DEB_STRUCT, i596_display_data(dev));
1336 	i596_cleanup_cmd(dev, lp);
1337 
1338 	free_irq(dev->irq, dev);
1339 	remove_rx_bufs(dev);
1340 
1341 	return 0;
1342 }
1343 
1344 /*
1345  *    Set or clear the multicast filter for this adaptor.
1346  */
1347 
1348 static void set_multicast_list(struct net_device *dev)
1349 {
1350 	struct i596_private *lp = netdev_priv(dev);
1351 	struct i596_dma *dma = lp->dma;
1352 	int config = 0, cnt;
1353 
1354 	DEB(DEB_MULTI,
1355 	    printk(KERN_DEBUG
1356 		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1357 		   dev->name, netdev_mc_count(dev),
1358 		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
1359 		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1360 
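	/* Two bits of the Configure parameter block matter here: byte 8 bit 0
	 * is promiscuous mode, and byte 11 bit 0x20, when set, keeps ordinary
	 * multicast filtering - it is cleared for IFF_ALLMULTI so every
	 * multicast frame is accepted.
	 */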
1361 	if ((dev->flags & IFF_PROMISC) &&
1362 	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
1363 		dma->cf_cmd.i596_config[8] |= 0x01;
1364 		config = 1;
1365 	}
1366 	if (!(dev->flags & IFF_PROMISC) &&
1367 	    (dma->cf_cmd.i596_config[8] & 0x01)) {
1368 		dma->cf_cmd.i596_config[8] &= ~0x01;
1369 		config = 1;
1370 	}
1371 	if ((dev->flags & IFF_ALLMULTI) &&
1372 	    (dma->cf_cmd.i596_config[11] & 0x20)) {
1373 		dma->cf_cmd.i596_config[11] &= ~0x20;
1374 		config = 1;
1375 	}
1376 	if (!(dev->flags & IFF_ALLMULTI) &&
1377 	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
1378 		dma->cf_cmd.i596_config[11] |= 0x20;
1379 		config = 1;
1380 	}
1381 	if (config) {
1382 		if (dma->cf_cmd.cmd.command)
1383 			printk(KERN_INFO
1384 			       "%s: config change request already queued\n",
1385 			       dev->name);
1386 		else {
1387 			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
1388 			dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
1389 			i596_add_cmd(dev, &dma->cf_cmd.cmd);
1390 		}
1391 	}
1392 
1393 	cnt = netdev_mc_count(dev);
1394 	if (cnt > MAX_MC_CNT) {
1395 		cnt = MAX_MC_CNT;
1396 		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
1397 			dev->name, cnt);
1398 	}
1399 
1400 	if (!netdev_mc_empty(dev)) {
1401 		struct netdev_hw_addr *ha;
1402 		unsigned char *cp;
1403 		struct mc_cmd *cmd;
1404 
1405 		cmd = &dma->mc_cmd;
1406 		cmd->cmd.command = SWAP16(CmdMulticastList);
1407 		cmd->mc_cnt = SWAP16(cnt * 6);	/* count only the addresses actually copied below */
1408 		cp = cmd->mc_addrs;
1409 		netdev_for_each_mc_addr(ha, dev) {
1410 			if (!cnt--)
1411 				break;
1412 			memcpy(cp, ha->addr, ETH_ALEN);
1413 			if (i596_debug > 1)
1414 				DEB(DEB_MULTI,
1415 				    printk(KERN_DEBUG
1416 					   "%s: Adding address %pM\n",
1417 					   dev->name, cp));
1418 			cp += ETH_ALEN;
1419 		}
1420 		dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1421 		i596_add_cmd(dev, &cmd->cmd);
1422 	}
1423 }
1424