/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800
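
/*
 * Each ring slot is a 2 KB (0x800) buffer: one Tx buffer and eight Rx
 * buffers. The PSC Ethernet DMA channels each provide two register
 * sets (SET0 at offset 0x00, SET1 at 0x10); the driver ping-pongs
 * between them, hence the "slot ^= 0x10" operations below.
 */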

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

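/*
 * Driver state bookkeeping: rx_slot is the PSC register set currently
 * being serviced and tx_slot the set that the next transmit will be
 * loaded into, while tx_sloti is the set whose transmit completion is
 * expected next (each is 0x00 or 0x10). tx_count is the number of free
 * Tx buffers and rx_tail the next Rx ring index to be processed.
 */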
struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

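	/*
	 * 'set' selects PSC register set 0 (offset 0x00) or set 1 (0x10).
	 * The command words used here and elsewhere in this driver
	 * (0x0100, 0x9800, ...) are not documented; presumably 0x0100
	 * halts the channel and 0x9800 (re)arms it.
	 */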
	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

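	/*
	 * Both PSC register sets are loaded with the same ring base; the
	 * driver switches between them (rx_slot ^= 0x10) as each one is
	 * exhausted in mace_dma_intr().
	 */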
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

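	/*
	 * Unlike the receive side, no buffer address is programmed here;
	 * mace_xmit_start() loads the Tx address and length per packet.
	 */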
	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (the AV Macintoshes).
 */

static int __devinit mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	int err;

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar Apple brain damage,
	 * the bytes are spaced 16 bytes apart and their bits are
	 * reversed.
	 */

	addr = (void *)MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops		= &mace_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;

	printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
	       dev->name, dev->dev_addr);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;	/* reading the interrupt register clears any pending interrupts */

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx	 */
	mb->imr = 0xFF;		/* disable all irqs	 */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

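	/*
	 * The queue is woken again from mace_interrupt() (or from
	 * mace_tx_timeout()) once tx_count indicates a free buffer.
	 */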
	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;	/* use the other PSC register set for the next packet */

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce; resetting *may* help.
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			dev->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		/* the 12-bit receive count has its high nibble in the status byte */
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;
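		/*
		 * The PSC length register apparently counts down as buffers
		 * are filled, so 'left' is the number of unused slots and
		 * 'head' is one past the last filled buffer.
		 */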

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
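		/*
		 * A transmit on the outstanding register set has completed.
		 * Presumably 0x0100 acknowledges that set; then flip to the
		 * other set and return the Tx buffer to the pool.
		 */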
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int __devexit mac_mace_device_remove (struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
	                  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = __devexit_p(mac_mace_device_remove),
	.driver	= {
		.name	= mac_mace_string,
		.owner	= THIS_MODULE,
	},
};

static int __init mac_mace_init_module(void)
{
	if (!MACH_IS_MAC)
		return -ENODEV;

	return platform_driver_register(&mac_mace_driver);
}

static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);