1 /*
 *	Driver for the Macintosh 68K onboard MACE controller with
 *	PSC-driven DMA. The MACE driver code is derived from mace.c.
 *	The Mac68k theory of operation is courtesy of the MacBSD wizards.
5  *
6  *	This program is free software; you can redistribute it and/or
7  *	modify it under the terms of the GNU General Public License
8  *	as published by the Free Software Foundation; either version
9  *	2 of the License, or (at your option) any later version.
10  *
11  *	Copyright (C) 1996 Paul Mackerras.
12  *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
13  *
14  *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
15  *
16  *	Copyright (C) 2007 Finn Thain
17  *
18  *	Converted to DMA API, converted to unified driver model,
19  *	sync'd some routines with mace.c and fixed various bugs.
20  */
21 
22 
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/delay.h>
28 #include <linux/string.h>
29 #include <linux/crc32.h>
30 #include <linux/bitrev.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/platform_device.h>
33 #include <linux/gfp.h>
34 #include <asm/io.h>
35 #include <asm/irq.h>
36 #include <asm/macintosh.h>
37 #include <asm/macints.h>
38 #include <asm/mac_psc.h>
39 #include <asm/page.h>
40 #include "mace.h"
41 
42 static char mac_mace_string[] = "macmace";
43 
44 #define N_TX_BUFF_ORDER	0
45 #define N_TX_RING	(1 << N_TX_BUFF_ORDER)
46 #define N_RX_BUFF_ORDER	3
47 #define N_RX_RING	(1 << N_RX_BUFF_ORDER)
48 
49 #define TX_TIMEOUT	HZ
50 
51 #define MACE_BUFF_SIZE	0x800
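
/*
 * Each of the N_TX_RING transmit and N_RX_RING receive buffers is
 * MACE_BUFF_SIZE (2 KB): room for a maximum-size Ethernet frame plus
 * the receive status header described by struct mace_frame below.
 */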
52 
/* Chip revision that needs a workaround when changing the hardware
   or multicast address */
54 #define BROKEN_ADDRCHG_REV	0x0941
55 
56 /* The MACE is simply wired down on a Mac68K box */
57 
58 #define MACE_BASE	(void *)(0x50F1C000)
59 #define MACE_PROM	(void *)(0x50F08001)
60 
61 struct mace_data {
62 	volatile struct mace *mace;
63 	unsigned char *tx_ring;
64 	dma_addr_t tx_ring_phys;
65 	unsigned char *rx_ring;
66 	dma_addr_t rx_ring_phys;
67 	int dma_intr;
68 	int rx_slot, rx_tail;
69 	int tx_slot, tx_sloti, tx_count;
70 	int chipid;
71 	struct device *device;
72 };
73 
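/*
 * Layout of a received frame as it appears in the receive buffer:
 * the four MACE receive status bytes (byte count, status, runt packet
 * count, collision count) interleaved with padding to a 16-byte
 * header, followed by the frame data itself.
 */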
74 struct mace_frame {
75 	u8	rcvcnt;
76 	u8	pad1;
77 	u8	rcvsts;
78 	u8	pad2;
79 	u8	rntpc;
80 	u8	pad3;
81 	u8	rcvcc;
82 	u8	pad4;
83 	u32	pad5;
84 	u32	pad6;
85 	u8	data[1];
	/* and the frame data continues... */
87 };
88 
89 #define PRIV_BYTES	sizeof(struct mace_data)
90 
91 static int mace_open(struct net_device *dev);
92 static int mace_close(struct net_device *dev);
93 static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
94 static void mace_set_multicast(struct net_device *dev);
95 static int mace_set_address(struct net_device *dev, void *addr);
96 static void mace_reset(struct net_device *dev);
97 static irqreturn_t mace_interrupt(int irq, void *dev_id);
98 static irqreturn_t mace_dma_intr(int irq, void *dev_id);
99 static void mace_tx_timeout(struct net_device *dev);
100 static void __mace_set_address(struct net_device *dev, void *addr);
101 
102 /*
103  * Load a receive DMA channel with a base address and ring length
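 * The 'set' argument selects PSC register set 0 (offset 0x00) or
 * register set 1 (offset 0x10).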
104  */
105 
106 static void mace_load_rxdma_base(struct net_device *dev, int set)
107 {
108 	struct mace_data *mp = netdev_priv(dev);
109 
110 	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
111 	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
112 	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
113 	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
114 	mp->rx_tail = 0;
115 }
116 
117 /*
118  * Reset the receive DMA subsystem
119  */
120 
121 static void mace_rxdma_reset(struct net_device *dev)
122 {
123 	struct mace_data *mp = netdev_priv(dev);
124 	volatile struct mace *mace = mp->mace;
125 	u8 maccc = mace->maccc;
126 
127 	mace->maccc = maccc & ~ENRCV;
128 
129 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
130 	mace_load_rxdma_base(dev, 0x00);
131 	psc_write_word(PSC_ENETRD_CTL, 0x0400);
132 
133 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
134 	mace_load_rxdma_base(dev, 0x10);
135 	psc_write_word(PSC_ENETRD_CTL, 0x0400);
136 
137 	mace->maccc = maccc;
138 	mp->rx_slot = 0;
139 
140 	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
141 	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
142 }
143 
144 /*
145  * Reset the transmit DMA subsystem
146  */
147 
148 static void mace_txdma_reset(struct net_device *dev)
149 {
150 	struct mace_data *mp = netdev_priv(dev);
151 	volatile struct mace *mace = mp->mace;
152 	u8 maccc;
153 
154 	psc_write_word(PSC_ENETWR_CTL, 0x8800);
155 
156 	maccc = mace->maccc;
157 	mace->maccc = maccc & ~ENXMT;
158 
159 	mp->tx_slot = mp->tx_sloti = 0;
160 	mp->tx_count = N_TX_RING;
161 
162 	psc_write_word(PSC_ENETWR_CTL, 0x0400);
163 	mace->maccc = maccc;
164 }
165 
166 /*
167  * Disable DMA
168  */
169 
170 static void mace_dma_off(struct net_device *dev)
171 {
172 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
173 	psc_write_word(PSC_ENETRD_CTL, 0x1000);
174 	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
175 	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);
176 
177 	psc_write_word(PSC_ENETWR_CTL, 0x8800);
178 	psc_write_word(PSC_ENETWR_CTL, 0x1000);
179 	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
180 	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
181 }
182 
183 static const struct net_device_ops mace_netdev_ops = {
184 	.ndo_open		= mace_open,
185 	.ndo_stop		= mace_close,
186 	.ndo_start_xmit		= mace_xmit_start,
187 	.ndo_tx_timeout		= mace_tx_timeout,
188 	.ndo_set_multicast_list	= mace_set_multicast,
189 	.ndo_set_mac_address	= mace_set_address,
190 	.ndo_change_mtu		= eth_change_mtu,
191 	.ndo_validate_addr	= eth_validate_addr,
192 };
193 
194 /*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintosh has a MACE (the AV Macintoshes do).
197  */
198 
199 static int __devinit mace_probe(struct platform_device *pdev)
200 {
201 	int j;
202 	struct mace_data *mp;
203 	unsigned char *addr;
204 	struct net_device *dev;
205 	unsigned char checksum = 0;
206 	static int found = 0;
207 	int err;
208 
209 	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
210 		return -ENODEV;
211 
212 	found = 1;	/* prevent 'finding' one on every device probe */
213 
214 	dev = alloc_etherdev(PRIV_BYTES);
215 	if (!dev)
216 		return -ENOMEM;
217 
218 	mp = netdev_priv(dev);
219 
220 	mp->device = &pdev->dev;
221 	SET_NETDEV_DEV(dev, &pdev->dev);
222 
223 	dev->base_addr = (u32)MACE_BASE;
224 	mp->mace = MACE_BASE;
225 
226 	dev->irq = IRQ_MAC_MACE;
227 	mp->dma_intr = IRQ_MAC_MACE_DMA;
228 
229 	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;
230 
231 	/*
	 * The PROM contains 8 bytes which XOR together to 0xFF as a
	 * checksum. In typical Apple fashion the bytes are stored 16
	 * bytes apart and with their bits reversed.
236 	 */
237 
238 	addr = (void *)MACE_PROM;
239 
240 	for (j = 0; j < 6; ++j) {
241 		u8 v = bitrev8(addr[j<<4]);
242 		checksum ^= v;
243 		dev->dev_addr[j] = v;
244 	}
245 	for (; j < 8; ++j) {
246 		checksum ^= bitrev8(addr[j<<4]);
247 	}
248 
249 	if (checksum != 0xFF) {
250 		free_netdev(dev);
251 		return -ENODEV;
252 	}
253 
254 	dev->netdev_ops		= &mace_netdev_ops;
255 	dev->watchdog_timeo	= TX_TIMEOUT;
256 
	/* needed by mac_mace_device_remove() */
	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
	       dev->name, dev->dev_addr);

	return 0;
266 }
267 
268 /*
269  * Reset the chip.
270  */
271 
272 static void mace_reset(struct net_device *dev)
273 {
274 	struct mace_data *mp = netdev_priv(dev);
275 	volatile struct mace *mb = mp->mace;
276 	int i;
277 
278 	/* soft-reset the chip */
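	/* SWRST reads back as set while the reset is still in progress,
	   so poll (with a short delay) until the chip comes out of it. */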
279 	i = 200;
280 	while (--i) {
281 		mb->biucc = SWRST;
282 		if (mb->biucc & SWRST) {
283 			udelay(10);
284 			continue;
285 		}
286 		break;
287 	}
288 	if (!i) {
289 		printk(KERN_ERR "macmace: cannot reset chip!\n");
290 		return;
291 	}
292 
293 	mb->maccc = 0;	/* turn off tx, rx */
294 	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;	/* reading ir clears any pending interrupt status */
296 
297 	mb->biucc = XMTSP_64;
298 	mb->utr = RTRD;
299 	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;
300 
301 	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
302 	mb->rcvfc = 0;
303 
304 	/* load up the hardware address */
305 	__mace_set_address(dev, dev->dev_addr);
306 
307 	/* clear the multicast filter */
308 	if (mp->chipid == BROKEN_ADDRCHG_REV)
309 		mb->iac = LOGADDR;
310 	else {
311 		mb->iac = ADDRCHG | LOGADDR;
312 		while ((mb->iac & ADDRCHG) != 0)
313 			;
314 	}
315 	for (i = 0; i < 8; ++i)
316 		mb->ladrf = 0;
317 
318 	/* done changing address */
319 	if (mp->chipid != BROKEN_ADDRCHG_REV)
320 		mb->iac = 0;
321 
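	/* The onboard transceiver is presumably wired to the MACE's AUI
	   port; the port selection is fixed here in any case. */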
322 	mb->plscc = PORTSEL_AUI;
323 }
324 
325 /*
 * Load the hardware address into the MACE controller.
327  */
328 
329 static void __mace_set_address(struct net_device *dev, void *addr)
330 {
331 	struct mace_data *mp = netdev_priv(dev);
332 	volatile struct mace *mb = mp->mace;
333 	unsigned char *p = addr;
334 	int i;
335 
336 	/* load up the hardware address */
337 	if (mp->chipid == BROKEN_ADDRCHG_REV)
338 		mb->iac = PHYADDR;
339 	else {
340 		mb->iac = ADDRCHG | PHYADDR;
341 		while ((mb->iac & ADDRCHG) != 0)
342 			;
343 	}
344 	for (i = 0; i < 6; ++i)
345 		mb->padr = dev->dev_addr[i] = p[i];
346 	if (mp->chipid != BROKEN_ADDRCHG_REV)
347 		mb->iac = 0;
348 }
349 
350 static int mace_set_address(struct net_device *dev, void *addr)
351 {
352 	struct mace_data *mp = netdev_priv(dev);
353 	volatile struct mace *mb = mp->mace;
354 	unsigned long flags;
355 	u8 maccc;
356 
357 	local_irq_save(flags);
358 
359 	maccc = mb->maccc;
360 
361 	__mace_set_address(dev, addr);
362 
363 	mb->maccc = maccc;
364 
365 	local_irq_restore(flags);
366 
367 	return 0;
368 }
369 
370 /*
371  * Open the Macintosh MACE. Most of this is playing with the DMA
372  * engine. The ethernet chip is quite friendly.
373  */
374 
375 static int mace_open(struct net_device *dev)
376 {
377 	struct mace_data *mp = netdev_priv(dev);
378 	volatile struct mace *mb = mp->mace;
379 
380 	/* reset the chip */
381 	mace_reset(dev);
382 
383 	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
384 		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
385 		return -EAGAIN;
386 	}
387 	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
388 		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
389 		free_irq(dev->irq, dev);
390 		return -EAGAIN;
391 	}
392 
393 	/* Allocate the DMA ring buffers */
394 
395 	mp->tx_ring = dma_alloc_coherent(mp->device,
396 			N_TX_RING * MACE_BUFF_SIZE,
397 			&mp->tx_ring_phys, GFP_KERNEL);
398 	if (mp->tx_ring == NULL) {
399 		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
400 		goto out1;
401 	}
402 
403 	mp->rx_ring = dma_alloc_coherent(mp->device,
404 			N_RX_RING * MACE_BUFF_SIZE,
405 			&mp->rx_ring_phys, GFP_KERNEL);
406 	if (mp->rx_ring == NULL) {
407 		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
408 		goto out2;
409 	}
410 
411 	mace_dma_off(dev);
412 
413 	/* Not sure what these do */
414 
415 	psc_write_word(PSC_ENETWR_CTL, 0x9000);
416 	psc_write_word(PSC_ENETRD_CTL, 0x9000);
417 	psc_write_word(PSC_ENETWR_CTL, 0x0400);
418 	psc_write_word(PSC_ENETRD_CTL, 0x0400);
419 
420 	mace_rxdma_reset(dev);
421 	mace_txdma_reset(dev);
422 
423 	/* turn it on! */
424 	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts
	   (a bit set in imr masks that interrupt source) */
	mb->imr = RCVINT;
427 	return 0;
428 
429 out2:
430 	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
431 	                  mp->tx_ring, mp->tx_ring_phys);
432 out1:
433 	free_irq(dev->irq, dev);
434 	free_irq(mp->dma_intr, dev);
435 	return -ENOMEM;
436 }
437 
438 /*
 * Shut down the MACE and its interrupt channel.
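 * The IRQs and DMA ring buffers requested in mace_open() are released
 * in mac_mace_device_remove(), not here.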
440  */
441 
442 static int mace_close(struct net_device *dev)
443 {
444 	struct mace_data *mp = netdev_priv(dev);
445 	volatile struct mace *mb = mp->mace;
446 
447 	mb->maccc = 0;		/* disable rx and tx	 */
448 	mb->imr = 0xFF;		/* disable all irqs	 */
449 	mace_dma_off(dev);	/* disable rx and tx dma */
450 
451 	return 0;
452 }
453 
454 /*
455  * Transmit a frame
456  */
457 
458 static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
459 {
460 	struct mace_data *mp = netdev_priv(dev);
461 	unsigned long flags;
462 
463 	/* Stop the queue since there's only the one buffer */
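	/* mace_interrupt() wakes it again once mace_dma_intr() has
	   handed the buffer back (tx_count > 0). */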
464 
465 	local_irq_save(flags);
466 	netif_stop_queue(dev);
467 	if (!mp->tx_count) {
468 		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
469 		local_irq_restore(flags);
470 		return NETDEV_TX_BUSY;
471 	}
472 	mp->tx_count--;
473 	local_irq_restore(flags);
474 
475 	dev->stats.tx_packets++;
476 	dev->stats.tx_bytes += skb->len;
477 
478 	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
479 	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);
480 
481 	/* load the Tx DMA and fire it off */
482 
483 	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32)  mp->tx_ring_phys);
484 	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
485 	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);
486 
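	/* Flip to the other PSC register set for the next frame. */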
487 	mp->tx_slot ^= 0x10;
488 
489 	dev_kfree_skb(skb);
490 
491 	return NETDEV_TX_OK;
492 }
493 
494 static void mace_set_multicast(struct net_device *dev)
495 {
496 	struct mace_data *mp = netdev_priv(dev);
497 	volatile struct mace *mb = mp->mace;
498 	int i;
499 	u32 crc;
500 	u8 maccc;
501 	unsigned long flags;
502 
503 	local_irq_save(flags);
504 	maccc = mb->maccc;
505 	mb->maccc &= ~PROM;
506 
507 	if (dev->flags & IFF_PROMISC) {
508 		mb->maccc |= PROM;
509 	} else {
510 		unsigned char multicast_filter[8];
511 		struct netdev_hw_addr *ha;
512 
513 		if (dev->flags & IFF_ALLMULTI) {
514 			for (i = 0; i < 8; i++) {
515 				multicast_filter[i] = 0xFF;
516 			}
517 		} else {
518 			for (i = 0; i < 8; i++)
519 				multicast_filter[i] = 0;
520 			netdev_for_each_mc_addr(ha, dev) {
521 				crc = ether_crc_le(6, ha->addr);
522 				/* bit number in multicast_filter */
523 				i = crc >> 26;
524 				multicast_filter[i >> 3] |= 1 << (i & 7);
525 			}
526 		}
527 
528 		if (mp->chipid == BROKEN_ADDRCHG_REV)
529 			mb->iac = LOGADDR;
530 		else {
531 			mb->iac = ADDRCHG | LOGADDR;
532 			while ((mb->iac & ADDRCHG) != 0)
533 				;
534 		}
535 		for (i = 0; i < 8; ++i)
536 			mb->ladrf = multicast_filter[i];
537 		if (mp->chipid != BROKEN_ADDRCHG_REV)
538 			mb->iac = 0;
539 	}
540 
541 	mb->maccc = maccc;
542 	local_irq_restore(flags);
543 }
544 
545 static void mace_handle_misc_intrs(struct net_device *dev, int intr)
546 {
547 	struct mace_data *mp = netdev_priv(dev);
548 	volatile struct mace *mb = mp->mace;
549 	static int mace_babbles, mace_jabbers;
550 
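	/*
	 * mpc and rntpc are 8-bit rollover counters that clear on read;
	 * the MPCO and RNTPCO interrupt bits flag a counter overflow,
	 * hence the extra 256 added below.
	 */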
551 	if (intr & MPCO)
552 		dev->stats.rx_missed_errors += 256;
553 	dev->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
554 	if (intr & RNTPCO)
555 		dev->stats.rx_length_errors += 256;
556 	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
557 	if (intr & CERR)
558 		++dev->stats.tx_heartbeat_errors;
559 	if (intr & BABBLE)
560 		if (mace_babbles++ < 4)
561 			printk(KERN_DEBUG "macmace: babbling transmitter\n");
562 	if (intr & JABBER)
563 		if (mace_jabbers++ < 4)
564 			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
565 }
566 
567 static irqreturn_t mace_interrupt(int irq, void *dev_id)
568 {
569 	struct net_device *dev = (struct net_device *) dev_id;
570 	struct mace_data *mp = netdev_priv(dev);
571 	volatile struct mace *mb = mp->mace;
572 	int intr, fs;
573 	unsigned long flags;
574 
575 	/* don't want the dma interrupt handler to fire */
576 	local_irq_save(flags);
577 
578 	intr = mb->ir; /* read interrupt register */
579 	mace_handle_misc_intrs(dev, intr);
580 
581 	if (intr & XMTINT) {
582 		fs = mb->xmtfs;
583 		if ((fs & XMTSV) == 0) {
584 			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
585 			mace_reset(dev);
586 			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce; resetting *may* help.
589 			 */
590 		}
591 		/* dma should have finished */
592 		if (!mp->tx_count) {
593 			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
594 		}
595 		/* Update stats */
596 		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
597 			++dev->stats.tx_errors;
598 			if (fs & LCAR)
599 				++dev->stats.tx_carrier_errors;
600 			else if (fs & (UFLO|LCOL|RTRY)) {
601 				++dev->stats.tx_aborted_errors;
602 				if (mb->xmtfs & UFLO) {
603 					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
604 					dev->stats.tx_fifo_errors++;
605 					mace_txdma_reset(dev);
606 				}
607 			}
608 		}
609 	}
610 
611 	if (mp->tx_count)
612 		netif_wake_queue(dev);
613 
614 	local_irq_restore(flags);
615 
616 	return IRQ_HANDLED;
617 }
618 
619 static void mace_tx_timeout(struct net_device *dev)
620 {
621 	struct mace_data *mp = netdev_priv(dev);
622 	volatile struct mace *mb = mp->mace;
623 	unsigned long flags;
624 
625 	local_irq_save(flags);
626 
627 	/* turn off both tx and rx and reset the chip */
628 	mb->maccc = 0;
629 	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
630 	mace_txdma_reset(dev);
631 	mace_reset(dev);
632 
633 	/* restart rx dma */
634 	mace_rxdma_reset(dev);
635 
636 	mp->tx_count = N_TX_RING;
637 	netif_wake_queue(dev);
638 
639 	/* turn it on! */
640 	mb->maccc = ENXMT | ENRCV;
641 	/* enable all interrupts except receive interrupts */
642 	mb->imr = RCVINT;
643 
644 	local_irq_restore(flags);
645 }
646 
647 /*
648  * Handle a newly arrived frame
649  */
650 
651 static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
652 {
653 	struct sk_buff *skb;
654 	unsigned int frame_status = mf->rcvsts;
655 
656 	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
657 		dev->stats.rx_errors++;
658 		if (frame_status & RS_OFLO) {
659 			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
660 			dev->stats.rx_fifo_errors++;
661 		}
662 		if (frame_status & RS_CLSN)
663 			dev->stats.collisions++;
664 		if (frame_status & RS_FRAMERR)
665 			dev->stats.rx_frame_errors++;
666 		if (frame_status & RS_FCSERR)
667 			dev->stats.rx_crc_errors++;
668 	} else {
		/* the low nibble of rcvsts holds the high bits of the byte count */
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);
670 
671 		skb = dev_alloc_skb(frame_length + 2);
672 		if (!skb) {
673 			dev->stats.rx_dropped++;
674 			return;
675 		}
676 		skb_reserve(skb, 2);
677 		memcpy(skb_put(skb, frame_length), mf->data, frame_length);
678 
679 		skb->protocol = eth_type_trans(skb, dev);
680 		netif_rx(skb);
681 		dev->stats.rx_packets++;
682 		dev->stats.rx_bytes += frame_length;
683 	}
684 }
685 
686 /*
687  * The PSC has passed us a DMA interrupt event.
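 * The receive and transmit channels share this interrupt, so poll the
 * status of each in turn.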
688  */
689 
690 static irqreturn_t mace_dma_intr(int irq, void *dev_id)
691 {
692 	struct net_device *dev = (struct net_device *) dev_id;
693 	struct mace_data *mp = netdev_priv(dev);
694 	int left, head;
695 	u16 status;
696 	u32 baka;
697 
698 	/* Not sure what this does */
699 
	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY))
		;
	if (!(baka & 0x60000000))
		return IRQ_NONE;
702 
703 	/*
704 	 * Process the read queue
705 	 */
706 
707 	status = psc_read_word(PSC_ENETRD_CTL);
708 
709 	if (status & 0x2000) {
710 		mace_rxdma_reset(dev);
711 	} else if (status & 0x0100) {
712 		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);
713 
714 		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
715 		head = N_RX_RING - left;
716 
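		/* PSC_ENETRD_LEN counts down as buffers are filled, so 'head'
		   is the index just past the last slot the PSC has written. */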
		/* Loop through the ring buffer and process new packets */
718 
719 		while (mp->rx_tail < head) {
720 			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
721 				+ (mp->rx_tail * MACE_BUFF_SIZE)));
722 			mp->rx_tail++;
723 		}
724 
725 		/* If we're out of buffers in this ring then switch to */
726 		/* the other set, otherwise just reactivate this one.  */
727 
728 		if (!left) {
729 			mace_load_rxdma_base(dev, mp->rx_slot);
730 			mp->rx_slot ^= 0x10;
731 		} else {
732 			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
733 		}
734 	}
735 
736 	/*
737 	 * Process the write queue
738 	 */
739 
740 	status = psc_read_word(PSC_ENETWR_CTL);
741 
742 	if (status & 0x2000) {
743 		mace_txdma_reset(dev);
744 	} else if (status & 0x0100) {
745 		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
746 		mp->tx_sloti ^= 0x10;
747 		mp->tx_count++;
748 	}
749 	return IRQ_HANDLED;
750 }
751 
752 MODULE_LICENSE("GPL");
753 MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
754 MODULE_ALIAS("platform:macmace");
755 
static int __devexit mac_mace_device_remove(struct platform_device *pdev)
757 {
758 	struct net_device *dev = platform_get_drvdata(pdev);
759 	struct mace_data *mp = netdev_priv(dev);
760 
761 	unregister_netdev(dev);
762 
763 	free_irq(dev->irq, dev);
764 	free_irq(IRQ_MAC_MACE_DMA, dev);
765 
766 	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
767 	                  mp->rx_ring, mp->rx_ring_phys);
768 	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
769 	                  mp->tx_ring, mp->tx_ring_phys);
770 
771 	free_netdev(dev);
772 
773 	return 0;
774 }
775 
776 static struct platform_driver mac_mace_driver = {
777 	.probe  = mace_probe,
778 	.remove = __devexit_p(mac_mace_device_remove),
779 	.driver	= {
780 		.name	= mac_mace_string,
781 		.owner	= THIS_MODULE,
782 	},
783 };
784 
785 static int __init mac_mace_init_module(void)
786 {
787 	if (!MACH_IS_MAC)
788 		return -ENODEV;
789 
790 	return platform_driver_register(&mac_mace_driver);
791 }
792 
793 static void __exit mac_mace_cleanup_module(void)
794 {
795 	platform_driver_unregister(&mac_mace_driver);
796 }
797 
798 module_init(mac_mace_init_module);
799 module_exit(mac_mace_cleanup_module);
800