/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};
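
/*
 * Each receive DMA slot is MACE_BUFF_SIZE bytes and begins with the status
 * bytes laid out above; mace_dma_rx_frame() reconstructs the frame length
 * from rcvcnt (low 8 bits) plus the low nibble of rcvsts (bits 8-11).
 */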

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
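
/*
 * Note that N_TX_BUFF_ORDER is 0, i.e. there is only one transmit buffer:
 * mace_xmit_start() stops the queue for every frame, and the interrupt
 * handlers wake it again once the PSC reports the transmit DMA complete.
 */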

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	int err;

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out on 16-byte boundaries and the
	 * bits are reversed.
	 */

	addr = MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops = &mace_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
	       dev->name, dev->dev_addr);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}
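
/*
 * Address and filter loads in mace_reset(), __mace_set_address() and
 * mace_set_multicast() follow a common pattern: set ADDRCHG in the IAC
 * register along with the address-type bit, wait for the chip to clear
 * ADDRCHG, write the registers, then clear IAC. Chip revision
 * BROKEN_ADDRCHG_REV skips the ADDRCHG handshake and is written directly
 * (see the workaround note near the top of the file).
 */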

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL)
		goto out1;

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL)
		goto out2;

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);
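
	/*
	 * Receive completion is handled from the PSC DMA interrupt
	 * (mace_dma_intr), so the per-packet receive interrupt stays
	 * masked below; everything else is unmasked.
	 */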
	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}
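
/*
 * The MPC and RNTPC counters clear on read; the MPCO and RNTPCO interrupt
 * bits indicate that a counter wrapped, hence the extra 256 added below
 * before the current counter value is read in.
 */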
static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;	  /* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			dev->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}
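
/*
 * Receive bookkeeping in the handler below is inferred from the code rather
 * than from PSC documentation: PSC_ENETRD_LEN was loaded with N_RX_RING and
 * appears to count down as slots are filled, so N_RX_RING minus the
 * remaining count marks the first slot not yet written; rx_tail catches up
 * to it, and the two command sets (offset 0x10 apart) are used alternately
 * via rx_slot.
 */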

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000))
		return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *)(mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe	= mace_probe,
	.remove	= mac_mace_device_remove,
	.driver	= {
		.name	= mac_mace_string,
	},
};

static int __init mac_mace_init_module(void)
{
	if (!MACH_IS_MAC)
		return -ENODEV;

	return platform_driver_register(&mac_mace_driver);
}

static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);