// SPDX-License-Identifier: GPL-2.0-only
/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

struct mace_data {
	volatile struct mace __iomem *mace;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char maccc;
	unsigned char tx_fullup;
	unsigned char tx_active;
	unsigned char tx_bad_runt;
	struct timer_list tx_timeout;
	int timeout_active;
	int port_aaui;
	int chipid;
	struct macio_dev *mdev;
	spinlock_t lock;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
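/*
 * Rough sketch of how the single alloc_etherdev(PRIV_BYTES) allocation is
 * carved up by mace_probe() below (descriptive only, not a separate layout
 * definition):
 *
 *	struct mace_data
 *	<pad up to the next 16-byte boundary, via DBDMA_ALIGN>
 *	NCMDS_TX * N_TX_RING + 1 dbdma_cmds	(tx_cmds, incl. branch back)
 *	N_RX_RING + 1 dbdma_cmds		(rx_cmds, incl. STOP slot and
 *						 branch back)
 *
 * The "+ 3" in PRIV_BYTES covers the two branch commands plus one extra
 * command's worth of space (16 bytes) consumed by the alignment padding.
 */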
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, const void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct device_node *mace = macio_get_of_node(mdev);
	struct net_device *dev;
	struct mace_data *mp;
	const unsigned char *addr;
	u8 macaddr[ETH_ALEN];
	int j, rev, rc = -EBUSY;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n",
		       mace);
		return -ENODEV;
	}

	addr = of_get_property(mace, "mac-address", NULL);
	if (addr == NULL) {
		addr = of_get_property(mace, "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "Can't get mac-address for MACE %pOF\n",
			       mace);
			return -ENODEV;
		}
	}

	/*
	 * lazy allocate the driver-wide dummy buffer. (Note that we
	 * never have more than one MACE in the system anyway)
	 */
	if (dummy_buf == NULL) {
		dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
		if (dummy_buf == NULL)
			return -ENOMEM;
	}

	if (macio_request_resources(mdev, "mace")) {
		printk(KERN_ERR "MACE: can't request IO resources !\n");
		return -EBUSY;
	}

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		rc = -ENOMEM;
		goto err_release;
	}
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

	mp = netdev_priv(dev);
	mp->mdev = mdev;
	macio_set_drvdata(mdev, dev);

	dev->base_addr = macio_resource_start(mdev, 0);
	mp->mace = ioremap(dev->base_addr, 0x1000);
	if (mp->mace == NULL) {
		printk(KERN_ERR "MACE: can't map IO resources !\n");
		rc = -ENOMEM;
		goto err_free;
	}
	dev->irq = macio_irq(mdev, 0);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j) {
		macaddr[j] = rev ? bitrev8(addr[j]) : addr[j];
	}
	eth_hw_addr_set(dev, macaddr);
	mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
			in_8(&mp->mace->chipid_lo);


	mp = netdev_priv(dev);
	mp->maccc = ENXMT | ENRCV;

	mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
	if (mp->tx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_io;
	}
	mp->tx_dma_intr = macio_irq(mdev, 1);

	mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
	if (mp->rx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_tx_dma;
	}
	mp->rx_dma_intr = macio_irq(mdev, 2);

	mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
	mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

	memset((char *) mp->tx_cmds, 0,
	       (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
	timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
	spin_lock_init(&mp->lock);
	mp->timeout_active = 0;

	if (port_aaui >= 0)
		mp->port_aaui = port_aaui;
	else {
		/* Apple Network Server uses the AAUI port */
		if (of_machine_is_compatible("AAPL,ShinerESB"))
			mp->port_aaui = 1;
		else {
#ifdef CONFIG_MACE_AAUI_PORT
			mp->port_aaui = 1;
#else
			mp->port_aaui = 0;
#endif
		}
	}

	dev->netdev_ops = &mace_netdev_ops;

	/*
	 * Most of what is below could be moved to mace_open()
	 */
	mace_reset(dev);

	rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
		goto err_unmap_rx_dma;
	}
	rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
		goto err_free_irq;
	}
	rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
		goto err_free_tx_irq;
	}

	rc = register_netdev(dev);
	if (rc) {
		printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
		goto err_free_rx_irq;
	}

	printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
	       dev->name, dev->dev_addr,
	       mp->chipid >> 8, mp->chipid & 0xff);

	return 0;

 err_free_rx_irq:
	free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
	free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
	free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
	iounmap(mp->rx_dma);
 err_unmap_tx_dma:
	iounmap(mp->tx_dma);
 err_unmap_io:
	iounmap(mp->mace);
 err_free:
	free_netdev(dev);
 err_release:
	macio_release_resources(mdev);

	return rc;
}
static int mace_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct mace_data *mp;

	BUG_ON(dev == NULL);

	macio_set_drvdata(mdev, NULL);

	mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(mp->tx_dma_intr, dev);
	free_irq(mp->rx_dma_intr, dev);

	iounmap(mp->rx_dma);
	iounmap(mp->tx_dma);
	iounmap(mp->mace);

	free_netdev(dev);

	macio_release_resources(mdev);

	return 0;
}
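/*
 * Note on the DBDMA channel control accesses below: a 32-bit write to the
 * control register carries a mask of the bits to change in the upper 16
 * bits and their new values in the lower 16 bits (see asm/dbdma.h).  So
 *
 *	out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
 *
 * clears all four bits, while ((RUN|WAKE) << 16) | (RUN|WAKE) sets RUN and
 * WAKE without touching the others.
 */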
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
	int i;

	out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

	/*
	 * Yes this looks peculiar, but apparently it needs to be this
	 * way on some machines.
	 */
	for (i = 200; i > 0; --i)
		if (le32_to_cpu(dma->control) & RUN)
			udelay(1);
}

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		out_8(&mb->biucc, SWRST);
		if (in_8(&mb->biucc) & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "mace: cannot reset chip!\n");
		return;
	}

	out_8(&mb->imr, 0xff);	/* disable all intrs for now */
	i = in_8(&mb->ir);
	out_8(&mb->maccc, 0);	/* turn off tx, rx */

	out_8(&mb->biucc, XMTSP_64);
	out_8(&mb->utr, RTRD);
	out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
	out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
	out_8(&mb->rcvfc, 0);

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, LOGADDR);
	else {
		out_8(&mb->iac, ADDRCHG | LOGADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		out_8(&mb->ladrf, 0);

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);

	if (mp->port_aaui)
		out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
	else
		out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, const void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	const unsigned char *p = addr;
	u8 macaddr[ETH_ALEN];
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, PHYADDR);
	else {
		out_8(&mb->iac, ADDRCHG | PHYADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		out_8(&mb->padr, macaddr[i] = p[i]);

	eth_hw_addr_set(dev, macaddr);

	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	__mace_set_address(dev, addr);

	/* note: setting ADDRCHG clears ENRCV */
	out_8(&mb->maccc, mp->maccc);

	spin_unlock_irqrestore(&mp->lock, flags);
	return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
	int i;

	/* free some skb's */
	for (i = 0; i < N_RX_RING; ++i) {
		if (mp->rx_bufs[i] != NULL) {
			dev_kfree_skb(mp->rx_bufs[i]);
			mp->rx_bufs[i] = NULL;
		}
	}
	for (i = mp->tx_empty; i != mp->tx_fill; ) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
	}
}
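/*
 * Receive ring layout set up in mace_open() below: the first N_RX_RING - 1
 * slots are INPUT_LAST + INTR_ALWAYS commands pointing at freshly allocated
 * skbs (or at dummy_buf when allocation fails), the slot at rx_fill holds a
 * DBDMA_STOP, and one extra command after the ring branches back to
 * rx_cmds[0].  rx_fill is the next slot to be given a buffer and rx_empty
 * the next slot to be checked for a completed frame; mace_rxdma_intr()
 * consumes from rx_empty and refills toward rx_fill.
 */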
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int i;
	struct sk_buff *skb;
	unsigned char *data;

	/* reset the chip */
	mace_reset(dev);

	/* initialize list of sk_buffs for receiving and set up recv dma */
	mace_clean_rings(mp);
	memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
	cp = mp->rx_cmds;
	for (i = 0; i < N_RX_RING - 1; ++i) {
		skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
		if (!skb) {
			data = dummy_buf;
		} else {
			skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
			data = skb->data;
		}
		mp->rx_bufs[i] = skb;
		cp->req_count = cpu_to_le16(RX_BUFLEN);
		cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
		cp->phy_addr = cpu_to_le32(virt_to_bus(data));
		cp->xfer_status = 0;
		++cp;
	}
	mp->rx_bufs[i] = NULL;
	cp->command = cpu_to_le16(DBDMA_STOP);
	mp->rx_fill = i;
	mp->rx_empty = 0;

	/* Put a branch back to the beginning of the receive command list */
	++cp;
	cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
	cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));

	/* start rx dma */
	out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* put a branch at the end of the tx command list */
	cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
	cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
	cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));

	/* reset tx dma */
	out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
	out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
	mp->tx_fill = 0;
	mp->tx_empty = 0;
	mp->tx_fullup = 0;
	mp->tx_active = 0;
	mp->tx_bad_runt = 0;

	/* turn it on! */
	out_8(&mb->maccc, mp->maccc);
	/* enable all interrupts except receive interrupts */
	out_8(&mb->imr, RCVINT);

	return 0;
}

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;

	/* disable rx and tx */
	out_8(&mb->maccc, 0);
	out_8(&mb->imr, 0xff);		/* disable all intrs */

	/* disable rx and tx dma */
	rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

	mace_clean_rings(mp);

	return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);

	if (mp->timeout_active)
		del_timer(&mp->tx_timeout);
	mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	add_timer(&mp->tx_timeout);
	mp->timeout_active = 1;
}

static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp, *np;
	unsigned long flags;
	int fill, next, len;

	/* see if there's a free slot in the tx ring */
	spin_lock_irqsave(&mp->lock, flags);
	fill = mp->tx_fill;
	next = fill + 1;
	if (next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty) {
		netif_stop_queue(dev);
		mp->tx_fullup = 1;
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;	/* can't take it at the moment */
	}
	spin_unlock_irqrestore(&mp->lock, flags);

	/* partially fill in the dma command block */
	len = skb->len;
	if (len > ETH_FRAME_LEN) {
		printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
		len = ETH_FRAME_LEN;
	}
	mp->tx_bufs[fill] = skb;
	cp = mp->tx_cmds + NCMDS_TX * fill;
	cp->req_count = cpu_to_le16(len);
	cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));

	np = mp->tx_cmds + NCMDS_TX * next;
	out_le16(&np->command, DBDMA_STOP);

	/* poke the tx dma channel */
	spin_lock_irqsave(&mp->lock, flags);
	mp->tx_fill = next;
	if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		++mp->tx_active;
		mace_set_timeout(dev);
	}
	if (++next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}
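/*
 * Multicast filtering uses the MACE's 64-bit logical address filter
 * (ladrf): each accepted multicast address is hashed by taking the top six
 * bits of the little-endian CRC-32 of the address, and the corresponding
 * filter bit (byte i >> 3, bit i & 7) is set in the 8-byte filter loaded
 * below.
 */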
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;
	u32 crc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	mp->maccc &= ~PROM;
	if (dev->flags & IFF_PROMISC) {
		mp->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0xff;
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				i = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}
#if 0
		printk("Multicast filter :");
		for (i = 0; i < 8; i++)
			printk("%02x ", multicast_filter[i]);
		printk("\n");
#endif

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, LOGADDR);
		else {
			out_8(&mb->iac, ADDRCHG | LOGADDR);
			while ((in_8(&mb->iac) & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			out_8(&mb->ladrf, multicast_filter[i]);
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, 0);
	}
	/* reset maccc */
	out_8(&mb->maccc, mp->maccc);
	spin_unlock_irqrestore(&mp->lock, flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
	volatile struct mace __iomem *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "mace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "mace: jabbering transceiver\n");
}
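/*
 * Main MACE interrupt: account for the miscellaneous error counters, then
 * reap transmit status.  While the chip reports a valid transmit frame
 * status (XMTSV), one completed (or failed) frame is retired from the tx
 * ring at tx_empty, including the "bad runt" workaround described in the
 * comments below.  Finally, any frames queued between tx_empty + tx_active
 * and tx_fill are handed to the TX DMA channel, and the queue is woken if
 * any slot was freed.
 */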
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int intr, fs, i, stat, x;
	int xcount, dstat;
	unsigned long flags;
	/* static int mace_last_fs, mace_last_xcount; */

	spin_lock_irqsave(&mp->lock, flags);
	intr = in_8(&mb->ir);		/* read interrupt register */
	in_8(&mb->xmtrc);		/* get retries */
	mace_handle_misc_intrs(mp, intr, dev);

	i = mp->tx_empty;
	while (in_8(&mb->pr) & XMTSV) {
		del_timer(&mp->tx_timeout);
		mp->timeout_active = 0;
		/*
		 * Clear any interrupt indication associated with this status
		 * word.  This appears to unlatch any error indication from
		 * the DMA controller.
		 */
		intr = in_8(&mb->ir);
		if (intr != 0)
			mace_handle_misc_intrs(mp, intr, dev);
		if (mp->tx_bad_runt) {
			fs = in_8(&mb->xmtfs);
			mp->tx_bad_runt = 0;
			out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			continue;
		}
		dstat = le32_to_cpu(td->status);
		/* stop DMA controller */
		out_le32(&td->control, RUN << 16);
		/*
		 * xcount is the number of complete frames which have been
		 * written to the fifo but for which status has not been read.
		 */
		xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
		if (xcount == 0 || (dstat & DEAD)) {
			/*
			 * If a packet was aborted before the DMA controller has
			 * finished transferring it, it seems that there are 2 bytes
			 * which are stuck in some buffer somewhere.  These will get
			 * transmitted as soon as we read the frame status (which
			 * reenables the transmit data transfer request).  Turning
			 * off the DMA controller and/or resetting the MACE doesn't
			 * help.  So we disable auto-padding and FCS transmission
			 * so the two bytes will only be a runt packet which should
			 * be ignored by other stations.
			 */
			out_8(&mb->xmtfc, DXMTFCS);
		}
		fs = in_8(&mb->xmtfs);
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		cp = mp->tx_cmds + NCMDS_TX * i;
		stat = le16_to_cpu(cp->xfer_status);
		if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
			/*
			 * Check whether there were in fact 2 bytes written to
			 * the transmit FIFO.
			 */
			udelay(1);
			x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
			if (x != 0) {
				/* there were two bytes with an end-of-packet indication */
				mp->tx_bad_runt = 1;
				mace_set_timeout(dev);
			} else {
				/*
				 * Either there weren't the two bytes buffered up, or they
				 * didn't have an end-of-packet indication.
				 * We flush the transmit FIFO just in case (by setting the
				 * XMTFWU bit with the transmitter disabled).
				 */
				out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
				out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
				udelay(1);
				out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
				out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			}
		}
		/* dma should have finished */
		if (i == mp->tx_fill) {
			printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			continue;
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			if (fs & (UFLO|LCOL|RTRY))
				++dev->stats.tx_aborted_errors;
		} else {
			dev->stats.tx_bytes += mp->tx_bufs[i]->len;
			++dev->stats.tx_packets;
		}
		dev_consume_skb_irq(mp->tx_bufs[i]);
		--mp->tx_active;
		if (++i >= N_TX_RING)
			i = 0;
#if 0
		mace_last_fs = fs;
		mace_last_xcount = xcount;
#endif
	}

	if (i != mp->tx_empty) {
		mp->tx_fullup = 0;
		netif_wake_queue(dev);
	}
	mp->tx_empty = i;
	i += mp->tx_active;
	if (i >= N_TX_RING)
		i -= N_TX_RING;
	if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
		do {
			/* set up the next one */
			cp = mp->tx_cmds + NCMDS_TX * i;
			out_le16(&cp->xfer_status, 0);
			out_le16(&cp->command, OUTPUT_LAST);
			++mp->tx_active;
			if (++i >= N_TX_RING)
				i = 0;
		} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		mace_set_timeout(dev);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}
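/*
 * Transmit watchdog, armed by mace_set_timeout() whenever a frame is handed
 * to the TX DMA channel.  If the frame has not completed within TX_TIMEOUT
 * jiffies, reset the chip and the TX DMA channel, restart the RX channel at
 * its current command, drop the frame that was stuck at tx_empty, and
 * requeue the next pending frame (if any) before re-enabling the chip.
 */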
static void mace_tx_timeout(struct timer_list *t)
{
	struct mace_data *mp = from_timer(mp, t, tx_timeout);
	struct net_device *dev = macio_get_drvdata(mp->mdev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mp->lock, flags);
	mp->timeout_active = 0;
	if (mp->tx_active == 0 && !mp->tx_bad_runt)
		goto out;

	/* update various counters */
	mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

	cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

	/* turn off both tx and rx and reset the chip */
	out_8(&mb->maccc, 0);
	printk(KERN_ERR "mace: transmit timeout - resetting\n");
	dbdma_reset(td);
	mace_reset(dev);

	/* restart rx dma */
	cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
	dbdma_reset(rd);
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* fix up the transmit side */
	i = mp->tx_empty;
	mp->tx_active = 0;
	++dev->stats.tx_errors;
	if (mp->tx_bad_runt) {
		mp->tx_bad_runt = 0;
	} else if (i != mp->tx_fill) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
		mp->tx_empty = i;
	}
	mp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != mp->tx_fill) {
		cp = mp->tx_cmds + NCMDS_TX * i;
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, (RUN << 16) | RUN);
		++mp->tx_active;
		mace_set_timeout(dev);
	}

	/* turn it back on */
	out_8(&mb->imr, RCVINT);
	out_8(&mb->maccc, mp->maccc);

 out:
	spin_unlock_irqrestore(&mp->lock, flags);
}

static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
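/*
 * Receive DMA interrupt: two passes over the rx ring.  The first pass walks
 * completed descriptors from rx_empty toward rx_fill, reads the MACE
 * receive frame status from the trailer the chip appends to each buffer,
 * and passes good frames up the stack.  The second pass refills the
 * consumed slots with new skbs (falling back to dummy_buf if allocation
 * fails, in which case the next frame landing there is counted as
 * dropped), re-arms the descriptors, and WAKEs the channel.
 */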
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp, *np;
	int i, nb, stat, next;
	struct sk_buff *skb;
	unsigned frame_status;
	static int mace_lost_status;
	unsigned char *data;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	for (i = mp->rx_empty; i != mp->rx_fill; ) {
		cp = mp->rx_cmds + i;
		stat = le16_to_cpu(cp->xfer_status);
		if ((stat & ACTIVE) == 0) {
			next = i + 1;
			if (next >= N_RX_RING)
				next = 0;
			np = mp->rx_cmds + next;
			if (next != mp->rx_fill &&
			    (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) {
				printk(KERN_DEBUG "mace: lost a status word\n");
				++mace_lost_status;
			} else
				break;
		}
		nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count);
		out_le16(&cp->command, DBDMA_STOP);
		/* got a packet, have a look at it */
		skb = mp->rx_bufs[i];
		if (!skb) {
			++dev->stats.rx_dropped;
		} else if (nb > 8) {
			data = skb->data;
			frame_status = (data[nb-3] << 8) + data[nb-4];
			if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
				++dev->stats.rx_errors;
				if (frame_status & RS_OFLO)
					++dev->stats.rx_over_errors;
				if (frame_status & RS_FRAMERR)
					++dev->stats.rx_frame_errors;
				if (frame_status & RS_FCSERR)
					++dev->stats.rx_crc_errors;
			} else {
				/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
				 * FCS on frames with 802.3 headers. This means that Ethernet
				 * frames have 8 extra octets at the end, while 802.3 frames
				 * have only 4. We need to correctly account for this. */
				if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
					nb -= 4;
				else	/* Ethernet header; mace includes FCS */
					nb -= 8;
				skb_put(skb, nb);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += skb->len;
				netif_rx(skb);
				mp->rx_bufs[i] = NULL;
				++dev->stats.rx_packets;
			}
		} else {
			++dev->stats.rx_errors;
			++dev->stats.rx_length_errors;
		}

		/* advance to next */
		if (++i >= N_RX_RING)
			i = 0;
	}
	mp->rx_empty = i;

	i = mp->rx_fill;
	for (;;) {
		next = i + 1;
		if (next >= N_RX_RING)
			next = 0;
		if (next == mp->rx_empty)
			break;
		cp = mp->rx_cmds + i;
		skb = mp->rx_bufs[i];
		if (!skb) {
			skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb) {
				skb_reserve(skb, 2);
				mp->rx_bufs[i] = skb;
			}
		}
		cp->req_count = cpu_to_le16(RX_BUFLEN);
		data = skb? skb->data: dummy_buf;
		cp->phy_addr = cpu_to_le32(virt_to_bus(data));
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
		if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
			out_le32(&rd->control, (PAUSE << 16) | PAUSE);
			while ((in_le32(&rd->status) & ACTIVE) != 0)
				;
		}
#endif
		i = next;
	}
	if (i != mp->rx_fill) {
		out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
		mp->rx_fill = i;
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}
static const struct of_device_id mace_match[] =
{
	{
	.name		= "mace",
	},
	{},
};
MODULE_DEVICE_TABLE (of, mace_match);

static struct macio_driver mace_driver =
{
	.driver = {
		.name		= "mace",
		.owner		= THIS_MODULE,
		.of_match_table	= mace_match,
	},
	.probe		= mace_probe,
	.remove		= mace_remove,
};


static int __init mace_init(void)
{
	return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
	macio_unregister_driver(&mace_driver);

	kfree(dummy_buf);
	dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);