1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Network device driver for the BMAC ethernet controller on 4 * Apple Powermacs. Assumes it's under a DBDMA controller. 5 * 6 * Copyright (C) 1998 Randy Gobbel. 7 * 8 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to 9 * dynamic procfs inode. 10 */ 11 #include <linux/interrupt.h> 12 #include <linux/module.h> 13 #include <linux/kernel.h> 14 #include <linux/netdevice.h> 15 #include <linux/etherdevice.h> 16 #include <linux/delay.h> 17 #include <linux/string.h> 18 #include <linux/timer.h> 19 #include <linux/proc_fs.h> 20 #include <linux/init.h> 21 #include <linux/spinlock.h> 22 #include <linux/crc32.h> 23 #include <linux/crc32poly.h> 24 #include <linux/bitrev.h> 25 #include <linux/ethtool.h> 26 #include <linux/slab.h> 27 #include <linux/pgtable.h> 28 #include <asm/prom.h> 29 #include <asm/dbdma.h> 30 #include <asm/io.h> 31 #include <asm/page.h> 32 #include <asm/machdep.h> 33 #include <asm/pmac_feature.h> 34 #include <asm/macio.h> 35 #include <asm/irq.h> 36 37 #include "bmac.h" 38 39 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1)))) 40 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1))) 41 42 /* switch to use multicast code lifted from sunhme driver */ 43 #define SUNHME_MULTICAST 44 45 #define N_RX_RING 64 46 #define N_TX_RING 32 47 #define MAX_TX_ACTIVE 1 48 #define ETHERCRC 4 49 #define ETHERMINPACKET 64 50 #define ETHERMTU 1500 51 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2) 52 #define TX_TIMEOUT HZ /* 1 second */ 53 54 /* Bits in transmit DMA status */ 55 #define TX_DMA_ERR 0x80 56 57 #define XXDEBUG(args) 58 59 struct bmac_data { 60 /* volatile struct bmac *bmac; */ 61 struct sk_buff_head *queue; 62 volatile struct dbdma_regs __iomem *tx_dma; 63 int tx_dma_intr; 64 volatile struct dbdma_regs __iomem *rx_dma; 65 int rx_dma_intr; 66 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */ 67 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */ 68 struct macio_dev *mdev; 69 int is_bmac_plus; 70 struct sk_buff *rx_bufs[N_RX_RING]; 71 int rx_fill; 72 int rx_empty; 73 struct sk_buff *tx_bufs[N_TX_RING]; 74 int tx_fill; 75 int tx_empty; 76 unsigned char tx_fullup; 77 struct timer_list tx_timeout; 78 int timeout_active; 79 int sleeping; 80 int opened; 81 unsigned short hash_use_count[64]; 82 unsigned short hash_table_mask[4]; 83 spinlock_t lock; 84 }; 85 86 #if 0 /* Move that to ethtool */ 87 88 typedef struct bmac_reg_entry { 89 char *name; 90 unsigned short reg_offset; 91 } bmac_reg_entry_t; 92 93 #define N_REG_ENTRIES 31 94 95 static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = { 96 {"MEMADD", MEMADD}, 97 {"MEMDATAHI", MEMDATAHI}, 98 {"MEMDATALO", MEMDATALO}, 99 {"TXPNTR", TXPNTR}, 100 {"RXPNTR", RXPNTR}, 101 {"IPG1", IPG1}, 102 {"IPG2", IPG2}, 103 {"ALIMIT", ALIMIT}, 104 {"SLOT", SLOT}, 105 {"PALEN", PALEN}, 106 {"PAPAT", PAPAT}, 107 {"TXSFD", TXSFD}, 108 {"JAM", JAM}, 109 {"TXCFG", TXCFG}, 110 {"TXMAX", TXMAX}, 111 {"TXMIN", TXMIN}, 112 {"PAREG", PAREG}, 113 {"DCNT", DCNT}, 114 {"NCCNT", NCCNT}, 115 {"NTCNT", NTCNT}, 116 {"EXCNT", EXCNT}, 117 {"LTCNT", LTCNT}, 118 {"TXSM", TXSM}, 119 {"RXCFG", RXCFG}, 120 {"RXMAX", RXMAX}, 121 {"RXMIN", RXMIN}, 122 {"FRCNT", FRCNT}, 123 {"AECNT", AECNT}, 124 {"FECNT", FECNT}, 125 {"RXSM", RXSM}, 126 {"RXCV", RXCV} 127 }; 128 129 #endif 130 131 static unsigned char *bmac_emergency_rxbuf; 132 133 /* 134 * Number of bytes of private data per BMAC: allow enough for 135 * the rx and tx dma commands plus a branch dma command each, 136 * and another 16 bytes to allow us to align the dma command 137 * buffers on a 16 byte boundary. 138 */ 139 #define PRIV_BYTES (sizeof(struct bmac_data) \ 140 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \ 141 + sizeof(struct sk_buff_head)) 142 143 static int bmac_open(struct net_device *dev); 144 static int bmac_close(struct net_device *dev); 145 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); 146 static void bmac_set_multicast(struct net_device *dev); 147 static void bmac_reset_and_enable(struct net_device *dev); 148 static void bmac_start_chip(struct net_device *dev); 149 static void bmac_init_chip(struct net_device *dev); 150 static void bmac_init_registers(struct net_device *dev); 151 static void bmac_enable_and_reset_chip(struct net_device *dev); 152 static int bmac_set_address(struct net_device *dev, void *addr); 153 static irqreturn_t bmac_misc_intr(int irq, void *dev_id); 154 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); 155 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); 156 static void bmac_set_timeout(struct net_device *dev); 157 static void bmac_tx_timeout(struct timer_list *t); 158 static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev); 159 static void bmac_start(struct net_device *dev); 160 161 #define DBDMA_SET(x) ( ((x) | (x) << 16) ) 162 #define DBDMA_CLEAR(x) ( (x) << 16) 163 164 static inline void 165 dbdma_st32(volatile __u32 __iomem *a, unsigned long x) 166 { 167 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); 168 } 169 170 static inline unsigned long 171 dbdma_ld32(volatile __u32 __iomem *a) 172 { 173 __u32 swap; 174 __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a)); 175 return swap; 176 } 177 178 static void 179 dbdma_continue(volatile struct dbdma_regs __iomem *dmap) 180 { 181 dbdma_st32(&dmap->control, 182 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD)); 183 eieio(); 184 } 185 186 static void 187 dbdma_reset(volatile struct dbdma_regs __iomem *dmap) 188 { 189 dbdma_st32(&dmap->control, 190 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)); 191 eieio(); 192 while (dbdma_ld32(&dmap->status) & RUN) 193 eieio(); 194 } 195 196 static void 197 dbdma_setcmd(volatile struct dbdma_cmd *cp, 198 unsigned short cmd, unsigned count, unsigned long addr, 199 unsigned long cmd_dep) 200 { 201 out_le16(&cp->command, cmd); 202 out_le16(&cp->req_count, count); 203 out_le32(&cp->phy_addr, addr); 204 out_le32(&cp->cmd_dep, cmd_dep); 205 out_le16(&cp->xfer_status, 0); 206 out_le16(&cp->res_count, 0); 207 } 208 209 static inline 210 void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data ) 211 { 212 out_le16((void __iomem *)dev->base_addr + reg_offset, data); 213 } 214 215 216 static inline 217 unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) 218 { 219 return in_le16((void __iomem *)dev->base_addr + reg_offset); 220 } 221 222 static void 223 bmac_enable_and_reset_chip(struct net_device *dev) 224 { 225 struct bmac_data *bp = netdev_priv(dev); 226 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 227 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 228 229 if (rd) 230 dbdma_reset(rd); 231 if (td) 232 dbdma_reset(td); 233 234 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); 235 } 236 237 #define MIFDELAY udelay(10) 238 239 static unsigned int 240 bmac_mif_readbits(struct net_device *dev, int nb) 241 { 242 unsigned int val = 0; 243 244 while (--nb >= 0) { 245 bmwrite(dev, MIFCSR, 0); 246 MIFDELAY; 247 if (bmread(dev, MIFCSR) & 8) 248 val |= 1 << nb; 249 bmwrite(dev, MIFCSR, 1); 250 MIFDELAY; 251 } 252 bmwrite(dev, MIFCSR, 0); 253 MIFDELAY; 254 bmwrite(dev, MIFCSR, 1); 255 MIFDELAY; 256 return val; 257 } 258 259 static void 260 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb) 261 { 262 int b; 263 264 while (--nb >= 0) { 265 b = (val & (1 << nb))? 6: 4; 266 bmwrite(dev, MIFCSR, b); 267 MIFDELAY; 268 bmwrite(dev, MIFCSR, b|1); 269 MIFDELAY; 270 } 271 } 272 273 static unsigned int 274 bmac_mif_read(struct net_device *dev, unsigned int addr) 275 { 276 unsigned int val; 277 278 bmwrite(dev, MIFCSR, 4); 279 MIFDELAY; 280 bmac_mif_writebits(dev, ~0U, 32); 281 bmac_mif_writebits(dev, 6, 4); 282 bmac_mif_writebits(dev, addr, 10); 283 bmwrite(dev, MIFCSR, 2); 284 MIFDELAY; 285 bmwrite(dev, MIFCSR, 1); 286 MIFDELAY; 287 val = bmac_mif_readbits(dev, 17); 288 bmwrite(dev, MIFCSR, 4); 289 MIFDELAY; 290 return val; 291 } 292 293 static void 294 bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val) 295 { 296 bmwrite(dev, MIFCSR, 4); 297 MIFDELAY; 298 bmac_mif_writebits(dev, ~0U, 32); 299 bmac_mif_writebits(dev, 5, 4); 300 bmac_mif_writebits(dev, addr, 10); 301 bmac_mif_writebits(dev, 2, 2); 302 bmac_mif_writebits(dev, val, 16); 303 bmac_mif_writebits(dev, 3, 2); 304 } 305 306 static void 307 bmac_init_registers(struct net_device *dev) 308 { 309 struct bmac_data *bp = netdev_priv(dev); 310 volatile unsigned short regValue; 311 const unsigned short *pWord16; 312 int i; 313 314 /* XXDEBUG(("bmac: enter init_registers\n")); */ 315 316 bmwrite(dev, RXRST, RxResetValue); 317 bmwrite(dev, TXRST, TxResetBit); 318 319 i = 100; 320 do { 321 --i; 322 udelay(10000); 323 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */ 324 } while ((regValue & TxResetBit) && i > 0); 325 326 if (!bp->is_bmac_plus) { 327 regValue = bmread(dev, XCVRIF); 328 regValue |= ClkBit | SerialMode | COLActiveLow; 329 bmwrite(dev, XCVRIF, regValue); 330 udelay(10000); 331 } 332 333 bmwrite(dev, RSEED, (unsigned short)0x1968); 334 335 regValue = bmread(dev, XIFC); 336 regValue |= TxOutputEnable; 337 bmwrite(dev, XIFC, regValue); 338 339 bmread(dev, PAREG); 340 341 /* set collision counters to 0 */ 342 bmwrite(dev, NCCNT, 0); 343 bmwrite(dev, NTCNT, 0); 344 bmwrite(dev, EXCNT, 0); 345 bmwrite(dev, LTCNT, 0); 346 347 /* set rx counters to 0 */ 348 bmwrite(dev, FRCNT, 0); 349 bmwrite(dev, LECNT, 0); 350 bmwrite(dev, AECNT, 0); 351 bmwrite(dev, FECNT, 0); 352 bmwrite(dev, RXCV, 0); 353 354 /* set tx fifo information */ 355 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */ 356 357 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */ 358 bmwrite(dev, TXFIFOCSR, TxFIFOEnable ); 359 360 /* set rx fifo information */ 361 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ 362 bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); 363 364 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */ 365 bmread(dev, STATUS); /* read it just to clear it */ 366 367 /* zero out the chip Hash Filter registers */ 368 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; 369 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ 370 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ 371 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ 372 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ 373 374 pWord16 = (const unsigned short *)dev->dev_addr; 375 bmwrite(dev, MADD0, *pWord16++); 376 bmwrite(dev, MADD1, *pWord16++); 377 bmwrite(dev, MADD2, *pWord16); 378 379 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets); 380 381 bmwrite(dev, INTDISABLE, EnableNormal); 382 } 383 384 #if 0 385 static void 386 bmac_disable_interrupts(struct net_device *dev) 387 { 388 bmwrite(dev, INTDISABLE, DisableAll); 389 } 390 391 static void 392 bmac_enable_interrupts(struct net_device *dev) 393 { 394 bmwrite(dev, INTDISABLE, EnableNormal); 395 } 396 #endif 397 398 399 static void 400 bmac_start_chip(struct net_device *dev) 401 { 402 struct bmac_data *bp = netdev_priv(dev); 403 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 404 unsigned short oldConfig; 405 406 /* enable rx dma channel */ 407 dbdma_continue(rd); 408 409 oldConfig = bmread(dev, TXCFG); 410 bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); 411 412 /* turn on rx plus any other bits already on (promiscuous possibly) */ 413 oldConfig = bmread(dev, RXCFG); 414 bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); 415 udelay(20000); 416 } 417 418 static void 419 bmac_init_phy(struct net_device *dev) 420 { 421 unsigned int addr; 422 struct bmac_data *bp = netdev_priv(dev); 423 424 printk(KERN_DEBUG "phy registers:"); 425 for (addr = 0; addr < 32; ++addr) { 426 if ((addr & 7) == 0) 427 printk(KERN_DEBUG); 428 printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr)); 429 } 430 printk(KERN_CONT "\n"); 431 432 if (bp->is_bmac_plus) { 433 unsigned int capable, ctrl; 434 435 ctrl = bmac_mif_read(dev, 0); 436 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1; 437 if (bmac_mif_read(dev, 4) != capable || 438 (ctrl & 0x1000) == 0) { 439 bmac_mif_write(dev, 4, capable); 440 bmac_mif_write(dev, 0, 0x1200); 441 } else 442 bmac_mif_write(dev, 0, 0x1000); 443 } 444 } 445 446 static void bmac_init_chip(struct net_device *dev) 447 { 448 bmac_init_phy(dev); 449 bmac_init_registers(dev); 450 } 451 452 #ifdef CONFIG_PM 453 static int bmac_suspend(struct macio_dev *mdev, pm_message_t state) 454 { 455 struct net_device* dev = macio_get_drvdata(mdev); 456 struct bmac_data *bp = netdev_priv(dev); 457 unsigned long flags; 458 unsigned short config; 459 int i; 460 461 netif_device_detach(dev); 462 /* prolly should wait for dma to finish & turn off the chip */ 463 spin_lock_irqsave(&bp->lock, flags); 464 if (bp->timeout_active) { 465 del_timer(&bp->tx_timeout); 466 bp->timeout_active = 0; 467 } 468 disable_irq(dev->irq); 469 disable_irq(bp->tx_dma_intr); 470 disable_irq(bp->rx_dma_intr); 471 bp->sleeping = 1; 472 spin_unlock_irqrestore(&bp->lock, flags); 473 if (bp->opened) { 474 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 475 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 476 477 config = bmread(dev, RXCFG); 478 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 479 config = bmread(dev, TXCFG); 480 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 481 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ 482 /* disable rx and tx dma */ 483 rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 484 td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 485 /* free some skb's */ 486 for (i=0; i<N_RX_RING; i++) { 487 if (bp->rx_bufs[i] != NULL) { 488 dev_kfree_skb(bp->rx_bufs[i]); 489 bp->rx_bufs[i] = NULL; 490 } 491 } 492 for (i = 0; i<N_TX_RING; i++) { 493 if (bp->tx_bufs[i] != NULL) { 494 dev_kfree_skb(bp->tx_bufs[i]); 495 bp->tx_bufs[i] = NULL; 496 } 497 } 498 } 499 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 500 return 0; 501 } 502 503 static int bmac_resume(struct macio_dev *mdev) 504 { 505 struct net_device* dev = macio_get_drvdata(mdev); 506 struct bmac_data *bp = netdev_priv(dev); 507 508 /* see if this is enough */ 509 if (bp->opened) 510 bmac_reset_and_enable(dev); 511 512 enable_irq(dev->irq); 513 enable_irq(bp->tx_dma_intr); 514 enable_irq(bp->rx_dma_intr); 515 netif_device_attach(dev); 516 517 return 0; 518 } 519 #endif /* CONFIG_PM */ 520 521 static int bmac_set_address(struct net_device *dev, void *addr) 522 { 523 struct bmac_data *bp = netdev_priv(dev); 524 const unsigned short *pWord16; 525 unsigned long flags; 526 527 XXDEBUG(("bmac: enter set_address\n")); 528 spin_lock_irqsave(&bp->lock, flags); 529 530 eth_hw_addr_set(dev, addr); 531 532 /* load up the hardware address */ 533 pWord16 = (const unsigned short *)dev->dev_addr; 534 bmwrite(dev, MADD0, *pWord16++); 535 bmwrite(dev, MADD1, *pWord16++); 536 bmwrite(dev, MADD2, *pWord16); 537 538 spin_unlock_irqrestore(&bp->lock, flags); 539 XXDEBUG(("bmac: exit set_address\n")); 540 return 0; 541 } 542 543 static inline void bmac_set_timeout(struct net_device *dev) 544 { 545 struct bmac_data *bp = netdev_priv(dev); 546 unsigned long flags; 547 548 spin_lock_irqsave(&bp->lock, flags); 549 if (bp->timeout_active) 550 del_timer(&bp->tx_timeout); 551 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; 552 add_timer(&bp->tx_timeout); 553 bp->timeout_active = 1; 554 spin_unlock_irqrestore(&bp->lock, flags); 555 } 556 557 static void 558 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp) 559 { 560 void *vaddr; 561 unsigned long baddr; 562 unsigned long len; 563 564 len = skb->len; 565 vaddr = skb->data; 566 baddr = virt_to_bus(vaddr); 567 568 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0); 569 } 570 571 static void 572 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp) 573 { 574 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf; 575 576 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, 577 virt_to_bus(addr), 0); 578 } 579 580 static void 581 bmac_init_tx_ring(struct bmac_data *bp) 582 { 583 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 584 585 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); 586 587 bp->tx_empty = 0; 588 bp->tx_fill = 0; 589 bp->tx_fullup = 0; 590 591 /* put a branch at the end of the tx command list */ 592 dbdma_setcmd(&bp->tx_cmds[N_TX_RING], 593 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); 594 595 /* reset tx dma */ 596 dbdma_reset(td); 597 out_le32(&td->wait_sel, 0x00200020); 598 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); 599 } 600 601 static int 602 bmac_init_rx_ring(struct net_device *dev) 603 { 604 struct bmac_data *bp = netdev_priv(dev); 605 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 606 int i; 607 struct sk_buff *skb; 608 609 /* initialize list of sk_buffs for receiving and set up recv dma */ 610 memset((char *)bp->rx_cmds, 0, 611 (N_RX_RING + 1) * sizeof(struct dbdma_cmd)); 612 for (i = 0; i < N_RX_RING; i++) { 613 if ((skb = bp->rx_bufs[i]) == NULL) { 614 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); 615 if (skb != NULL) 616 skb_reserve(skb, 2); 617 } 618 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); 619 } 620 621 bp->rx_empty = 0; 622 bp->rx_fill = i; 623 624 /* Put a branch back to the beginning of the receive command list */ 625 dbdma_setcmd(&bp->rx_cmds[N_RX_RING], 626 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); 627 628 /* start rx dma */ 629 dbdma_reset(rd); 630 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); 631 632 return 1; 633 } 634 635 636 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) 637 { 638 struct bmac_data *bp = netdev_priv(dev); 639 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 640 int i; 641 642 /* see if there's a free slot in the tx ring */ 643 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */ 644 /* bp->tx_empty, bp->tx_fill)); */ 645 i = bp->tx_fill + 1; 646 if (i >= N_TX_RING) 647 i = 0; 648 if (i == bp->tx_empty) { 649 netif_stop_queue(dev); 650 bp->tx_fullup = 1; 651 XXDEBUG(("bmac_transmit_packet: tx ring full\n")); 652 return -1; /* can't take it at the moment */ 653 } 654 655 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); 656 657 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); 658 659 bp->tx_bufs[bp->tx_fill] = skb; 660 bp->tx_fill = i; 661 662 dev->stats.tx_bytes += skb->len; 663 664 dbdma_continue(td); 665 666 return 0; 667 } 668 669 static int rxintcount; 670 671 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) 672 { 673 struct net_device *dev = (struct net_device *) dev_id; 674 struct bmac_data *bp = netdev_priv(dev); 675 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 676 volatile struct dbdma_cmd *cp; 677 int i, nb, stat; 678 struct sk_buff *skb; 679 unsigned int residual; 680 int last; 681 unsigned long flags; 682 683 spin_lock_irqsave(&bp->lock, flags); 684 685 if (++rxintcount < 10) { 686 XXDEBUG(("bmac_rxdma_intr\n")); 687 } 688 689 last = -1; 690 i = bp->rx_empty; 691 692 while (1) { 693 cp = &bp->rx_cmds[i]; 694 stat = le16_to_cpu(cp->xfer_status); 695 residual = le16_to_cpu(cp->res_count); 696 if ((stat & ACTIVE) == 0) 697 break; 698 nb = RX_BUFLEN - residual - 2; 699 if (nb < (ETHERMINPACKET - ETHERCRC)) { 700 skb = NULL; 701 dev->stats.rx_length_errors++; 702 dev->stats.rx_errors++; 703 } else { 704 skb = bp->rx_bufs[i]; 705 bp->rx_bufs[i] = NULL; 706 } 707 if (skb != NULL) { 708 nb -= ETHERCRC; 709 skb_put(skb, nb); 710 skb->protocol = eth_type_trans(skb, dev); 711 netif_rx(skb); 712 ++dev->stats.rx_packets; 713 dev->stats.rx_bytes += nb; 714 } else { 715 ++dev->stats.rx_dropped; 716 } 717 if ((skb = bp->rx_bufs[i]) == NULL) { 718 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); 719 if (skb != NULL) 720 skb_reserve(bp->rx_bufs[i], 2); 721 } 722 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); 723 cp->res_count = cpu_to_le16(0); 724 cp->xfer_status = cpu_to_le16(0); 725 last = i; 726 if (++i >= N_RX_RING) i = 0; 727 } 728 729 if (last != -1) { 730 bp->rx_fill = last; 731 bp->rx_empty = i; 732 } 733 734 dbdma_continue(rd); 735 spin_unlock_irqrestore(&bp->lock, flags); 736 737 if (rxintcount < 10) { 738 XXDEBUG(("bmac_rxdma_intr done\n")); 739 } 740 return IRQ_HANDLED; 741 } 742 743 static int txintcount; 744 745 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) 746 { 747 struct net_device *dev = (struct net_device *) dev_id; 748 struct bmac_data *bp = netdev_priv(dev); 749 volatile struct dbdma_cmd *cp; 750 int stat; 751 unsigned long flags; 752 753 spin_lock_irqsave(&bp->lock, flags); 754 755 if (txintcount++ < 10) { 756 XXDEBUG(("bmac_txdma_intr\n")); 757 } 758 759 /* del_timer(&bp->tx_timeout); */ 760 /* bp->timeout_active = 0; */ 761 762 while (1) { 763 cp = &bp->tx_cmds[bp->tx_empty]; 764 stat = le16_to_cpu(cp->xfer_status); 765 if (txintcount < 10) { 766 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat)); 767 } 768 if (!(stat & ACTIVE)) { 769 /* 770 * status field might not have been filled by DBDMA 771 */ 772 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) 773 break; 774 } 775 776 if (bp->tx_bufs[bp->tx_empty]) { 777 ++dev->stats.tx_packets; 778 dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]); 779 } 780 bp->tx_bufs[bp->tx_empty] = NULL; 781 bp->tx_fullup = 0; 782 netif_wake_queue(dev); 783 if (++bp->tx_empty >= N_TX_RING) 784 bp->tx_empty = 0; 785 if (bp->tx_empty == bp->tx_fill) 786 break; 787 } 788 789 spin_unlock_irqrestore(&bp->lock, flags); 790 791 if (txintcount < 10) { 792 XXDEBUG(("bmac_txdma_intr done->bmac_start\n")); 793 } 794 795 bmac_start(dev); 796 return IRQ_HANDLED; 797 } 798 799 #ifndef SUNHME_MULTICAST 800 /* Real fast bit-reversal algorithm, 6-bit values */ 801 static int reverse6[64] = { 802 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, 803 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, 804 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, 805 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, 806 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, 807 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, 808 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, 809 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f 810 }; 811 812 static unsigned int 813 crc416(unsigned int curval, unsigned short nxtval) 814 { 815 unsigned int counter, cur = curval, next = nxtval; 816 int high_crc_set, low_data_set; 817 818 /* Swap bytes */ 819 next = ((next & 0x00FF) << 8) | (next >> 8); 820 821 /* Compute bit-by-bit */ 822 for (counter = 0; counter < 16; ++counter) { 823 /* is high CRC bit set? */ 824 if ((cur & 0x80000000) == 0) high_crc_set = 0; 825 else high_crc_set = 1; 826 827 cur = cur << 1; 828 829 if ((next & 0x0001) == 0) low_data_set = 0; 830 else low_data_set = 1; 831 832 next = next >> 1; 833 834 /* do the XOR */ 835 if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE; 836 } 837 return cur; 838 } 839 840 static unsigned int 841 bmac_crc(unsigned short *address) 842 { 843 unsigned int newcrc; 844 845 XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2])); 846 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ 847 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ 848 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ 849 850 return(newcrc); 851 } 852 853 /* 854 * Add requested mcast addr to BMac's hash table filter. 855 * 856 */ 857 858 static void 859 bmac_addhash(struct bmac_data *bp, unsigned char *addr) 860 { 861 unsigned int crc; 862 unsigned short mask; 863 864 if (!(*addr)) return; 865 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ 866 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ 867 if (bp->hash_use_count[crc]++) return; /* This bit is already set */ 868 mask = crc % 16; 869 mask = (unsigned char)1 << mask; 870 bp->hash_use_count[crc/16] |= mask; 871 } 872 873 static void 874 bmac_removehash(struct bmac_data *bp, unsigned char *addr) 875 { 876 unsigned int crc; 877 unsigned char mask; 878 879 /* Now, delete the address from the filter copy, as indicated */ 880 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ 881 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ 882 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ 883 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ 884 mask = crc % 16; 885 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */ 886 bp->hash_table_mask[crc/16] &= mask; 887 } 888 889 /* 890 * Sync the adapter with the software copy of the multicast mask 891 * (logical address filter). 892 */ 893 894 static void 895 bmac_rx_off(struct net_device *dev) 896 { 897 unsigned short rx_cfg; 898 899 rx_cfg = bmread(dev, RXCFG); 900 rx_cfg &= ~RxMACEnable; 901 bmwrite(dev, RXCFG, rx_cfg); 902 do { 903 rx_cfg = bmread(dev, RXCFG); 904 } while (rx_cfg & RxMACEnable); 905 } 906 907 unsigned short 908 bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable) 909 { 910 unsigned short rx_cfg; 911 912 rx_cfg = bmread(dev, RXCFG); 913 rx_cfg |= RxMACEnable; 914 if (hash_enable) rx_cfg |= RxHashFilterEnable; 915 else rx_cfg &= ~RxHashFilterEnable; 916 if (promisc_enable) rx_cfg |= RxPromiscEnable; 917 else rx_cfg &= ~RxPromiscEnable; 918 bmwrite(dev, RXRST, RxResetValue); 919 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ 920 bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); 921 bmwrite(dev, RXCFG, rx_cfg ); 922 return rx_cfg; 923 } 924 925 static void 926 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) 927 { 928 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ 929 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ 930 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ 931 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ 932 } 933 934 #if 0 935 static void 936 bmac_add_multi(struct net_device *dev, 937 struct bmac_data *bp, unsigned char *addr) 938 { 939 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */ 940 bmac_addhash(bp, addr); 941 bmac_rx_off(dev); 942 bmac_update_hash_table_mask(dev, bp); 943 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); 944 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */ 945 } 946 947 static void 948 bmac_remove_multi(struct net_device *dev, 949 struct bmac_data *bp, unsigned char *addr) 950 { 951 bmac_removehash(bp, addr); 952 bmac_rx_off(dev); 953 bmac_update_hash_table_mask(dev, bp); 954 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); 955 } 956 #endif 957 958 /* Set or clear the multicast filter for this adaptor. 959 num_addrs == -1 Promiscuous mode, receive all packets 960 num_addrs == 0 Normal mode, clear multicast list 961 num_addrs > 0 Multicast mode, receive normal and MC packets, and do 962 best-effort filtering. 963 */ 964 static void bmac_set_multicast(struct net_device *dev) 965 { 966 struct netdev_hw_addr *ha; 967 struct bmac_data *bp = netdev_priv(dev); 968 int num_addrs = netdev_mc_count(dev); 969 unsigned short rx_cfg; 970 int i; 971 972 if (bp->sleeping) 973 return; 974 975 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs)); 976 977 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { 978 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; 979 bmac_update_hash_table_mask(dev, bp); 980 rx_cfg = bmac_rx_on(dev, 1, 0); 981 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n")); 982 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) { 983 rx_cfg = bmread(dev, RXCFG); 984 rx_cfg |= RxPromiscEnable; 985 bmwrite(dev, RXCFG, rx_cfg); 986 rx_cfg = bmac_rx_on(dev, 0, 1); 987 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg)); 988 } else { 989 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; 990 for (i=0; i<64; i++) bp->hash_use_count[i] = 0; 991 if (num_addrs == 0) { 992 rx_cfg = bmac_rx_on(dev, 0, 0); 993 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); 994 } else { 995 netdev_for_each_mc_addr(ha, dev) 996 bmac_addhash(bp, ha->addr); 997 bmac_update_hash_table_mask(dev, bp); 998 rx_cfg = bmac_rx_on(dev, 1, 0); 999 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); 1000 } 1001 } 1002 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */ 1003 } 1004 #else /* ifdef SUNHME_MULTICAST */ 1005 1006 /* The version of set_multicast below was lifted from sunhme.c */ 1007 1008 static void bmac_set_multicast(struct net_device *dev) 1009 { 1010 struct netdev_hw_addr *ha; 1011 unsigned short rx_cfg; 1012 u32 crc; 1013 1014 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { 1015 bmwrite(dev, BHASH0, 0xffff); 1016 bmwrite(dev, BHASH1, 0xffff); 1017 bmwrite(dev, BHASH2, 0xffff); 1018 bmwrite(dev, BHASH3, 0xffff); 1019 } else if(dev->flags & IFF_PROMISC) { 1020 rx_cfg = bmread(dev, RXCFG); 1021 rx_cfg |= RxPromiscEnable; 1022 bmwrite(dev, RXCFG, rx_cfg); 1023 } else { 1024 u16 hash_table[4] = { 0 }; 1025 1026 rx_cfg = bmread(dev, RXCFG); 1027 rx_cfg &= ~RxPromiscEnable; 1028 bmwrite(dev, RXCFG, rx_cfg); 1029 1030 netdev_for_each_mc_addr(ha, dev) { 1031 crc = ether_crc_le(6, ha->addr); 1032 crc >>= 26; 1033 hash_table[crc >> 4] |= 1 << (crc & 0xf); 1034 } 1035 bmwrite(dev, BHASH0, hash_table[0]); 1036 bmwrite(dev, BHASH1, hash_table[1]); 1037 bmwrite(dev, BHASH2, hash_table[2]); 1038 bmwrite(dev, BHASH3, hash_table[3]); 1039 } 1040 } 1041 #endif /* SUNHME_MULTICAST */ 1042 1043 static int miscintcount; 1044 1045 static irqreturn_t bmac_misc_intr(int irq, void *dev_id) 1046 { 1047 struct net_device *dev = (struct net_device *) dev_id; 1048 unsigned int status = bmread(dev, STATUS); 1049 if (miscintcount++ < 10) { 1050 XXDEBUG(("bmac_misc_intr\n")); 1051 } 1052 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ 1053 /* bmac_txdma_intr_inner(irq, dev_id); */ 1054 /* if (status & FrameReceived) dev->stats.rx_dropped++; */ 1055 if (status & RxErrorMask) dev->stats.rx_errors++; 1056 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; 1057 if (status & RxLenCntExp) dev->stats.rx_length_errors++; 1058 if (status & RxOverFlow) dev->stats.rx_over_errors++; 1059 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; 1060 1061 /* if (status & FrameSent) dev->stats.tx_dropped++; */ 1062 if (status & TxErrorMask) dev->stats.tx_errors++; 1063 if (status & TxUnderrun) dev->stats.tx_fifo_errors++; 1064 if (status & TxNormalCollExp) dev->stats.collisions++; 1065 return IRQ_HANDLED; 1066 } 1067 1068 /* 1069 * Procedure for reading EEPROM 1070 */ 1071 #define SROMAddressLength 5 1072 #define DataInOn 0x0008 1073 #define DataInOff 0x0000 1074 #define Clk 0x0002 1075 #define ChipSelect 0x0001 1076 #define SDIShiftCount 3 1077 #define SD0ShiftCount 2 1078 #define DelayValue 1000 /* number of microseconds */ 1079 #define SROMStartOffset 10 /* this is in words */ 1080 #define SROMReadCount 3 /* number of words to read from SROM */ 1081 #define SROMAddressBits 6 1082 #define EnetAddressOffset 20 1083 1084 static unsigned char 1085 bmac_clock_out_bit(struct net_device *dev) 1086 { 1087 unsigned short data; 1088 unsigned short val; 1089 1090 bmwrite(dev, SROMCSR, ChipSelect | Clk); 1091 udelay(DelayValue); 1092 1093 data = bmread(dev, SROMCSR); 1094 udelay(DelayValue); 1095 val = (data >> SD0ShiftCount) & 1; 1096 1097 bmwrite(dev, SROMCSR, ChipSelect); 1098 udelay(DelayValue); 1099 1100 return val; 1101 } 1102 1103 static void 1104 bmac_clock_in_bit(struct net_device *dev, unsigned int val) 1105 { 1106 unsigned short data; 1107 1108 if (val != 0 && val != 1) return; 1109 1110 data = (val << SDIShiftCount); 1111 bmwrite(dev, SROMCSR, data | ChipSelect ); 1112 udelay(DelayValue); 1113 1114 bmwrite(dev, SROMCSR, data | ChipSelect | Clk ); 1115 udelay(DelayValue); 1116 1117 bmwrite(dev, SROMCSR, data | ChipSelect); 1118 udelay(DelayValue); 1119 } 1120 1121 static void 1122 reset_and_select_srom(struct net_device *dev) 1123 { 1124 /* first reset */ 1125 bmwrite(dev, SROMCSR, 0); 1126 udelay(DelayValue); 1127 1128 /* send it the read command (110) */ 1129 bmac_clock_in_bit(dev, 1); 1130 bmac_clock_in_bit(dev, 1); 1131 bmac_clock_in_bit(dev, 0); 1132 } 1133 1134 static unsigned short 1135 read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len) 1136 { 1137 unsigned short data, val; 1138 int i; 1139 1140 /* send out the address we want to read from */ 1141 for (i = 0; i < addr_len; i++) { 1142 val = addr >> (addr_len-i-1); 1143 bmac_clock_in_bit(dev, val & 1); 1144 } 1145 1146 /* Now read in the 16-bit data */ 1147 data = 0; 1148 for (i = 0; i < 16; i++) { 1149 val = bmac_clock_out_bit(dev); 1150 data <<= 1; 1151 data |= val; 1152 } 1153 bmwrite(dev, SROMCSR, 0); 1154 1155 return data; 1156 } 1157 1158 /* 1159 * It looks like Cogent and SMC use different methods for calculating 1160 * checksums. What a pain.. 1161 */ 1162 1163 static int 1164 bmac_verify_checksum(struct net_device *dev) 1165 { 1166 unsigned short data, storedCS; 1167 1168 reset_and_select_srom(dev); 1169 data = read_srom(dev, 3, SROMAddressBits); 1170 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00); 1171 1172 return 0; 1173 } 1174 1175 1176 static void 1177 bmac_get_station_address(struct net_device *dev, unsigned char *ea) 1178 { 1179 int i; 1180 unsigned short data; 1181 1182 for (i = 0; i < 3; i++) 1183 { 1184 reset_and_select_srom(dev); 1185 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); 1186 ea[2*i] = bitrev8(data & 0x0ff); 1187 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff); 1188 } 1189 } 1190 1191 static void bmac_reset_and_enable(struct net_device *dev) 1192 { 1193 struct bmac_data *bp = netdev_priv(dev); 1194 unsigned long flags; 1195 struct sk_buff *skb; 1196 unsigned char *data; 1197 1198 spin_lock_irqsave(&bp->lock, flags); 1199 bmac_enable_and_reset_chip(dev); 1200 bmac_init_tx_ring(bp); 1201 bmac_init_rx_ring(dev); 1202 bmac_init_chip(dev); 1203 bmac_start_chip(dev); 1204 bmwrite(dev, INTDISABLE, EnableNormal); 1205 bp->sleeping = 0; 1206 1207 /* 1208 * It seems that the bmac can't receive until it's transmitted 1209 * a packet. So we give it a dummy packet to transmit. 1210 */ 1211 skb = netdev_alloc_skb(dev, ETHERMINPACKET); 1212 if (skb != NULL) { 1213 data = skb_put_zero(skb, ETHERMINPACKET); 1214 memcpy(data, dev->dev_addr, ETH_ALEN); 1215 memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN); 1216 bmac_transmit_packet(skb, dev); 1217 } 1218 spin_unlock_irqrestore(&bp->lock, flags); 1219 } 1220 1221 static const struct ethtool_ops bmac_ethtool_ops = { 1222 .get_link = ethtool_op_get_link, 1223 }; 1224 1225 static const struct net_device_ops bmac_netdev_ops = { 1226 .ndo_open = bmac_open, 1227 .ndo_stop = bmac_close, 1228 .ndo_start_xmit = bmac_output, 1229 .ndo_set_rx_mode = bmac_set_multicast, 1230 .ndo_set_mac_address = bmac_set_address, 1231 .ndo_validate_addr = eth_validate_addr, 1232 }; 1233 1234 static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) 1235 { 1236 int j, rev, ret; 1237 struct bmac_data *bp; 1238 const unsigned char *prop_addr; 1239 unsigned char addr[6]; 1240 u8 macaddr[6]; 1241 struct net_device *dev; 1242 int is_bmac_plus = ((int)match->data) != 0; 1243 1244 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { 1245 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); 1246 return -ENODEV; 1247 } 1248 prop_addr = of_get_property(macio_get_of_node(mdev), 1249 "mac-address", NULL); 1250 if (prop_addr == NULL) { 1251 prop_addr = of_get_property(macio_get_of_node(mdev), 1252 "local-mac-address", NULL); 1253 if (prop_addr == NULL) { 1254 printk(KERN_ERR "BMAC: Can't get mac-address\n"); 1255 return -ENODEV; 1256 } 1257 } 1258 memcpy(addr, prop_addr, sizeof(addr)); 1259 1260 dev = alloc_etherdev(PRIV_BYTES); 1261 if (!dev) 1262 return -ENOMEM; 1263 1264 bp = netdev_priv(dev); 1265 SET_NETDEV_DEV(dev, &mdev->ofdev.dev); 1266 macio_set_drvdata(mdev, dev); 1267 1268 bp->mdev = mdev; 1269 spin_lock_init(&bp->lock); 1270 1271 if (macio_request_resources(mdev, "bmac")) { 1272 printk(KERN_ERR "BMAC: can't request IO resource !\n"); 1273 goto out_free; 1274 } 1275 1276 dev->base_addr = (unsigned long) 1277 ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0)); 1278 if (dev->base_addr == 0) 1279 goto out_release; 1280 1281 dev->irq = macio_irq(mdev, 0); 1282 1283 bmac_enable_and_reset_chip(dev); 1284 bmwrite(dev, INTDISABLE, DisableAll); 1285 1286 rev = addr[0] == 0 && addr[1] == 0xA0; 1287 for (j = 0; j < 6; ++j) 1288 macaddr[j] = rev ? bitrev8(addr[j]): addr[j]; 1289 1290 eth_hw_addr_set(dev, macaddr); 1291 1292 /* Enable chip without interrupts for now */ 1293 bmac_enable_and_reset_chip(dev); 1294 bmwrite(dev, INTDISABLE, DisableAll); 1295 1296 dev->netdev_ops = &bmac_netdev_ops; 1297 dev->ethtool_ops = &bmac_ethtool_ops; 1298 1299 bmac_get_station_address(dev, addr); 1300 if (bmac_verify_checksum(dev) != 0) 1301 goto err_out_iounmap; 1302 1303 bp->is_bmac_plus = is_bmac_plus; 1304 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); 1305 if (!bp->tx_dma) 1306 goto err_out_iounmap; 1307 bp->tx_dma_intr = macio_irq(mdev, 1); 1308 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); 1309 if (!bp->rx_dma) 1310 goto err_out_iounmap_tx; 1311 bp->rx_dma_intr = macio_irq(mdev, 2); 1312 1313 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); 1314 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; 1315 1316 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); 1317 skb_queue_head_init(bp->queue); 1318 1319 timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0); 1320 1321 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev); 1322 if (ret) { 1323 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq); 1324 goto err_out_iounmap_rx; 1325 } 1326 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); 1327 if (ret) { 1328 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); 1329 goto err_out_irq0; 1330 } 1331 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); 1332 if (ret) { 1333 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); 1334 goto err_out_irq1; 1335 } 1336 1337 /* Mask chip interrupts and disable chip, will be 1338 * re-enabled on open() 1339 */ 1340 disable_irq(dev->irq); 1341 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1342 1343 if (register_netdev(dev) != 0) { 1344 printk(KERN_ERR "BMAC: Ethernet registration failed\n"); 1345 goto err_out_irq2; 1346 } 1347 1348 printk(KERN_INFO "%s: BMAC%s at %pM", 1349 dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr); 1350 XXDEBUG((", base_addr=%#0lx", dev->base_addr)); 1351 printk("\n"); 1352 1353 return 0; 1354 1355 err_out_irq2: 1356 free_irq(bp->rx_dma_intr, dev); 1357 err_out_irq1: 1358 free_irq(bp->tx_dma_intr, dev); 1359 err_out_irq0: 1360 free_irq(dev->irq, dev); 1361 err_out_iounmap_rx: 1362 iounmap(bp->rx_dma); 1363 err_out_iounmap_tx: 1364 iounmap(bp->tx_dma); 1365 err_out_iounmap: 1366 iounmap((void __iomem *)dev->base_addr); 1367 out_release: 1368 macio_release_resources(mdev); 1369 out_free: 1370 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1371 free_netdev(dev); 1372 1373 return -ENODEV; 1374 } 1375 1376 static int bmac_open(struct net_device *dev) 1377 { 1378 struct bmac_data *bp = netdev_priv(dev); 1379 /* XXDEBUG(("bmac: enter open\n")); */ 1380 /* reset the chip */ 1381 bp->opened = 1; 1382 bmac_reset_and_enable(dev); 1383 enable_irq(dev->irq); 1384 return 0; 1385 } 1386 1387 static int bmac_close(struct net_device *dev) 1388 { 1389 struct bmac_data *bp = netdev_priv(dev); 1390 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 1391 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 1392 unsigned short config; 1393 int i; 1394 1395 bp->sleeping = 1; 1396 1397 /* disable rx and tx */ 1398 config = bmread(dev, RXCFG); 1399 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 1400 1401 config = bmread(dev, TXCFG); 1402 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 1403 1404 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ 1405 1406 /* disable rx and tx dma */ 1407 rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 1408 td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 1409 1410 /* free some skb's */ 1411 XXDEBUG(("bmac: free rx bufs\n")); 1412 for (i=0; i<N_RX_RING; i++) { 1413 if (bp->rx_bufs[i] != NULL) { 1414 dev_kfree_skb(bp->rx_bufs[i]); 1415 bp->rx_bufs[i] = NULL; 1416 } 1417 } 1418 XXDEBUG(("bmac: free tx bufs\n")); 1419 for (i = 0; i<N_TX_RING; i++) { 1420 if (bp->tx_bufs[i] != NULL) { 1421 dev_kfree_skb(bp->tx_bufs[i]); 1422 bp->tx_bufs[i] = NULL; 1423 } 1424 } 1425 XXDEBUG(("bmac: all bufs freed\n")); 1426 1427 bp->opened = 0; 1428 disable_irq(dev->irq); 1429 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1430 1431 return 0; 1432 } 1433 1434 static void 1435 bmac_start(struct net_device *dev) 1436 { 1437 struct bmac_data *bp = netdev_priv(dev); 1438 int i; 1439 struct sk_buff *skb; 1440 unsigned long flags; 1441 1442 if (bp->sleeping) 1443 return; 1444 1445 spin_lock_irqsave(&bp->lock, flags); 1446 while (1) { 1447 i = bp->tx_fill + 1; 1448 if (i >= N_TX_RING) 1449 i = 0; 1450 if (i == bp->tx_empty) 1451 break; 1452 skb = skb_dequeue(bp->queue); 1453 if (skb == NULL) 1454 break; 1455 bmac_transmit_packet(skb, dev); 1456 } 1457 spin_unlock_irqrestore(&bp->lock, flags); 1458 } 1459 1460 static netdev_tx_t 1461 bmac_output(struct sk_buff *skb, struct net_device *dev) 1462 { 1463 struct bmac_data *bp = netdev_priv(dev); 1464 skb_queue_tail(bp->queue, skb); 1465 bmac_start(dev); 1466 return NETDEV_TX_OK; 1467 } 1468 1469 static void bmac_tx_timeout(struct timer_list *t) 1470 { 1471 struct bmac_data *bp = from_timer(bp, t, tx_timeout); 1472 struct net_device *dev = macio_get_drvdata(bp->mdev); 1473 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 1474 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 1475 volatile struct dbdma_cmd *cp; 1476 unsigned long flags; 1477 unsigned short config, oldConfig; 1478 int i; 1479 1480 XXDEBUG(("bmac: tx_timeout called\n")); 1481 spin_lock_irqsave(&bp->lock, flags); 1482 bp->timeout_active = 0; 1483 1484 /* update various counters */ 1485 /* bmac_handle_misc_intrs(bp, 0); */ 1486 1487 cp = &bp->tx_cmds[bp->tx_empty]; 1488 /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */ 1489 /* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */ 1490 /* mb->pr, mb->xmtfs, mb->fifofc)); */ 1491 1492 /* turn off both tx and rx and reset the chip */ 1493 config = bmread(dev, RXCFG); 1494 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 1495 config = bmread(dev, TXCFG); 1496 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 1497 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); 1498 printk(KERN_ERR "bmac: transmit timeout - resetting\n"); 1499 bmac_enable_and_reset_chip(dev); 1500 1501 /* restart rx dma */ 1502 cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); 1503 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); 1504 out_le16(&cp->xfer_status, 0); 1505 out_le32(&rd->cmdptr, virt_to_bus(cp)); 1506 out_le32(&rd->control, DBDMA_SET(RUN|WAKE)); 1507 1508 /* fix up the transmit side */ 1509 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", 1510 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); 1511 i = bp->tx_empty; 1512 ++dev->stats.tx_errors; 1513 if (i != bp->tx_fill) { 1514 dev_kfree_skb(bp->tx_bufs[i]); 1515 bp->tx_bufs[i] = NULL; 1516 if (++i >= N_TX_RING) i = 0; 1517 bp->tx_empty = i; 1518 } 1519 bp->tx_fullup = 0; 1520 netif_wake_queue(dev); 1521 if (i != bp->tx_fill) { 1522 cp = &bp->tx_cmds[i]; 1523 out_le16(&cp->xfer_status, 0); 1524 out_le16(&cp->command, OUTPUT_LAST); 1525 out_le32(&td->cmdptr, virt_to_bus(cp)); 1526 out_le32(&td->control, DBDMA_SET(RUN)); 1527 /* bmac_set_timeout(dev); */ 1528 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i)); 1529 } 1530 1531 /* turn it back on */ 1532 oldConfig = bmread(dev, RXCFG); 1533 bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); 1534 oldConfig = bmread(dev, TXCFG); 1535 bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); 1536 1537 spin_unlock_irqrestore(&bp->lock, flags); 1538 } 1539 1540 #if 0 1541 static void dump_dbdma(volatile struct dbdma_cmd *cp,int count) 1542 { 1543 int i,*ip; 1544 1545 for (i=0;i< count;i++) { 1546 ip = (int*)(cp+i); 1547 1548 printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n", 1549 le32_to_cpup(ip+0), 1550 le32_to_cpup(ip+1), 1551 le32_to_cpup(ip+2), 1552 le32_to_cpup(ip+3)); 1553 } 1554 1555 } 1556 #endif 1557 1558 #if 0 1559 static int 1560 bmac_proc_info(char *buffer, char **start, off_t offset, int length) 1561 { 1562 int len = 0; 1563 off_t pos = 0; 1564 off_t begin = 0; 1565 int i; 1566 1567 if (bmac_devs == NULL) 1568 return -ENOSYS; 1569 1570 len += sprintf(buffer, "BMAC counters & registers\n"); 1571 1572 for (i = 0; i<N_REG_ENTRIES; i++) { 1573 len += sprintf(buffer + len, "%s: %#08x\n", 1574 reg_entries[i].name, 1575 bmread(bmac_devs, reg_entries[i].reg_offset)); 1576 pos = begin + len; 1577 1578 if (pos < offset) { 1579 len = 0; 1580 begin = pos; 1581 } 1582 1583 if (pos > offset+length) break; 1584 } 1585 1586 *start = buffer + (offset - begin); 1587 len -= (offset - begin); 1588 1589 if (len > length) len = length; 1590 1591 return len; 1592 } 1593 #endif 1594 1595 static int bmac_remove(struct macio_dev *mdev) 1596 { 1597 struct net_device *dev = macio_get_drvdata(mdev); 1598 struct bmac_data *bp = netdev_priv(dev); 1599 1600 unregister_netdev(dev); 1601 1602 free_irq(dev->irq, dev); 1603 free_irq(bp->tx_dma_intr, dev); 1604 free_irq(bp->rx_dma_intr, dev); 1605 1606 iounmap((void __iomem *)dev->base_addr); 1607 iounmap(bp->tx_dma); 1608 iounmap(bp->rx_dma); 1609 1610 macio_release_resources(mdev); 1611 1612 free_netdev(dev); 1613 1614 return 0; 1615 } 1616 1617 static const struct of_device_id bmac_match[] = 1618 { 1619 { 1620 .name = "bmac", 1621 .data = (void *)0, 1622 }, 1623 { 1624 .type = "network", 1625 .compatible = "bmac+", 1626 .data = (void *)1, 1627 }, 1628 {}, 1629 }; 1630 MODULE_DEVICE_TABLE (of, bmac_match); 1631 1632 static struct macio_driver bmac_driver = 1633 { 1634 .driver = { 1635 .name = "bmac", 1636 .owner = THIS_MODULE, 1637 .of_match_table = bmac_match, 1638 }, 1639 .probe = bmac_probe, 1640 .remove = bmac_remove, 1641 #ifdef CONFIG_PM 1642 .suspend = bmac_suspend, 1643 .resume = bmac_resume, 1644 #endif 1645 }; 1646 1647 1648 static int __init bmac_init(void) 1649 { 1650 if (bmac_emergency_rxbuf == NULL) { 1651 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL); 1652 if (bmac_emergency_rxbuf == NULL) 1653 return -ENOMEM; 1654 } 1655 1656 return macio_register_driver(&bmac_driver); 1657 } 1658 1659 static void __exit bmac_exit(void) 1660 { 1661 macio_unregister_driver(&bmac_driver); 1662 1663 kfree(bmac_emergency_rxbuf); 1664 bmac_emergency_rxbuf = NULL; 1665 } 1666 1667 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras"); 1668 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver."); 1669 MODULE_LICENSE("GPL"); 1670 1671 module_init(bmac_init); 1672 module_exit(bmac_exit); 1673