/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
 *
 * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/auxio.h>
#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/idprom.h>
#include <asm/io.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>

#include "sunbmac.h"

#define DRV_NAME	"sunbmac"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"August 26, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
MODULE_LICENSE("GPL");

/* Debug printk wrappers; compiled out unless the matching
 * DEBUG_* symbol below is defined.
 */
#undef DEBUG_PROBE
#undef DEBUG_TX
#undef DEBUG_IRQ

#ifdef DEBUG_PROBE
#define DP(x)  printk x
#else
#define DP(x)
#endif

#ifdef DEBUG_TX
#define DTX(x)  printk x
#else
#define DTX(x)
#endif

#ifdef DEBUG_IRQ
#define DIRQ(x)  printk x
#else
#define DIRQ(x)
#endif

#define DEFAULT_JAMSIZE	4 /* Toe jam */

#define QEC_RESET_TRIES 200

/* Assert the QEC global reset bit and poll until the chip clears it.
 * Returns 0 on success, -1 if the reset bit never self-clears within
 * QEC_RESET_TRIES polls.
 */
static int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
	return -1;
}

/* Program the QEC global registers: burst mode, packet size, and the
 * split of QEC local memory between the TX and RX FIFOs.
 */
static void qec_init(struct bigmac *bp)
{
	struct platform_device *qec_op = bp->qec_op;
	void __iomem *gregs = bp->gregs;
	u8 bsizes = bp->bigmac_bursts;
	u32 regval;

	/* 64byte bursts do not work at the moment, do
	 * not even try to enable them. -DaveM
	 */
	if (bsizes & DMA_BURST32)
		regval = GLOB_CTRL_B32;
	else
		regval = GLOB_CTRL_B16;
	sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
	sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);

	/* All of memsize is given to bigmac. */
	sbus_writel(resource_size(&qec_op->resource[1]),
		    gregs + GLOB_MSIZE);

	/* Half to the transmitter, half to the receiver.
*/
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_TSIZE);
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_RSIZE);
}

#define TX_RESET_TRIES	32
#define RX_RESET_TRIES	32

/* Reset the BigMAC transmitter by zeroing TXCFG and polling until the
 * writable bits read back as zero.  Logs an error if the poll times out.
 */
static void bigmac_tx_reset(void __iomem *bregs)
{
	int tries = TX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_TXCFG);

	/* The fifo threshold bit is read-only and does
	 * not clear. -DaveM
	 */
	while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
	       --tries != 0)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
		printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_TXCFG));
	}
}

/* Reset the BigMAC receiver by zeroing RXCFG and polling until it
 * reads back as zero.  Logs an error if the poll times out.
 */
static void bigmac_rx_reset(void __iomem *bregs)
{
	int tries = RX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_RXCFG);
	while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
		printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_RXCFG));
	}
}

/* Reset the transmitter and receiver.
*/
static void bigmac_stop(struct bigmac *bp)
{
	bigmac_tx_reset(bp->bregs);
	bigmac_rx_reset(bp->bregs);
}

/* Fold the chip's clear-on-read error counters into the generic
 * net_device_stats, zeroing each hardware counter after reading it.
 */
static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
	struct net_device_stats *stats = &bp->dev->stats;

	stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
	sbus_writel(0, bregs + BMAC_RCRCECTR);

	stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
	sbus_writel(0, bregs + BMAC_UNALECTR);

	stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
	sbus_writel(0, bregs + BMAC_GLECTR);

	stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);

	stats->collisions +=
		(sbus_readl(bregs + BMAC_EXCTR) +
		 sbus_readl(bregs + BMAC_LTCTR));
	sbus_writel(0, bregs + BMAC_EXCTR);
	sbus_writel(0, bregs + BMAC_LTCTR);
}

/* Release every sk_buff still attached to the RX and TX rings. */
static void bigmac_clean_rings(struct bigmac *bp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (bp->rx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->rx_skbs[i]);
			bp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (bp->tx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->tx_skbs[i]);
			bp->tx_skbs[i] = NULL;
		}
	}
}

/* (Re)populate the descriptor rings: fresh skbs mapped for DMA on the
 * RX ring, all TX descriptors cleared.  Uses GFP_ATOMIC when called
 * from IRQ context.
 */
static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
	struct bmac_init_block *bb = bp->bmac_block;
	int i;
	gfp_t gfp_flags = GFP_KERNEL;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;

	/* Free any skippy bufs left around in the rings. */
	bigmac_clean_rings(bp);

	/* Now get new skbufs for the receive ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
		if (!skb)
			continue;

		bp->rx_skbs[i] = skb;

		/* Because we reserve afterwards. */
		skb_put(skb, ETH_FRAME_LEN);
		/* NOTE(review): the 34-byte reserve matches the
		 * RX_BUF_ALLOC_SIZE - 34 mapping length used below and in
		 * bigmac_rx(); presumably chosen for header alignment —
		 * confirm against sunbmac.h before changing.
		 */
		skb_reserve(skb, 34);

		bb->be_rxd[i].rx_addr =
			dma_map_single(&bp->bigmac_op->dev,
				       skb->data,
				       RX_BUF_ALLOC_SIZE - 34,
				       DMA_FROM_DEVICE);
		bb->be_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
}

#define MGMT_CLKON  (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)

/* Clock the MDIO management interface idle by toggling DCLOCK 20 times
 * with both MDIO lines held high.
 */
static void idle_transceiver(void __iomem *tregs)
{
	int i = 20;

	while (i--) {
		sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	}
}

/* Bit-bang one MDIO bit out to the transceiver: present the data bit,
 * then raise DCLOCK.  The bit position depends on whether the internal
 * or external PHY is in use.
 */
static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
{
	if (bp->tcvr_type == internal) {
		bit = (bit & 1) << 3;
		sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		bit = (bit & 1) << 2;
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
	}
}

/* Bit-bang one MDIO bit in from the transceiver: clock first, then
 * sample the data line.  Returns the sampled bit (0 or 1).
 */
static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
	} else {
		printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
	}
	return retval;
}

/* Variant of read_tcvr_bit with the opposite sample/clock ordering:
 * sample the data line first, then pulse DCLOCK.  Used for the
 * external PHY read path in bigmac_tcvr_read().
 */
static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
	}
	return retval;
}

/* Shift the low 5 bits of 'byte' out MSB-first over MDIO; used for the
 * PHY address and register fields of a management frame.
 */
static void put_tcvr_byte(struct bigmac *bp,
			  void __iomem *tregs,
			  unsigned int byte)
{
	int shift = 4;

	do {
		write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
		shift -= 1;
	} while (shift >= 0);
}

/* Write a 16-bit value to PHY register 'reg' by bit-banging a full MDIO
 * write frame (preamble idle, start + opcode bits, PHY address,
 * register, turnaround, data MSB-first).
 */
static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
			      int reg, unsigned short val)
{
	int shift;

	reg &= 0xff;
	val &= 0xffff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	shift = 15;
	do {
		write_tcvr_bit(bp, tregs, (val >> shift) & 1);
		shift -= 1;
	} while (shift >= 0);
}

/* Read a 16-bit value from PHY register 'reg' via a bit-banged MDIO
 * read frame.  Returns 0xffff if no transceiver type is known.  The
 * external PHY uses the sample-then-clock helper (read_tcvr_bit2), the
 * internal one uses clock-then-sample (read_tcvr_bit).
 */
static unsigned short bigmac_tcvr_read(struct bigmac *bp,
				       void __iomem *tregs,
				       int reg)
{
	unsigned short retval = 0;

	reg &= 0xff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return 0xffff;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	if (bp->tcvr_type == external) {
		int shift = 15;

		/* Turnaround bits. */
		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit2(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
	} else {
		int shift = 15;

		/* Turnaround bits. */
		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
	}
	return retval;
}

/* Probe which transceiver (internal or external) is present and record
 * the result in bp->tcvr_type, then program the transceiver PAL
 * accordingly.
 */
static void bigmac_tcvr_init(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	u32 mpal;

	idle_transceiver(tregs);
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
		    tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);

	/* Only the bit for the present transceiver (internal or
	 * external) will stick, set them both and see what stays.
	 */
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);
	udelay(20);

	mpal = sbus_readl(tregs + TCVR_MPAL);
	if (mpal & MGMT_PAL_EXT_MDIO) {
		bp->tcvr_type = external;
		sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else if (mpal & MGMT_PAL_INT_MDIO) {
		bp->tcvr_type = internal;
		sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
			      TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else {
		printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
		       "external MDIO available!\n");
		printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
		       sbus_readl(tregs + TCVR_MPAL),
		       sbus_readl(tregs + TCVR_TPAL));
	}
}

static int bigmac_init_hw(struct bigmac *, int);

/* Link auto-sensing step: if we were trying 100baseT, reset the PHY and
 * fall back to 10baseT, returning 0.  Returns -1 once both speeds have
 * been tried.
 */
static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
{
	if (bp->sw_bmcr & BMCR_SPEED100) {
		int timeout;

		/* Reset the PHY. */
		bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
		bp->sw_bmcr = (BMCR_RESET);
		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);

		timeout = 64;
		while (--timeout) {
			bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
			if ((bp->sw_bmcr & BMCR_RESET) == 0)
				break;
			udelay(20);
		}
		if (timeout == 0)
			printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);

		/* Now we try 10baseT. */
		bp->sw_bmcr &= ~(BMCR_SPEED100);
		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
		return 0;
	}

	/* We've tried them all.
*/ 523 return -1; 524 } 525 526 static void bigmac_timer(struct timer_list *t) 527 { 528 struct bigmac *bp = from_timer(bp, t, bigmac_timer); 529 void __iomem *tregs = bp->tregs; 530 int restart_timer = 0; 531 532 bp->timer_ticks++; 533 if (bp->timer_state == ltrywait) { 534 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); 535 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 536 if (bp->sw_bmsr & BMSR_LSTATUS) { 537 printk(KERN_INFO "%s: Link is now up at %s.\n", 538 bp->dev->name, 539 (bp->sw_bmcr & BMCR_SPEED100) ? 540 "100baseT" : "10baseT"); 541 bp->timer_state = asleep; 542 restart_timer = 0; 543 } else { 544 if (bp->timer_ticks >= 4) { 545 int ret; 546 547 ret = try_next_permutation(bp, tregs); 548 if (ret == -1) { 549 printk(KERN_ERR "%s: Link down, cable problem?\n", 550 bp->dev->name); 551 ret = bigmac_init_hw(bp, 0); 552 if (ret) { 553 printk(KERN_ERR "%s: Error, cannot re-init the " 554 "BigMAC.\n", bp->dev->name); 555 } 556 return; 557 } 558 bp->timer_ticks = 0; 559 restart_timer = 1; 560 } else { 561 restart_timer = 1; 562 } 563 } 564 } else { 565 /* Can't happens.... */ 566 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", 567 bp->dev->name); 568 restart_timer = 0; 569 bp->timer_ticks = 0; 570 bp->timer_state = asleep; /* foo on you */ 571 } 572 573 if (restart_timer != 0) { 574 bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ 575 add_timer(&bp->bigmac_timer); 576 } 577 } 578 579 /* Well, really we just force the chip into 100baseT then 580 * 10baseT, each time checking for a link status. 581 */ 582 static void bigmac_begin_auto_negotiation(struct bigmac *bp) 583 { 584 void __iomem *tregs = bp->tregs; 585 int timeout; 586 587 /* Grab new software copies of PHY registers. */ 588 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); 589 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); 590 591 /* Reset the PHY. 
*/
	bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
	bp->sw_bmcr = (BMCR_RESET);
	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);

	/* Wait (up to 64 polls) for the PHY reset bit to self-clear. */
	timeout = 64;
	while (--timeout) {
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
		if ((bp->sw_bmcr & BMCR_RESET) == 0)
			break;
		udelay(20);
	}
	if (timeout == 0)
		printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

	bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);

	/* First we try 100baseT. */
	bp->sw_bmcr |= BMCR_SPEED100;
	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);

	/* Arm the link poll timer (see bigmac_timer). */
	bp->timer_state = ltrywait;
	bp->timer_ticks = 0;
	bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
	add_timer(&bp->bigmac_timer);
}

/* Full chip (re)initialization: reset QEC and BigMAC, rebuild the
 * descriptor rings, program MAC address and filters, and kick off link
 * auto-sensing.  'from_irq' selects atomic allocation for the rings.
 * Returns 0 on success.
 */
static int bigmac_init_hw(struct bigmac *bp, int from_irq)
{
	void __iomem *gregs        = bp->gregs;
	void __iomem *cregs        = bp->creg;
	void __iomem *bregs        = bp->bregs;
	__u32 bblk_dvma = (__u32)bp->bblock_dvma;
	unsigned char *e = &bp->dev->dev_addr[0];

	/* Latch current counters into statistics. */
	bigmac_get_counters(bp, bregs);

	/* Reset QEC. */
	qec_global_reset(gregs);

	/* Init QEC. */
	qec_init(bp);

	/* Alloc and reset the tx/rx descriptor chains. */
	bigmac_init_rings(bp, from_irq);

	/* Initialize the PHY. */
	bigmac_tcvr_init(bp);

	/* Stop transmitter and receiver. */
	bigmac_stop(bp);

	/* Set hardware ethernet address. */
	sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
	sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
	sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);

	/* Clear the hash table until mc upload occurs.
*/
	sbus_writel(0, bregs + BMAC_HTABLE3);
	sbus_writel(0, bregs + BMAC_HTABLE2);
	sbus_writel(0, bregs + BMAC_HTABLE1);
	sbus_writel(0, bregs + BMAC_HTABLE0);

	/* Enable Big Mac hash table filter. */
	sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
		    bregs + BMAC_RXCFG);
	udelay(20);

	/* Ok, configure the Big Mac transmitter. */
	sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);

	/* The HME docs recommend to use the 10LSB of our MAC here. */
	sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
		    bregs + BMAC_RSEED);

	/* Enable the output drivers no matter what. */
	sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
		    bregs + BMAC_XIFCFG);

	/* Tell the QEC where the ring descriptors are. */
	sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
		    cregs + CREG_RXDS);
	sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
		    cregs + CREG_TXDS);

	/* Setup the FIFO pointers into QEC local memory.  RX occupies the
	 * first half of local memory, TX starts at GLOB_RSIZE (see
	 * qec_init's half/half split).
	 */
	sbus_writel(0, cregs + CREG_RXRBUFPTR);
	sbus_writel(0, cregs + CREG_RXWBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXRBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXWBUFPTR);

	/* Tell bigmac what interrupts we don't want to hear about. */
	sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
		    bregs + BMAC_IMASK);

	/* Enable the various other irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(0, cregs + CREG_TIMASK);
	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(0, cregs + CREG_BMASK);

	/* Set jam size to a reasonable default. */
	sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);

	/* Clear collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* Enable transmitter and receiver.
*/
	sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
		    bregs + BMAC_TXCFG);
	sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
		    bregs + BMAC_RXCFG);

	/* Ok, start detecting link speed/duplex. */
	bigmac_begin_auto_negotiation(bp);

	/* Success. */
	return 0;
}

/* Error interrupts get sent here.  Decode the QEC and BigMAC status
 * bits into a log line, then re-initialize the whole chip.
 */
static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
{
	printk(KERN_ERR "bigmac_is_medium_rare: ");
	if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
		if (qec_status & GLOB_STAT_ER)
			printk("QEC_ERROR, ");
		if (qec_status & GLOB_STAT_BM)
			printk("QEC_BMAC_ERROR, ");
	}
	if (bmac_status & CREG_STAT_ERRORS) {
		if (bmac_status & CREG_STAT_BERROR)
			printk("BMAC_ERROR, ");
		if (bmac_status & CREG_STAT_TXDERROR)
			printk("TXD_ERROR, ");
		if (bmac_status & CREG_STAT_TXLERR)
			printk("TX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_TXPERR)
			printk("TX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_TXSERR)
			printk("TX_SBUS_ERROR, ");

		if (bmac_status & CREG_STAT_RXDROP)
			printk("RX_DROP_ERROR, ");

		if (bmac_status & CREG_STAT_RXSMALL)
			printk("RX_SMALL_ERROR, ");
		if (bmac_status & CREG_STAT_RXLERR)
			printk("RX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_RXPERR)
			printk("RX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_RXSERR)
			printk("RX_SBUS_ERROR, ");
	}

	printk(" RESET\n");
	/* from_irq=1: we are called from interrupt context. */
	bigmac_init_hw(bp, 1);
}

/* BigMAC transmit complete service routines.
*/
/* Reclaim completed TX descriptors: free the skbs, unmap DMA, bump
 * counters, and wake the queue if space opened up.  Runs under bp->lock
 * from interrupt context.
 */
static void bigmac_tx(struct bigmac *bp)
{
	struct be_txd *txbase = &bp->bmac_block->be_txd[0];
	struct net_device *dev = bp->dev;
	int elem;

	spin_lock(&bp->lock);

	elem = bp->tx_old;
	DTX(("bigmac_tx: tx_old[%d] ", elem));
	while (elem != bp->tx_new) {
		struct sk_buff *skb;
		struct be_txd *this = &txbase[elem];

		DTX(("this(%p) [flags(%08x)addr(%08x)]",
		     this, this->tx_flags, this->tx_addr));

		/* Descriptor still owned by the chip: stop here. */
		if (this->tx_flags & TXD_OWN)
			break;
		skb = bp->tx_skbs[elem];
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dma_unmap_single(&bp->bigmac_op->dev,
				 this->tx_addr, skb->len,
				 DMA_TO_DEVICE);

		DTX(("skb(%p) ", skb));
		bp->tx_skbs[elem] = NULL;
		dev_kfree_skb_irq(skb);

		elem = NEXT_TX(elem);
	}
	DTX((" DONE, tx_old=%d\n", elem));
	bp->tx_old = elem;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(bp) > 0)
		netif_wake_queue(bp->dev);

	spin_unlock(&bp->lock);
}

/* BigMAC receive complete service routines. */
/* Walk the RX ring processing every descriptor we own.  Large frames
 * are handed up whole and the ring slot refilled with a fresh skb;
 * small frames are copied so the original ring buffer can be reused.
 */
static void bigmac_rx(struct bigmac *bp)
{
	struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
	struct be_rxd *this;
	int elem = bp->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		int len = (flags & RXD_LENGTH); /* FCS not included */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			bp->dev->stats.rx_errors++;
			bp->dev->stats.rx_length_errors++;

	drop_it:
			/* Return it to the BigMAC. */
			bp->dev->stats.rx_dropped++;
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
			goto next;
		}
		skb = bp->rx_skbs[elem];
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can.
*/
			new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_single(&bp->bigmac_op->dev,
					 this->rx_addr,
					 RX_BUF_ALLOC_SIZE - 34,
					 DMA_FROM_DEVICE);
			bp->rx_skbs[elem] = new_skb;
			/* Same put/reserve dance as bigmac_init_rings(). */
			skb_put(new_skb, ETH_FRAME_LEN);
			skb_reserve(new_skb, 34);
			this->rx_addr =
				dma_map_single(&bp->bigmac_op->dev,
					       new_skb->data,
					       RX_BUF_ALLOC_SIZE - 34,
					       DMA_FROM_DEVICE);
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			/* Small frame: copy it out (+2 keeps the IP header
			 * word-aligned) and leave the ring buffer in place.
			 */
			struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(&bp->bigmac_op->dev,
						this->rx_addr, len,
						DMA_FROM_DEVICE);
			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
			dma_sync_single_for_device(&bp->bigmac_op->dev,
						   this->rx_addr, len,
						   DMA_FROM_DEVICE);

			/* Reuse original ring buffer. */
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb = copy_skb;
		}

		/* No checksums done by the BigMAC ;-( */
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_rx(skb);
		bp->dev->stats.rx_packets++;
		bp->dev->stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	bp->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name);
}

/* Top-level interrupt handler: dispatch error, TX-complete and
 * RX-complete conditions based on the latched status registers.
 */
static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
{
	struct bigmac *bp = (struct bigmac *) dev_id;
	u32 qec_status, bmac_status;

	DIRQ(("bigmac_interrupt: "));

	/* Latch status registers now.
*/
	bmac_status = sbus_readl(bp->creg + CREG_STAT);
	qec_status = sbus_readl(bp->gregs + GLOB_STAT);

	DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
	if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
	    (bmac_status & CREG_STAT_ERRORS))
		bigmac_is_medium_rare(bp, qec_status, bmac_status);

	if (bmac_status & CREG_STAT_TXIRQ)
		bigmac_tx(bp);

	if (bmac_status & CREG_STAT_RXIRQ)
		bigmac_rx(bp);

	return IRQ_HANDLED;
}

/* ndo_open: grab the IRQ, arm the link timer and bring the hardware up.
 * The IRQ is released again if hardware init fails.
 */
static int bigmac_open(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
	if (ret) {
		printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
		return ret;
	}
	timer_setup(&bp->bigmac_timer, bigmac_timer, 0);
	ret = bigmac_init_hw(bp, 0);
	if (ret)
		free_irq(dev->irq, bp);
	return ret;
}

/* ndo_stop: kill the link timer, quiesce the chip, drop ring buffers
 * and release the IRQ.
 */
static int bigmac_close(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	del_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	bigmac_stop(bp);
	bigmac_clean_rings(bp);
	free_irq(dev->irq, bp);
	return 0;
}

/* ndo_tx_timeout: brute-force recovery — re-init the whole chip and
 * restart the queue.
 */
static void bigmac_tx_timeout(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_init_hw(bp, 0);
	netif_wake_queue(dev);
}

/* Put a packet on the wire. */
static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int len, entry;
	u32 mapping;

	len = skb->len;
	mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
				 len, DMA_TO_DEVICE);

	/* Avoid a race...
*/
	spin_lock_irq(&bp->lock);
	entry = bp->tx_new;
	DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
	/* Fill addr before handing ownership (TXD_OWN) to the chip. */
	bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
	bp->tx_skbs[entry] = skb;
	bp->bmac_block->be_txd[entry].tx_addr = mapping;
	bp->bmac_block->be_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	bp->tx_new = NEXT_TX(entry);
	if (TX_BUFFS_AVAIL(bp) <= 0)
		netif_stop_queue(dev);
	spin_unlock_irq(&bp->lock);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);


	return NETDEV_TX_OK;
}

/* ndo_get_stats: fold the latest hardware counters into dev->stats
 * before returning it.
 */
static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_get_counters(bp, bp->bregs);
	return &dev->stats;
}

/* ndo_set_rx_mode: program promiscuous / all-multi / hash-table
 * filtering according to dev->flags and the multicast list.
 */
static void bigmac_set_multicast(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	void __iomem *bregs = bp->bregs;
	struct netdev_hw_addr *ha;
	u32 tmp, crc;

	/* Disable the receiver.  The bit self-clears when
	 * the operation is complete.
*/
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp &= ~(BIGMAC_RXCFG_ENABLE);
	sbus_writel(tmp, bregs + BMAC_RXCFG);
	while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
		udelay(20);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		/* Too many addresses (or all-multi): accept every
		 * multicast by filling the whole hash table.
		 */
		sbus_writel(0xffff, bregs + BMAC_HTABLE0);
		sbus_writel(0xffff, bregs + BMAC_HTABLE1);
		sbus_writel(0xffff, bregs + BMAC_HTABLE2);
		sbus_writel(0xffff, bregs + BMAC_HTABLE3);
	} else if (dev->flags & IFF_PROMISC) {
		tmp = sbus_readl(bregs + BMAC_RXCFG);
		tmp |= BIGMAC_RXCFG_PMISC;
		sbus_writel(tmp, bregs + BMAC_RXCFG);
	} else {
		u16 hash_table[4] = { 0 };

		/* Standard 64-bin CRC hash filter: top 6 bits of the
		 * little-endian CRC select the bin.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
		sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
		sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
		sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
	}

	/* Re-enable the receiver. */
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp |= BIGMAC_RXCFG_ENABLE;
	sbus_writel(tmp, bregs + BMAC_RXCFG);
}

/* Ethtool support...
*/ 1037 static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1038 { 1039 strlcpy(info->driver, "sunbmac", sizeof(info->driver)); 1040 strlcpy(info->version, "2.0", sizeof(info->version)); 1041 } 1042 1043 static u32 bigmac_get_link(struct net_device *dev) 1044 { 1045 struct bigmac *bp = netdev_priv(dev); 1046 1047 spin_lock_irq(&bp->lock); 1048 bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR); 1049 spin_unlock_irq(&bp->lock); 1050 1051 return (bp->sw_bmsr & BMSR_LSTATUS); 1052 } 1053 1054 static const struct ethtool_ops bigmac_ethtool_ops = { 1055 .get_drvinfo = bigmac_get_drvinfo, 1056 .get_link = bigmac_get_link, 1057 }; 1058 1059 static const struct net_device_ops bigmac_ops = { 1060 .ndo_open = bigmac_open, 1061 .ndo_stop = bigmac_close, 1062 .ndo_start_xmit = bigmac_start_xmit, 1063 .ndo_get_stats = bigmac_get_stats, 1064 .ndo_set_rx_mode = bigmac_set_multicast, 1065 .ndo_tx_timeout = bigmac_tx_timeout, 1066 .ndo_set_mac_address = eth_mac_addr, 1067 .ndo_validate_addr = eth_validate_addr, 1068 }; 1069 1070 static int bigmac_ether_init(struct platform_device *op, 1071 struct platform_device *qec_op) 1072 { 1073 static int version_printed; 1074 struct net_device *dev; 1075 u8 bsizes, bsizes_more; 1076 struct bigmac *bp; 1077 int i; 1078 1079 /* Get a new device struct for this interface. */ 1080 dev = alloc_etherdev(sizeof(struct bigmac)); 1081 if (!dev) 1082 return -ENOMEM; 1083 1084 if (version_printed++ == 0) 1085 printk(KERN_INFO "%s", version); 1086 1087 for (i = 0; i < 6; i++) 1088 dev->dev_addr[i] = idprom->id_ethaddr[i]; 1089 1090 /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */ 1091 bp = netdev_priv(dev); 1092 bp->qec_op = qec_op; 1093 bp->bigmac_op = op; 1094 1095 SET_NETDEV_DEV(dev, &op->dev); 1096 1097 spin_lock_init(&bp->lock); 1098 1099 /* Map in QEC global control registers. 
*/
	bp->gregs = of_ioremap(&qec_op->resource[0], 0,
			       GLOB_REG_SIZE, "BigMAC QEC GLobal Regs");
	if (!bp->gregs) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
		goto fail_and_cleanup;
	}

	/* Make sure QEC is in BigMAC mode. */
	if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
		printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
		goto fail_and_cleanup;
	}

	/* Reset the QEC. */
	if (qec_global_reset(bp->gregs))
		goto fail_and_cleanup;

	/* Get supported SBUS burst sizes.
	 * NOTE(review): both reads fetch the same "burst-sizes" property
	 * from the QEC node, so bsizes_more always equals bsizes here —
	 * presumably the second read was meant for a different node;
	 * verify against the OBP bindings before changing.
	 */
	bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
	bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);

	bsizes &= 0xff;
	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);
	bp->bigmac_bursts = bsizes;

	/* Perform QEC initialization. */
	qec_init(bp);

	/* Map in the BigMAC channel registers. */
	bp->creg = of_ioremap(&op->resource[0], 0,
			      CREG_REG_SIZE, "BigMAC QEC Channel Regs");
	if (!bp->creg) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
		goto fail_and_cleanup;
	}

	/* Map in the BigMAC control registers. */
	bp->bregs = of_ioremap(&op->resource[1], 0,
			       BMAC_REG_SIZE, "BigMAC Primary Regs");
	if (!bp->bregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
		goto fail_and_cleanup;
	}

	/* Map in the BigMAC transceiver registers, this is how you poke at
	 * the BigMAC's PHY.
	 */
	bp->tregs = of_ioremap(&op->resource[2], 0,
			       TCVR_REG_SIZE, "BigMAC Transceiver Regs");
	if (!bp->tregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
		goto fail_and_cleanup;
	}

	/* Stop the BigMAC.
*/ 1159 bigmac_stop(bp); 1160 1161 /* Allocate transmit/receive descriptor DVMA block. */ 1162 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, 1163 PAGE_SIZE, 1164 &bp->bblock_dvma, GFP_ATOMIC); 1165 if (bp->bmac_block == NULL || bp->bblock_dvma == 0) 1166 goto fail_and_cleanup; 1167 1168 /* Get the board revision of this BigMAC. */ 1169 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, 1170 "board-version", 1); 1171 1172 /* Init auto-negotiation timer state. */ 1173 timer_setup(&bp->bigmac_timer, bigmac_timer, 0); 1174 bp->timer_state = asleep; 1175 bp->timer_ticks = 0; 1176 1177 /* Backlink to generic net device struct. */ 1178 bp->dev = dev; 1179 1180 /* Set links to our BigMAC open and close routines. */ 1181 dev->ethtool_ops = &bigmac_ethtool_ops; 1182 dev->netdev_ops = &bigmac_ops; 1183 dev->watchdog_timeo = 5*HZ; 1184 1185 /* Finish net device registration. */ 1186 dev->irq = bp->bigmac_op->archdata.irqs[0]; 1187 dev->dma = 0; 1188 1189 if (register_netdev(dev)) { 1190 printk(KERN_ERR "BIGMAC: Cannot register device.\n"); 1191 goto fail_and_cleanup; 1192 } 1193 1194 dev_set_drvdata(&bp->bigmac_op->dev, bp); 1195 1196 printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n", 1197 dev->name, dev->dev_addr); 1198 1199 return 0; 1200 1201 fail_and_cleanup: 1202 /* Something went wrong, undo whatever we did so far. */ 1203 /* Free register mappings if any. 
*/
	if (bp->gregs)
		of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
	if (bp->creg)
		of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
	if (bp->bregs)
		of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
	if (bp->tregs)
		of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);

	/* Free the descriptor DVMA block if it was allocated. */
	if (bp->bmac_block)
		dma_free_coherent(&bp->bigmac_op->dev,
				  PAGE_SIZE,
				  bp->bmac_block,
				  bp->bblock_dvma);

	/* This also frees the co-located private data */
	free_netdev(dev);
	return -ENODEV;
}

/* QEC can be the parent of either QuadEthernet or a BigMAC.  We want
 * the latter.
 *
 * Probe entry point: resolve the parent QEC platform device and hand
 * both devices off to bigmac_ether_init() for the real setup work.
 */
static int bigmac_sbus_probe(struct platform_device *op)
{
	struct device *parent = op->dev.parent;
	struct platform_device *qec_op;

	qec_op = to_platform_device(parent);

	return bigmac_ether_init(op, qec_op);
}

/* Remove entry point: unregister the net device first (so no new
 * traffic can start), then unmap every register bank mapped in
 * bigmac_ether_init(), release the descriptor DVMA block, and free
 * the netdev (which also frees the co-located struct bigmac).
 */
static int bigmac_sbus_remove(struct platform_device *op)
{
	struct bigmac *bp = platform_get_drvdata(op);
	struct device *parent = op->dev.parent;
	struct net_device *net_dev = bp->dev;
	struct platform_device *qec_op;

	qec_op = to_platform_device(parent);

	unregister_netdev(net_dev);

	of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
	of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
	of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
	dma_free_coherent(&op->dev,
			  PAGE_SIZE,
			  bp->bmac_block,
			  bp->bblock_dvma);

	free_netdev(net_dev);

	return 0;
}

/* Match on the OpenFirmware node name "be" (BigMAC ethernet). */
static const struct of_device_id bigmac_sbus_match[] = {
	{
		.name = "be",
	},
	{},
};

MODULE_DEVICE_TABLE(of, bigmac_sbus_match);

static struct platform_driver bigmac_sbus_driver = {
	.driver = {
		.name = "sunbmac",
		.of_match_table = bigmac_sbus_match,
	},
	.probe = bigmac_sbus_probe,
	.remove = bigmac_sbus_remove,
};

module_platform_driver(bigmac_sbus_driver);