1 /* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ 2 * sungem.c: Sun GEM ethernet driver. 3 * 4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) 5 * 6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management 7 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) 8 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. 9 * 10 * NAPI and NETPOLL support 11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) 12 * 13 */ 14 15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 17 #include <linux/module.h> 18 #include <linux/kernel.h> 19 #include <linux/types.h> 20 #include <linux/fcntl.h> 21 #include <linux/interrupt.h> 22 #include <linux/ioport.h> 23 #include <linux/in.h> 24 #include <linux/sched.h> 25 #include <linux/string.h> 26 #include <linux/delay.h> 27 #include <linux/init.h> 28 #include <linux/errno.h> 29 #include <linux/pci.h> 30 #include <linux/dma-mapping.h> 31 #include <linux/netdevice.h> 32 #include <linux/etherdevice.h> 33 #include <linux/skbuff.h> 34 #include <linux/mii.h> 35 #include <linux/ethtool.h> 36 #include <linux/crc32.h> 37 #include <linux/random.h> 38 #include <linux/workqueue.h> 39 #include <linux/if_vlan.h> 40 #include <linux/bitops.h> 41 #include <linux/mm.h> 42 #include <linux/gfp.h> 43 44 #include <asm/io.h> 45 #include <asm/byteorder.h> 46 #include <asm/uaccess.h> 47 #include <asm/irq.h> 48 49 #ifdef CONFIG_SPARC 50 #include <asm/idprom.h> 51 #include <asm/prom.h> 52 #endif 53 54 #ifdef CONFIG_PPC_PMAC 55 #include <asm/pci-bridge.h> 56 #include <asm/prom.h> 57 #include <asm/machdep.h> 58 #include <asm/pmac_feature.h> 59 #endif 60 61 #include <linux/sungem_phy.h> 62 #include "sungem.h" 63 64 /* Stripping FCS is causing problems, disabled for now */ 65 #undef STRIP_FCS 66 67 #define DEFAULT_MSG (NETIF_MSG_DRV | \ 68 NETIF_MSG_PROBE | \ 69 NETIF_MSG_LINK) 70 71 #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 72 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 73 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ 74 SUPPORTED_Pause | SUPPORTED_Autoneg) 75 76 #define DRV_NAME "sungem" 77 #define DRV_VERSION "1.0" 78 #define DRV_AUTHOR "David S. Miller <davem@redhat.com>" 79 80 static char version[] __devinitdata = 81 DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; 82 83 MODULE_AUTHOR(DRV_AUTHOR); 84 MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); 85 MODULE_LICENSE("GPL"); 86 87 #define GEM_MODULE_NAME "gem" 88 89 static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = { 90 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 91 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 92 93 /* These models only differ from the original GEM in 94 * that their tx/rx fifos are of a different size and 95 * they only support 10/100 speeds. -DaveM 96 * 97 * Apple's GMAC does support gigabit on machines with 98 * the BCM54xx PHYs. 
-BenH 99 */ 100 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, 101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 102 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, 103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 104 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, 105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 106 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, 107 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 108 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, 109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 110 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, 111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 112 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, 113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 114 {0, } 115 }; 116 117 MODULE_DEVICE_TABLE(pci, gem_pci_tbl); 118 119 static u16 __phy_read(struct gem *gp, int phy_addr, int reg) 120 { 121 u32 cmd; 122 int limit = 10000; 123 124 cmd = (1 << 30); 125 cmd |= (2 << 28); 126 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 127 cmd |= (reg << 18) & MIF_FRAME_REGAD; 128 cmd |= (MIF_FRAME_TAMSB); 129 writel(cmd, gp->regs + MIF_FRAME); 130 131 while (--limit) { 132 cmd = readl(gp->regs + MIF_FRAME); 133 if (cmd & MIF_FRAME_TALSB) 134 break; 135 136 udelay(10); 137 } 138 139 if (!limit) 140 cmd = 0xffff; 141 142 return cmd & MIF_FRAME_DATA; 143 } 144 145 static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 146 { 147 struct gem *gp = netdev_priv(dev); 148 return __phy_read(gp, mii_id, reg); 149 } 150 151 static inline u16 phy_read(struct gem *gp, int reg) 152 { 153 return __phy_read(gp, gp->mii_phy_addr, reg); 154 } 155 156 static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) 157 { 158 u32 cmd; 159 int limit = 10000; 160 161 cmd = (1 << 30); 162 cmd |= (1 << 28); 163 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 164 cmd |= (reg << 18) & MIF_FRAME_REGAD; 165 cmd |= (MIF_FRAME_TAMSB); 166 cmd |= (val & MIF_FRAME_DATA); 167 writel(cmd, gp->regs + MIF_FRAME); 168 169 while (limit--) { 170 cmd = readl(gp->regs + MIF_FRAME); 171 if (cmd & MIF_FRAME_TALSB) 172 break; 173 174 udelay(10); 175 } 176 } 177 178 static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 179 { 180 struct gem *gp = netdev_priv(dev); 181 __phy_write(gp, mii_id, reg, val & 0xffff); 182 } 183 184 static inline void phy_write(struct gem *gp, int reg, u16 val) 185 { 186 __phy_write(gp, gp->mii_phy_addr, reg, val); 187 } 188 189 static inline void gem_enable_ints(struct gem *gp) 190 { 191 /* Enable all interrupts but TXDONE */ 192 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 193 } 194 195 static inline void gem_disable_ints(struct gem *gp) 196 { 197 /* Disable all interrupts, including TXDONE */ 198 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 199 (void)readl(gp->regs + GREG_IMASK); /* write posting */ 200 } 201 202 static void gem_get_cell(struct gem *gp) 203 { 204 BUG_ON(gp->cell_enabled < 0); 205 gp->cell_enabled++; 206 #ifdef CONFIG_PPC_PMAC 207 if (gp->cell_enabled == 1) { 208 mb(); 209 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); 210 udelay(10); 211 } 212 #endif /* CONFIG_PPC_PMAC */ 213 } 214 215 /* Turn off the chip's clock */ 216 static void gem_put_cell(struct gem *gp) 217 { 218 BUG_ON(gp->cell_enabled <= 0); 219 gp->cell_enabled--; 220 #ifdef CONFIG_PPC_PMAC 221 if (gp->cell_enabled == 0) { 222 mb(); 223 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); 224 udelay(10); 225 } 226 #endif /* CONFIG_PPC_PMAC */ 227 } 228 229 static inline void gem_netif_stop(struct gem *gp) 230 { 
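	/* Quiesce the interface: refresh trans_start so the netdev watchdog
	 * doesn't fire while the queue is deliberately stopped, then stop
	 * NAPI polling and disable the TX queue.
	 */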
231 gp->dev->trans_start = jiffies; /* prevent tx timeout */ 232 napi_disable(&gp->napi); 233 netif_tx_disable(gp->dev); 234 } 235 236 static inline void gem_netif_start(struct gem *gp) 237 { 238 /* NOTE: unconditional netif_wake_queue is only 239 * appropriate so long as all callers are assured to 240 * have free tx slots. 241 */ 242 netif_wake_queue(gp->dev); 243 napi_enable(&gp->napi); 244 } 245 246 static void gem_schedule_reset(struct gem *gp) 247 { 248 gp->reset_task_pending = 1; 249 schedule_work(&gp->reset_task); 250 } 251 252 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) 253 { 254 if (netif_msg_intr(gp)) 255 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); 256 } 257 258 static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 259 { 260 u32 pcs_istat = readl(gp->regs + PCS_ISTAT); 261 u32 pcs_miistat; 262 263 if (netif_msg_intr(gp)) 264 printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", 265 gp->dev->name, pcs_istat); 266 267 if (!(pcs_istat & PCS_ISTAT_LSC)) { 268 netdev_err(dev, "PCS irq but no link status change???\n"); 269 return 0; 270 } 271 272 /* The link status bit latches on zero, so you must 273 * read it twice in such a case to see a transition 274 * to the link being up. 275 */ 276 pcs_miistat = readl(gp->regs + PCS_MIISTAT); 277 if (!(pcs_miistat & PCS_MIISTAT_LS)) 278 pcs_miistat |= 279 (readl(gp->regs + PCS_MIISTAT) & 280 PCS_MIISTAT_LS); 281 282 if (pcs_miistat & PCS_MIISTAT_ANC) { 283 /* The remote-fault indication is only valid 284 * when autoneg has completed. 285 */ 286 if (pcs_miistat & PCS_MIISTAT_RF) 287 netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n"); 288 else 289 netdev_info(dev, "PCS AutoNEG complete\n"); 290 } 291 292 if (pcs_miistat & PCS_MIISTAT_LS) { 293 netdev_info(dev, "PCS link is now up\n"); 294 netif_carrier_on(gp->dev); 295 } else { 296 netdev_info(dev, "PCS link is now down\n"); 297 netif_carrier_off(gp->dev); 298 /* If this happens and the link timer is not running, 299 * reset so we re-negotiate. 300 */ 301 if (!timer_pending(&gp->link_timer)) 302 return 1; 303 } 304 305 return 0; 306 } 307 308 static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 309 { 310 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); 311 312 if (netif_msg_intr(gp)) 313 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", 314 gp->dev->name, txmac_stat); 315 316 /* Defer timer expiration is quite normal, 317 * don't even log the event. 318 */ 319 if ((txmac_stat & MAC_TXSTAT_DTE) && 320 !(txmac_stat & ~MAC_TXSTAT_DTE)) 321 return 0; 322 323 if (txmac_stat & MAC_TXSTAT_URUN) { 324 netdev_err(dev, "TX MAC xmit underrun\n"); 325 dev->stats.tx_fifo_errors++; 326 } 327 328 if (txmac_stat & MAC_TXSTAT_MPE) { 329 netdev_err(dev, "TX MAC max packet size error\n"); 330 dev->stats.tx_errors++; 331 } 332 333 /* The rest are all cases of one of the 16-bit TX 334 * counters expiring. 335 */ 336 if (txmac_stat & MAC_TXSTAT_NCE) 337 dev->stats.collisions += 0x10000; 338 339 if (txmac_stat & MAC_TXSTAT_ECE) { 340 dev->stats.tx_aborted_errors += 0x10000; 341 dev->stats.collisions += 0x10000; 342 } 343 344 if (txmac_stat & MAC_TXSTAT_LCE) { 345 dev->stats.tx_aborted_errors += 0x10000; 346 dev->stats.collisions += 0x10000; 347 } 348 349 /* We do not keep track of MAC_TXSTAT_FCE and 350 * MAC_TXSTAT_PCE events. 351 */ 352 return 0; 353 } 354 355 /* When we get a RX fifo overflow, the RX unit in GEM is probably hung 356 * so we do the following. 
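 * We reset and disable the RX MAC, disable RX DMA, issue a software
 * RX reset, mark every descriptor in the RX ring fresh again, and
 * finally reprogram and re-enable RX DMA and the RX MAC.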
357 * 358 * If any part of the reset goes wrong, we return 1 and that causes the 359 * whole chip to be reset. 360 */ 361 static int gem_rxmac_reset(struct gem *gp) 362 { 363 struct net_device *dev = gp->dev; 364 int limit, i; 365 u64 desc_dma; 366 u32 val; 367 368 /* First, reset & disable MAC RX. */ 369 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 370 for (limit = 0; limit < 5000; limit++) { 371 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) 372 break; 373 udelay(10); 374 } 375 if (limit == 5000) { 376 netdev_err(dev, "RX MAC will not reset, resetting whole chip\n"); 377 return 1; 378 } 379 380 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, 381 gp->regs + MAC_RXCFG); 382 for (limit = 0; limit < 5000; limit++) { 383 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) 384 break; 385 udelay(10); 386 } 387 if (limit == 5000) { 388 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); 389 return 1; 390 } 391 392 /* Second, disable RX DMA. */ 393 writel(0, gp->regs + RXDMA_CFG); 394 for (limit = 0; limit < 5000; limit++) { 395 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) 396 break; 397 udelay(10); 398 } 399 if (limit == 5000) { 400 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); 401 return 1; 402 } 403 404 mdelay(5); 405 406 /* Execute RX reset command. */ 407 writel(gp->swrst_base | GREG_SWRST_RXRST, 408 gp->regs + GREG_SWRST); 409 for (limit = 0; limit < 5000; limit++) { 410 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) 411 break; 412 udelay(10); 413 } 414 if (limit == 5000) { 415 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); 416 return 1; 417 } 418 419 /* Refresh the RX ring. */ 420 for (i = 0; i < RX_RING_SIZE; i++) { 421 struct gem_rxd *rxd = &gp->init_block->rxd[i]; 422 423 if (gp->rx_skbs[i] == NULL) { 424 netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n"); 425 return 1; 426 } 427 428 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 429 } 430 gp->rx_new = gp->rx_old = 0; 431 432 /* Now we must reprogram the rest of RX unit. 
*/ 433 desc_dma = (u64) gp->gblock_dvma; 434 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 435 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 436 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 437 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 438 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 439 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 440 writel(val, gp->regs + RXDMA_CFG); 441 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 442 writel(((5 & RXDMA_BLANK_IPKTS) | 443 ((8 << 12) & RXDMA_BLANK_ITIME)), 444 gp->regs + RXDMA_BLANK); 445 else 446 writel(((5 & RXDMA_BLANK_IPKTS) | 447 ((4 << 12) & RXDMA_BLANK_ITIME)), 448 gp->regs + RXDMA_BLANK); 449 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 450 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 451 writel(val, gp->regs + RXDMA_PTHRESH); 452 val = readl(gp->regs + RXDMA_CFG); 453 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 454 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 455 val = readl(gp->regs + MAC_RXCFG); 456 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 457 458 return 0; 459 } 460 461 static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 462 { 463 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); 464 int ret = 0; 465 466 if (netif_msg_intr(gp)) 467 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", 468 gp->dev->name, rxmac_stat); 469 470 if (rxmac_stat & MAC_RXSTAT_OFLW) { 471 u32 smac = readl(gp->regs + MAC_SMACHINE); 472 473 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); 474 dev->stats.rx_over_errors++; 475 dev->stats.rx_fifo_errors++; 476 477 ret = gem_rxmac_reset(gp); 478 } 479 480 if (rxmac_stat & MAC_RXSTAT_ACE) 481 dev->stats.rx_frame_errors += 0x10000; 482 483 if (rxmac_stat & MAC_RXSTAT_CCE) 484 dev->stats.rx_crc_errors += 0x10000; 485 486 if (rxmac_stat & MAC_RXSTAT_LCE) 487 dev->stats.rx_length_errors += 0x10000; 488 489 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 490 * events. 491 */ 492 return ret; 493 } 494 495 static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 496 { 497 u32 mac_cstat = readl(gp->regs + MAC_CSTAT); 498 499 if (netif_msg_intr(gp)) 500 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", 501 gp->dev->name, mac_cstat); 502 503 /* This interrupt is just for pause frame and pause 504 * tracking. It is useful for diagnostics and debug 505 * but probably by default we will mask these events. 
506 */ 507 if (mac_cstat & MAC_CSTAT_PS) 508 gp->pause_entered++; 509 510 if (mac_cstat & MAC_CSTAT_PRCV) 511 gp->pause_last_time_recvd = (mac_cstat >> 16); 512 513 return 0; 514 } 515 516 static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 517 { 518 u32 mif_status = readl(gp->regs + MIF_STATUS); 519 u32 reg_val, changed_bits; 520 521 reg_val = (mif_status & MIF_STATUS_DATA) >> 16; 522 changed_bits = (mif_status & MIF_STATUS_STAT); 523 524 gem_handle_mif_event(gp, reg_val, changed_bits); 525 526 return 0; 527 } 528 529 static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 530 { 531 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); 532 533 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 534 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 535 netdev_err(dev, "PCI error [%04x]", pci_estat); 536 537 if (pci_estat & GREG_PCIESTAT_BADACK) 538 pr_cont(" <No ACK64# during ABS64 cycle>"); 539 if (pci_estat & GREG_PCIESTAT_DTRTO) 540 pr_cont(" <Delayed transaction timeout>"); 541 if (pci_estat & GREG_PCIESTAT_OTHER) 542 pr_cont(" <other>"); 543 pr_cont("\n"); 544 } else { 545 pci_estat |= GREG_PCIESTAT_OTHER; 546 netdev_err(dev, "PCI error\n"); 547 } 548 549 if (pci_estat & GREG_PCIESTAT_OTHER) { 550 u16 pci_cfg_stat; 551 552 /* Interrogate PCI config space for the 553 * true cause. 554 */ 555 pci_read_config_word(gp->pdev, PCI_STATUS, 556 &pci_cfg_stat); 557 netdev_err(dev, "Read PCI cfg space status [%04x]\n", 558 pci_cfg_stat); 559 if (pci_cfg_stat & PCI_STATUS_PARITY) 560 netdev_err(dev, "PCI parity error detected\n"); 561 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) 562 netdev_err(dev, "PCI target abort\n"); 563 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) 564 netdev_err(dev, "PCI master acks target abort\n"); 565 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) 566 netdev_err(dev, "PCI master abort\n"); 567 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) 568 netdev_err(dev, "PCI system error SERR#\n"); 569 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) 570 netdev_err(dev, "PCI parity error\n"); 571 572 /* Write the error bits back to clear them. */ 573 pci_cfg_stat &= (PCI_STATUS_PARITY | 574 PCI_STATUS_SIG_TARGET_ABORT | 575 PCI_STATUS_REC_TARGET_ABORT | 576 PCI_STATUS_REC_MASTER_ABORT | 577 PCI_STATUS_SIG_SYSTEM_ERROR | 578 PCI_STATUS_DETECTED_PARITY); 579 pci_write_config_word(gp->pdev, 580 PCI_STATUS, pci_cfg_stat); 581 } 582 583 /* For all PCI errors, we should reset the chip. */ 584 return 1; 585 } 586 587 /* All non-normal interrupt conditions get serviced here. 588 * Returns non-zero if we should just exit the interrupt 589 * handler right now (ie. if we reset the card which invalidates 590 * all of the other original irq status bits). 591 */ 592 static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) 593 { 594 if (gem_status & GREG_STAT_RXNOBUF) { 595 /* Frame arrived, no free RX buffers available. 
*/ 596 if (netif_msg_rx_err(gp)) 597 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 598 gp->dev->name); 599 dev->stats.rx_dropped++; 600 } 601 602 if (gem_status & GREG_STAT_RXTAGERR) { 603 /* corrupt RX tag framing */ 604 if (netif_msg_rx_err(gp)) 605 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 606 gp->dev->name); 607 dev->stats.rx_errors++; 608 609 return 1; 610 } 611 612 if (gem_status & GREG_STAT_PCS) { 613 if (gem_pcs_interrupt(dev, gp, gem_status)) 614 return 1; 615 } 616 617 if (gem_status & GREG_STAT_TXMAC) { 618 if (gem_txmac_interrupt(dev, gp, gem_status)) 619 return 1; 620 } 621 622 if (gem_status & GREG_STAT_RXMAC) { 623 if (gem_rxmac_interrupt(dev, gp, gem_status)) 624 return 1; 625 } 626 627 if (gem_status & GREG_STAT_MAC) { 628 if (gem_mac_interrupt(dev, gp, gem_status)) 629 return 1; 630 } 631 632 if (gem_status & GREG_STAT_MIF) { 633 if (gem_mif_interrupt(dev, gp, gem_status)) 634 return 1; 635 } 636 637 if (gem_status & GREG_STAT_PCIERR) { 638 if (gem_pci_interrupt(dev, gp, gem_status)) 639 return 1; 640 } 641 642 return 0; 643 } 644 645 static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) 646 { 647 int entry, limit; 648 649 entry = gp->tx_old; 650 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); 651 while (entry != limit) { 652 struct sk_buff *skb; 653 struct gem_txd *txd; 654 dma_addr_t dma_addr; 655 u32 dma_len; 656 int frag; 657 658 if (netif_msg_tx_done(gp)) 659 printk(KERN_DEBUG "%s: tx done, slot %d\n", 660 gp->dev->name, entry); 661 skb = gp->tx_skbs[entry]; 662 if (skb_shinfo(skb)->nr_frags) { 663 int last = entry + skb_shinfo(skb)->nr_frags; 664 int walk = entry; 665 int incomplete = 0; 666 667 last &= (TX_RING_SIZE - 1); 668 for (;;) { 669 walk = NEXT_TX(walk); 670 if (walk == limit) 671 incomplete = 1; 672 if (walk == last) 673 break; 674 } 675 if (incomplete) 676 break; 677 } 678 gp->tx_skbs[entry] = NULL; 679 dev->stats.tx_bytes += skb->len; 680 681 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 682 txd = &gp->init_block->txd[entry]; 683 684 dma_addr = le64_to_cpu(txd->buffer); 685 dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; 686 687 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); 688 entry = NEXT_TX(entry); 689 } 690 691 dev->stats.tx_packets++; 692 dev_kfree_skb(skb); 693 } 694 gp->tx_old = entry; 695 696 /* Need to make the tx_old update visible to gem_start_xmit() 697 * before checking for netif_queue_stopped(). Without the 698 * memory barrier, there is a small possibility that gem_start_xmit() 699 * will miss it and cause the queue to be stopped forever. 
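 * (This pairs with the smp_mb() that gem_start_xmit() issues right
 * after netif_stop_queue(): each side publishes its update before
 * testing the other's state, so at least one of them sees the truth.)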
700 */ 701 smp_mb(); 702 703 if (unlikely(netif_queue_stopped(dev) && 704 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { 705 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 706 707 __netif_tx_lock(txq, smp_processor_id()); 708 if (netif_queue_stopped(dev) && 709 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 710 netif_wake_queue(dev); 711 __netif_tx_unlock(txq); 712 } 713 } 714 715 static __inline__ void gem_post_rxds(struct gem *gp, int limit) 716 { 717 int cluster_start, curr, count, kick; 718 719 cluster_start = curr = (gp->rx_new & ~(4 - 1)); 720 count = 0; 721 kick = -1; 722 wmb(); 723 while (curr != limit) { 724 curr = NEXT_RX(curr); 725 if (++count == 4) { 726 struct gem_rxd *rxd = 727 &gp->init_block->rxd[cluster_start]; 728 for (;;) { 729 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 730 rxd++; 731 cluster_start = NEXT_RX(cluster_start); 732 if (cluster_start == curr) 733 break; 734 } 735 kick = curr; 736 count = 0; 737 } 738 } 739 if (kick >= 0) { 740 mb(); 741 writel(kick, gp->regs + RXDMA_KICK); 742 } 743 } 744 745 #define ALIGNED_RX_SKB_ADDR(addr) \ 746 ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) 747 static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, 748 gfp_t gfp_flags) 749 { 750 struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); 751 752 if (likely(skb)) { 753 unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); 754 skb_reserve(skb, offset); 755 } 756 return skb; 757 } 758 759 static int gem_rx(struct gem *gp, int work_to_do) 760 { 761 struct net_device *dev = gp->dev; 762 int entry, drops, work_done = 0; 763 u32 done; 764 __sum16 csum; 765 766 if (netif_msg_rx_status(gp)) 767 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", 768 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); 769 770 entry = gp->rx_new; 771 drops = 0; 772 done = readl(gp->regs + RXDMA_DONE); 773 for (;;) { 774 struct gem_rxd *rxd = &gp->init_block->rxd[entry]; 775 struct sk_buff *skb; 776 u64 status = le64_to_cpu(rxd->status_word); 777 dma_addr_t dma_addr; 778 int len; 779 780 if ((status & RXDCTRL_OWN) != 0) 781 break; 782 783 if (work_done >= RX_RING_SIZE || work_done >= work_to_do) 784 break; 785 786 /* When writing back RX descriptor, GEM writes status 787 * then buffer address, possibly in separate transactions. 788 * If we don't wait for the chip to write both, we could 789 * post a new buffer to this descriptor then have GEM spam 790 * on the buffer address. We sync on the RX completion 791 * register to prevent this from happening. 792 */ 793 if (entry == done) { 794 done = readl(gp->regs + RXDMA_DONE); 795 if (entry == done) 796 break; 797 } 798 799 /* We can now account for the work we're about to do */ 800 work_done++; 801 802 skb = gp->rx_skbs[entry]; 803 804 len = (status & RXDCTRL_BUFSZ) >> 16; 805 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 806 dev->stats.rx_errors++; 807 if (len < ETH_ZLEN) 808 dev->stats.rx_length_errors++; 809 if (len & RXDCTRL_BAD) 810 dev->stats.rx_crc_errors++; 811 812 /* We'll just return it to GEM. 
*/ 813 drop_it: 814 dev->stats.rx_dropped++; 815 goto next; 816 } 817 818 dma_addr = le64_to_cpu(rxd->buffer); 819 if (len > RX_COPY_THRESHOLD) { 820 struct sk_buff *new_skb; 821 822 new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 823 if (new_skb == NULL) { 824 drops++; 825 goto drop_it; 826 } 827 pci_unmap_page(gp->pdev, dma_addr, 828 RX_BUF_ALLOC_SIZE(gp), 829 PCI_DMA_FROMDEVICE); 830 gp->rx_skbs[entry] = new_skb; 831 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); 832 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, 833 virt_to_page(new_skb->data), 834 offset_in_page(new_skb->data), 835 RX_BUF_ALLOC_SIZE(gp), 836 PCI_DMA_FROMDEVICE)); 837 skb_reserve(new_skb, RX_OFFSET); 838 839 /* Trim the original skb for the netif. */ 840 skb_trim(skb, len); 841 } else { 842 struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); 843 844 if (copy_skb == NULL) { 845 drops++; 846 goto drop_it; 847 } 848 849 skb_reserve(copy_skb, 2); 850 skb_put(copy_skb, len); 851 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 852 skb_copy_from_linear_data(skb, copy_skb->data, len); 853 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 854 855 /* We'll reuse the original ring buffer. */ 856 skb = copy_skb; 857 } 858 859 csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); 860 skb->csum = csum_unfold(csum); 861 skb->ip_summed = CHECKSUM_COMPLETE; 862 skb->protocol = eth_type_trans(skb, gp->dev); 863 864 napi_gro_receive(&gp->napi, skb); 865 866 dev->stats.rx_packets++; 867 dev->stats.rx_bytes += len; 868 869 next: 870 entry = NEXT_RX(entry); 871 } 872 873 gem_post_rxds(gp, entry); 874 875 gp->rx_new = entry; 876 877 if (drops) 878 netdev_info(gp->dev, "Memory squeeze, deferring packet\n"); 879 880 return work_done; 881 } 882 883 static int gem_poll(struct napi_struct *napi, int budget) 884 { 885 struct gem *gp = container_of(napi, struct gem, napi); 886 struct net_device *dev = gp->dev; 887 int work_done; 888 889 work_done = 0; 890 do { 891 /* Handle anomalies */ 892 if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { 893 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 894 int reset; 895 896 /* We run the abnormal interrupt handling code with 897 * the Tx lock. It only resets the Rx portion of the 898 * chip, but we need to guard it against DMA being 899 * restarted by the link poll timer 900 */ 901 __netif_tx_lock(txq, smp_processor_id()); 902 reset = gem_abnormal_irq(dev, gp, gp->status); 903 __netif_tx_unlock(txq); 904 if (reset) { 905 gem_schedule_reset(gp); 906 napi_complete(napi); 907 return work_done; 908 } 909 } 910 911 /* Run TX completion thread */ 912 gem_tx(dev, gp, gp->status); 913 914 /* Run RX thread. We don't use any locking here, 915 * code willing to do bad things - like cleaning the 916 * rx ring - must call napi_disable(), which 917 * schedule_timeout()'s if polling is already disabled. 
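		 * gem_rx() gets the remaining budget and returns how many
		 * frames it actually processed; we keep looping until the
		 * chip stops flagging NAPI work or the budget is exhausted.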
918 */ 919 work_done += gem_rx(gp, budget - work_done); 920 921 if (work_done >= budget) 922 return work_done; 923 924 gp->status = readl(gp->regs + GREG_STAT); 925 } while (gp->status & GREG_STAT_NAPI); 926 927 napi_complete(napi); 928 gem_enable_ints(gp); 929 930 return work_done; 931 } 932 933 static irqreturn_t gem_interrupt(int irq, void *dev_id) 934 { 935 struct net_device *dev = dev_id; 936 struct gem *gp = netdev_priv(dev); 937 938 if (napi_schedule_prep(&gp->napi)) { 939 u32 gem_status = readl(gp->regs + GREG_STAT); 940 941 if (unlikely(gem_status == 0)) { 942 napi_enable(&gp->napi); 943 return IRQ_NONE; 944 } 945 if (netif_msg_intr(gp)) 946 printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", 947 gp->dev->name, gem_status); 948 949 gp->status = gem_status; 950 gem_disable_ints(gp); 951 __napi_schedule(&gp->napi); 952 } 953 954 /* If polling was disabled at the time we received that 955 * interrupt, we may return IRQ_HANDLED here while we 956 * should return IRQ_NONE. No big deal... 957 */ 958 return IRQ_HANDLED; 959 } 960 961 #ifdef CONFIG_NET_POLL_CONTROLLER 962 static void gem_poll_controller(struct net_device *dev) 963 { 964 struct gem *gp = netdev_priv(dev); 965 966 disable_irq(gp->pdev->irq); 967 gem_interrupt(gp->pdev->irq, dev); 968 enable_irq(gp->pdev->irq); 969 } 970 #endif 971 972 static void gem_tx_timeout(struct net_device *dev) 973 { 974 struct gem *gp = netdev_priv(dev); 975 976 netdev_err(dev, "transmit timed out, resetting\n"); 977 978 netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", 979 readl(gp->regs + TXDMA_CFG), 980 readl(gp->regs + MAC_TXSTAT), 981 readl(gp->regs + MAC_TXCFG)); 982 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", 983 readl(gp->regs + RXDMA_CFG), 984 readl(gp->regs + MAC_RXSTAT), 985 readl(gp->regs + MAC_RXCFG)); 986 987 gem_schedule_reset(gp); 988 } 989 990 static __inline__ int gem_intme(int entry) 991 { 992 /* Algorithm: IRQ every 1/2 of descriptors. */ 993 if (!(entry & ((TX_RING_SIZE>>1)-1))) 994 return 1; 995 996 return 0; 997 } 998 999 static netdev_tx_t gem_start_xmit(struct sk_buff *skb, 1000 struct net_device *dev) 1001 { 1002 struct gem *gp = netdev_priv(dev); 1003 int entry; 1004 u64 ctrl; 1005 1006 ctrl = 0; 1007 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1008 const u64 csum_start_off = skb_checksum_start_offset(skb); 1009 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 1010 1011 ctrl = (TXDCTRL_CENAB | 1012 (csum_start_off << 15) | 1013 (csum_stuff_off << 21)); 1014 } 1015 1016 if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { 1017 /* This is a hard error, log it. */ 1018 if (!netif_queue_stopped(dev)) { 1019 netif_stop_queue(dev); 1020 netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); 1021 } 1022 return NETDEV_TX_BUSY; 1023 } 1024 1025 entry = gp->tx_new; 1026 gp->tx_skbs[entry] = skb; 1027 1028 if (skb_shinfo(skb)->nr_frags == 0) { 1029 struct gem_txd *txd = &gp->init_block->txd[entry]; 1030 dma_addr_t mapping; 1031 u32 len; 1032 1033 len = skb->len; 1034 mapping = pci_map_page(gp->pdev, 1035 virt_to_page(skb->data), 1036 offset_in_page(skb->data), 1037 len, PCI_DMA_TODEVICE); 1038 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; 1039 if (gem_intme(entry)) 1040 ctrl |= TXDCTRL_INTME; 1041 txd->buffer = cpu_to_le64(mapping); 1042 wmb(); 1043 txd->control_word = cpu_to_le64(ctrl); 1044 entry = NEXT_TX(entry); 1045 } else { 1046 struct gem_txd *txd; 1047 u32 first_len; 1048 u64 intme; 1049 dma_addr_t first_mapping; 1050 int frag, first_entry = entry; 1051 1052 intme = 0; 1053 if (gem_intme(entry)) 1054 intme |= TXDCTRL_INTME; 1055 1056 /* We must give this initial chunk to the device last. 1057 * Otherwise we could race with the device. 1058 */ 1059 first_len = skb_headlen(skb); 1060 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), 1061 offset_in_page(skb->data), 1062 first_len, PCI_DMA_TODEVICE); 1063 entry = NEXT_TX(entry); 1064 1065 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 1066 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1067 u32 len; 1068 dma_addr_t mapping; 1069 u64 this_ctrl; 1070 1071 len = skb_frag_size(this_frag); 1072 mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag, 1073 0, len, DMA_TO_DEVICE); 1074 this_ctrl = ctrl; 1075 if (frag == skb_shinfo(skb)->nr_frags - 1) 1076 this_ctrl |= TXDCTRL_EOF; 1077 1078 txd = &gp->init_block->txd[entry]; 1079 txd->buffer = cpu_to_le64(mapping); 1080 wmb(); 1081 txd->control_word = cpu_to_le64(this_ctrl | len); 1082 1083 if (gem_intme(entry)) 1084 intme |= TXDCTRL_INTME; 1085 1086 entry = NEXT_TX(entry); 1087 } 1088 txd = &gp->init_block->txd[first_entry]; 1089 txd->buffer = cpu_to_le64(first_mapping); 1090 wmb(); 1091 txd->control_word = 1092 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); 1093 } 1094 1095 gp->tx_new = entry; 1096 if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { 1097 netif_stop_queue(dev); 1098 1099 /* netif_stop_queue() must be done before checking 1100 * checking tx index in TX_BUFFS_AVAIL() below, because 1101 * in gem_tx(), we update tx_old before checking for 1102 * netif_queue_stopped(). 1103 */ 1104 smp_mb(); 1105 if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 1106 netif_wake_queue(dev); 1107 } 1108 if (netif_msg_tx_queued(gp)) 1109 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", 1110 dev->name, entry, skb->len); 1111 mb(); 1112 writel(gp->tx_new, gp->regs + TXDMA_KICK); 1113 1114 return NETDEV_TX_OK; 1115 } 1116 1117 static void gem_pcs_reset(struct gem *gp) 1118 { 1119 int limit; 1120 u32 val; 1121 1122 /* Reset PCS unit. */ 1123 val = readl(gp->regs + PCS_MIICTRL); 1124 val |= PCS_MIICTRL_RST; 1125 writel(val, gp->regs + PCS_MIICTRL); 1126 1127 limit = 32; 1128 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { 1129 udelay(100); 1130 if (limit-- <= 0) 1131 break; 1132 } 1133 if (limit < 0) 1134 netdev_warn(gp->dev, "PCS reset bit would not clear\n"); 1135 } 1136 1137 static void gem_pcs_reinit_adv(struct gem *gp) 1138 { 1139 u32 val; 1140 1141 /* Make sure PCS is disabled while changing advertisement 1142 * configuration. 
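	 * The sequence below: disable the PCS, program the advertisement
	 * register, restart autoneg with wrapback disabled, re-enable the
	 * PCS, then set the Serialink loopback bit to the non-looped value
	 * for the current PHY mode.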
1143 */ 1144 val = readl(gp->regs + PCS_CFG); 1145 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); 1146 writel(val, gp->regs + PCS_CFG); 1147 1148 /* Advertise all capabilities except asymmetric 1149 * pause. 1150 */ 1151 val = readl(gp->regs + PCS_MIIADV); 1152 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | 1153 PCS_MIIADV_SP | PCS_MIIADV_AP); 1154 writel(val, gp->regs + PCS_MIIADV); 1155 1156 /* Enable and restart auto-negotiation, disable wrapback/loopback, 1157 * and re-enable PCS. 1158 */ 1159 val = readl(gp->regs + PCS_MIICTRL); 1160 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); 1161 val &= ~PCS_MIICTRL_WB; 1162 writel(val, gp->regs + PCS_MIICTRL); 1163 1164 val = readl(gp->regs + PCS_CFG); 1165 val |= PCS_CFG_ENABLE; 1166 writel(val, gp->regs + PCS_CFG); 1167 1168 /* Make sure serialink loopback is off. The meaning 1169 * of this bit is logically inverted based upon whether 1170 * you are in Serialink or SERDES mode. 1171 */ 1172 val = readl(gp->regs + PCS_SCTRL); 1173 if (gp->phy_type == phy_serialink) 1174 val &= ~PCS_SCTRL_LOOP; 1175 else 1176 val |= PCS_SCTRL_LOOP; 1177 writel(val, gp->regs + PCS_SCTRL); 1178 } 1179 1180 #define STOP_TRIES 32 1181 1182 static void gem_reset(struct gem *gp) 1183 { 1184 int limit; 1185 u32 val; 1186 1187 /* Make sure we won't get any more interrupts */ 1188 writel(0xffffffff, gp->regs + GREG_IMASK); 1189 1190 /* Reset the chip */ 1191 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, 1192 gp->regs + GREG_SWRST); 1193 1194 limit = STOP_TRIES; 1195 1196 do { 1197 udelay(20); 1198 val = readl(gp->regs + GREG_SWRST); 1199 if (limit-- <= 0) 1200 break; 1201 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); 1202 1203 if (limit < 0) 1204 netdev_err(gp->dev, "SW reset is ghetto\n"); 1205 1206 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) 1207 gem_pcs_reinit_adv(gp); 1208 } 1209 1210 static void gem_start_dma(struct gem *gp) 1211 { 1212 u32 val; 1213 1214 /* We are ready to rock, turn everything on. */ 1215 val = readl(gp->regs + TXDMA_CFG); 1216 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1217 val = readl(gp->regs + RXDMA_CFG); 1218 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1219 val = readl(gp->regs + MAC_TXCFG); 1220 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1221 val = readl(gp->regs + MAC_RXCFG); 1222 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1223 1224 (void) readl(gp->regs + MAC_RXCFG); 1225 udelay(100); 1226 1227 gem_enable_ints(gp); 1228 1229 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1230 } 1231 1232 /* DMA won't be actually stopped before about 4ms tho ... 1233 */ 1234 static void gem_stop_dma(struct gem *gp) 1235 { 1236 u32 val; 1237 1238 /* We are done rocking, turn everything off. */ 1239 val = readl(gp->regs + TXDMA_CFG); 1240 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1241 val = readl(gp->regs + RXDMA_CFG); 1242 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1243 val = readl(gp->regs + MAC_TXCFG); 1244 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1245 val = readl(gp->regs + MAC_RXCFG); 1246 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1247 1248 (void) readl(gp->regs + MAC_RXCFG); 1249 1250 /* Need to wait a bit ... 
done by the caller */ 1251 } 1252 1253 1254 // XXX dbl check what that function should do when called on PCS PHY 1255 static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) 1256 { 1257 u32 advertise, features; 1258 int autoneg; 1259 int speed; 1260 int duplex; 1261 1262 if (gp->phy_type != phy_mii_mdio0 && 1263 gp->phy_type != phy_mii_mdio1) 1264 goto non_mii; 1265 1266 /* Setup advertise */ 1267 if (found_mii_phy(gp)) 1268 features = gp->phy_mii.def->features; 1269 else 1270 features = 0; 1271 1272 advertise = features & ADVERTISE_MASK; 1273 if (gp->phy_mii.advertising != 0) 1274 advertise &= gp->phy_mii.advertising; 1275 1276 autoneg = gp->want_autoneg; 1277 speed = gp->phy_mii.speed; 1278 duplex = gp->phy_mii.duplex; 1279 1280 /* Setup link parameters */ 1281 if (!ep) 1282 goto start_aneg; 1283 if (ep->autoneg == AUTONEG_ENABLE) { 1284 advertise = ep->advertising; 1285 autoneg = 1; 1286 } else { 1287 autoneg = 0; 1288 speed = ethtool_cmd_speed(ep); 1289 duplex = ep->duplex; 1290 } 1291 1292 start_aneg: 1293 /* Sanitize settings based on PHY capabilities */ 1294 if ((features & SUPPORTED_Autoneg) == 0) 1295 autoneg = 0; 1296 if (speed == SPEED_1000 && 1297 !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) 1298 speed = SPEED_100; 1299 if (speed == SPEED_100 && 1300 !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) 1301 speed = SPEED_10; 1302 if (duplex == DUPLEX_FULL && 1303 !(features & (SUPPORTED_1000baseT_Full | 1304 SUPPORTED_100baseT_Full | 1305 SUPPORTED_10baseT_Full))) 1306 duplex = DUPLEX_HALF; 1307 if (speed == 0) 1308 speed = SPEED_10; 1309 1310 /* If we are asleep, we don't try to actually setup the PHY, we 1311 * just store the settings 1312 */ 1313 if (!netif_device_present(gp->dev)) { 1314 gp->phy_mii.autoneg = gp->want_autoneg = autoneg; 1315 gp->phy_mii.speed = speed; 1316 gp->phy_mii.duplex = duplex; 1317 return; 1318 } 1319 1320 /* Configure PHY & start aneg */ 1321 gp->want_autoneg = autoneg; 1322 if (autoneg) { 1323 if (found_mii_phy(gp)) 1324 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); 1325 gp->lstate = link_aneg; 1326 } else { 1327 if (found_mii_phy(gp)) 1328 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); 1329 gp->lstate = link_force_ok; 1330 } 1331 1332 non_mii: 1333 gp->timer_ticks = 0; 1334 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1335 } 1336 1337 /* A link-up condition has occurred, initialize and enable the 1338 * rest of the chip. 1339 */ 1340 static int gem_set_link_modes(struct gem *gp) 1341 { 1342 struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); 1343 int full_duplex, speed, pause; 1344 u32 val; 1345 1346 full_duplex = 0; 1347 speed = SPEED_10; 1348 pause = 0; 1349 1350 if (found_mii_phy(gp)) { 1351 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) 1352 return 1; 1353 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); 1354 speed = gp->phy_mii.speed; 1355 pause = gp->phy_mii.pause; 1356 } else if (gp->phy_type == phy_serialink || 1357 gp->phy_type == phy_serdes) { 1358 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1359 1360 if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes) 1361 full_duplex = 1; 1362 speed = SPEED_1000; 1363 } 1364 1365 netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", 1366 speed, (full_duplex ? 
"full" : "half")); 1367 1368 1369 /* We take the tx queue lock to avoid collisions between 1370 * this code, the tx path and the NAPI-driven error path 1371 */ 1372 __netif_tx_lock(txq, smp_processor_id()); 1373 1374 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); 1375 if (full_duplex) { 1376 val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); 1377 } else { 1378 /* MAC_TXCFG_NBO must be zero. */ 1379 } 1380 writel(val, gp->regs + MAC_TXCFG); 1381 1382 val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); 1383 if (!full_duplex && 1384 (gp->phy_type == phy_mii_mdio0 || 1385 gp->phy_type == phy_mii_mdio1)) { 1386 val |= MAC_XIFCFG_DISE; 1387 } else if (full_duplex) { 1388 val |= MAC_XIFCFG_FLED; 1389 } 1390 1391 if (speed == SPEED_1000) 1392 val |= (MAC_XIFCFG_GMII); 1393 1394 writel(val, gp->regs + MAC_XIFCFG); 1395 1396 /* If gigabit and half-duplex, enable carrier extension 1397 * mode. Else, disable it. 1398 */ 1399 if (speed == SPEED_1000 && !full_duplex) { 1400 val = readl(gp->regs + MAC_TXCFG); 1401 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1402 1403 val = readl(gp->regs + MAC_RXCFG); 1404 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1405 } else { 1406 val = readl(gp->regs + MAC_TXCFG); 1407 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); 1408 1409 val = readl(gp->regs + MAC_RXCFG); 1410 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); 1411 } 1412 1413 if (gp->phy_type == phy_serialink || 1414 gp->phy_type == phy_serdes) { 1415 u32 pcs_lpa = readl(gp->regs + PCS_MIILP); 1416 1417 if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) 1418 pause = 1; 1419 } 1420 1421 if (!full_duplex) 1422 writel(512, gp->regs + MAC_STIME); 1423 else 1424 writel(64, gp->regs + MAC_STIME); 1425 val = readl(gp->regs + MAC_MCCFG); 1426 if (pause) 1427 val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1428 else 1429 val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); 1430 writel(val, gp->regs + MAC_MCCFG); 1431 1432 gem_start_dma(gp); 1433 1434 __netif_tx_unlock(txq); 1435 1436 if (netif_msg_link(gp)) { 1437 if (pause) { 1438 netdev_info(gp->dev, 1439 "Pause is enabled (rxfifo: %d off: %d on: %d)\n", 1440 gp->rx_fifo_sz, 1441 gp->rx_pause_off, 1442 gp->rx_pause_on); 1443 } else { 1444 netdev_info(gp->dev, "Pause is disabled\n"); 1445 } 1446 } 1447 1448 return 0; 1449 } 1450 1451 static int gem_mdio_link_not_up(struct gem *gp) 1452 { 1453 switch (gp->lstate) { 1454 case link_force_ret: 1455 netif_info(gp, link, gp->dev, 1456 "Autoneg failed again, keeping forced mode\n"); 1457 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, 1458 gp->last_forced_speed, DUPLEX_HALF); 1459 gp->timer_ticks = 5; 1460 gp->lstate = link_force_ok; 1461 return 0; 1462 case link_aneg: 1463 /* We try forced modes after a failed aneg only on PHYs that don't 1464 * have "magic_aneg" bit set, which means they internally do the 1465 * while forced-mode thingy. On these, we just restart aneg 1466 */ 1467 if (gp->phy_mii.def->magic_aneg) 1468 return 1; 1469 netif_info(gp, link, gp->dev, "switching to forced 100bt\n"); 1470 /* Try forced modes. */ 1471 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, 1472 DUPLEX_HALF); 1473 gp->timer_ticks = 5; 1474 gp->lstate = link_force_try; 1475 return 0; 1476 case link_force_try: 1477 /* Downgrade from 100 to 10 Mbps if necessary. 1478 * If already at 10Mbps, warn user about the 1479 * situation every 10 ticks. 
1480 */ 1481 if (gp->phy_mii.speed == SPEED_100) { 1482 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, 1483 DUPLEX_HALF); 1484 gp->timer_ticks = 5; 1485 netif_info(gp, link, gp->dev, 1486 "switching to forced 10bt\n"); 1487 return 0; 1488 } else 1489 return 1; 1490 default: 1491 return 0; 1492 } 1493 } 1494 1495 static void gem_link_timer(unsigned long data) 1496 { 1497 struct gem *gp = (struct gem *) data; 1498 struct net_device *dev = gp->dev; 1499 int restart_aneg = 0; 1500 1501 /* There's no point doing anything if we're going to be reset */ 1502 if (gp->reset_task_pending) 1503 return; 1504 1505 if (gp->phy_type == phy_serialink || 1506 gp->phy_type == phy_serdes) { 1507 u32 val = readl(gp->regs + PCS_MIISTAT); 1508 1509 if (!(val & PCS_MIISTAT_LS)) 1510 val = readl(gp->regs + PCS_MIISTAT); 1511 1512 if ((val & PCS_MIISTAT_LS) != 0) { 1513 if (gp->lstate == link_up) 1514 goto restart; 1515 1516 gp->lstate = link_up; 1517 netif_carrier_on(dev); 1518 (void)gem_set_link_modes(gp); 1519 } 1520 goto restart; 1521 } 1522 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { 1523 /* Ok, here we got a link. If we had it due to a forced 1524 * fallback, and we were configured for autoneg, we do 1525 * retry a short autoneg pass. If you know your hub is 1526 * broken, use ethtool ;) 1527 */ 1528 if (gp->lstate == link_force_try && gp->want_autoneg) { 1529 gp->lstate = link_force_ret; 1530 gp->last_forced_speed = gp->phy_mii.speed; 1531 gp->timer_ticks = 5; 1532 if (netif_msg_link(gp)) 1533 netdev_info(dev, 1534 "Got link after fallback, retrying autoneg once...\n"); 1535 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); 1536 } else if (gp->lstate != link_up) { 1537 gp->lstate = link_up; 1538 netif_carrier_on(dev); 1539 if (gem_set_link_modes(gp)) 1540 restart_aneg = 1; 1541 } 1542 } else { 1543 /* If the link was previously up, we restart the 1544 * whole process 1545 */ 1546 if (gp->lstate == link_up) { 1547 gp->lstate = link_down; 1548 netif_info(gp, link, dev, "Link down\n"); 1549 netif_carrier_off(dev); 1550 gem_schedule_reset(gp); 1551 /* The reset task will restart the timer */ 1552 return; 1553 } else if (++gp->timer_ticks > 10) { 1554 if (found_mii_phy(gp)) 1555 restart_aneg = gem_mdio_link_not_up(gp); 1556 else 1557 restart_aneg = 1; 1558 } 1559 } 1560 if (restart_aneg) { 1561 gem_begin_auto_negotiation(gp, NULL); 1562 return; 1563 } 1564 restart: 1565 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1566 } 1567 1568 static void gem_clean_rings(struct gem *gp) 1569 { 1570 struct gem_init_block *gb = gp->init_block; 1571 struct sk_buff *skb; 1572 int i; 1573 dma_addr_t dma_addr; 1574 1575 for (i = 0; i < RX_RING_SIZE; i++) { 1576 struct gem_rxd *rxd; 1577 1578 rxd = &gb->rxd[i]; 1579 if (gp->rx_skbs[i] != NULL) { 1580 skb = gp->rx_skbs[i]; 1581 dma_addr = le64_to_cpu(rxd->buffer); 1582 pci_unmap_page(gp->pdev, dma_addr, 1583 RX_BUF_ALLOC_SIZE(gp), 1584 PCI_DMA_FROMDEVICE); 1585 dev_kfree_skb_any(skb); 1586 gp->rx_skbs[i] = NULL; 1587 } 1588 rxd->status_word = 0; 1589 wmb(); 1590 rxd->buffer = 0; 1591 } 1592 1593 for (i = 0; i < TX_RING_SIZE; i++) { 1594 if (gp->tx_skbs[i] != NULL) { 1595 struct gem_txd *txd; 1596 int frag; 1597 1598 skb = gp->tx_skbs[i]; 1599 gp->tx_skbs[i] = NULL; 1600 1601 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1602 int ent = i & (TX_RING_SIZE - 1); 1603 1604 txd = &gb->txd[ent]; 1605 dma_addr = le64_to_cpu(txd->buffer); 1606 pci_unmap_page(gp->pdev, dma_addr, 1607 
				       le64_to_cpu(txd->control_word) &
				       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}

/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, but the HW seems to love them, so I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by Apple have problems getting back to us,
			 * so we do an additional reset here
			 */
			phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				netdev_warn(gp->dev, "GMAC PHY not responding!\n");
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register.
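		 * MII/GMII PHYs hanging off the MIF get PCS_DMODE_MGM, the
		 * Serialink interface gets PCS_DMODE_SM | PCS_DMODE_GMOE, and
		 * SERDES gets PCS_DMODE_ESM, matching the cases below.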
*/ 1705 if (gp->phy_type == phy_mii_mdio0 || 1706 gp->phy_type == phy_mii_mdio1) { 1707 val = PCS_DMODE_MGM; 1708 } else if (gp->phy_type == phy_serialink) { 1709 val = PCS_DMODE_SM | PCS_DMODE_GMOE; 1710 } else { 1711 val = PCS_DMODE_ESM; 1712 } 1713 1714 writel(val, gp->regs + PCS_DMODE); 1715 } 1716 1717 if (gp->phy_type == phy_mii_mdio0 || 1718 gp->phy_type == phy_mii_mdio1) { 1719 /* Reset and detect MII PHY */ 1720 sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr); 1721 1722 /* Init PHY */ 1723 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1724 gp->phy_mii.def->ops->init(&gp->phy_mii); 1725 } else { 1726 gem_pcs_reset(gp); 1727 gem_pcs_reinit_adv(gp); 1728 } 1729 1730 /* Default aneg parameters */ 1731 gp->timer_ticks = 0; 1732 gp->lstate = link_down; 1733 netif_carrier_off(gp->dev); 1734 1735 /* Print things out */ 1736 if (gp->phy_type == phy_mii_mdio0 || 1737 gp->phy_type == phy_mii_mdio1) 1738 netdev_info(gp->dev, "Found %s PHY\n", 1739 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 1740 1741 gem_begin_auto_negotiation(gp, NULL); 1742 } 1743 1744 static void gem_init_dma(struct gem *gp) 1745 { 1746 u64 desc_dma = (u64) gp->gblock_dvma; 1747 u32 val; 1748 1749 val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); 1750 writel(val, gp->regs + TXDMA_CFG); 1751 1752 writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); 1753 writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); 1754 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 1755 1756 writel(0, gp->regs + TXDMA_KICK); 1757 1758 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 1759 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 1760 writel(val, gp->regs + RXDMA_CFG); 1761 1762 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 1763 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 1764 1765 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1766 1767 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 1768 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 1769 writel(val, gp->regs + RXDMA_PTHRESH); 1770 1771 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 1772 writel(((5 & RXDMA_BLANK_IPKTS) | 1773 ((8 << 12) & RXDMA_BLANK_ITIME)), 1774 gp->regs + RXDMA_BLANK); 1775 else 1776 writel(((5 & RXDMA_BLANK_IPKTS) | 1777 ((4 << 12) & RXDMA_BLANK_ITIME)), 1778 gp->regs + RXDMA_BLANK); 1779 } 1780 1781 static u32 gem_setup_multicast(struct gem *gp) 1782 { 1783 u32 rxcfg = 0; 1784 int i; 1785 1786 if ((gp->dev->flags & IFF_ALLMULTI) || 1787 (netdev_mc_count(gp->dev) > 256)) { 1788 for (i=0; i<16; i++) 1789 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); 1790 rxcfg |= MAC_RXCFG_HFE; 1791 } else if (gp->dev->flags & IFF_PROMISC) { 1792 rxcfg |= MAC_RXCFG_PROM; 1793 } else { 1794 u16 hash_table[16]; 1795 u32 crc; 1796 struct netdev_hw_addr *ha; 1797 int i; 1798 1799 memset(hash_table, 0, sizeof(hash_table)); 1800 netdev_for_each_mc_addr(ha, gp->dev) { 1801 crc = ether_crc_le(6, ha->addr); 1802 crc >>= 24; 1803 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 1804 } 1805 for (i=0; i<16; i++) 1806 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); 1807 rxcfg |= MAC_RXCFG_HFE; 1808 } 1809 1810 return rxcfg; 1811 } 1812 1813 static void gem_init_mac(struct gem *gp) 1814 { 1815 unsigned char *e = &gp->dev->dev_addr[0]; 1816 1817 writel(0x1bf0, gp->regs + MAC_SNDPAUSE); 1818 1819 writel(0x00, gp->regs + MAC_IPG0); 1820 writel(0x08, gp->regs + MAC_IPG1); 1821 writel(0x04, gp->regs + MAC_IPG2); 1822 writel(0x40, gp->regs + MAC_STIME); 1823 writel(0x40, gp->regs + MAC_MINFSZ); 1824 1825 /* Ethernet payload + 
header + FCS + optional VLAN tag. */ 1826 writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); 1827 1828 writel(0x07, gp->regs + MAC_PASIZE); 1829 writel(0x04, gp->regs + MAC_JAMSIZE); 1830 writel(0x10, gp->regs + MAC_ATTLIM); 1831 writel(0x8808, gp->regs + MAC_MCTYPE); 1832 1833 writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); 1834 1835 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 1836 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 1837 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 1838 1839 writel(0, gp->regs + MAC_ADDR3); 1840 writel(0, gp->regs + MAC_ADDR4); 1841 writel(0, gp->regs + MAC_ADDR5); 1842 1843 writel(0x0001, gp->regs + MAC_ADDR6); 1844 writel(0xc200, gp->regs + MAC_ADDR7); 1845 writel(0x0180, gp->regs + MAC_ADDR8); 1846 1847 writel(0, gp->regs + MAC_AFILT0); 1848 writel(0, gp->regs + MAC_AFILT1); 1849 writel(0, gp->regs + MAC_AFILT2); 1850 writel(0, gp->regs + MAC_AF21MSK); 1851 writel(0, gp->regs + MAC_AF0MSK); 1852 1853 gp->mac_rx_cfg = gem_setup_multicast(gp); 1854 #ifdef STRIP_FCS 1855 gp->mac_rx_cfg |= MAC_RXCFG_SFCS; 1856 #endif 1857 writel(0, gp->regs + MAC_NCOLL); 1858 writel(0, gp->regs + MAC_FASUCC); 1859 writel(0, gp->regs + MAC_ECOLL); 1860 writel(0, gp->regs + MAC_LCOLL); 1861 writel(0, gp->regs + MAC_DTIMER); 1862 writel(0, gp->regs + MAC_PATMPS); 1863 writel(0, gp->regs + MAC_RFCTR); 1864 writel(0, gp->regs + MAC_LERR); 1865 writel(0, gp->regs + MAC_AERR); 1866 writel(0, gp->regs + MAC_FCSERR); 1867 writel(0, gp->regs + MAC_RXCVERR); 1868 1869 /* Clear RX/TX/MAC/XIF config, we will set these up and enable 1870 * them once a link is established. 1871 */ 1872 writel(0, gp->regs + MAC_TXCFG); 1873 writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); 1874 writel(0, gp->regs + MAC_MCCFG); 1875 writel(0, gp->regs + MAC_XIFCFG); 1876 1877 /* Setup MAC interrupts. We want to get all of the interesting 1878 * counter expiration events, but we do not want to hear about 1879 * normal rx/tx as the DMA engine tells us that. 1880 */ 1881 writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); 1882 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 1883 1884 /* Don't enable even the PAUSE interrupts for now, we 1885 * make no use of those events other than to record them. 1886 */ 1887 writel(0xffffffff, gp->regs + MAC_MCMASK); 1888 1889 /* Don't enable GEM's WOL in normal operations 1890 */ 1891 if (gp->has_wol) 1892 writel(0, gp->regs + WOL_WAKECSR); 1893 } 1894 1895 static void gem_init_pause_thresholds(struct gem *gp) 1896 { 1897 u32 cfg; 1898 1899 /* Calculate pause thresholds. Setting the OFF threshold to the 1900 * full RX fifo size effectively disables PAUSE generation which 1901 * is what we do for 10/100 only GEMs which have FIFOs too small 1902 * to make real gains from PAUSE. 
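	 * For illustration only: with the standard 1500 byte MTU, rx_buf_sz
	 * is 1518 so max_frame = (1518 + 4 + 64) & ~63 = 1536.  On a 20KB
	 * FIFO GEM that yields off = 20480 - 2 * 1536 = 17408 and
	 * on = 17408 - 1536 = 15872, i.e. roughly "send XOFF above ~17KB of
	 * FIFO occupancy, send XON again below ~15.5KB".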
 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}

	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}

static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess.  MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
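	 * If neither MDI0 nor MDI1 reports an attached PHY, we fall back to
	 * the internal Serialink, or to SERDES when the firmware "shared-pins"
	 * property says the pins are wired that way.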
1989 */ 1990 1991 if (mif_cfg & MIF_CFG_MDI1) { 1992 gp->phy_type = phy_mii_mdio1; 1993 mif_cfg |= MIF_CFG_PSELECT; 1994 writel(mif_cfg, gp->regs + MIF_CFG); 1995 } else if (mif_cfg & MIF_CFG_MDI0) { 1996 gp->phy_type = phy_mii_mdio0; 1997 mif_cfg &= ~MIF_CFG_PSELECT; 1998 writel(mif_cfg, gp->regs + MIF_CFG); 1999 } else { 2000 #ifdef CONFIG_SPARC 2001 const char *p; 2002 2003 p = of_get_property(gp->of_node, "shared-pins", NULL); 2004 if (p && !strcmp(p, "serdes")) 2005 gp->phy_type = phy_serdes; 2006 else 2007 #endif 2008 gp->phy_type = phy_serialink; 2009 } 2010 if (gp->phy_type == phy_mii_mdio1 || 2011 gp->phy_type == phy_mii_mdio0) { 2012 int i; 2013 2014 for (i = 0; i < 32; i++) { 2015 gp->mii_phy_addr = i; 2016 if (phy_read(gp, MII_BMCR) != 0xffff) 2017 break; 2018 } 2019 if (i == 32) { 2020 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { 2021 pr_err("RIO MII phy will not respond\n"); 2022 return -1; 2023 } 2024 gp->phy_type = phy_serdes; 2025 } 2026 } 2027 2028 /* Fetch the FIFO configurations now too. */ 2029 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; 2030 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; 2031 2032 if (pdev->vendor == PCI_VENDOR_ID_SUN) { 2033 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { 2034 if (gp->tx_fifo_sz != (9 * 1024) || 2035 gp->rx_fifo_sz != (20 * 1024)) { 2036 pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2037 gp->tx_fifo_sz, gp->rx_fifo_sz); 2038 return -1; 2039 } 2040 gp->swrst_base = 0; 2041 } else { 2042 if (gp->tx_fifo_sz != (2 * 1024) || 2043 gp->rx_fifo_sz != (2 * 1024)) { 2044 pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2045 gp->tx_fifo_sz, gp->rx_fifo_sz); 2046 return -1; 2047 } 2048 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; 2049 } 2050 } 2051 2052 return 0; 2053 } 2054 2055 static void gem_reinit_chip(struct gem *gp) 2056 { 2057 /* Reset the chip */ 2058 gem_reset(gp); 2059 2060 /* Make sure ints are disabled */ 2061 gem_disable_ints(gp); 2062 2063 /* Allocate & setup ring buffers */ 2064 gem_init_rings(gp); 2065 2066 /* Configure pause thresholds */ 2067 gem_init_pause_thresholds(gp); 2068 2069 /* Init DMA & MAC engines */ 2070 gem_init_dma(gp); 2071 gem_init_mac(gp); 2072 } 2073 2074 2075 static void gem_stop_phy(struct gem *gp, int wol) 2076 { 2077 u32 mifcfg; 2078 2079 /* Let the chip settle down a bit, it seems that helps 2080 * for sleep mode on some models 2081 */ 2082 msleep(10); 2083 2084 /* Make sure we aren't polling PHY status change. 
	 * We don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this
		 * benign state or we may 1) eat more current, 2) damage
		 * some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}
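/* Everywhere the station address is programmed -- MAC_ADDR0..2 in
 * gem_init_mac(), the WOL match registers above, gem_set_mac_address()
 * below -- it is packed the same way: three big-endian 16-bit halves,
 * low-order bytes first.  A minimal sketch of that packing follows;
 * it is illustration only, the helper is hypothetical and not used by
 * the driver.
 */
#if 0
static void gem_pack_mac_sketch(const u8 e[6], u16 half[3])
{
	half[0] = (e[4] << 8) | e[5];	/* MAC_ADDR0 / WOL_MATCH0 */
	half[1] = (e[2] << 8) | e[3];	/* MAC_ADDR1 / WOL_MATCH1 */
	half[2] = (e[0] << 8) | e[1];	/* MAC_ADDR2 / WOL_MATCH2 */

	/* e.g. 08:00:20:12:34:56 -> 0x3456, 0x2012, 0x0800.  The fixed
	 * MAC_ADDR6..8 values written in gem_init_mac() are the 802.3x
	 * pause address 01:80:c2:00:00:01 in the same layout.
	 */
}
#endif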
static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	/* Enable the cell */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	rc = pci_enable_device(gp->pdev);
	if (rc) {
		netdev_err(dev, "Failed to enable chip on PCI bus !\n");

		/* Put cell and forget it for now, it will be considered
		 * as still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		return -ENXIO;
	}
	pci_set_master(gp->pdev);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	/* An interrupt might come in handy */
	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
	if (rc) {
		netdev_err(dev, "failed to request irq !\n");

		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);
		return rc;
	}

	/* Mark us as attached again if we come from resume().  This has
	 * no effect if we weren't detached, and it needs to be done now.
	 */
	netif_device_attach(dev);

	/* Restart NAPI & queues */
	gem_netif_start(gp);

	/* Detect & init PHY, start autoneg etc...  This will
	 * eventually result in starting DMA operations when
	 * the link is up
	 */
	gem_init_phy(gp);

	return 0;
}

static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Stop NAPI and stop tx queue */
	gem_netif_stop(gp);

	/* Make sure ints are disabled.  We don't care about
	 * synchronizing as NAPI is disabled, thus a stray
	 * interrupt will do nothing bad (our irq handler
	 * just schedules NAPI)
	 */
	gem_disable_ints(gp);

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* We cannot cancel the reset task while holding the rtnl lock:
	 * the task takes the rtnl lock itself, so waiting for it here
	 * would be an A->B / B->A deadlock.  This is not an issue
	 * however, as the reset task is synchronized vs. us (rtnl_lock)
	 * and will do nothing if the device is down or suspended.  We
	 * do still clear reset_task_pending to avoid a spurious reset
	 * later on in case we do resume before it gets scheduled.
	 */
	gp->reset_task_pending = 0;

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Shut the PHY down if needed and set up WOL */
	gem_stop_phy(gp, wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Cell not needed either if there is no WOL */
	if (!wol)
		gem_put_cell(gp);
}

static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	/* Lock out the network stack (essentially shield ourselves
	 * against a racing open, close, control call, or suspend)
	 */
	rtnl_lock();

	/* Skip the reset task if suspended or closed, or if it's
	 * been cancelled by gem_do_stop (see comment there)
	 */
	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
	    !gp->reset_task_pending) {
		rtnl_unlock();
		return;
	}

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Stop NAPI and tx */
	gem_netif_stop(gp);

	/* Reset the chip & rings */
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);

	/* Restart NAPI and Tx */
	gem_netif_start(gp);

	/* We are back ! */
	gp->reset_task_pending = 0;

	/* If the link is not up, restart autoneg, else restart the
	 * polling timer
	 */
	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}

static int gem_open(struct net_device *dev)
{
	/* We allow open while suspended, we just do nothing,
	 * the chip will be initialized in resume()
	 */
	if (netif_device_present(dev))
		return gem_do_start(dev);
	return 0;
}

static int gem_close(struct net_device *dev)
{
	if (netif_device_present(dev))
		gem_do_stop(dev, 0);

	return 0;
}

#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* Lock the network stack first to avoid racing with open/close,
	 * reset task and settings calls
	 */
	rtnl_lock();

	/* Not running, mark ourselves non-present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");

	/* Tell the network stack we're gone.  gem_do_stop() below will
	 * synchronize with TX, stop NAPI etc...
	 */
	netif_device_detach(dev);

	/* Switch off chip, remember WOL setting */
	gp->asleep_wol = !!gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}

static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* See locking comment in gem_suspend */
	rtnl_lock();

	/* Not running, mark ourselves present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	/* Restart chip.  If that fails there isn't much we can do, we
	 * leave things stopped.
	 */
	gem_do_start(dev);

	/* If we had WOL enabled, the cell clock was never turned off
	 * during sleep (gem_do_stop() skipped its gem_put_cell()), so
	 * after gem_do_start() took another reference we end up being
	 * unbalanced by one.  Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}
#endif /* CONFIG_PM */

static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this.  Let's also not poke at registers
	 * while the reset task is going on.
	 *
	 * TODO: Move stats collection elsewhere (link timer ?) and
	 * make this a nop to avoid all those synchro issues
	 */
	if (!netif_device_present(dev) || !netif_running(dev))
		goto bail;

	/* Better safe than sorry...
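	 * On PowerMacs the cell clock may already have been turned off
	 * at this point, in which case touching the MAC counter registers
	 * is unsafe, so the WARN_ON below doubles as a guard before we
	 * read them.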
	 */
	if (WARN_ON(!gp->cell_enabled))
		goto bail;

	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
bail:
	return &dev->stats;
}

static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = netdev_priv(dev);
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	return 0;
}

static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	if (!netif_running(dev) || !netif_device_present(dev))
		return;

	/* Better safe than sorry... */
	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
		return;

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);
}

/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU	68
#if 1
#define GEM_MAX_MTU	1500
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	gem_netif_stop(gp);
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);
	gem_netif_start(gp);

	return 0;
}

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}

static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			cmd->supported = gp->phy_mii.def->features;
		else
			cmd->supported = (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		cmd->autoneg = gp->want_autoneg;
		ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (cmd->advertising == 0)
			cmd->advertising = cmd->supported;
	} else { // XXX PCS ?
		cmd->supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		cmd->advertising = cmd->supported;
		ethtool_cmd_speed_set(cmd, 0);
		cmd->duplex = cmd->port = cmd->phy_address =
			cmd->transceiver = cmd->autoneg = 0;

		/* serdes usually means a fibre connector, with mostly
		 * fixed settings
		 */
		if (gp->phy_type == phy_serdes) {
			cmd->port = PORT_FIBRE;
			cmd->supported = (SUPPORTED_1000baseT_Half |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_FIBRE | SUPPORTED_Autoneg |
					  SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			cmd->advertising = cmd->supported;
			cmd->transceiver = XCVR_INTERNAL;
			if (gp->lstate == link_up)
				ethtool_cmd_speed_set(cmd, SPEED_1000);
			cmd->duplex = DUPLEX_FULL;
			cmd->autoneg = 1;
		}
	}
	cmd->maxtxpkt = cmd->maxrxpkt = 0;

	return 0;
}

static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE &&
	    cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process.
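	 * The link timer is stopped first so that it cannot race with
	 * the new parameters; gem_begin_auto_negotiation() then restarts
	 * the link state machine with them.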
	 */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
	}

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, NULL);
	}

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);
	gp->msg_enable = value;
}


/* Add more when I understand how to program the chip */
/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */

#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_settings		= gem_get_settings,
	.set_settings		= gem_set_settings,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;

	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
	 * netif_device_present() is true and holds rtnl_lock for us
	 * so we have nothing to worry about
	 */

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = __phy_read(gp, data->phy_id & 0x1f,
					   data->reg_num & 0x1f);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
			    data->val_in);
		rc = 0;
		break;
	}
	return rc;
}

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM.
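 *
 * The byte signature matched below appears to be a PCI VPD read-only
 * resource (large resource tag 0x90) carrying an "NA" (network address)
 * keyword with a 6 byte payload, i.e. in the ROM image:
 *
 *	0x90 0x00 0x09  'N' 'A' 0x06  a0 a1 a2 a3 a4 a5
 *
 * where a0..a5 is the station address that ends up in dev_addr[].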
 */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif /* not Sparc and not PPC */

static int __devinit gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		pr_err("%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, 6);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		/* Ensure reset task is truly gone */
		cancel_work_sync(&gp->reset_task);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);

		pci_set_drvdata(pdev, NULL);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_rx_mode	= gem_set_multicast,
	.ndo_do_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= gem_poll_controller,
#endif
};

static int __devinit gem_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	printk_once(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed via the config space).
	 * It won't stay powered up until the interface is brought up,
	 * however, so we can't rely on register configuration done at
	 * this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable MMIO operation, aborting\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes. */

	/* All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine.  However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		pr_err("Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	init_timer(&gp->link_timer);
	gp->link_timer.function = gem_link_timer;
	gp->link_timer.data = (unsigned long) gp;

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		pr_err("Cannot map device registers, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node.  We use it for clock control.
	 */
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* Only Apple version supports WOL afaik */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _phy_read;
	gp->phy_mii.mdio_write = _phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
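	 *
	 * (The init block holds both descriptor rings; its bus address,
	 * gblock_dvma, is what the DMA engine is later pointed at when
	 * the rings are programmed.)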
2956 */ 2957 gp->init_block = (struct gem_init_block *) 2958 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 2959 &gp->gblock_dvma); 2960 if (!gp->init_block) { 2961 pr_err("Cannot allocate init block, aborting\n"); 2962 err = -ENOMEM; 2963 goto err_out_iounmap; 2964 } 2965 2966 if (gem_get_device_address(gp)) 2967 goto err_out_free_consistent; 2968 2969 dev->netdev_ops = &gem_netdev_ops; 2970 netif_napi_add(dev, &gp->napi, gem_poll, 64); 2971 dev->ethtool_ops = &gem_ethtool_ops; 2972 dev->watchdog_timeo = 5 * HZ; 2973 dev->dma = 0; 2974 2975 /* Set that now, in case PM kicks in now */ 2976 pci_set_drvdata(pdev, dev); 2977 2978 /* We can do scatter/gather and HW checksum */ 2979 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 2980 dev->features |= dev->hw_features | NETIF_F_RXCSUM; 2981 if (pci_using_dac) 2982 dev->features |= NETIF_F_HIGHDMA; 2983 2984 /* Register with kernel */ 2985 if (register_netdev(dev)) { 2986 pr_err("Cannot register net device, aborting\n"); 2987 err = -ENOMEM; 2988 goto err_out_free_consistent; 2989 } 2990 2991 /* Undo the get_cell with appropriate locking (we could use 2992 * ndo_init/uninit but that would be even more clumsy imho) 2993 */ 2994 rtnl_lock(); 2995 gem_put_cell(gp); 2996 rtnl_unlock(); 2997 2998 netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", 2999 dev->dev_addr); 3000 return 0; 3001 3002 err_out_free_consistent: 3003 gem_remove_one(pdev); 3004 err_out_iounmap: 3005 gem_put_cell(gp); 3006 iounmap(gp->regs); 3007 3008 err_out_free_res: 3009 pci_release_regions(pdev); 3010 3011 err_out_free_netdev: 3012 free_netdev(dev); 3013 err_disable_device: 3014 pci_disable_device(pdev); 3015 return err; 3016 3017 } 3018 3019 3020 static struct pci_driver gem_driver = { 3021 .name = GEM_MODULE_NAME, 3022 .id_table = gem_pci_tbl, 3023 .probe = gem_init_one, 3024 .remove = gem_remove_one, 3025 #ifdef CONFIG_PM 3026 .suspend = gem_suspend, 3027 .resume = gem_resume, 3028 #endif /* CONFIG_PM */ 3029 }; 3030 3031 static int __init gem_init(void) 3032 { 3033 return pci_register_driver(&gem_driver); 3034 } 3035 3036 static void __exit gem_cleanup(void) 3037 { 3038 pci_unregister_driver(&gem_driver); 3039 } 3040 3041 module_init(gem_init); 3042 module_exit(gem_cleanup); 3043