1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver 4 * 5 * Copyright 2008 JMicron Technology Corporation 6 * https://www.jmicron.com/ 7 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org> 8 * 9 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> 10 */ 11 12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 14 #include <linux/module.h> 15 #include <linux/kernel.h> 16 #include <linux/pci.h> 17 #include <linux/netdevice.h> 18 #include <linux/etherdevice.h> 19 #include <linux/ethtool.h> 20 #include <linux/mii.h> 21 #include <linux/crc32.h> 22 #include <linux/delay.h> 23 #include <linux/spinlock.h> 24 #include <linux/in.h> 25 #include <linux/ip.h> 26 #include <linux/ipv6.h> 27 #include <linux/tcp.h> 28 #include <linux/udp.h> 29 #include <linux/if_vlan.h> 30 #include <linux/slab.h> 31 #include <net/ip6_checksum.h> 32 #include "jme.h" 33 34 static int force_pseudohp = -1; 35 static int no_pseudohp = -1; 36 static int no_extplug = -1; 37 module_param(force_pseudohp, int, 0); 38 MODULE_PARM_DESC(force_pseudohp, 39 "Enable pseudo hot-plug feature manually by driver instead of BIOS."); 40 module_param(no_pseudohp, int, 0); 41 MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); 42 module_param(no_extplug, int, 0); 43 MODULE_PARM_DESC(no_extplug, 44 "Do not use external plug signal for pseudo hot-plug."); 45 46 static int 47 jme_mdio_read(struct net_device *netdev, int phy, int reg) 48 { 49 struct jme_adapter *jme = netdev_priv(netdev); 50 int i, val, again = (reg == MII_BMSR) ? 1 : 0; 51 52 read_again: 53 jwrite32(jme, JME_SMI, SMI_OP_REQ | 54 smi_phy_addr(phy) | 55 smi_reg_addr(reg)); 56 57 wmb(); 58 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 59 udelay(20); 60 val = jread32(jme, JME_SMI); 61 if ((val & SMI_OP_REQ) == 0) 62 break; 63 } 64 65 if (i == 0) { 66 pr_err("phy(%d) read timeout : %d\n", phy, reg); 67 return 0; 68 } 69 70 if (again--) 71 goto read_again; 72 73 return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; 74 } 75 76 static void 77 jme_mdio_write(struct net_device *netdev, 78 int phy, int reg, int val) 79 { 80 struct jme_adapter *jme = netdev_priv(netdev); 81 int i; 82 83 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | 84 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 85 smi_phy_addr(phy) | smi_reg_addr(reg)); 86 87 wmb(); 88 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 89 udelay(20); 90 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) 91 break; 92 } 93 94 if (i == 0) 95 pr_err("phy(%d) write timeout : %d\n", phy, reg); 96 } 97 98 static inline void 99 jme_reset_phy_processor(struct jme_adapter *jme) 100 { 101 u32 val; 102 103 jme_mdio_write(jme->dev, 104 jme->mii_if.phy_id, 105 MII_ADVERTISE, ADVERTISE_ALL | 106 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 107 108 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) 109 jme_mdio_write(jme->dev, 110 jme->mii_if.phy_id, 111 MII_CTRL1000, 112 ADVERTISE_1000FULL | ADVERTISE_1000HALF); 113 114 val = jme_mdio_read(jme->dev, 115 jme->mii_if.phy_id, 116 MII_BMCR); 117 118 jme_mdio_write(jme->dev, 119 jme->mii_if.phy_id, 120 MII_BMCR, val | BMCR_RESET); 121 } 122 123 static void 124 jme_setup_wakeup_frame(struct jme_adapter *jme, 125 const u32 *mask, u32 crc, int fnr) 126 { 127 int i; 128 129 /* 130 * Setup CRC pattern 131 */ 132 jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); 133 wmb(); 134 jwrite32(jme, JME_WFODP, crc); 135 wmb(); 136 137 /* 138 * Setup Mask 139 */ 140 for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { 141 jwrite32(jme, JME_WFOI, 
142 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | 143 (fnr & WFOI_FRAME_SEL)); 144 wmb(); 145 jwrite32(jme, JME_WFODP, mask[i]); 146 wmb(); 147 } 148 } 149 150 static inline void 151 jme_mac_rxclk_off(struct jme_adapter *jme) 152 { 153 jme->reg_gpreg1 |= GPREG1_RXCLKOFF; 154 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); 155 } 156 157 static inline void 158 jme_mac_rxclk_on(struct jme_adapter *jme) 159 { 160 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; 161 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); 162 } 163 164 static inline void 165 jme_mac_txclk_off(struct jme_adapter *jme) 166 { 167 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); 168 jwrite32f(jme, JME_GHC, jme->reg_ghc); 169 } 170 171 static inline void 172 jme_mac_txclk_on(struct jme_adapter *jme) 173 { 174 u32 speed = jme->reg_ghc & GHC_SPEED; 175 if (speed == GHC_SPEED_1000M) 176 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; 177 else 178 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; 179 jwrite32f(jme, JME_GHC, jme->reg_ghc); 180 } 181 182 static inline void 183 jme_reset_ghc_speed(struct jme_adapter *jme) 184 { 185 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); 186 jwrite32f(jme, JME_GHC, jme->reg_ghc); 187 } 188 189 static inline void 190 jme_reset_250A2_workaround(struct jme_adapter *jme) 191 { 192 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | 193 GPREG1_RSSPATCH); 194 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); 195 } 196 197 static inline void 198 jme_assert_ghc_reset(struct jme_adapter *jme) 199 { 200 jme->reg_ghc |= GHC_SWRST; 201 jwrite32f(jme, JME_GHC, jme->reg_ghc); 202 } 203 204 static inline void 205 jme_clear_ghc_reset(struct jme_adapter *jme) 206 { 207 jme->reg_ghc &= ~GHC_SWRST; 208 jwrite32f(jme, JME_GHC, jme->reg_ghc); 209 } 210 211 static void 212 jme_reset_mac_processor(struct jme_adapter *jme) 213 { 214 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 215 u32 crc = 0xCDCDCDCD; 216 u32 gpreg0; 217 int i; 218 219 jme_reset_ghc_speed(jme); 220 jme_reset_250A2_workaround(jme); 221 222 jme_mac_rxclk_on(jme); 223 jme_mac_txclk_on(jme); 224 udelay(1); 225 jme_assert_ghc_reset(jme); 226 udelay(1); 227 jme_mac_rxclk_off(jme); 228 jme_mac_txclk_off(jme); 229 udelay(1); 230 jme_clear_ghc_reset(jme); 231 udelay(1); 232 jme_mac_rxclk_on(jme); 233 jme_mac_txclk_on(jme); 234 udelay(1); 235 jme_mac_rxclk_off(jme); 236 jme_mac_txclk_off(jme); 237 238 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 239 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 240 jwrite32(jme, JME_RXQDC, 0x00000000); 241 jwrite32(jme, JME_RXNDA, 0x00000000); 242 jwrite32(jme, JME_TXDBA_LO, 0x00000000); 243 jwrite32(jme, JME_TXDBA_HI, 0x00000000); 244 jwrite32(jme, JME_TXQDC, 0x00000000); 245 jwrite32(jme, JME_TXNDA, 0x00000000); 246 247 jwrite32(jme, JME_RXMCHT_LO, 0x00000000); 248 jwrite32(jme, JME_RXMCHT_HI, 0x00000000); 249 for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) 250 jme_setup_wakeup_frame(jme, mask, crc, i); 251 if (jme->fpgaver) 252 gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; 253 else 254 gpreg0 = GPREG0_DEFAULT; 255 jwrite32(jme, JME_GPREG0, gpreg0); 256 } 257 258 static inline void 259 jme_clear_pm_enable_wol(struct jme_adapter *jme) 260 { 261 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); 262 } 263 264 static inline void 265 jme_clear_pm_disable_wol(struct jme_adapter *jme) 266 { 267 jwrite32(jme, JME_PMCS, PMCS_STMASK); 268 } 269 270 static int 271 jme_reload_eeprom(struct jme_adapter *jme) 272 { 273 u32 val; 274 int i; 275 276 val = jread32(jme, JME_SMBCSR); 277 278 if (val & SMBCSR_EEPROMD) { 279 val |= SMBCSR_CNACK; 280 jwrite32(jme, 
JME_SMBCSR, val); 281 val |= SMBCSR_RELOAD; 282 jwrite32(jme, JME_SMBCSR, val); 283 mdelay(12); 284 285 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { 286 mdelay(1); 287 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) 288 break; 289 } 290 291 if (i == 0) { 292 pr_err("eeprom reload timeout\n"); 293 return -EIO; 294 } 295 } 296 297 return 0; 298 } 299 300 static void 301 jme_load_macaddr(struct net_device *netdev) 302 { 303 struct jme_adapter *jme = netdev_priv(netdev); 304 unsigned char macaddr[ETH_ALEN]; 305 u32 val; 306 307 spin_lock_bh(&jme->macaddr_lock); 308 val = jread32(jme, JME_RXUMA_LO); 309 macaddr[0] = (val >> 0) & 0xFF; 310 macaddr[1] = (val >> 8) & 0xFF; 311 macaddr[2] = (val >> 16) & 0xFF; 312 macaddr[3] = (val >> 24) & 0xFF; 313 val = jread32(jme, JME_RXUMA_HI); 314 macaddr[4] = (val >> 0) & 0xFF; 315 macaddr[5] = (val >> 8) & 0xFF; 316 eth_hw_addr_set(netdev, macaddr); 317 spin_unlock_bh(&jme->macaddr_lock); 318 } 319 320 static inline void 321 jme_set_rx_pcc(struct jme_adapter *jme, int p) 322 { 323 switch (p) { 324 case PCC_OFF: 325 jwrite32(jme, JME_PCCRX0, 326 ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 327 ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 328 break; 329 case PCC_P1: 330 jwrite32(jme, JME_PCCRX0, 331 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 332 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 333 break; 334 case PCC_P2: 335 jwrite32(jme, JME_PCCRX0, 336 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 337 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 338 break; 339 case PCC_P3: 340 jwrite32(jme, JME_PCCRX0, 341 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 342 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 343 break; 344 default: 345 break; 346 } 347 wmb(); 348 349 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 350 netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); 351 } 352 353 static void 354 jme_start_irq(struct jme_adapter *jme) 355 { 356 register struct dynpcc_info *dpi = &(jme->dpi); 357 358 jme_set_rx_pcc(jme, PCC_P1); 359 dpi->cur = PCC_P1; 360 dpi->attempt = PCC_P1; 361 dpi->cnt = 0; 362 363 jwrite32(jme, JME_PCCTX, 364 ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | 365 ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | 366 PCCTXQ0_EN 367 ); 368 369 /* 370 * Enable Interrupts 371 */ 372 jwrite32(jme, JME_IENS, INTR_ENABLE); 373 } 374 375 static inline void 376 jme_stop_irq(struct jme_adapter *jme) 377 { 378 /* 379 * Disable Interrupts 380 */ 381 jwrite32f(jme, JME_IENC, INTR_ENABLE); 382 } 383 384 static u32 385 jme_linkstat_from_phy(struct jme_adapter *jme) 386 { 387 u32 phylink, bmsr; 388 389 phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); 390 bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); 391 if (bmsr & BMSR_ANCOMP) 392 phylink |= PHY_LINK_AUTONEG_COMPLETE; 393 394 return phylink; 395 } 396 397 static inline void 398 jme_set_phyfifo_5level(struct jme_adapter *jme) 399 { 400 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 401 } 402 403 static inline void 404 jme_set_phyfifo_8level(struct jme_adapter *jme) 405 { 406 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 407 } 408 409 static int 410 jme_check_link(struct net_device *netdev, int testonly) 411 { 412 struct jme_adapter *jme = netdev_priv(netdev); 413 u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; 414 char linkmsg[64]; 415 int rc = 0; 416 417 linkmsg[0] = '\0'; 418 419 if (jme->fpgaver) 420 phylink = jme_linkstat_from_phy(jme); 421 else 422 phylink = jread32(jme, JME_PHY_LINK); 423 424 if (phylink & PHY_LINK_UP) { 425 if 
(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { 426 /* 427 * If we did not enable AN 428 * Speed/Duplex Info should be obtained from SMI 429 */ 430 phylink = PHY_LINK_UP; 431 432 bmcr = jme_mdio_read(jme->dev, 433 jme->mii_if.phy_id, 434 MII_BMCR); 435 436 phylink |= ((bmcr & BMCR_SPEED1000) && 437 (bmcr & BMCR_SPEED100) == 0) ? 438 PHY_LINK_SPEED_1000M : 439 (bmcr & BMCR_SPEED100) ? 440 PHY_LINK_SPEED_100M : 441 PHY_LINK_SPEED_10M; 442 443 phylink |= (bmcr & BMCR_FULLDPLX) ? 444 PHY_LINK_DUPLEX : 0; 445 446 strcat(linkmsg, "Forced: "); 447 } else { 448 /* 449 * Keep polling for speed/duplex resolve complete 450 */ 451 while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && 452 --cnt) { 453 454 udelay(1); 455 456 if (jme->fpgaver) 457 phylink = jme_linkstat_from_phy(jme); 458 else 459 phylink = jread32(jme, JME_PHY_LINK); 460 } 461 if (!cnt) 462 pr_err("Waiting speed resolve timeout\n"); 463 464 strcat(linkmsg, "ANed: "); 465 } 466 467 if (jme->phylink == phylink) { 468 rc = 1; 469 goto out; 470 } 471 if (testonly) 472 goto out; 473 474 jme->phylink = phylink; 475 476 /* 477 * The speed/duplex setting of jme->reg_ghc already cleared 478 * by jme_reset_mac_processor() 479 */ 480 switch (phylink & PHY_LINK_SPEED_MASK) { 481 case PHY_LINK_SPEED_10M: 482 jme->reg_ghc |= GHC_SPEED_10M; 483 strcat(linkmsg, "10 Mbps, "); 484 break; 485 case PHY_LINK_SPEED_100M: 486 jme->reg_ghc |= GHC_SPEED_100M; 487 strcat(linkmsg, "100 Mbps, "); 488 break; 489 case PHY_LINK_SPEED_1000M: 490 jme->reg_ghc |= GHC_SPEED_1000M; 491 strcat(linkmsg, "1000 Mbps, "); 492 break; 493 default: 494 break; 495 } 496 497 if (phylink & PHY_LINK_DUPLEX) { 498 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 499 jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); 500 jme->reg_ghc |= GHC_DPX; 501 } else { 502 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 503 TXMCS_BACKOFF | 504 TXMCS_CARRIERSENSE | 505 TXMCS_COLLISION); 506 jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); 507 } 508 509 jwrite32(jme, JME_GHC, jme->reg_ghc); 510 511 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 512 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | 513 GPREG1_RSSPATCH); 514 if (!(phylink & PHY_LINK_DUPLEX)) 515 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; 516 switch (phylink & PHY_LINK_SPEED_MASK) { 517 case PHY_LINK_SPEED_10M: 518 jme_set_phyfifo_8level(jme); 519 jme->reg_gpreg1 |= GPREG1_RSSPATCH; 520 break; 521 case PHY_LINK_SPEED_100M: 522 jme_set_phyfifo_5level(jme); 523 jme->reg_gpreg1 |= GPREG1_RSSPATCH; 524 break; 525 case PHY_LINK_SPEED_1000M: 526 jme_set_phyfifo_8level(jme); 527 break; 528 default: 529 break; 530 } 531 } 532 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); 533 534 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 535 "Full-Duplex, " : 536 "Half-Duplex, "); 537 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 
"MDI-X" :
				"MDI");
		netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		netif_info(jme, link, jme->dev, "Link is down\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
					   &(txring->dmaalloc),
					   GFP_ATOMIC);

	if (!txring->alloc)
		goto err_set_null;

	/*
	 * 16-byte alignment
	 */
	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
				     RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	txring->bufinf = kcalloc(jme->tx_ring_size,
				 sizeof(struct jme_buffer_info),
				 GFP_ATOMIC);
	if (unlikely(!(txring->bufinf)))
		goto err_free_txring;

	return 0;

err_free_txring:
	dma_free_coherent(&(jme->pdev->dev),
			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
			  txring->alloc,
			  txring->dmaalloc);

err_set_null:
	txring->desc = NULL;
	txring->dmaalloc = 0;
	txring->dma = 0;
	txring->bufinf = NULL;

	return -ENOMEM;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi;

	if (txring->alloc) {
		if (txring->bufinf) {
			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
				txbi = txring->bufinf + i;
				if (txbi->skb) {
					dev_kfree_skb(txbi->skb);
					txbi->skb = NULL;
				}
				txbi->mapping = 0;
				txbi->len = 0;
				txbi->nr_desc = 0;
				txbi->start_xmit = 0;
			}
			kfree(txring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		txring->bufinf = NULL;
	}
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32f(jme, JME_TXCS, jme->reg_txcs |
				 TXCS_SELECT_QUEUE0 |
				 TXCS_ENABLE);

	/*
	 * Start clock for TX MAC Processor
	 */
	jme_mac_txclk_on(jme);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable TX engine timeout\n");

	/*
	 * Stop clock for TX MAC Processor
	 */
	jme_mac_txclk_off(jme);
}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(jme->dev,
			       jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data),
			       offset_in_page(skb->data), skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if (likely(rxbi->mapping))
		dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
			       DMA_FROM_DEVICE);

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = mapping;
	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
			       DMA_FROM_DEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			kfree(rxring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		rxring->bufinf = NULL;
	}
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
					   &(rxring->dmaalloc),
					   GFP_ATOMIC);
	if (!rxring->alloc)
		goto err_set_null;

	/*
	 * 16-byte alignment
	 */
	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
				     RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);

	rxring->bufinf = kcalloc(jme->rx_ring_size,
				 sizeof(struct jme_buffer_info),
				 GFP_ATOMIC);
	if (unlikely(!(rxring->bufinf)))
		goto err_free_rxring;

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0 ; i < jme->rx_ring_size ;
	     ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;

err_free_rxring:
	dma_free_coherent(&(jme->pdev->dev),
			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
			  rxring->alloc,
			  rxring->dmaalloc);
err_set_null:
	rxring->desc = NULL;
	rxring->dmaalloc = 0;
	rxring->dma = 0;
	rxring->bufinf = NULL;

	return -ENOMEM;
}

static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_unicastaddr(jme->dev);
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
				 RXCS_QUEUESEL_Q0 |
				 RXCS_ENABLE |
				 RXCS_QST);

	/*
	 * Start clock for RX MAC Processor
	 */
	jme_mac_rxclk_on(jme);
}

static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable RX engine timeout\n");

	/*
	 * Stop clock for RX MAC Processor
	 */
	jme_mac_rxclk_off(jme);
}

static u16
jme_udpsum(struct sk_buff *skb)
{
	u16 csum = 0xFFFFu;

	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
		return csum;
	if (skb->protocol != htons(ETH_P_IP))
		return csum;
	skb_set_network_header(skb, ETH_HLEN);
	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
	    (skb->len < (ETH_HLEN +
			 (ip_hdr(skb)->ihl << 2) +
			 sizeof(struct udphdr)))) {
		skb_reset_network_header(skb);
		return csum;
	}
	skb_set_transport_header(skb,
				 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
	csum = udp_hdr(skb)->check;
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);

	return csum;
}

static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
		return false;

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
			== RXWBFLAG_TCPON)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
			== RXWBFLAG_UDPON) && jme_udpsum(skb)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
			== RXWBFLAG_IPV4)) {
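		/* IPv4 frame, but hardware did not report a good IP header checksum */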
986 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n"); 987 return false; 988 } 989 990 return true; 991 } 992 993 static void 994 jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) 995 { 996 struct jme_ring *rxring = &(jme->rxring[0]); 997 struct rxdesc *rxdesc = rxring->desc; 998 struct jme_buffer_info *rxbi = rxring->bufinf; 999 struct sk_buff *skb; 1000 int framesize; 1001 1002 rxdesc += idx; 1003 rxbi += idx; 1004 1005 skb = rxbi->skb; 1006 dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len, 1007 DMA_FROM_DEVICE); 1008 1009 if (unlikely(jme_make_new_rx_buf(jme, idx))) { 1010 dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping, 1011 rxbi->len, DMA_FROM_DEVICE); 1012 1013 ++(NET_STAT(jme).rx_dropped); 1014 } else { 1015 framesize = le16_to_cpu(rxdesc->descwb.framesize) 1016 - RX_PREPAD_SIZE; 1017 1018 skb_reserve(skb, RX_PREPAD_SIZE); 1019 skb_put(skb, framesize); 1020 skb->protocol = eth_type_trans(skb, jme->dev); 1021 1022 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) 1023 skb->ip_summed = CHECKSUM_UNNECESSARY; 1024 else 1025 skb_checksum_none_assert(skb); 1026 1027 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 1028 u16 vid = le16_to_cpu(rxdesc->descwb.vlan); 1029 1030 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1031 NET_STAT(jme).rx_bytes += 4; 1032 } 1033 jme->jme_rx(skb); 1034 1035 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == 1036 cpu_to_le16(RXWBFLAG_DEST_MUL)) 1037 ++(NET_STAT(jme).multicast); 1038 1039 NET_STAT(jme).rx_bytes += framesize; 1040 ++(NET_STAT(jme).rx_packets); 1041 } 1042 1043 jme_set_clean_rxdesc(jme, idx); 1044 1045 } 1046 1047 static int 1048 jme_process_receive(struct jme_adapter *jme, int limit) 1049 { 1050 struct jme_ring *rxring = &(jme->rxring[0]); 1051 struct rxdesc *rxdesc; 1052 int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; 1053 1054 if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) 1055 goto out_inc; 1056 1057 if (unlikely(atomic_read(&jme->link_changing) != 1)) 1058 goto out_inc; 1059 1060 if (unlikely(!netif_carrier_ok(jme->dev))) 1061 goto out_inc; 1062 1063 i = atomic_read(&rxring->next_to_clean); 1064 while (limit > 0) { 1065 rxdesc = rxring->desc; 1066 rxdesc += i; 1067 1068 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || 1069 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 1070 goto out; 1071 --limit; 1072 1073 rmb(); 1074 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; 1075 1076 if (unlikely(desccnt > 1 || 1077 rxdesc->descwb.errstat & RXWBERR_ALLERR)) { 1078 1079 if (rxdesc->descwb.errstat & RXWBERR_CRCERR) 1080 ++(NET_STAT(jme).rx_crc_errors); 1081 else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) 1082 ++(NET_STAT(jme).rx_fifo_errors); 1083 else 1084 ++(NET_STAT(jme).rx_errors); 1085 1086 if (desccnt > 1) 1087 limit -= desccnt - 1; 1088 1089 for (j = i, ccnt = desccnt ; ccnt-- ; ) { 1090 jme_set_clean_rxdesc(jme, j); 1091 j = (j + 1) & (mask); 1092 } 1093 1094 } else { 1095 jme_alloc_and_feed_skb(jme, i); 1096 } 1097 1098 i = (i + desccnt) & (mask); 1099 } 1100 1101 out: 1102 atomic_set(&rxring->next_to_clean, i); 1103 1104 out_inc: 1105 atomic_inc(&jme->rx_cleaning); 1106 1107 return limit > 0 ? 
limit : 0; 1108 1109 } 1110 1111 static void 1112 jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) 1113 { 1114 if (likely(atmp == dpi->cur)) { 1115 dpi->cnt = 0; 1116 return; 1117 } 1118 1119 if (dpi->attempt == atmp) { 1120 ++(dpi->cnt); 1121 } else { 1122 dpi->attempt = atmp; 1123 dpi->cnt = 0; 1124 } 1125 1126 } 1127 1128 static void 1129 jme_dynamic_pcc(struct jme_adapter *jme) 1130 { 1131 register struct dynpcc_info *dpi = &(jme->dpi); 1132 1133 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) 1134 jme_attempt_pcc(dpi, PCC_P3); 1135 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || 1136 dpi->intr_cnt > PCC_INTR_THRESHOLD) 1137 jme_attempt_pcc(dpi, PCC_P2); 1138 else 1139 jme_attempt_pcc(dpi, PCC_P1); 1140 1141 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { 1142 if (dpi->attempt < dpi->cur) 1143 tasklet_schedule(&jme->rxclean_task); 1144 jme_set_rx_pcc(jme, dpi->attempt); 1145 dpi->cur = dpi->attempt; 1146 dpi->cnt = 0; 1147 } 1148 } 1149 1150 static void 1151 jme_start_pcc_timer(struct jme_adapter *jme) 1152 { 1153 struct dynpcc_info *dpi = &(jme->dpi); 1154 dpi->last_bytes = NET_STAT(jme).rx_bytes; 1155 dpi->last_pkts = NET_STAT(jme).rx_packets; 1156 dpi->intr_cnt = 0; 1157 jwrite32(jme, JME_TMCSR, 1158 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); 1159 } 1160 1161 static inline void 1162 jme_stop_pcc_timer(struct jme_adapter *jme) 1163 { 1164 jwrite32(jme, JME_TMCSR, 0); 1165 } 1166 1167 static void 1168 jme_shutdown_nic(struct jme_adapter *jme) 1169 { 1170 u32 phylink; 1171 1172 phylink = jme_linkstat_from_phy(jme); 1173 1174 if (!(phylink & PHY_LINK_UP)) { 1175 /* 1176 * Disable all interrupt before issue timer 1177 */ 1178 jme_stop_irq(jme); 1179 jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); 1180 } 1181 } 1182 1183 static void 1184 jme_pcc_tasklet(struct tasklet_struct *t) 1185 { 1186 struct jme_adapter *jme = from_tasklet(jme, t, pcc_task); 1187 struct net_device *netdev = jme->dev; 1188 1189 if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { 1190 jme_shutdown_nic(jme); 1191 return; 1192 } 1193 1194 if (unlikely(!netif_carrier_ok(netdev) || 1195 (atomic_read(&jme->link_changing) != 1) 1196 )) { 1197 jme_stop_pcc_timer(jme); 1198 return; 1199 } 1200 1201 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 1202 jme_dynamic_pcc(jme); 1203 1204 jme_start_pcc_timer(jme); 1205 } 1206 1207 static inline void 1208 jme_polling_mode(struct jme_adapter *jme) 1209 { 1210 jme_set_rx_pcc(jme, PCC_OFF); 1211 } 1212 1213 static inline void 1214 jme_interrupt_mode(struct jme_adapter *jme) 1215 { 1216 jme_set_rx_pcc(jme, PCC_P1); 1217 } 1218 1219 static inline int 1220 jme_pseudo_hotplug_enabled(struct jme_adapter *jme) 1221 { 1222 u32 apmc; 1223 apmc = jread32(jme, JME_APMC); 1224 return apmc & JME_APMC_PSEUDO_HP_EN; 1225 } 1226 1227 static void 1228 jme_start_shutdown_timer(struct jme_adapter *jme) 1229 { 1230 u32 apmc; 1231 1232 apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; 1233 apmc &= ~JME_APMC_EPIEN_CTRL; 1234 if (!no_extplug) { 1235 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); 1236 wmb(); 1237 } 1238 jwrite32f(jme, JME_APMC, apmc); 1239 1240 jwrite32f(jme, JME_TIMER2, 0); 1241 set_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1242 jwrite32(jme, JME_TMCSR, 1243 TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); 1244 } 1245 1246 static void 1247 jme_stop_shutdown_timer(struct jme_adapter *jme) 1248 { 1249 u32 apmc; 1250 1251 jwrite32f(jme, JME_TMCSR, 0); 1252 jwrite32f(jme, JME_TIMER2, 0); 1253 
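	/* Timers are off: let the PCC tasklet resume dynamic coalescing instead of shutting the NIC down */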
clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1254 1255 apmc = jread32(jme, JME_APMC); 1256 apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); 1257 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); 1258 wmb(); 1259 jwrite32f(jme, JME_APMC, apmc); 1260 } 1261 1262 static void jme_link_change_work(struct work_struct *work) 1263 { 1264 struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task); 1265 struct net_device *netdev = jme->dev; 1266 int rc; 1267 1268 while (!atomic_dec_and_test(&jme->link_changing)) { 1269 atomic_inc(&jme->link_changing); 1270 netif_info(jme, intr, jme->dev, "Get link change lock failed\n"); 1271 while (atomic_read(&jme->link_changing) != 1) 1272 netif_info(jme, intr, jme->dev, "Waiting link change lock\n"); 1273 } 1274 1275 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) 1276 goto out; 1277 1278 jme->old_mtu = netdev->mtu; 1279 netif_stop_queue(netdev); 1280 if (jme_pseudo_hotplug_enabled(jme)) 1281 jme_stop_shutdown_timer(jme); 1282 1283 jme_stop_pcc_timer(jme); 1284 tasklet_disable(&jme->txclean_task); 1285 tasklet_disable(&jme->rxclean_task); 1286 tasklet_disable(&jme->rxempty_task); 1287 1288 if (netif_carrier_ok(netdev)) { 1289 jme_disable_rx_engine(jme); 1290 jme_disable_tx_engine(jme); 1291 jme_reset_mac_processor(jme); 1292 jme_free_rx_resources(jme); 1293 jme_free_tx_resources(jme); 1294 1295 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1296 jme_polling_mode(jme); 1297 1298 netif_carrier_off(netdev); 1299 } 1300 1301 jme_check_link(netdev, 0); 1302 if (netif_carrier_ok(netdev)) { 1303 rc = jme_setup_rx_resources(jme); 1304 if (rc) { 1305 pr_err("Allocating resources for RX error, Device STOPPED!\n"); 1306 goto out_enable_tasklet; 1307 } 1308 1309 rc = jme_setup_tx_resources(jme); 1310 if (rc) { 1311 pr_err("Allocating resources for TX error, Device STOPPED!\n"); 1312 goto err_out_free_rx_resources; 1313 } 1314 1315 jme_enable_rx_engine(jme); 1316 jme_enable_tx_engine(jme); 1317 1318 netif_start_queue(netdev); 1319 1320 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1321 jme_interrupt_mode(jme); 1322 1323 jme_start_pcc_timer(jme); 1324 } else if (jme_pseudo_hotplug_enabled(jme)) { 1325 jme_start_shutdown_timer(jme); 1326 } 1327 1328 goto out_enable_tasklet; 1329 1330 err_out_free_rx_resources: 1331 jme_free_rx_resources(jme); 1332 out_enable_tasklet: 1333 tasklet_enable(&jme->txclean_task); 1334 tasklet_enable(&jme->rxclean_task); 1335 tasklet_enable(&jme->rxempty_task); 1336 out: 1337 atomic_inc(&jme->link_changing); 1338 } 1339 1340 static void 1341 jme_rx_clean_tasklet(struct tasklet_struct *t) 1342 { 1343 struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task); 1344 struct dynpcc_info *dpi = &(jme->dpi); 1345 1346 jme_process_receive(jme, jme->rx_ring_size); 1347 ++(dpi->intr_cnt); 1348 1349 } 1350 1351 static int 1352 jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) 1353 { 1354 struct jme_adapter *jme = jme_napi_priv(holder); 1355 int rest; 1356 1357 rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); 1358 1359 while (atomic_read(&jme->rx_empty) > 0) { 1360 atomic_dec(&jme->rx_empty); 1361 ++(NET_STAT(jme).rx_dropped); 1362 jme_restart_rx_engine(jme); 1363 } 1364 atomic_inc(&jme->rx_empty); 1365 1366 if (rest) { 1367 JME_RX_COMPLETE(netdev, holder); 1368 jme_interrupt_mode(jme); 1369 } 1370 1371 JME_NAPI_WEIGHT_SET(budget, rest); 1372 return JME_NAPI_WEIGHT_VAL(budget) - rest; 1373 } 1374 1375 static void 1376 jme_rx_empty_tasklet(struct tasklet_struct *t) 1377 { 1378 struct jme_adapter *jme = 
from_tasklet(jme, t, rxempty_task); 1379 1380 if (unlikely(atomic_read(&jme->link_changing) != 1)) 1381 return; 1382 1383 if (unlikely(!netif_carrier_ok(jme->dev))) 1384 return; 1385 1386 netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n"); 1387 1388 jme_rx_clean_tasklet(&jme->rxclean_task); 1389 1390 while (atomic_read(&jme->rx_empty) > 0) { 1391 atomic_dec(&jme->rx_empty); 1392 ++(NET_STAT(jme).rx_dropped); 1393 jme_restart_rx_engine(jme); 1394 } 1395 atomic_inc(&jme->rx_empty); 1396 } 1397 1398 static void 1399 jme_wake_queue_if_stopped(struct jme_adapter *jme) 1400 { 1401 struct jme_ring *txring = &(jme->txring[0]); 1402 1403 smp_wmb(); 1404 if (unlikely(netif_queue_stopped(jme->dev) && 1405 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { 1406 netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n"); 1407 netif_wake_queue(jme->dev); 1408 } 1409 1410 } 1411 1412 static void jme_tx_clean_tasklet(struct tasklet_struct *t) 1413 { 1414 struct jme_adapter *jme = from_tasklet(jme, t, txclean_task); 1415 struct jme_ring *txring = &(jme->txring[0]); 1416 struct txdesc *txdesc = txring->desc; 1417 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; 1418 int i, j, cnt = 0, max, err, mask; 1419 1420 tx_dbg(jme, "Into txclean\n"); 1421 1422 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) 1423 goto out; 1424 1425 if (unlikely(atomic_read(&jme->link_changing) != 1)) 1426 goto out; 1427 1428 if (unlikely(!netif_carrier_ok(jme->dev))) 1429 goto out; 1430 1431 max = jme->tx_ring_size - atomic_read(&txring->nr_free); 1432 mask = jme->tx_ring_mask; 1433 1434 for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { 1435 1436 ctxbi = txbi + i; 1437 1438 if (likely(ctxbi->skb && 1439 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { 1440 1441 tx_dbg(jme, "txclean: %d+%d@%lu\n", 1442 i, ctxbi->nr_desc, jiffies); 1443 1444 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; 1445 1446 for (j = 1 ; j < ctxbi->nr_desc ; ++j) { 1447 ttxbi = txbi + ((i + j) & (mask)); 1448 txdesc[(i + j) & (mask)].dw[0] = 0; 1449 1450 dma_unmap_page(&jme->pdev->dev, 1451 ttxbi->mapping, ttxbi->len, 1452 DMA_TO_DEVICE); 1453 1454 ttxbi->mapping = 0; 1455 ttxbi->len = 0; 1456 } 1457 1458 dev_kfree_skb(ctxbi->skb); 1459 1460 cnt += ctxbi->nr_desc; 1461 1462 if (unlikely(err)) { 1463 ++(NET_STAT(jme).tx_carrier_errors); 1464 } else { 1465 ++(NET_STAT(jme).tx_packets); 1466 NET_STAT(jme).tx_bytes += ctxbi->len; 1467 } 1468 1469 ctxbi->skb = NULL; 1470 ctxbi->len = 0; 1471 ctxbi->start_xmit = 0; 1472 1473 } else { 1474 break; 1475 } 1476 1477 i = (i + ctxbi->nr_desc) & mask; 1478 1479 ctxbi->nr_desc = 0; 1480 } 1481 1482 tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies); 1483 atomic_set(&txring->next_to_clean, i); 1484 atomic_add(cnt, &txring->nr_free); 1485 1486 jme_wake_queue_if_stopped(jme); 1487 1488 out: 1489 atomic_inc(&jme->tx_cleaning); 1490 } 1491 1492 static void 1493 jme_intr_msi(struct jme_adapter *jme, u32 intrstat) 1494 { 1495 /* 1496 * Disable interrupt 1497 */ 1498 jwrite32f(jme, JME_IENC, INTR_ENABLE); 1499 1500 if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { 1501 /* 1502 * Link change event is critical 1503 * all other events are ignored 1504 */ 1505 jwrite32(jme, JME_IEVE, intrstat); 1506 schedule_work(&jme->linkch_task); 1507 goto out_reenable; 1508 } 1509 1510 if (intrstat & INTR_TMINTR) { 1511 jwrite32(jme, JME_IEVE, INTR_TMINTR); 1512 tasklet_schedule(&jme->pcc_task); 1513 } 1514 1515 if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { 1516 jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | 
						     INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely((intrstat & INTR_ENABLE) == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exists
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	u32 bmcr;

	spin_lock_bh(&jme->phy_lock);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_bh(&jme->phy_lock);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		set_bit(JME_FLAG_MSI, &jme->flags);
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			 netdev);
	if (rc) {
		netdev_err(netdev,
			   "Unable to request %s interrupt (return: %d)\n",
			   test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
			   rc);

		if (test_bit(JME_FLAG_MSI, &jme->flags)) {
			pci_disable_msi(jme->pdev);
			clear_bit(JME_FLAG_MSI, &jme->flags);
		}
	} else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
		pci_disable_msi(jme->pdev);
		clear_bit(JME_FLAG_MSI, &jme->flags);
		jme->dev->irq = jme->pdev->irq;
	}
}

static inline void
jme_new_phy_on(struct jme_adapter *jme)
{
	u32 reg;

	reg = jread32(jme, JME_PHY_PWR);
	reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
		 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
	jwrite32(jme, JME_PHY_PWR, reg);

	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
	reg &= ~PE1_GPREG0_PBG;
	reg |= PE1_GPREG0_ENBG;
	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
}

static inline void
jme_new_phy_off(struct jme_adapter *jme)
{
	u32 reg;

	reg = jread32(jme, JME_PHY_PWR);
	reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
	       PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
	jwrite32(jme, JME_PHY_PWR, reg);

	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
	reg &= ~PE1_GPREG0_PBG;
	reg |= PE1_GPREG0_PDD3COLD;
	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
}

static inline void
jme_phy_on(struct jme_adapter *jme)
{
	u32 bmcr;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr &= ~BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
		jme_new_phy_on(jme);
}

static inline void
jme_phy_off(struct jme_adapter *jme)
{
	u32 bmcr;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
		jme_new_phy_off(jme);
}

static int
jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
{
	u32 phy_addr;

	phy_addr = JM_PHY_SPEC_REG_READ | specreg;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
	return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
			     JM_PHY_SPEC_DATA_REG);
}

static void
jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
{
	u32 phy_addr;

	phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
		       phy_data);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
}

static int
jme_phy_calibration(struct jme_adapter *jme)
{
	u32 ctrl1000, phy_data;

	jme_phy_off(jme);
	jme_phy_on(jme);
	/* Enable PHY test mode 1 */
	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
	ctrl1000 |= PHY_GAD_TEST_MODE_1;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);

	phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
	phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
	phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
		    JM_PHY_EXT_COMM_2_CALI_ENABLE;
	jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
	msleep(20);
	phy_data =
jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); 1757 phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | 1758 JM_PHY_EXT_COMM_2_CALI_MODE_0 | 1759 JM_PHY_EXT_COMM_2_CALI_LATCH); 1760 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); 1761 1762 /* Disable PHY test mode */ 1763 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); 1764 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; 1765 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); 1766 return 0; 1767 } 1768 1769 static int 1770 jme_phy_setEA(struct jme_adapter *jme) 1771 { 1772 u32 phy_comm0 = 0, phy_comm1 = 0; 1773 u8 nic_ctrl; 1774 1775 pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); 1776 if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) 1777 return 0; 1778 1779 switch (jme->pdev->device) { 1780 case PCI_DEVICE_ID_JMICRON_JMC250: 1781 if (((jme->chip_main_rev == 5) && 1782 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || 1783 (jme->chip_sub_rev == 3))) || 1784 (jme->chip_main_rev >= 6)) { 1785 phy_comm0 = 0x008A; 1786 phy_comm1 = 0x4109; 1787 } 1788 if ((jme->chip_main_rev == 3) && 1789 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) 1790 phy_comm0 = 0xE088; 1791 break; 1792 case PCI_DEVICE_ID_JMICRON_JMC260: 1793 if (((jme->chip_main_rev == 5) && 1794 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || 1795 (jme->chip_sub_rev == 3))) || 1796 (jme->chip_main_rev >= 6)) { 1797 phy_comm0 = 0x008A; 1798 phy_comm1 = 0x4109; 1799 } 1800 if ((jme->chip_main_rev == 3) && 1801 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) 1802 phy_comm0 = 0xE088; 1803 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) 1804 phy_comm0 = 0x608A; 1805 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) 1806 phy_comm0 = 0x408A; 1807 break; 1808 default: 1809 return -ENODEV; 1810 } 1811 if (phy_comm0) 1812 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0); 1813 if (phy_comm1) 1814 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1); 1815 1816 return 0; 1817 } 1818 1819 static int 1820 jme_open(struct net_device *netdev) 1821 { 1822 struct jme_adapter *jme = netdev_priv(netdev); 1823 int rc; 1824 1825 jme_clear_pm_disable_wol(jme); 1826 JME_NAPI_ENABLE(jme); 1827 1828 tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet); 1829 tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet); 1830 tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet); 1831 1832 rc = jme_request_irq(jme); 1833 if (rc) 1834 goto err_out; 1835 1836 jme_start_irq(jme); 1837 1838 jme_phy_on(jme); 1839 if (test_bit(JME_FLAG_SSET, &jme->flags)) 1840 jme_set_link_ksettings(netdev, &jme->old_cmd); 1841 else 1842 jme_reset_phy_processor(jme); 1843 jme_phy_calibration(jme); 1844 jme_phy_setEA(jme); 1845 jme_reset_link(jme); 1846 1847 return 0; 1848 1849 err_out: 1850 netif_stop_queue(netdev); 1851 netif_carrier_off(netdev); 1852 return rc; 1853 } 1854 1855 static void 1856 jme_set_100m_half(struct jme_adapter *jme) 1857 { 1858 u32 bmcr, tmp; 1859 1860 jme_phy_on(jme); 1861 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1862 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | 1863 BMCR_SPEED1000 | BMCR_FULLDPLX); 1864 tmp |= BMCR_SPEED100; 1865 1866 if (bmcr != tmp) 1867 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); 1868 1869 if (jme->fpgaver) 1870 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); 1871 else 1872 jwrite32(jme, JME_GHC, GHC_SPEED_100M); 1873 } 1874 1875 #define JME_WAIT_LINK_TIME 2000 /* 2000ms */ 1876 static void 1877 jme_wait_link(struct 
jme_adapter *jme) 1878 { 1879 u32 phylink, to = JME_WAIT_LINK_TIME; 1880 1881 msleep(1000); 1882 phylink = jme_linkstat_from_phy(jme); 1883 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { 1884 usleep_range(10000, 11000); 1885 phylink = jme_linkstat_from_phy(jme); 1886 } 1887 } 1888 1889 static void 1890 jme_powersave_phy(struct jme_adapter *jme) 1891 { 1892 if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) { 1893 jme_set_100m_half(jme); 1894 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) 1895 jme_wait_link(jme); 1896 jme_clear_pm_enable_wol(jme); 1897 } else { 1898 jme_phy_off(jme); 1899 } 1900 } 1901 1902 static int 1903 jme_close(struct net_device *netdev) 1904 { 1905 struct jme_adapter *jme = netdev_priv(netdev); 1906 1907 netif_stop_queue(netdev); 1908 netif_carrier_off(netdev); 1909 1910 jme_stop_irq(jme); 1911 jme_free_irq(jme); 1912 1913 JME_NAPI_DISABLE(jme); 1914 1915 cancel_work_sync(&jme->linkch_task); 1916 tasklet_kill(&jme->txclean_task); 1917 tasklet_kill(&jme->rxclean_task); 1918 tasklet_kill(&jme->rxempty_task); 1919 1920 jme_disable_rx_engine(jme); 1921 jme_disable_tx_engine(jme); 1922 jme_reset_mac_processor(jme); 1923 jme_free_rx_resources(jme); 1924 jme_free_tx_resources(jme); 1925 jme->phylink = 0; 1926 jme_phy_off(jme); 1927 1928 return 0; 1929 } 1930 1931 static int 1932 jme_alloc_txdesc(struct jme_adapter *jme, 1933 struct sk_buff *skb) 1934 { 1935 struct jme_ring *txring = &(jme->txring[0]); 1936 int idx, nr_alloc, mask = jme->tx_ring_mask; 1937 1938 idx = txring->next_to_use; 1939 nr_alloc = skb_shinfo(skb)->nr_frags + 2; 1940 1941 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) 1942 return -1; 1943 1944 atomic_sub(nr_alloc, &txring->nr_free); 1945 1946 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; 1947 1948 return idx; 1949 } 1950 1951 static int 1952 jme_fill_tx_map(struct pci_dev *pdev, 1953 struct txdesc *txdesc, 1954 struct jme_buffer_info *txbi, 1955 struct page *page, 1956 u32 page_offset, 1957 u32 len, 1958 bool hidma) 1959 { 1960 dma_addr_t dmaaddr; 1961 1962 dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len, 1963 DMA_TO_DEVICE); 1964 1965 if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr))) 1966 return -EINVAL; 1967 1968 dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE); 1969 1970 txdesc->dw[0] = 0; 1971 txdesc->dw[1] = 0; 1972 txdesc->desc2.flags = TXFLAG_OWN; 1973 txdesc->desc2.flags |= (hidma) ? 
TXFLAG_64BIT : 0; 1974 txdesc->desc2.datalen = cpu_to_le16(len); 1975 txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); 1976 txdesc->desc2.bufaddrl = cpu_to_le32( 1977 (__u64)dmaaddr & 0xFFFFFFFFUL); 1978 1979 txbi->mapping = dmaaddr; 1980 txbi->len = len; 1981 return 0; 1982 } 1983 1984 static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) 1985 { 1986 struct jme_ring *txring = &(jme->txring[0]); 1987 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; 1988 int mask = jme->tx_ring_mask; 1989 int j; 1990 1991 for (j = 0 ; j < count ; j++) { 1992 ctxbi = txbi + ((startidx + j + 2) & (mask)); 1993 dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len, 1994 DMA_TO_DEVICE); 1995 1996 ctxbi->mapping = 0; 1997 ctxbi->len = 0; 1998 } 1999 } 2000 2001 static int 2002 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) 2003 { 2004 struct jme_ring *txring = &(jme->txring[0]); 2005 struct txdesc *txdesc = txring->desc, *ctxdesc; 2006 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; 2007 bool hidma = jme->dev->features & NETIF_F_HIGHDMA; 2008 int i, nr_frags = skb_shinfo(skb)->nr_frags; 2009 int mask = jme->tx_ring_mask; 2010 u32 len; 2011 int ret = 0; 2012 2013 for (i = 0 ; i < nr_frags ; ++i) { 2014 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2015 2016 ctxdesc = txdesc + ((idx + i + 2) & (mask)); 2017 ctxbi = txbi + ((idx + i + 2) & (mask)); 2018 2019 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, 2020 skb_frag_page(frag), skb_frag_off(frag), 2021 skb_frag_size(frag), hidma); 2022 if (ret) { 2023 jme_drop_tx_map(jme, idx, i); 2024 goto out; 2025 } 2026 } 2027 2028 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 2029 ctxdesc = txdesc + ((idx + 1) & (mask)); 2030 ctxbi = txbi + ((idx + 1) & (mask)); 2031 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), 2032 offset_in_page(skb->data), len, hidma); 2033 if (ret) 2034 jme_drop_tx_map(jme, idx, i); 2035 2036 out: 2037 return ret; 2038 2039 } 2040 2041 2042 static int 2043 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 2044 { 2045 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); 2046 if (*mss) { 2047 *flags |= TXFLAG_LSEN; 2048 2049 if (skb->protocol == htons(ETH_P_IP)) { 2050 struct iphdr *iph = ip_hdr(skb); 2051 2052 iph->check = 0; 2053 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2054 iph->daddr, 0, 2055 IPPROTO_TCP, 2056 0); 2057 } else { 2058 tcp_v6_gso_csum_prep(skb); 2059 } 2060 2061 return 0; 2062 } 2063 2064 return 1; 2065 } 2066 2067 static void 2068 jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) 2069 { 2070 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2071 u8 ip_proto; 2072 2073 switch (skb->protocol) { 2074 case htons(ETH_P_IP): 2075 ip_proto = ip_hdr(skb)->protocol; 2076 break; 2077 case htons(ETH_P_IPV6): 2078 ip_proto = ipv6_hdr(skb)->nexthdr; 2079 break; 2080 default: 2081 ip_proto = 0; 2082 break; 2083 } 2084 2085 switch (ip_proto) { 2086 case IPPROTO_TCP: 2087 *flags |= TXFLAG_TCPCS; 2088 break; 2089 case IPPROTO_UDP: 2090 *flags |= TXFLAG_UDPCS; 2091 break; 2092 default: 2093 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n"); 2094 break; 2095 } 2096 } 2097 } 2098 2099 static inline void 2100 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) 2101 { 2102 if (skb_vlan_tag_present(skb)) { 2103 *flags |= TXFLAG_TAGON; 2104 *vlan = cpu_to_le16(skb_vlan_tag_get(skb)); 2105 } 2106 } 2107 2108 static int 2109 jme_fill_tx_desc(struct jme_adapter *jme, struct 
sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;
	int ret = 0;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last. If the kernel queues packets faster than
	 * the NIC can send them, the NIC could otherwise start on this
	 * descriptor before we tell it to start sending this TX queue.
	 * All other fields are already filled in correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags when not doing TSO
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	ret = jme_map_tx_skb(jme, skb, idx);
	if (ret)
		return ret;

	txdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling the NIC to send,
	 * for better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	txbi->start_xmit = jiffies;
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}

static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev,
			   "TX Queue Stopped %d@%lu\n", idx, jiffies);
	}
}

/*
 * This function is already protected by netif_tx_lock()
 */

static netdev_tx_t
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
		dev_kfree_skb_any(skb);
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	if (unlikely(idx < 0)) {
		netif_stop_queue(netdev);
		netif_err(jme, tx_err, jme->dev,
			  "BUG!
Tx ring full when queue awake!\n"); 2212 2213 return NETDEV_TX_BUSY; 2214 } 2215 2216 if (jme_fill_tx_desc(jme, skb, idx)) 2217 return NETDEV_TX_OK; 2218 2219 jwrite32(jme, JME_TXCS, jme->reg_txcs | 2220 TXCS_SELECT_QUEUE0 | 2221 TXCS_QUEUE0S | 2222 TXCS_ENABLE); 2223 2224 tx_dbg(jme, "xmit: %d+%d@%lu\n", 2225 idx, skb_shinfo(skb)->nr_frags + 2, jiffies); 2226 jme_stop_queue_if_full(jme); 2227 2228 return NETDEV_TX_OK; 2229 } 2230 2231 static void 2232 jme_set_unicastaddr(struct net_device *netdev) 2233 { 2234 struct jme_adapter *jme = netdev_priv(netdev); 2235 u32 val; 2236 2237 val = (netdev->dev_addr[3] & 0xff) << 24 | 2238 (netdev->dev_addr[2] & 0xff) << 16 | 2239 (netdev->dev_addr[1] & 0xff) << 8 | 2240 (netdev->dev_addr[0] & 0xff); 2241 jwrite32(jme, JME_RXUMA_LO, val); 2242 val = (netdev->dev_addr[5] & 0xff) << 8 | 2243 (netdev->dev_addr[4] & 0xff); 2244 jwrite32(jme, JME_RXUMA_HI, val); 2245 } 2246 2247 static int 2248 jme_set_macaddr(struct net_device *netdev, void *p) 2249 { 2250 struct jme_adapter *jme = netdev_priv(netdev); 2251 struct sockaddr *addr = p; 2252 2253 if (netif_running(netdev)) 2254 return -EBUSY; 2255 2256 spin_lock_bh(&jme->macaddr_lock); 2257 eth_hw_addr_set(netdev, addr->sa_data); 2258 jme_set_unicastaddr(netdev); 2259 spin_unlock_bh(&jme->macaddr_lock); 2260 2261 return 0; 2262 } 2263 2264 static void 2265 jme_set_multi(struct net_device *netdev) 2266 { 2267 struct jme_adapter *jme = netdev_priv(netdev); 2268 u32 mc_hash[2] = {}; 2269 2270 spin_lock_bh(&jme->rxmcs_lock); 2271 2272 jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; 2273 2274 if (netdev->flags & IFF_PROMISC) { 2275 jme->reg_rxmcs |= RXMCS_ALLFRAME; 2276 } else if (netdev->flags & IFF_ALLMULTI) { 2277 jme->reg_rxmcs |= RXMCS_ALLMULFRAME; 2278 } else if (netdev->flags & IFF_MULTICAST) { 2279 struct netdev_hw_addr *ha; 2280 int bit_nr; 2281 2282 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; 2283 netdev_for_each_mc_addr(ha, netdev) { 2284 bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; 2285 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); 2286 } 2287 2288 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); 2289 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); 2290 } 2291 2292 wmb(); 2293 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); 2294 2295 spin_unlock_bh(&jme->rxmcs_lock); 2296 } 2297 2298 static int 2299 jme_change_mtu(struct net_device *netdev, int new_mtu) 2300 { 2301 struct jme_adapter *jme = netdev_priv(netdev); 2302 2303 netdev->mtu = new_mtu; 2304 netdev_update_features(netdev); 2305 2306 jme_restart_rx_engine(jme); 2307 jme_reset_link(jme); 2308 2309 return 0; 2310 } 2311 2312 static void 2313 jme_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2314 { 2315 struct jme_adapter *jme = netdev_priv(netdev); 2316 2317 jme->phylink = 0; 2318 jme_reset_phy_processor(jme); 2319 if (test_bit(JME_FLAG_SSET, &jme->flags)) 2320 jme_set_link_ksettings(netdev, &jme->old_cmd); 2321 2322 /* 2323 * Force to Reset the link again 2324 */ 2325 jme_reset_link(jme); 2326 } 2327 2328 static void 2329 jme_get_drvinfo(struct net_device *netdev, 2330 struct ethtool_drvinfo *info) 2331 { 2332 struct jme_adapter *jme = netdev_priv(netdev); 2333 2334 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 2335 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 2336 strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info)); 2337 } 2338 2339 static int 2340 jme_get_regs_len(struct net_device *netdev) 2341 { 2342 return JME_REG_LEN; 2343 } 2344 2345 static void 2346 mmapio_memcpy(struct jme_adapter *jme, 
static void
mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
{
	int i;

	for (i = 0 ; i < len ; i += 4)
		p[i >> 2] = jread32(jme, reg + i);
}

static void
mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
{
	int i;
	u16 *p16 = (u16 *)p;

	for (i = 0 ; i < reg_nr ; ++i)
		p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
}

static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 *p32 = (u32 *)p;

	memset(p, 0xFF, JME_REG_LEN);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}

static int jme_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ecmd,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		ecmd->use_adaptive_rx_coalesce = false;
		ecmd->rx_coalesce_usecs = 0;
		ecmd->rx_max_coalesced_frames = 0;
		return 0;
	}

	ecmd->use_adaptive_rx_coalesce = true;

	switch (jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

static int jme_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ecmd,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce &&
	    test_bit(JME_FLAG_POLL, &jme->flags)) {
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		dpi->cur = PCC_P1;
		dpi->attempt = PCC_P1;
		dpi->cnt = 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce) &&
		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		set_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_receive_skb;
		jme_interrupt_mode(jme);
	}

	return 0;
}

static void
jme_get_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}
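
/*
 * Pause/flow-control mapping: TX pause is controlled through TXPFC_PF_EN,
 * RX pause through RXMCS_FLOWCTRL, and "autoneg" is reflected by the
 * PAUSE/ASYM_PAUSE advertisement bits in the PHY.  For example
 * (illustrative only, the interface name is arbitrary):
 *
 *	ethtool -A eth0 rx on tx off autoneg on
 *
 * would set RXMCS_FLOWCTRL, clear TXPFC_PF_EN and advertise pause support.
 */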
static int
jme_set_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
	    (ecmd->tx_pause != 0)) {

		if (ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_bh(&jme->rxmcs_lock);
	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
	    (ecmd->rx_pause != 0)) {

		if (ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_bh(&jme->rxmcs_lock);

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
	    (ecmd->autoneg != 0)) {

		if (ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
			       MII_ADVERTISE, val);
	}
	spin_unlock_bh(&jme->phy_lock);

	return 0;
}

static void
jme_get_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;

	wol->wolopts = 0;

	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
		wol->wolopts |= WAKE_PHY;

	if (jme->reg_pmcs & PMCS_MFEN)
		wol->wolopts |= WAKE_MAGIC;
}

static int
jme_set_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_MAGICSECURE |
			    WAKE_UCAST |
			    WAKE_MCAST |
			    WAKE_BCAST |
			    WAKE_ARP))
		return -EOPNOTSUPP;

	jme->reg_pmcs = 0;

	if (wol->wolopts & WAKE_PHY)
		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;

	if (wol->wolopts & WAKE_MAGIC)
		jme->reg_pmcs |= PMCS_MFEN;

	return 0;
}

static int
jme_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *cmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	spin_lock_bh(&jme->phy_lock);
	mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
	spin_unlock_bh(&jme->phy_lock);
	return 0;
}
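
/*
 * Link settings note: jme_set_link_ksettings() below rejects a forced
 * 1000 Mb/s configuration (gigabit requires autonegotiation), so e.g.
 * "ethtool -s eth0 speed 1000 duplex full autoneg off" fails with -EINVAL,
 * while other requests are passed through to
 * mii_ethtool_set_link_ksettings().  The interface name above is only an
 * example.
 */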
static int
jme_set_link_ksettings(struct net_device *netdev,
		       const struct ethtool_link_ksettings *cmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, fdc = 0;

	if (cmd->base.speed == SPEED_1000 &&
	    cmd->base.autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	/*
	 * Check whether the user changed only the duplex setting while
	 * the media type is forced.  In that case the hardware will not
	 * generate a link change interrupt, so force a link reset below.
	 */
	if (jme->mii_if.force_media &&
	    cmd->base.autoneg != AUTONEG_ENABLE &&
	    (jme->mii_if.full_duplex != cmd->base.duplex))
		fdc = 1;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd);
	spin_unlock_bh(&jme->phy_lock);

	if (!rc) {
		if (fdc)
			jme_reset_link(jme);
		jme->old_cmd = *cmd;
		set_bit(JME_FLAG_SSET, &jme->flags);
	}

	return rc;
}

static int
jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	int rc;
	struct jme_adapter *jme = netdev_priv(netdev);
	struct mii_ioctl_data *mii_data = if_mii(rq);
	unsigned int duplex_chg;

	if (cmd == SIOCSMIIREG) {
		u16 val = mii_data->val_in;
		if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
		    (val & BMCR_SPEED1000))
			return -EINVAL;
	}

	spin_lock_bh(&jme->phy_lock);
	rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
	spin_unlock_bh(&jme->phy_lock);

	if (!rc && (cmd == SIOCSMIIREG)) {
		if (duplex_chg)
			jme_reset_link(jme);
		jme_get_link_ksettings(netdev, &jme->old_cmd);
		set_bit(JME_FLAG_SSET, &jme->flags);
	}

	return rc;
}

static u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static u32
jme_get_msglevel(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jme->msg_enable;
}

static void
jme_set_msglevel(struct net_device *netdev, u32 value)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme->msg_enable = value;
}

static netdev_features_t
jme_fix_features(struct net_device *netdev, netdev_features_t features)
{
	if (netdev->mtu > 1900)
		features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK);
	return features;
}

static int
jme_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	spin_lock_bh(&jme->rxmcs_lock);
	if (features & NETIF_F_RXCSUM)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_bh(&jme->rxmcs_lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void jme_netpoll(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	jme_intr(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme_restart_an(jme);
	return 0;
}
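
/*
 * SMBus/EEPROM access helpers: both jme_smb_read() and jme_smb_write()
 * follow the same handshake - wait for SMBCSR_BUSY to clear, issue the
 * command through JME_SMBINTF, then poll until the hardware clears
 * SMBINTF_HWCMD.  Each busy-wait is bounded by JME_SMB_BUSY_TIMEOUT
 * iterations of msleep(1), so a stuck bus fails after roughly that many
 * milliseconds instead of hanging.
 */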
static u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return 0xFF;
	}

	jwrite32(jme, JME_SMBINTF,
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_READ |
		SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return 0xFF;
	}

	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}

static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return;
	}

	jwrite32(jme, JME_SMBINTF,
		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_WRITE |
		SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return;
	}

	mdelay(2);
}

static int
jme_get_eeprom_len(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;
	val = jread32(jme, JME_SMBCSR);
	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
}

static int
jme_get_eeprom(struct net_device *netdev,
	       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	/*
	 * ethtool will check the boundary for us
	 */
	eeprom->magic = JME_EEPROM_MAGIC;
	for (i = 0 ; i < len ; ++i)
		data[i] = jme_smb_read(jme, i + offset);

	return 0;
}

static int
jme_set_eeprom(struct net_device *netdev,
	       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	if (eeprom->magic != JME_EEPROM_MAGIC)
		return -EINVAL;

	/*
	 * ethtool will check the boundary for us
	 */
	for (i = 0 ; i < len ; ++i)
		jme_smb_write(jme, i + offset, data[i]);

	return 0;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = jme_get_drvinfo,
	.get_regs_len = jme_get_regs_len,
	.get_regs = jme_get_regs,
	.get_coalesce = jme_get_coalesce,
	.set_coalesce = jme_set_coalesce,
	.get_pauseparam = jme_get_pauseparam,
	.set_pauseparam = jme_set_pauseparam,
	.get_wol = jme_get_wol,
	.set_wol = jme_set_wol,
	.get_link = jme_get_link,
	.get_msglevel = jme_get_msglevel,
	.set_msglevel = jme_set_msglevel,
	.nway_reset = jme_nway_reset,
	.get_eeprom_len = jme_get_eeprom_len,
	.get_eeprom = jme_get_eeprom,
	.set_eeprom = jme_set_eeprom,
	.get_link_ksettings = jme_get_link_ksettings,
	.set_link_ksettings = jme_set_link_ksettings,
};
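
/*
 * jme_pci_dma64() below returns 1 when a 64-bit or 40-bit DMA mask could
 * be set (the JMC250 case, which later enables NETIF_F_HIGHDMA), 0 when
 * the device falls back to 32-bit DMA, and -1 when no mask could be set
 * at all, in which case probing is aborted with -EIO.
 */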
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 1;

	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		return 1;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;

	return -1;
}

static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

static inline void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
	jme->chip_main_rev = jme->chiprev & 0xF;
	jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
}

static const struct net_device_ops jme_netdev_ops = {
	.ndo_open		= jme_open,
	.ndo_stop		= jme_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= jme_ioctl,
	.ndo_start_xmit		= jme_start_xmit,
	.ndo_set_mac_address	= jme_set_macaddr,
	.ndo_set_rx_mode	= jme_set_multi,
	.ndo_change_mtu		= jme_change_mtu,
	.ndo_tx_timeout		= jme_tx_timeout,
	.ndo_fix_features	= jme_fix_features,
	.ndo_set_features	= jme_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= jme_netpoll,
#endif
};

static int
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		pr_err("Cannot set PCI DMA Mask\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("No PCI resource region found\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pr_err("Cannot obtain PCI resource region\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->netdev_ops = &jme_netdev_ops;
	netdev->ethtool_ops = &jme_ethtool_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->hw_features = NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;
	netdev->features = NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 1280 - 9202 */
	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN;
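
	/*
	 * Note that jme_fix_features() drops TSO and checksum offloads once
	 * the MTU is raised above 1900 bytes, so the offloads advertised in
	 * hw_features above are only effective at standard frame sizes.
	 */
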
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->jme_rx = netif_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		pr_err("Mapping PCI resource region error\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}

	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	}

	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
	INIT_WORK(&jme->linkch_task, jme_link_change_work);
	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = PMCS_MFEN;
	jme->reg_gpreg1 = GPREG1_DEFAULT;

	if (jme->reg_rxmcs & RXMCS_CHECKSUM)
		netdev->features |= NETIF_F_RXCSUM;

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
	jme->mrrs &= PCI_DCSR_MRRS_MASK;
	switch (jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Must check before reset_mac_processor
	 */
	jme_check_hw_ver(jme);
	jme->mii_if.dev = netdev;
	if (jme->fpgaver) {
		jme->mii_if.phy_id = 0;
		for (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			pr_err("Cannot find phy_id\n");
			goto err_out_unmap;
		}

		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		jme->mii_if.phy_id = 1;
	}
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;
	else
		jme->mii_if.supports_gmii = false;
	jme->mii_if.phy_id_mask = 0x1F;
	jme->mii_if.reg_num_mask = 0x1F;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;
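
	/*
	 * Remaining bring-up: clear any stale WoL configuration, set up the
	 * PHY FIFO and (on non-FPGA parts) apply the PHY init tweak, then
	 * reset the MAC and reload the EEPROM so the permanent MAC address
	 * can be read before the netdev is registered.
	 */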
	jme_clear_pm_disable_wol(jme);
	device_init_wakeup(&pdev->dev, true);

	jme_set_phyfifo_5level(jme);
	jme->pcirev = pdev->revision;
	if (!jme->fpgaver)
		jme_phy_init(jme);
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		pr_err("Reload eeprom for reading MAC Address error\n");
		goto err_out_unmap;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		pr_err("Cannot register net device\n");
		goto err_out_unmap;
	}

	netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
		   "JMC250 Gigabit Ethernet" :
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
		   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
		   jme->pcirev, netdev->dev_addr);

	return 0;

err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static void
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void
jme_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_powersave_phy(jme);
	pci_pme_active(pdev, true);
}

#ifdef CONFIG_PM_SLEEP
static int
jme_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_enable(&jme->rxclean_task);
	tasklet_enable(&jme->rxempty_task);

	jme_powersave_phy(jme);

	return 0;
}

static int
jme_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	jme_clear_pm_disable_wol(jme);
	jme_phy_on(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_link_ksettings(netdev, &jme->old_cmd);
	else
		jme_reset_phy_processor(jme);
	jme_phy_calibration(jme);
	jme_phy_setEA(jme);
	netif_device_attach(netdev);

	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	jme_start_irq(jme);

	return 0;
}

static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
#define JME_PM_OPS (&jme_pm_ops)

#else

#define JME_PM_OPS NULL
#endif

static const struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};
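
/*
 * PCI driver glue: probe/remove/shutdown are wired below, and the
 * suspend/resume handlers are only built in when CONFIG_PM_SLEEP is set;
 * otherwise JME_PM_OPS is NULL and jme_shutdown() still powers down the
 * PHY and arms PME.
 */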
static struct pci_driver jme_driver = {
	.name = DRV_NAME,
	.id_table = jme_pci_tbl,
	.probe = jme_init_one,
	.remove = jme_remove_one,
	.shutdown = jme_shutdown,
	.driver.pm = JME_PM_OPS,
};

static int __init
jme_init_module(void)
{
	pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);