1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver 4 * 5 * Copyright 2008 JMicron Technology Corporation 6 * https://www.jmicron.com/ 7 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org> 8 * 9 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> 10 */ 11 12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 14 #include <linux/module.h> 15 #include <linux/kernel.h> 16 #include <linux/pci.h> 17 #include <linux/netdevice.h> 18 #include <linux/etherdevice.h> 19 #include <linux/ethtool.h> 20 #include <linux/mii.h> 21 #include <linux/crc32.h> 22 #include <linux/delay.h> 23 #include <linux/spinlock.h> 24 #include <linux/in.h> 25 #include <linux/ip.h> 26 #include <linux/ipv6.h> 27 #include <linux/tcp.h> 28 #include <linux/udp.h> 29 #include <linux/if_vlan.h> 30 #include <linux/slab.h> 31 #include <linux/jiffies.h> 32 #include <net/ip6_checksum.h> 33 #include "jme.h" 34 35 static int force_pseudohp = -1; 36 static int no_pseudohp = -1; 37 static int no_extplug = -1; 38 module_param(force_pseudohp, int, 0); 39 MODULE_PARM_DESC(force_pseudohp, 40 "Enable pseudo hot-plug feature manually by driver instead of BIOS."); 41 module_param(no_pseudohp, int, 0); 42 MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); 43 module_param(no_extplug, int, 0); 44 MODULE_PARM_DESC(no_extplug, 45 "Do not use external plug signal for pseudo hot-plug."); 46 47 static int 48 jme_mdio_read(struct net_device *netdev, int phy, int reg) 49 { 50 struct jme_adapter *jme = netdev_priv(netdev); 51 int i, val, again = (reg == MII_BMSR) ? 1 : 0; 52 53 read_again: 54 jwrite32(jme, JME_SMI, SMI_OP_REQ | 55 smi_phy_addr(phy) | 56 smi_reg_addr(reg)); 57 58 wmb(); 59 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 60 udelay(20); 61 val = jread32(jme, JME_SMI); 62 if ((val & SMI_OP_REQ) == 0) 63 break; 64 } 65 66 if (i == 0) { 67 pr_err("phy(%d) read timeout : %d\n", phy, reg); 68 return 0; 69 } 70 71 if (again--) 72 goto read_again; 73 74 return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; 75 } 76 77 static void 78 jme_mdio_write(struct net_device *netdev, 79 int phy, int reg, int val) 80 { 81 struct jme_adapter *jme = netdev_priv(netdev); 82 int i; 83 84 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | 85 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 86 smi_phy_addr(phy) | smi_reg_addr(reg)); 87 88 wmb(); 89 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 90 udelay(20); 91 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) 92 break; 93 } 94 95 if (i == 0) 96 pr_err("phy(%d) write timeout : %d\n", phy, reg); 97 } 98 99 static inline void 100 jme_reset_phy_processor(struct jme_adapter *jme) 101 { 102 u32 val; 103 104 jme_mdio_write(jme->dev, 105 jme->mii_if.phy_id, 106 MII_ADVERTISE, ADVERTISE_ALL | 107 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 108 109 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) 110 jme_mdio_write(jme->dev, 111 jme->mii_if.phy_id, 112 MII_CTRL1000, 113 ADVERTISE_1000FULL | ADVERTISE_1000HALF); 114 115 val = jme_mdio_read(jme->dev, 116 jme->mii_if.phy_id, 117 MII_BMCR); 118 119 jme_mdio_write(jme->dev, 120 jme->mii_if.phy_id, 121 MII_BMCR, val | BMCR_RESET); 122 } 123 124 static void 125 jme_setup_wakeup_frame(struct jme_adapter *jme, 126 const u32 *mask, u32 crc, int fnr) 127 { 128 int i; 129 130 /* 131 * Setup CRC pattern 132 */ 133 jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); 134 wmb(); 135 jwrite32(jme, JME_WFODP, crc); 136 wmb(); 137 138 /* 139 * Setup Mask 140 */ 141 for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { 
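	/*
	 * Each pass selects mask dword i of wake-up frame fnr through
	 * JME_WFOI and then writes mask[i] via the JME_WFODP data port,
	 * mirroring the CRC write above.
	 */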
142 jwrite32(jme, JME_WFOI, 143 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | 144 (fnr & WFOI_FRAME_SEL)); 145 wmb(); 146 jwrite32(jme, JME_WFODP, mask[i]); 147 wmb(); 148 } 149 } 150 151 static inline void 152 jme_mac_rxclk_off(struct jme_adapter *jme) 153 { 154 jme->reg_gpreg1 |= GPREG1_RXCLKOFF; 155 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); 156 } 157 158 static inline void 159 jme_mac_rxclk_on(struct jme_adapter *jme) 160 { 161 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; 162 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); 163 } 164 165 static inline void 166 jme_mac_txclk_off(struct jme_adapter *jme) 167 { 168 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); 169 jwrite32f(jme, JME_GHC, jme->reg_ghc); 170 } 171 172 static inline void 173 jme_mac_txclk_on(struct jme_adapter *jme) 174 { 175 u32 speed = jme->reg_ghc & GHC_SPEED; 176 if (speed == GHC_SPEED_1000M) 177 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; 178 else 179 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; 180 jwrite32f(jme, JME_GHC, jme->reg_ghc); 181 } 182 183 static inline void 184 jme_reset_ghc_speed(struct jme_adapter *jme) 185 { 186 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); 187 jwrite32f(jme, JME_GHC, jme->reg_ghc); 188 } 189 190 static inline void 191 jme_reset_250A2_workaround(struct jme_adapter *jme) 192 { 193 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | 194 GPREG1_RSSPATCH); 195 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); 196 } 197 198 static inline void 199 jme_assert_ghc_reset(struct jme_adapter *jme) 200 { 201 jme->reg_ghc |= GHC_SWRST; 202 jwrite32f(jme, JME_GHC, jme->reg_ghc); 203 } 204 205 static inline void 206 jme_clear_ghc_reset(struct jme_adapter *jme) 207 { 208 jme->reg_ghc &= ~GHC_SWRST; 209 jwrite32f(jme, JME_GHC, jme->reg_ghc); 210 } 211 212 static void 213 jme_reset_mac_processor(struct jme_adapter *jme) 214 { 215 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 216 u32 crc = 0xCDCDCDCD; 217 u32 gpreg0; 218 int i; 219 220 jme_reset_ghc_speed(jme); 221 jme_reset_250A2_workaround(jme); 222 223 jme_mac_rxclk_on(jme); 224 jme_mac_txclk_on(jme); 225 udelay(1); 226 jme_assert_ghc_reset(jme); 227 udelay(1); 228 jme_mac_rxclk_off(jme); 229 jme_mac_txclk_off(jme); 230 udelay(1); 231 jme_clear_ghc_reset(jme); 232 udelay(1); 233 jme_mac_rxclk_on(jme); 234 jme_mac_txclk_on(jme); 235 udelay(1); 236 jme_mac_rxclk_off(jme); 237 jme_mac_txclk_off(jme); 238 239 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 240 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 241 jwrite32(jme, JME_RXQDC, 0x00000000); 242 jwrite32(jme, JME_RXNDA, 0x00000000); 243 jwrite32(jme, JME_TXDBA_LO, 0x00000000); 244 jwrite32(jme, JME_TXDBA_HI, 0x00000000); 245 jwrite32(jme, JME_TXQDC, 0x00000000); 246 jwrite32(jme, JME_TXNDA, 0x00000000); 247 248 jwrite32(jme, JME_RXMCHT_LO, 0x00000000); 249 jwrite32(jme, JME_RXMCHT_HI, 0x00000000); 250 for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) 251 jme_setup_wakeup_frame(jme, mask, crc, i); 252 if (jme->fpgaver) 253 gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; 254 else 255 gpreg0 = GPREG0_DEFAULT; 256 jwrite32(jme, JME_GPREG0, gpreg0); 257 } 258 259 static inline void 260 jme_clear_pm_enable_wol(struct jme_adapter *jme) 261 { 262 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); 263 } 264 265 static inline void 266 jme_clear_pm_disable_wol(struct jme_adapter *jme) 267 { 268 jwrite32(jme, JME_PMCS, PMCS_STMASK); 269 } 270 271 static int 272 jme_reload_eeprom(struct jme_adapter *jme) 273 { 274 u32 val; 275 int i; 276 277 val = jread32(jme, JME_SMBCSR); 278 279 if (val & SMBCSR_EEPROMD) { 280 val |= 
SMBCSR_CNACK; 281 jwrite32(jme, JME_SMBCSR, val); 282 val |= SMBCSR_RELOAD; 283 jwrite32(jme, JME_SMBCSR, val); 284 mdelay(12); 285 286 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { 287 mdelay(1); 288 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) 289 break; 290 } 291 292 if (i == 0) { 293 pr_err("eeprom reload timeout\n"); 294 return -EIO; 295 } 296 } 297 298 return 0; 299 } 300 301 static void 302 jme_load_macaddr(struct net_device *netdev) 303 { 304 struct jme_adapter *jme = netdev_priv(netdev); 305 unsigned char macaddr[ETH_ALEN]; 306 u32 val; 307 308 spin_lock_bh(&jme->macaddr_lock); 309 val = jread32(jme, JME_RXUMA_LO); 310 macaddr[0] = (val >> 0) & 0xFF; 311 macaddr[1] = (val >> 8) & 0xFF; 312 macaddr[2] = (val >> 16) & 0xFF; 313 macaddr[3] = (val >> 24) & 0xFF; 314 val = jread32(jme, JME_RXUMA_HI); 315 macaddr[4] = (val >> 0) & 0xFF; 316 macaddr[5] = (val >> 8) & 0xFF; 317 eth_hw_addr_set(netdev, macaddr); 318 spin_unlock_bh(&jme->macaddr_lock); 319 } 320 321 static inline void 322 jme_set_rx_pcc(struct jme_adapter *jme, int p) 323 { 324 switch (p) { 325 case PCC_OFF: 326 jwrite32(jme, JME_PCCRX0, 327 ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 328 ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 329 break; 330 case PCC_P1: 331 jwrite32(jme, JME_PCCRX0, 332 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 333 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 334 break; 335 case PCC_P2: 336 jwrite32(jme, JME_PCCRX0, 337 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 338 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 339 break; 340 case PCC_P3: 341 jwrite32(jme, JME_PCCRX0, 342 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 343 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 344 break; 345 default: 346 break; 347 } 348 wmb(); 349 350 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 351 netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); 352 } 353 354 static void 355 jme_start_irq(struct jme_adapter *jme) 356 { 357 register struct dynpcc_info *dpi = &(jme->dpi); 358 359 jme_set_rx_pcc(jme, PCC_P1); 360 dpi->cur = PCC_P1; 361 dpi->attempt = PCC_P1; 362 dpi->cnt = 0; 363 364 jwrite32(jme, JME_PCCTX, 365 ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | 366 ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | 367 PCCTXQ0_EN 368 ); 369 370 /* 371 * Enable Interrupts 372 */ 373 jwrite32(jme, JME_IENS, INTR_ENABLE); 374 } 375 376 static inline void 377 jme_stop_irq(struct jme_adapter *jme) 378 { 379 /* 380 * Disable Interrupts 381 */ 382 jwrite32f(jme, JME_IENC, INTR_ENABLE); 383 } 384 385 static u32 386 jme_linkstat_from_phy(struct jme_adapter *jme) 387 { 388 u32 phylink, bmsr; 389 390 phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); 391 bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); 392 if (bmsr & BMSR_ANCOMP) 393 phylink |= PHY_LINK_AUTONEG_COMPLETE; 394 395 return phylink; 396 } 397 398 static inline void 399 jme_set_phyfifo_5level(struct jme_adapter *jme) 400 { 401 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 402 } 403 404 static inline void 405 jme_set_phyfifo_8level(struct jme_adapter *jme) 406 { 407 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 408 } 409 410 static int 411 jme_check_link(struct net_device *netdev, int testonly) 412 { 413 struct jme_adapter *jme = netdev_priv(netdev); 414 u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; 415 char linkmsg[64]; 416 int rc = 0; 417 418 linkmsg[0] = '\0'; 419 420 if (jme->fpgaver) 421 phylink = jme_linkstat_from_phy(jme); 422 else 423 phylink = jread32(jme, JME_PHY_LINK); 424 425 if 
(phylink & PHY_LINK_UP) { 426 if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { 427 /* 428 * If we did not enable AN 429 * Speed/Duplex Info should be obtained from SMI 430 */ 431 phylink = PHY_LINK_UP; 432 433 bmcr = jme_mdio_read(jme->dev, 434 jme->mii_if.phy_id, 435 MII_BMCR); 436 437 phylink |= ((bmcr & BMCR_SPEED1000) && 438 (bmcr & BMCR_SPEED100) == 0) ? 439 PHY_LINK_SPEED_1000M : 440 (bmcr & BMCR_SPEED100) ? 441 PHY_LINK_SPEED_100M : 442 PHY_LINK_SPEED_10M; 443 444 phylink |= (bmcr & BMCR_FULLDPLX) ? 445 PHY_LINK_DUPLEX : 0; 446 447 strcat(linkmsg, "Forced: "); 448 } else { 449 /* 450 * Keep polling for speed/duplex resolve complete 451 */ 452 while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && 453 --cnt) { 454 455 udelay(1); 456 457 if (jme->fpgaver) 458 phylink = jme_linkstat_from_phy(jme); 459 else 460 phylink = jread32(jme, JME_PHY_LINK); 461 } 462 if (!cnt) 463 pr_err("Waiting speed resolve timeout\n"); 464 465 strcat(linkmsg, "ANed: "); 466 } 467 468 if (jme->phylink == phylink) { 469 rc = 1; 470 goto out; 471 } 472 if (testonly) 473 goto out; 474 475 jme->phylink = phylink; 476 477 /* 478 * The speed/duplex setting of jme->reg_ghc already cleared 479 * by jme_reset_mac_processor() 480 */ 481 switch (phylink & PHY_LINK_SPEED_MASK) { 482 case PHY_LINK_SPEED_10M: 483 jme->reg_ghc |= GHC_SPEED_10M; 484 strcat(linkmsg, "10 Mbps, "); 485 break; 486 case PHY_LINK_SPEED_100M: 487 jme->reg_ghc |= GHC_SPEED_100M; 488 strcat(linkmsg, "100 Mbps, "); 489 break; 490 case PHY_LINK_SPEED_1000M: 491 jme->reg_ghc |= GHC_SPEED_1000M; 492 strcat(linkmsg, "1000 Mbps, "); 493 break; 494 default: 495 break; 496 } 497 498 if (phylink & PHY_LINK_DUPLEX) { 499 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 500 jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); 501 jme->reg_ghc |= GHC_DPX; 502 } else { 503 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 504 TXMCS_BACKOFF | 505 TXMCS_CARRIERSENSE | 506 TXMCS_COLLISION); 507 jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); 508 } 509 510 jwrite32(jme, JME_GHC, jme->reg_ghc); 511 512 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 513 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | 514 GPREG1_RSSPATCH); 515 if (!(phylink & PHY_LINK_DUPLEX)) 516 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; 517 switch (phylink & PHY_LINK_SPEED_MASK) { 518 case PHY_LINK_SPEED_10M: 519 jme_set_phyfifo_8level(jme); 520 jme->reg_gpreg1 |= GPREG1_RSSPATCH; 521 break; 522 case PHY_LINK_SPEED_100M: 523 jme_set_phyfifo_5level(jme); 524 jme->reg_gpreg1 |= GPREG1_RSSPATCH; 525 break; 526 case PHY_LINK_SPEED_1000M: 527 jme_set_phyfifo_8level(jme); 528 break; 529 default: 530 break; 531 } 532 } 533 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); 534 535 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 536 "Full-Duplex, " : 537 "Half-Duplex, "); 538 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 
				"MDI-X" :
				"MDI");
		netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		netif_info(jme, link, jme->dev, "Link is down\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
					   &(txring->dmaalloc),
					   GFP_ATOMIC);

	if (!txring->alloc)
		goto err_set_null;

	/*
	 * 16-byte alignment
	 */
	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
				     RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	txring->bufinf = kcalloc(jme->tx_ring_size,
				 sizeof(struct jme_buffer_info),
				 GFP_ATOMIC);
	if (unlikely(!(txring->bufinf)))
		goto err_free_txring;

	return 0;

err_free_txring:
	dma_free_coherent(&(jme->pdev->dev),
			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
			  txring->alloc,
			  txring->dmaalloc);

err_set_null:
	txring->desc = NULL;
	txring->dmaalloc = 0;
	txring->dma = 0;
	txring->bufinf = NULL;

	return -ENOMEM;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi;

	if (txring->alloc) {
		if (txring->bufinf) {
			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
				txbi = txring->bufinf + i;
				if (txbi->skb) {
					dev_kfree_skb(txbi->skb);
					txbi->skb = NULL;
				}
				txbi->mapping = 0;
				txbi->len = 0;
				txbi->nr_desc = 0;
				txbi->start_xmit = 0;
			}
			kfree(txring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		txring->bufinf = NULL;
	}
	txring->next_to_use = 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32f(jme, JME_TXCS, jme->reg_txcs |
				 TXCS_SELECT_QUEUE0 |
				 TXCS_ENABLE);

	/*
	 * Start clock for TX MAC Processor
	 */
	jme_mac_txclk_on(jme);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable TX engine timeout\n");

	/*
	 * Stop clock for TX MAC Processor
	 */
	jme_mac_txclk_off(jme);
}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(
			(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data),
			       offset_in_page(skb->data), skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if (likely(rxbi->mapping))
		dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
			       DMA_FROM_DEVICE);

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = mapping;
	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
			       DMA_FROM_DEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			kfree(rxring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		rxring->bufinf = NULL;
	}
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
					   &(rxring->dmaalloc),
					   GFP_ATOMIC);
	if (!rxring->alloc)
		goto err_set_null;

	/*
	 * 16-byte alignment
	 */
	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
				     RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	atomic_set(&rxring->next_to_clean, 0);

	rxring->bufinf = kcalloc(jme->rx_ring_size,
				 sizeof(struct jme_buffer_info),
				 GFP_ATOMIC);
	if (unlikely(!(rxring->bufinf)))
		goto err_free_rxring;

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0 ; i < jme->rx_ring_size ;
	     ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;

err_free_rxring:
	dma_free_coherent(&(jme->pdev->dev),
			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
			  rxring->alloc,
			  rxring->dmaalloc);
err_set_null:
	rxring->desc = NULL;
	rxring->dmaalloc = 0;
	rxring->dma = 0;
	rxring->bufinf = NULL;

	return -ENOMEM;
}

static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_unicastaddr(jme->dev);
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
				 RXCS_QUEUESEL_Q0 |
				 RXCS_ENABLE |
				 RXCS_QST);

	/*
	 * Start clock for RX MAC Processor
	 */
	jme_mac_rxclk_on(jme);
}

static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable RX engine timeout\n");

	/*
	 * Stop clock for RX MAC Processor
	 */
	jme_mac_rxclk_off(jme);
}

static u16
jme_udpsum(struct sk_buff *skb)
{
	u16 csum = 0xFFFFu;

	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
		return csum;
	if (skb->protocol != htons(ETH_P_IP))
		return csum;
	skb_set_network_header(skb, ETH_HLEN);

	if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
	    skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
		skb_reset_network_header(skb);
		return csum;
	}
	skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
	csum = udp_hdr(skb)->check;
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);

	return csum;
}

static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
		return false;

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
			== RXWBFLAG_TCPON)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
			== RXWBFLAG_UDPON) && jme_udpsum(skb)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
			== RXWBFLAG_IPV4)) {
		netif_err(jme, rx_err,
jme->dev, "IPv4 Checksum error\n"); 986 return false; 987 } 988 989 return true; 990 } 991 992 static void 993 jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) 994 { 995 struct jme_ring *rxring = &(jme->rxring[0]); 996 struct rxdesc *rxdesc = rxring->desc; 997 struct jme_buffer_info *rxbi = rxring->bufinf; 998 struct sk_buff *skb; 999 int framesize; 1000 1001 rxdesc += idx; 1002 rxbi += idx; 1003 1004 skb = rxbi->skb; 1005 dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len, 1006 DMA_FROM_DEVICE); 1007 1008 if (unlikely(jme_make_new_rx_buf(jme, idx))) { 1009 dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping, 1010 rxbi->len, DMA_FROM_DEVICE); 1011 1012 ++(NET_STAT(jme).rx_dropped); 1013 } else { 1014 framesize = le16_to_cpu(rxdesc->descwb.framesize) 1015 - RX_PREPAD_SIZE; 1016 1017 skb_reserve(skb, RX_PREPAD_SIZE); 1018 skb_put(skb, framesize); 1019 skb->protocol = eth_type_trans(skb, jme->dev); 1020 1021 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) 1022 skb->ip_summed = CHECKSUM_UNNECESSARY; 1023 else 1024 skb_checksum_none_assert(skb); 1025 1026 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 1027 u16 vid = le16_to_cpu(rxdesc->descwb.vlan); 1028 1029 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1030 NET_STAT(jme).rx_bytes += 4; 1031 } 1032 jme->jme_rx(skb); 1033 1034 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == 1035 cpu_to_le16(RXWBFLAG_DEST_MUL)) 1036 ++(NET_STAT(jme).multicast); 1037 1038 NET_STAT(jme).rx_bytes += framesize; 1039 ++(NET_STAT(jme).rx_packets); 1040 } 1041 1042 jme_set_clean_rxdesc(jme, idx); 1043 1044 } 1045 1046 static int 1047 jme_process_receive(struct jme_adapter *jme, int limit) 1048 { 1049 struct jme_ring *rxring = &(jme->rxring[0]); 1050 struct rxdesc *rxdesc; 1051 int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; 1052 1053 if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) 1054 goto out_inc; 1055 1056 if (unlikely(atomic_read(&jme->link_changing) != 1)) 1057 goto out_inc; 1058 1059 if (unlikely(!netif_carrier_ok(jme->dev))) 1060 goto out_inc; 1061 1062 i = atomic_read(&rxring->next_to_clean); 1063 while (limit > 0) { 1064 rxdesc = rxring->desc; 1065 rxdesc += i; 1066 1067 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || 1068 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 1069 goto out; 1070 --limit; 1071 1072 rmb(); 1073 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; 1074 1075 if (unlikely(desccnt > 1 || 1076 rxdesc->descwb.errstat & RXWBERR_ALLERR)) { 1077 1078 if (rxdesc->descwb.errstat & RXWBERR_CRCERR) 1079 ++(NET_STAT(jme).rx_crc_errors); 1080 else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) 1081 ++(NET_STAT(jme).rx_fifo_errors); 1082 else 1083 ++(NET_STAT(jme).rx_errors); 1084 1085 if (desccnt > 1) 1086 limit -= desccnt - 1; 1087 1088 for (j = i, ccnt = desccnt ; ccnt-- ; ) { 1089 jme_set_clean_rxdesc(jme, j); 1090 j = (j + 1) & (mask); 1091 } 1092 1093 } else { 1094 jme_alloc_and_feed_skb(jme, i); 1095 } 1096 1097 i = (i + desccnt) & (mask); 1098 } 1099 1100 out: 1101 atomic_set(&rxring->next_to_clean, i); 1102 1103 out_inc: 1104 atomic_inc(&jme->rx_cleaning); 1105 1106 return limit > 0 ? 
limit : 0; 1107 1108 } 1109 1110 static void 1111 jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) 1112 { 1113 if (likely(atmp == dpi->cur)) { 1114 dpi->cnt = 0; 1115 return; 1116 } 1117 1118 if (dpi->attempt == atmp) { 1119 ++(dpi->cnt); 1120 } else { 1121 dpi->attempt = atmp; 1122 dpi->cnt = 0; 1123 } 1124 1125 } 1126 1127 static void 1128 jme_dynamic_pcc(struct jme_adapter *jme) 1129 { 1130 register struct dynpcc_info *dpi = &(jme->dpi); 1131 1132 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) 1133 jme_attempt_pcc(dpi, PCC_P3); 1134 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || 1135 dpi->intr_cnt > PCC_INTR_THRESHOLD) 1136 jme_attempt_pcc(dpi, PCC_P2); 1137 else 1138 jme_attempt_pcc(dpi, PCC_P1); 1139 1140 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { 1141 if (dpi->attempt < dpi->cur) 1142 tasklet_schedule(&jme->rxclean_task); 1143 jme_set_rx_pcc(jme, dpi->attempt); 1144 dpi->cur = dpi->attempt; 1145 dpi->cnt = 0; 1146 } 1147 } 1148 1149 static void 1150 jme_start_pcc_timer(struct jme_adapter *jme) 1151 { 1152 struct dynpcc_info *dpi = &(jme->dpi); 1153 dpi->last_bytes = NET_STAT(jme).rx_bytes; 1154 dpi->last_pkts = NET_STAT(jme).rx_packets; 1155 dpi->intr_cnt = 0; 1156 jwrite32(jme, JME_TMCSR, 1157 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); 1158 } 1159 1160 static inline void 1161 jme_stop_pcc_timer(struct jme_adapter *jme) 1162 { 1163 jwrite32(jme, JME_TMCSR, 0); 1164 } 1165 1166 static void 1167 jme_shutdown_nic(struct jme_adapter *jme) 1168 { 1169 u32 phylink; 1170 1171 phylink = jme_linkstat_from_phy(jme); 1172 1173 if (!(phylink & PHY_LINK_UP)) { 1174 /* 1175 * Disable all interrupt before issue timer 1176 */ 1177 jme_stop_irq(jme); 1178 jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); 1179 } 1180 } 1181 1182 static void 1183 jme_pcc_tasklet(struct tasklet_struct *t) 1184 { 1185 struct jme_adapter *jme = from_tasklet(jme, t, pcc_task); 1186 struct net_device *netdev = jme->dev; 1187 1188 if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { 1189 jme_shutdown_nic(jme); 1190 return; 1191 } 1192 1193 if (unlikely(!netif_carrier_ok(netdev) || 1194 (atomic_read(&jme->link_changing) != 1) 1195 )) { 1196 jme_stop_pcc_timer(jme); 1197 return; 1198 } 1199 1200 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 1201 jme_dynamic_pcc(jme); 1202 1203 jme_start_pcc_timer(jme); 1204 } 1205 1206 static inline void 1207 jme_polling_mode(struct jme_adapter *jme) 1208 { 1209 jme_set_rx_pcc(jme, PCC_OFF); 1210 } 1211 1212 static inline void 1213 jme_interrupt_mode(struct jme_adapter *jme) 1214 { 1215 jme_set_rx_pcc(jme, PCC_P1); 1216 } 1217 1218 static inline int 1219 jme_pseudo_hotplug_enabled(struct jme_adapter *jme) 1220 { 1221 u32 apmc; 1222 apmc = jread32(jme, JME_APMC); 1223 return apmc & JME_APMC_PSEUDO_HP_EN; 1224 } 1225 1226 static void 1227 jme_start_shutdown_timer(struct jme_adapter *jme) 1228 { 1229 u32 apmc; 1230 1231 apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; 1232 apmc &= ~JME_APMC_EPIEN_CTRL; 1233 if (!no_extplug) { 1234 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); 1235 wmb(); 1236 } 1237 jwrite32f(jme, JME_APMC, apmc); 1238 1239 jwrite32f(jme, JME_TIMER2, 0); 1240 set_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1241 jwrite32(jme, JME_TMCSR, 1242 TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); 1243 } 1244 1245 static void 1246 jme_stop_shutdown_timer(struct jme_adapter *jme) 1247 { 1248 u32 apmc; 1249 1250 jwrite32f(jme, JME_TMCSR, 0); 1251 jwrite32f(jme, JME_TIMER2, 0); 1252 
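	/*
	 * Both timers are stopped now; clear the shutdown flag and disable
	 * the pseudo hot-plug controls in APMC below.
	 */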
clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1253 1254 apmc = jread32(jme, JME_APMC); 1255 apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); 1256 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); 1257 wmb(); 1258 jwrite32f(jme, JME_APMC, apmc); 1259 } 1260 1261 static void jme_link_change_work(struct work_struct *work) 1262 { 1263 struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task); 1264 struct net_device *netdev = jme->dev; 1265 int rc; 1266 1267 while (!atomic_dec_and_test(&jme->link_changing)) { 1268 atomic_inc(&jme->link_changing); 1269 netif_info(jme, intr, jme->dev, "Get link change lock failed\n"); 1270 while (atomic_read(&jme->link_changing) != 1) 1271 netif_info(jme, intr, jme->dev, "Waiting link change lock\n"); 1272 } 1273 1274 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) 1275 goto out; 1276 1277 jme->old_mtu = netdev->mtu; 1278 netif_stop_queue(netdev); 1279 if (jme_pseudo_hotplug_enabled(jme)) 1280 jme_stop_shutdown_timer(jme); 1281 1282 jme_stop_pcc_timer(jme); 1283 tasklet_disable(&jme->txclean_task); 1284 tasklet_disable(&jme->rxclean_task); 1285 tasklet_disable(&jme->rxempty_task); 1286 1287 if (netif_carrier_ok(netdev)) { 1288 jme_disable_rx_engine(jme); 1289 jme_disable_tx_engine(jme); 1290 jme_reset_mac_processor(jme); 1291 jme_free_rx_resources(jme); 1292 jme_free_tx_resources(jme); 1293 1294 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1295 jme_polling_mode(jme); 1296 1297 netif_carrier_off(netdev); 1298 } 1299 1300 jme_check_link(netdev, 0); 1301 if (netif_carrier_ok(netdev)) { 1302 rc = jme_setup_rx_resources(jme); 1303 if (rc) { 1304 pr_err("Allocating resources for RX error, Device STOPPED!\n"); 1305 goto out_enable_tasklet; 1306 } 1307 1308 rc = jme_setup_tx_resources(jme); 1309 if (rc) { 1310 pr_err("Allocating resources for TX error, Device STOPPED!\n"); 1311 goto err_out_free_rx_resources; 1312 } 1313 1314 jme_enable_rx_engine(jme); 1315 jme_enable_tx_engine(jme); 1316 1317 netif_start_queue(netdev); 1318 1319 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1320 jme_interrupt_mode(jme); 1321 1322 jme_start_pcc_timer(jme); 1323 } else if (jme_pseudo_hotplug_enabled(jme)) { 1324 jme_start_shutdown_timer(jme); 1325 } 1326 1327 goto out_enable_tasklet; 1328 1329 err_out_free_rx_resources: 1330 jme_free_rx_resources(jme); 1331 out_enable_tasklet: 1332 tasklet_enable(&jme->txclean_task); 1333 tasklet_enable(&jme->rxclean_task); 1334 tasklet_enable(&jme->rxempty_task); 1335 out: 1336 atomic_inc(&jme->link_changing); 1337 } 1338 1339 static void 1340 jme_rx_clean_tasklet(struct tasklet_struct *t) 1341 { 1342 struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task); 1343 struct dynpcc_info *dpi = &(jme->dpi); 1344 1345 jme_process_receive(jme, jme->rx_ring_size); 1346 ++(dpi->intr_cnt); 1347 1348 } 1349 1350 static int 1351 jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) 1352 { 1353 struct jme_adapter *jme = jme_napi_priv(holder); 1354 int rest; 1355 1356 rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); 1357 1358 while (atomic_read(&jme->rx_empty) > 0) { 1359 atomic_dec(&jme->rx_empty); 1360 ++(NET_STAT(jme).rx_dropped); 1361 jme_restart_rx_engine(jme); 1362 } 1363 atomic_inc(&jme->rx_empty); 1364 1365 if (rest) { 1366 JME_RX_COMPLETE(netdev, holder); 1367 jme_interrupt_mode(jme); 1368 } 1369 1370 JME_NAPI_WEIGHT_SET(budget, rest); 1371 return JME_NAPI_WEIGHT_VAL(budget) - rest; 1372 } 1373 1374 static void 1375 jme_rx_empty_tasklet(struct tasklet_struct *t) 1376 { 1377 struct jme_adapter *jme = 
from_tasklet(jme, t, rxempty_task); 1378 1379 if (unlikely(atomic_read(&jme->link_changing) != 1)) 1380 return; 1381 1382 if (unlikely(!netif_carrier_ok(jme->dev))) 1383 return; 1384 1385 netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n"); 1386 1387 jme_rx_clean_tasklet(&jme->rxclean_task); 1388 1389 while (atomic_read(&jme->rx_empty) > 0) { 1390 atomic_dec(&jme->rx_empty); 1391 ++(NET_STAT(jme).rx_dropped); 1392 jme_restart_rx_engine(jme); 1393 } 1394 atomic_inc(&jme->rx_empty); 1395 } 1396 1397 static void 1398 jme_wake_queue_if_stopped(struct jme_adapter *jme) 1399 { 1400 struct jme_ring *txring = &(jme->txring[0]); 1401 1402 smp_wmb(); 1403 if (unlikely(netif_queue_stopped(jme->dev) && 1404 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { 1405 netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n"); 1406 netif_wake_queue(jme->dev); 1407 } 1408 1409 } 1410 1411 static void jme_tx_clean_tasklet(struct tasklet_struct *t) 1412 { 1413 struct jme_adapter *jme = from_tasklet(jme, t, txclean_task); 1414 struct jme_ring *txring = &(jme->txring[0]); 1415 struct txdesc *txdesc = txring->desc; 1416 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; 1417 int i, j, cnt = 0, max, err, mask; 1418 1419 tx_dbg(jme, "Into txclean\n"); 1420 1421 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) 1422 goto out; 1423 1424 if (unlikely(atomic_read(&jme->link_changing) != 1)) 1425 goto out; 1426 1427 if (unlikely(!netif_carrier_ok(jme->dev))) 1428 goto out; 1429 1430 max = jme->tx_ring_size - atomic_read(&txring->nr_free); 1431 mask = jme->tx_ring_mask; 1432 1433 for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { 1434 1435 ctxbi = txbi + i; 1436 1437 if (likely(ctxbi->skb && 1438 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { 1439 1440 tx_dbg(jme, "txclean: %d+%d@%lu\n", 1441 i, ctxbi->nr_desc, jiffies); 1442 1443 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; 1444 1445 for (j = 1 ; j < ctxbi->nr_desc ; ++j) { 1446 ttxbi = txbi + ((i + j) & (mask)); 1447 txdesc[(i + j) & (mask)].dw[0] = 0; 1448 1449 dma_unmap_page(&jme->pdev->dev, 1450 ttxbi->mapping, ttxbi->len, 1451 DMA_TO_DEVICE); 1452 1453 ttxbi->mapping = 0; 1454 ttxbi->len = 0; 1455 } 1456 1457 dev_kfree_skb(ctxbi->skb); 1458 1459 cnt += ctxbi->nr_desc; 1460 1461 if (unlikely(err)) { 1462 ++(NET_STAT(jme).tx_carrier_errors); 1463 } else { 1464 ++(NET_STAT(jme).tx_packets); 1465 NET_STAT(jme).tx_bytes += ctxbi->len; 1466 } 1467 1468 ctxbi->skb = NULL; 1469 ctxbi->len = 0; 1470 ctxbi->start_xmit = 0; 1471 1472 } else { 1473 break; 1474 } 1475 1476 i = (i + ctxbi->nr_desc) & mask; 1477 1478 ctxbi->nr_desc = 0; 1479 } 1480 1481 tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies); 1482 atomic_set(&txring->next_to_clean, i); 1483 atomic_add(cnt, &txring->nr_free); 1484 1485 jme_wake_queue_if_stopped(jme); 1486 1487 out: 1488 atomic_inc(&jme->tx_cleaning); 1489 } 1490 1491 static void 1492 jme_intr_msi(struct jme_adapter *jme, u32 intrstat) 1493 { 1494 /* 1495 * Disable interrupt 1496 */ 1497 jwrite32f(jme, JME_IENC, INTR_ENABLE); 1498 1499 if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { 1500 /* 1501 * Link change event is critical 1502 * all other events are ignored 1503 */ 1504 jwrite32(jme, JME_IEVE, intrstat); 1505 schedule_work(&jme->linkch_task); 1506 goto out_reenable; 1507 } 1508 1509 if (intrstat & INTR_TMINTR) { 1510 jwrite32(jme, JME_IEVE, INTR_TMINTR); 1511 tasklet_schedule(&jme->pcc_task); 1512 } 1513 1514 if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { 1515 jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | 
						     INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely((intrstat & INTR_ENABLE) == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exists
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	u32 bmcr;

	spin_lock_bh(&jme->phy_lock);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_bh(&jme->phy_lock);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		set_bit(JME_FLAG_MSI, &jme->flags);
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			 netdev);
	if (rc) {
		netdev_err(netdev,
			   "Unable to request %s interrupt (return: %d)\n",
			   test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
			   rc);

		if (test_bit(JME_FLAG_MSI, &jme->flags)) {
			pci_disable_msi(jme->pdev);
			clear_bit(JME_FLAG_MSI, &jme->flags);
		}
	} else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
		pci_disable_msi(jme->pdev);
		clear_bit(JME_FLAG_MSI, &jme->flags);
		jme->dev->irq = jme->pdev->irq;
	}
}

static inline void
jme_new_phy_on(struct jme_adapter *jme)
{
	u32 reg;

	reg = jread32(jme, JME_PHY_PWR);
	reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
		 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
	jwrite32(jme, JME_PHY_PWR, reg);

	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
	reg &= ~PE1_GPREG0_PBG;
	reg |= PE1_GPREG0_ENBG;
	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
}

static inline void
jme_new_phy_off(struct jme_adapter *jme)
{
	u32 reg;

	reg = jread32(jme, JME_PHY_PWR);
	reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
	       PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
	jwrite32(jme, JME_PHY_PWR, reg);

	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
	reg &= ~PE1_GPREG0_PBG;
	reg |= PE1_GPREG0_PDD3COLD;
	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
}

static inline void
jme_phy_on(struct jme_adapter *jme)
{
	u32 bmcr;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr &= ~BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
		jme_new_phy_on(jme);
}

static inline void
jme_phy_off(struct jme_adapter *jme)
{
	u32 bmcr;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
		jme_new_phy_off(jme);
}

static int
jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
{
	u32 phy_addr;

	phy_addr = JM_PHY_SPEC_REG_READ | specreg;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
	return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
			     JM_PHY_SPEC_DATA_REG);
}

static void
jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
{
	u32 phy_addr;

	phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
		       phy_data);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
}

static int
jme_phy_calibration(struct jme_adapter *jme)
{
	u32 ctrl1000, phy_data;

	jme_phy_off(jme);
	jme_phy_on(jme);
	/* Enable PHY test mode 1 */
	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
	ctrl1000 |= PHY_GAD_TEST_MODE_1;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);

	phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
	phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
	phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
		    JM_PHY_EXT_COMM_2_CALI_ENABLE;
	jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
	msleep(20);
	phy_data =
jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); 1756 phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | 1757 JM_PHY_EXT_COMM_2_CALI_MODE_0 | 1758 JM_PHY_EXT_COMM_2_CALI_LATCH); 1759 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); 1760 1761 /* Disable PHY test mode */ 1762 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); 1763 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; 1764 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); 1765 return 0; 1766 } 1767 1768 static int 1769 jme_phy_setEA(struct jme_adapter *jme) 1770 { 1771 u32 phy_comm0 = 0, phy_comm1 = 0; 1772 u8 nic_ctrl; 1773 1774 pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); 1775 if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) 1776 return 0; 1777 1778 switch (jme->pdev->device) { 1779 case PCI_DEVICE_ID_JMICRON_JMC250: 1780 if (((jme->chip_main_rev == 5) && 1781 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || 1782 (jme->chip_sub_rev == 3))) || 1783 (jme->chip_main_rev >= 6)) { 1784 phy_comm0 = 0x008A; 1785 phy_comm1 = 0x4109; 1786 } 1787 if ((jme->chip_main_rev == 3) && 1788 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) 1789 phy_comm0 = 0xE088; 1790 break; 1791 case PCI_DEVICE_ID_JMICRON_JMC260: 1792 if (((jme->chip_main_rev == 5) && 1793 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || 1794 (jme->chip_sub_rev == 3))) || 1795 (jme->chip_main_rev >= 6)) { 1796 phy_comm0 = 0x008A; 1797 phy_comm1 = 0x4109; 1798 } 1799 if ((jme->chip_main_rev == 3) && 1800 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) 1801 phy_comm0 = 0xE088; 1802 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) 1803 phy_comm0 = 0x608A; 1804 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) 1805 phy_comm0 = 0x408A; 1806 break; 1807 default: 1808 return -ENODEV; 1809 } 1810 if (phy_comm0) 1811 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0); 1812 if (phy_comm1) 1813 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1); 1814 1815 return 0; 1816 } 1817 1818 static int 1819 jme_open(struct net_device *netdev) 1820 { 1821 struct jme_adapter *jme = netdev_priv(netdev); 1822 int rc; 1823 1824 jme_clear_pm_disable_wol(jme); 1825 JME_NAPI_ENABLE(jme); 1826 1827 tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet); 1828 tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet); 1829 tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet); 1830 1831 rc = jme_request_irq(jme); 1832 if (rc) 1833 goto err_out; 1834 1835 jme_start_irq(jme); 1836 1837 jme_phy_on(jme); 1838 if (test_bit(JME_FLAG_SSET, &jme->flags)) 1839 jme_set_link_ksettings(netdev, &jme->old_cmd); 1840 else 1841 jme_reset_phy_processor(jme); 1842 jme_phy_calibration(jme); 1843 jme_phy_setEA(jme); 1844 jme_reset_link(jme); 1845 1846 return 0; 1847 1848 err_out: 1849 netif_stop_queue(netdev); 1850 netif_carrier_off(netdev); 1851 return rc; 1852 } 1853 1854 static void 1855 jme_set_100m_half(struct jme_adapter *jme) 1856 { 1857 u32 bmcr, tmp; 1858 1859 jme_phy_on(jme); 1860 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1861 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | 1862 BMCR_SPEED1000 | BMCR_FULLDPLX); 1863 tmp |= BMCR_SPEED100; 1864 1865 if (bmcr != tmp) 1866 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); 1867 1868 if (jme->fpgaver) 1869 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); 1870 else 1871 jwrite32(jme, JME_GHC, GHC_SPEED_100M); 1872 } 1873 1874 #define JME_WAIT_LINK_TIME 2000 /* 2000ms */ 1875 static void 1876 jme_wait_link(struct 
jme_adapter *jme) 1877 { 1878 u32 phylink, to = JME_WAIT_LINK_TIME; 1879 1880 msleep(1000); 1881 phylink = jme_linkstat_from_phy(jme); 1882 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { 1883 usleep_range(10000, 11000); 1884 phylink = jme_linkstat_from_phy(jme); 1885 } 1886 } 1887 1888 static void 1889 jme_powersave_phy(struct jme_adapter *jme) 1890 { 1891 if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) { 1892 jme_set_100m_half(jme); 1893 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) 1894 jme_wait_link(jme); 1895 jme_clear_pm_enable_wol(jme); 1896 } else { 1897 jme_phy_off(jme); 1898 } 1899 } 1900 1901 static int 1902 jme_close(struct net_device *netdev) 1903 { 1904 struct jme_adapter *jme = netdev_priv(netdev); 1905 1906 netif_stop_queue(netdev); 1907 netif_carrier_off(netdev); 1908 1909 jme_stop_irq(jme); 1910 jme_free_irq(jme); 1911 1912 JME_NAPI_DISABLE(jme); 1913 1914 cancel_work_sync(&jme->linkch_task); 1915 tasklet_kill(&jme->txclean_task); 1916 tasklet_kill(&jme->rxclean_task); 1917 tasklet_kill(&jme->rxempty_task); 1918 1919 jme_disable_rx_engine(jme); 1920 jme_disable_tx_engine(jme); 1921 jme_reset_mac_processor(jme); 1922 jme_free_rx_resources(jme); 1923 jme_free_tx_resources(jme); 1924 jme->phylink = 0; 1925 jme_phy_off(jme); 1926 1927 return 0; 1928 } 1929 1930 static int 1931 jme_alloc_txdesc(struct jme_adapter *jme, 1932 struct sk_buff *skb) 1933 { 1934 struct jme_ring *txring = &(jme->txring[0]); 1935 int idx, nr_alloc, mask = jme->tx_ring_mask; 1936 1937 idx = txring->next_to_use; 1938 nr_alloc = skb_shinfo(skb)->nr_frags + 2; 1939 1940 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) 1941 return -1; 1942 1943 atomic_sub(nr_alloc, &txring->nr_free); 1944 1945 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; 1946 1947 return idx; 1948 } 1949 1950 static int 1951 jme_fill_tx_map(struct pci_dev *pdev, 1952 struct txdesc *txdesc, 1953 struct jme_buffer_info *txbi, 1954 struct page *page, 1955 u32 page_offset, 1956 u32 len, 1957 bool hidma) 1958 { 1959 dma_addr_t dmaaddr; 1960 1961 dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len, 1962 DMA_TO_DEVICE); 1963 1964 if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr))) 1965 return -EINVAL; 1966 1967 dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE); 1968 1969 txdesc->dw[0] = 0; 1970 txdesc->dw[1] = 0; 1971 txdesc->desc2.flags = TXFLAG_OWN; 1972 txdesc->desc2.flags |= (hidma) ? 
TXFLAG_64BIT : 0; 1973 txdesc->desc2.datalen = cpu_to_le16(len); 1974 txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); 1975 txdesc->desc2.bufaddrl = cpu_to_le32( 1976 (__u64)dmaaddr & 0xFFFFFFFFUL); 1977 1978 txbi->mapping = dmaaddr; 1979 txbi->len = len; 1980 return 0; 1981 } 1982 1983 static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) 1984 { 1985 struct jme_ring *txring = &(jme->txring[0]); 1986 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; 1987 int mask = jme->tx_ring_mask; 1988 int j; 1989 1990 for (j = 0 ; j < count ; j++) { 1991 ctxbi = txbi + ((startidx + j + 2) & (mask)); 1992 dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len, 1993 DMA_TO_DEVICE); 1994 1995 ctxbi->mapping = 0; 1996 ctxbi->len = 0; 1997 } 1998 } 1999 2000 static int 2001 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) 2002 { 2003 struct jme_ring *txring = &(jme->txring[0]); 2004 struct txdesc *txdesc = txring->desc, *ctxdesc; 2005 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; 2006 bool hidma = jme->dev->features & NETIF_F_HIGHDMA; 2007 int i, nr_frags = skb_shinfo(skb)->nr_frags; 2008 int mask = jme->tx_ring_mask; 2009 u32 len; 2010 int ret = 0; 2011 2012 for (i = 0 ; i < nr_frags ; ++i) { 2013 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2014 2015 ctxdesc = txdesc + ((idx + i + 2) & (mask)); 2016 ctxbi = txbi + ((idx + i + 2) & (mask)); 2017 2018 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, 2019 skb_frag_page(frag), skb_frag_off(frag), 2020 skb_frag_size(frag), hidma); 2021 if (ret) { 2022 jme_drop_tx_map(jme, idx, i); 2023 goto out; 2024 } 2025 } 2026 2027 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 2028 ctxdesc = txdesc + ((idx + 1) & (mask)); 2029 ctxbi = txbi + ((idx + 1) & (mask)); 2030 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), 2031 offset_in_page(skb->data), len, hidma); 2032 if (ret) 2033 jme_drop_tx_map(jme, idx, i); 2034 2035 out: 2036 return ret; 2037 2038 } 2039 2040 2041 static int 2042 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 2043 { 2044 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); 2045 if (*mss) { 2046 *flags |= TXFLAG_LSEN; 2047 2048 if (skb->protocol == htons(ETH_P_IP)) { 2049 struct iphdr *iph = ip_hdr(skb); 2050 2051 iph->check = 0; 2052 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2053 iph->daddr, 0, 2054 IPPROTO_TCP, 2055 0); 2056 } else { 2057 tcp_v6_gso_csum_prep(skb); 2058 } 2059 2060 return 0; 2061 } 2062 2063 return 1; 2064 } 2065 2066 static void 2067 jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) 2068 { 2069 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2070 u8 ip_proto; 2071 2072 switch (skb->protocol) { 2073 case htons(ETH_P_IP): 2074 ip_proto = ip_hdr(skb)->protocol; 2075 break; 2076 case htons(ETH_P_IPV6): 2077 ip_proto = ipv6_hdr(skb)->nexthdr; 2078 break; 2079 default: 2080 ip_proto = 0; 2081 break; 2082 } 2083 2084 switch (ip_proto) { 2085 case IPPROTO_TCP: 2086 *flags |= TXFLAG_TCPCS; 2087 break; 2088 case IPPROTO_UDP: 2089 *flags |= TXFLAG_UDPCS; 2090 break; 2091 default: 2092 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n"); 2093 break; 2094 } 2095 } 2096 } 2097 2098 static inline void 2099 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) 2100 { 2101 if (skb_vlan_tag_present(skb)) { 2102 *flags |= TXFLAG_TAGON; 2103 *vlan = cpu_to_le16(skb_vlan_tag_get(skb)); 2104 } 2105 } 2106 2107 static int 2108 jme_fill_tx_desc(struct jme_adapter *jme, struct 
		 sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;
	int ret = 0;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last.  If the kernel queues packets faster than
	 * the NIC sends them, the NIC could otherwise pick up this
	 * descriptor before we tell it to start on this TX queue.
	 * All other fields are already filled in correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags only when not doing TSO
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	ret = jme_map_tx_skb(jme, skb, idx);
	if (ret)
		return ret;

	txdesc->desc1.flags = flags;
	/*
	 * Fill in the TX buffer info after telling the NIC to send,
	 * for better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	txbi->start_xmit = jiffies;
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}

static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev,
			   "TX Queue Stopped %d@%lu\n", idx, jiffies);
	}
}

/*
 * This function is already protected by netif_tx_lock()
 */

static netdev_tx_t
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
		dev_kfree_skb_any(skb);
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	if (unlikely(idx < 0)) {
		netif_stop_queue(netdev);
		netif_err(jme, tx_err, jme->dev,
			  "BUG! 
Tx ring full when queue awake!\n"); 2211 2212 return NETDEV_TX_BUSY; 2213 } 2214 2215 if (jme_fill_tx_desc(jme, skb, idx)) 2216 return NETDEV_TX_OK; 2217 2218 jwrite32(jme, JME_TXCS, jme->reg_txcs | 2219 TXCS_SELECT_QUEUE0 | 2220 TXCS_QUEUE0S | 2221 TXCS_ENABLE); 2222 2223 tx_dbg(jme, "xmit: %d+%d@%lu\n", 2224 idx, skb_shinfo(skb)->nr_frags + 2, jiffies); 2225 jme_stop_queue_if_full(jme); 2226 2227 return NETDEV_TX_OK; 2228 } 2229 2230 static void 2231 jme_set_unicastaddr(struct net_device *netdev) 2232 { 2233 struct jme_adapter *jme = netdev_priv(netdev); 2234 u32 val; 2235 2236 val = (netdev->dev_addr[3] & 0xff) << 24 | 2237 (netdev->dev_addr[2] & 0xff) << 16 | 2238 (netdev->dev_addr[1] & 0xff) << 8 | 2239 (netdev->dev_addr[0] & 0xff); 2240 jwrite32(jme, JME_RXUMA_LO, val); 2241 val = (netdev->dev_addr[5] & 0xff) << 8 | 2242 (netdev->dev_addr[4] & 0xff); 2243 jwrite32(jme, JME_RXUMA_HI, val); 2244 } 2245 2246 static int 2247 jme_set_macaddr(struct net_device *netdev, void *p) 2248 { 2249 struct jme_adapter *jme = netdev_priv(netdev); 2250 struct sockaddr *addr = p; 2251 2252 if (netif_running(netdev)) 2253 return -EBUSY; 2254 2255 spin_lock_bh(&jme->macaddr_lock); 2256 eth_hw_addr_set(netdev, addr->sa_data); 2257 jme_set_unicastaddr(netdev); 2258 spin_unlock_bh(&jme->macaddr_lock); 2259 2260 return 0; 2261 } 2262 2263 static void 2264 jme_set_multi(struct net_device *netdev) 2265 { 2266 struct jme_adapter *jme = netdev_priv(netdev); 2267 u32 mc_hash[2] = {}; 2268 2269 spin_lock_bh(&jme->rxmcs_lock); 2270 2271 jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; 2272 2273 if (netdev->flags & IFF_PROMISC) { 2274 jme->reg_rxmcs |= RXMCS_ALLFRAME; 2275 } else if (netdev->flags & IFF_ALLMULTI) { 2276 jme->reg_rxmcs |= RXMCS_ALLMULFRAME; 2277 } else if (netdev->flags & IFF_MULTICAST) { 2278 struct netdev_hw_addr *ha; 2279 int bit_nr; 2280 2281 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; 2282 netdev_for_each_mc_addr(ha, netdev) { 2283 bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; 2284 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); 2285 } 2286 2287 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); 2288 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); 2289 } 2290 2291 wmb(); 2292 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); 2293 2294 spin_unlock_bh(&jme->rxmcs_lock); 2295 } 2296 2297 static int 2298 jme_change_mtu(struct net_device *netdev, int new_mtu) 2299 { 2300 struct jme_adapter *jme = netdev_priv(netdev); 2301 2302 netdev->mtu = new_mtu; 2303 netdev_update_features(netdev); 2304 2305 jme_restart_rx_engine(jme); 2306 jme_reset_link(jme); 2307 2308 return 0; 2309 } 2310 2311 static void 2312 jme_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2313 { 2314 struct jme_adapter *jme = netdev_priv(netdev); 2315 2316 jme->phylink = 0; 2317 jme_reset_phy_processor(jme); 2318 if (test_bit(JME_FLAG_SSET, &jme->flags)) 2319 jme_set_link_ksettings(netdev, &jme->old_cmd); 2320 2321 /* 2322 * Force to Reset the link again 2323 */ 2324 jme_reset_link(jme); 2325 } 2326 2327 static void 2328 jme_get_drvinfo(struct net_device *netdev, 2329 struct ethtool_drvinfo *info) 2330 { 2331 struct jme_adapter *jme = netdev_priv(netdev); 2332 2333 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); 2334 strscpy(info->version, DRV_VERSION, sizeof(info->version)); 2335 strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info)); 2336 } 2337 2338 static int 2339 jme_get_regs_len(struct net_device *netdev) 2340 { 2341 return JME_REG_LEN; 2342 } 2343 2344 static void 2345 mmapio_memcpy(struct jme_adapter *jme, 
static void
mmapio_memcpy(struct jme_adapter *jme,
	      u32 *p, u32 reg, int len)
{
	int i;

	for (i = 0 ; i < len ; i += 4)
		p[i >> 2] = jread32(jme, reg + i);
}

static void
mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
{
	int i;
	u16 *p16 = (u16 *)p;

	for (i = 0 ; i < reg_nr ; ++i)
		p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
}

static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 *p32 = (u32 *)p;

	memset(p, 0xFF, JME_REG_LEN);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}

static int jme_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ecmd,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		ecmd->use_adaptive_rx_coalesce = false;
		ecmd->rx_coalesce_usecs = 0;
		ecmd->rx_max_coalesced_frames = 0;
		return 0;
	}

	ecmd->use_adaptive_rx_coalesce = true;

	switch (jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

static int jme_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ecmd,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce &&
	    test_bit(JME_FLAG_POLL, &jme->flags)) {
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		dpi->cur = PCC_P1;
		dpi->attempt = PCC_P1;
		dpi->cnt = 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce) &&
		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		set_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_receive_skb;
		jme_interrupt_mode(jme);
	}

	return 0;
}

static void
jme_get_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

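/*
 * Apply flow-control changes: TX pause frames, RX pause handling and the
 * pause bits advertised to the link partner during autonegotiation.
 */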
static int
jme_set_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
	    (ecmd->tx_pause != 0)) {

		if (ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_bh(&jme->rxmcs_lock);
	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
	    (ecmd->rx_pause != 0)) {

		if (ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_bh(&jme->rxmcs_lock);

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
	    (ecmd->autoneg != 0)) {

		if (ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
			       MII_ADVERTISE, val);
	}
	spin_unlock_bh(&jme->phy_lock);

	return 0;
}

static void
jme_get_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;

	wol->wolopts = 0;

	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
		wol->wolopts |= WAKE_PHY;

	if (jme->reg_pmcs & PMCS_MFEN)
		wol->wolopts |= WAKE_MAGIC;
}

static int
jme_set_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_MAGICSECURE |
			    WAKE_UCAST |
			    WAKE_MCAST |
			    WAKE_BCAST |
			    WAKE_ARP))
		return -EOPNOTSUPP;

	jme->reg_pmcs = 0;

	if (wol->wolopts & WAKE_PHY)
		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;

	if (wol->wolopts & WAKE_MAGIC)
		jme->reg_pmcs |= PMCS_MFEN;

	return 0;
}

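/*
 * Link settings are read and written through the generic MII helpers;
 * phy_lock serializes the underlying MDIO accesses.
 */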
2591 */ 2592 if (jme->mii_if.force_media && 2593 cmd->base.autoneg != AUTONEG_ENABLE && 2594 (jme->mii_if.full_duplex != cmd->base.duplex)) 2595 fdc = 1; 2596 2597 spin_lock_bh(&jme->phy_lock); 2598 rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd); 2599 spin_unlock_bh(&jme->phy_lock); 2600 2601 if (!rc) { 2602 if (fdc) 2603 jme_reset_link(jme); 2604 jme->old_cmd = *cmd; 2605 set_bit(JME_FLAG_SSET, &jme->flags); 2606 } 2607 2608 return rc; 2609 } 2610 2611 static int 2612 jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 2613 { 2614 int rc; 2615 struct jme_adapter *jme = netdev_priv(netdev); 2616 struct mii_ioctl_data *mii_data = if_mii(rq); 2617 unsigned int duplex_chg; 2618 2619 if (cmd == SIOCSMIIREG) { 2620 u16 val = mii_data->val_in; 2621 if (!(val & (BMCR_RESET|BMCR_ANENABLE)) && 2622 (val & BMCR_SPEED1000)) 2623 return -EINVAL; 2624 } 2625 2626 spin_lock_bh(&jme->phy_lock); 2627 rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg); 2628 spin_unlock_bh(&jme->phy_lock); 2629 2630 if (!rc && (cmd == SIOCSMIIREG)) { 2631 if (duplex_chg) 2632 jme_reset_link(jme); 2633 jme_get_link_ksettings(netdev, &jme->old_cmd); 2634 set_bit(JME_FLAG_SSET, &jme->flags); 2635 } 2636 2637 return rc; 2638 } 2639 2640 static u32 2641 jme_get_link(struct net_device *netdev) 2642 { 2643 struct jme_adapter *jme = netdev_priv(netdev); 2644 return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; 2645 } 2646 2647 static u32 2648 jme_get_msglevel(struct net_device *netdev) 2649 { 2650 struct jme_adapter *jme = netdev_priv(netdev); 2651 return jme->msg_enable; 2652 } 2653 2654 static void 2655 jme_set_msglevel(struct net_device *netdev, u32 value) 2656 { 2657 struct jme_adapter *jme = netdev_priv(netdev); 2658 jme->msg_enable = value; 2659 } 2660 2661 static netdev_features_t 2662 jme_fix_features(struct net_device *netdev, netdev_features_t features) 2663 { 2664 if (netdev->mtu > 1900) 2665 features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK); 2666 return features; 2667 } 2668 2669 static int 2670 jme_set_features(struct net_device *netdev, netdev_features_t features) 2671 { 2672 struct jme_adapter *jme = netdev_priv(netdev); 2673 2674 spin_lock_bh(&jme->rxmcs_lock); 2675 if (features & NETIF_F_RXCSUM) 2676 jme->reg_rxmcs |= RXMCS_CHECKSUM; 2677 else 2678 jme->reg_rxmcs &= ~RXMCS_CHECKSUM; 2679 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); 2680 spin_unlock_bh(&jme->rxmcs_lock); 2681 2682 return 0; 2683 } 2684 2685 #ifdef CONFIG_NET_POLL_CONTROLLER 2686 static void jme_netpoll(struct net_device *dev) 2687 { 2688 unsigned long flags; 2689 2690 local_irq_save(flags); 2691 jme_intr(dev->irq, dev); 2692 local_irq_restore(flags); 2693 } 2694 #endif 2695 2696 static int 2697 jme_nway_reset(struct net_device *netdev) 2698 { 2699 struct jme_adapter *jme = netdev_priv(netdev); 2700 jme_restart_an(jme); 2701 return 0; 2702 } 2703 2704 static u8 2705 jme_smb_read(struct jme_adapter *jme, unsigned int addr) 2706 { 2707 u32 val; 2708 int to; 2709 2710 val = jread32(jme, JME_SMBCSR); 2711 to = JME_SMB_BUSY_TIMEOUT; 2712 while ((val & SMBCSR_BUSY) && --to) { 2713 msleep(1); 2714 val = jread32(jme, JME_SMBCSR); 2715 } 2716 if (!to) { 2717 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); 2718 return 0xFF; 2719 } 2720 2721 jwrite32(jme, JME_SMBINTF, 2722 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | 2723 SMBINTF_HWRWN_READ | 2724 SMBINTF_HWCMD); 2725 2726 val = jread32(jme, JME_SMBINTF); 2727 to = JME_SMB_BUSY_TIMEOUT; 2728 while ((val & SMBINTF_HWCMD) && --to) { 2729 msleep(1); 2730 val = jread32(jme, 
static u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return 0xFF;
	}

	jwrite32(jme, JME_SMBINTF,
		 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		 SMBINTF_HWRWN_READ |
		 SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return 0xFF;
	}

	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}

static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return;
	}

	jwrite32(jme, JME_SMBINTF,
		 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		 SMBINTF_HWRWN_WRITE |
		 SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return;
	}

	mdelay(2);
}

static int
jme_get_eeprom_len(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;
	val = jread32(jme, JME_SMBCSR);
	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
}

static int
jme_get_eeprom(struct net_device *netdev,
	       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	/*
	 * ethtool will check the boundary for us
	 */
	eeprom->magic = JME_EEPROM_MAGIC;
	for (i = 0 ; i < len ; ++i)
		data[i] = jme_smb_read(jme, i + offset);

	return 0;
}

static int
jme_set_eeprom(struct net_device *netdev,
	       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	if (eeprom->magic != JME_EEPROM_MAGIC)
		return -EINVAL;

	/*
	 * ethtool will check the boundary for us
	 */
	for (i = 0 ; i < len ; ++i)
		jme_smb_write(jme, i + offset, data[i]);

	return 0;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = jme_get_drvinfo,
	.get_regs_len = jme_get_regs_len,
	.get_regs = jme_get_regs,
	.get_coalesce = jme_get_coalesce,
	.set_coalesce = jme_set_coalesce,
	.get_pauseparam = jme_get_pauseparam,
	.set_pauseparam = jme_set_pauseparam,
	.get_wol = jme_get_wol,
	.set_wol = jme_set_wol,
	.get_link = jme_get_link,
	.get_msglevel = jme_get_msglevel,
	.set_msglevel = jme_set_msglevel,
	.nway_reset = jme_nway_reset,
	.get_eeprom_len = jme_get_eeprom_len,
	.get_eeprom = jme_get_eeprom,
	.set_eeprom = jme_set_eeprom,
	.get_link_ksettings = jme_get_link_ksettings,
	.set_link_ksettings = jme_set_link_ksettings,
};

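/*
 * Pick the widest usable DMA mask: try 64-bit and then 40-bit masks on
 * the JMC250, falling back to 32-bit otherwise.  Returns 1 when DMA
 * above 4GB can be used, 0 for 32-bit only, or -1 if no mask could be
 * set.
 */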
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 1;

	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		return 1;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;

	return -1;
}

static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

static inline void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
	jme->chip_main_rev = jme->chiprev & 0xF;
	jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
}

static const struct net_device_ops jme_netdev_ops = {
	.ndo_open = jme_open,
	.ndo_stop = jme_close,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = jme_ioctl,
	.ndo_start_xmit = jme_start_xmit,
	.ndo_set_mac_address = jme_set_macaddr,
	.ndo_set_rx_mode = jme_set_multi,
	.ndo_change_mtu = jme_change_mtu,
	.ndo_tx_timeout = jme_tx_timeout,
	.ndo_fix_features = jme_fix_features,
	.ndo_set_features = jme_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = jme_netpoll,
#endif
};

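/*
 * PCI probe: enable the device, map BAR 0, allocate and initialize the
 * net_device, detect the PHY, reset the MAC, load the permanent MAC
 * address from EEPROM and finally register the netdev.
 */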
static int
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		pr_err("Cannot set PCI DMA Mask\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("No PCI resource region found\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pr_err("Cannot obtain PCI resource region\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->netdev_ops = &jme_netdev_ops;
	netdev->ethtool_ops = &jme_ethtool_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->hw_features = NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;
	netdev->features = NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 1280 - 9202 */
	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->jme_rx = netif_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		pr_err("Mapping PCI resource region error\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}

	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	}

	netif_napi_add(netdev, &jme->napi, jme_poll);

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
	INIT_WORK(&jme->linkch_task, jme_link_change_work);
	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = PMCS_MFEN;
	jme->reg_gpreg1 = GPREG1_DEFAULT;

	if (jme->reg_rxmcs & RXMCS_CHECKSUM)
		netdev->features |= NETIF_F_RXCSUM;

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
	jme->mrrs &= PCI_DCSR_MRRS_MASK;
	switch (jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Must check before reset_mac_processor
	 */
	jme_check_hw_ver(jme);
	jme->mii_if.dev = netdev;
	if (jme->fpgaver) {
		jme->mii_if.phy_id = 0;
		for (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			pr_err("Can not find phy_id\n");
			goto err_out_unmap;
		}

		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		jme->mii_if.phy_id = 1;
	}
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;
	else
		jme->mii_if.supports_gmii = false;
	jme->mii_if.phy_id_mask = 0x1F;
	jme->mii_if.reg_num_mask = 0x1F;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme_clear_pm_disable_wol(jme);
	device_init_wakeup(&pdev->dev, true);

	jme_set_phyfifo_5level(jme);
	jme->pcirev = pdev->revision;
	if (!jme->fpgaver)
		jme_phy_init(jme);
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		pr_err("Reload eeprom for reading MAC Address error\n");
		goto err_out_unmap;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		pr_err("Cannot register net device\n");
		goto err_out_unmap;
	}

	netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
		   "JMC250 Gigabit Ethernet" :
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
		   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
		   jme->pcirev, netdev->dev_addr);

	return 0;

err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static void
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void
jme_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_powersave_phy(jme);
	pci_pme_active(pdev, true);
}

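/*
 * System sleep support: suspend quiesces the interface (queue, IRQs,
 * DMA engines and rings) and puts the PHY into power saving; resume
 * restores the PHY and WoL state and schedules a link reset.
 */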
#ifdef CONFIG_PM_SLEEP
static int
jme_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_enable(&jme->rxclean_task);
	tasklet_enable(&jme->rxempty_task);

	jme_powersave_phy(jme);

	return 0;
}

static int
jme_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	jme_clear_pm_disable_wol(jme);
	jme_phy_on(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_link_ksettings(netdev, &jme->old_cmd);
	else
		jme_reset_phy_processor(jme);
	jme_phy_calibration(jme);
	jme_phy_setEA(jme);
	netif_device_attach(netdev);

	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	jme_start_irq(jme);

	return 0;
}

static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
#define JME_PM_OPS (&jme_pm_ops)

#else

#define JME_PM_OPS NULL
#endif

static const struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};

static struct pci_driver jme_driver = {
	.name = DRV_NAME,
	.id_table = jme_pci_tbl,
	.probe = jme_init_one,
	.remove = jme_remove_one,
	.shutdown = jme_shutdown,
	.driver.pm = JME_PM_OPS,
};

static int __init
jme_init_module(void)
{
	pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);