// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int __maybe_unused e1000_suspend(struct device *dev);
static int __maybe_unused e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
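/* Usage note (editorial, illustrative): because copybreak is declared with
 * module_param(..., 0644), it can typically be set at load time, e.g.
 * "modprobe e1000 copybreak=0" to disable the copy, or adjusted later via
 * /sys/module/e1000/parameters/copybreak.
 */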
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];

		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when the interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result in transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect, CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);
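	/* Illustrative example (editorial, hypothetical 9000-byte MTU giving a
	 * 9018-byte max_frame_size): min_tx_space = (9018 + 16 - 4) * 2 = 18060,
	 * aligned up to 18432 bytes and shifted down to 18 KB; min_rx_space
	 * aligns 9018 up to 9216 bytes, i.e. 9 KB. Any shortfall of the current
	 * Tx allocation below 18 KB is then carved out of the Rx share above.
	 */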
	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
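/* Background note (editorial): the EEPROM checksum referenced below is the
 * 16-bit sum of words 0x00 through EEPROM_CHECKSUM_REG, which should add up
 * to EEPROM_SUM on a healthy image; csum_new therefore recomputes what the
 * stored checksum word ought to be (EEPROM_SUM minus the sum of the preceding
 * words) for comparison against the stored value csum_old.
 */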
/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 1;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open = e1000_open,
	.ndo_stop = e1000_close,
	.ndo_start_xmit = e1000_xmit_frame,
	.ndo_set_rx_mode = e1000_set_rx_mode,
	.ndo_set_mac_address = e1000_set_mac,
	.ndo_tx_timeout = e1000_tx_timeout,
	.ndo_change_mtu = e1000_change_mtu,
	.ndo_do_ioctl = e1000_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e1000_netpoll,
#endif
	.ndo_fix_features = e1000_fix_features,
	.ndo_set_features = e1000_set_features,
};
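/* Example (editorial, assuming the standard 1500-byte MTU): the max_frame_size
 * computed in e1000_init_hw_struct() below works out to 1500 + 14 (Ethernet
 * header) + 4 (FCS) = 1518 bytes, i.e. a maximum-size untagged Ethernet frame.
 */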
/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
					  EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) == 0;
	}

	return true;
}
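/* Worked example (editorial, hypothetical addresses): a 4 kB descriptor block
 * starting at 0x0000f800 ends at 0x000107ff, so begin ^ (end - 1) = 0x1ffff;
 * shifting right by 16 leaves a nonzero value and the check above fails,
 * because the block straddles the 64 kB boundary at 0x10000. The same block
 * placed at 0x00010000 gives 0x00000fff and passes.
 */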
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}
		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
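/* Editorial note (my reading of the RCTL buffer-size encoding): BSEX scales
 * the SZ field by 16, which is why the 2048-byte case above clears
 * E1000_RCTL_BSEX while the 4096/8192/16384 cases keep it set.
 */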
1853 **/ 1854 static void e1000_configure_rx(struct e1000_adapter *adapter) 1855 { 1856 u64 rdba; 1857 struct e1000_hw *hw = &adapter->hw; 1858 u32 rdlen, rctl, rxcsum; 1859 1860 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1861 rdlen = adapter->rx_ring[0].count * 1862 sizeof(struct e1000_rx_desc); 1863 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1864 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1865 } else { 1866 rdlen = adapter->rx_ring[0].count * 1867 sizeof(struct e1000_rx_desc); 1868 adapter->clean_rx = e1000_clean_rx_irq; 1869 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1870 } 1871 1872 /* disable receives while setting up the descriptors */ 1873 rctl = er32(RCTL); 1874 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1875 1876 /* set the Receive Delay Timer Register */ 1877 ew32(RDTR, adapter->rx_int_delay); 1878 1879 if (hw->mac_type >= e1000_82540) { 1880 ew32(RADV, adapter->rx_abs_int_delay); 1881 if (adapter->itr_setting != 0) 1882 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1883 } 1884 1885 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1886 * the Base and Length of the Rx Descriptor Ring 1887 */ 1888 switch (adapter->num_rx_queues) { 1889 case 1: 1890 default: 1891 rdba = adapter->rx_ring[0].dma; 1892 ew32(RDLEN, rdlen); 1893 ew32(RDBAH, (rdba >> 32)); 1894 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1895 ew32(RDT, 0); 1896 ew32(RDH, 0); 1897 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? 1898 E1000_RDH : E1000_82542_RDH); 1899 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 1900 E1000_RDT : E1000_82542_RDT); 1901 break; 1902 } 1903 1904 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1905 if (hw->mac_type >= e1000_82543) { 1906 rxcsum = er32(RXCSUM); 1907 if (adapter->rx_csum) 1908 rxcsum |= E1000_RXCSUM_TUOFL; 1909 else 1910 /* don't need to clear IPPCSE as it defaults to 0 */ 1911 rxcsum &= ~E1000_RXCSUM_TUOFL; 1912 ew32(RXCSUM, rxcsum); 1913 } 1914 1915 /* Enable Receives */ 1916 ew32(RCTL, rctl | E1000_RCTL_EN); 1917 } 1918 1919 /** 1920 * e1000_free_tx_resources - Free Tx Resources per Queue 1921 * @adapter: board private structure 1922 * @tx_ring: Tx descriptor ring for a specific queue 1923 * 1924 * Free all transmit software resources 1925 **/ 1926 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1927 struct e1000_tx_ring *tx_ring) 1928 { 1929 struct pci_dev *pdev = adapter->pdev; 1930 1931 e1000_clean_tx_ring(adapter, tx_ring); 1932 1933 vfree(tx_ring->buffer_info); 1934 tx_ring->buffer_info = NULL; 1935 1936 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1937 tx_ring->dma); 1938 1939 tx_ring->desc = NULL; 1940 } 1941 1942 /** 1943 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1944 * @adapter: board private structure 1945 * 1946 * Free all transmit software resources 1947 **/ 1948 void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1949 { 1950 int i; 1951 1952 for (i = 0; i < adapter->num_tx_queues; i++) 1953 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1954 } 1955 1956 static void 1957 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1958 struct e1000_tx_buffer *buffer_info) 1959 { 1960 if (buffer_info->dma) { 1961 if (buffer_info->mapped_as_page) 1962 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1963 buffer_info->length, DMA_TO_DEVICE); 1964 else 1965 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1966 buffer_info->length, 1967 DMA_TO_DEVICE); 1968 buffer_info->dma = 0; 1969 } 1970 if (buffer_info->skb) { 1971 
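/* dev_kfree_skb_any() is used here because this helper runs both from
 * process context (ring cleanup) and from softirq context (Tx
 * completion in NAPI poll).
 */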
dev_kfree_skb_any(buffer_info->skb); 1972 buffer_info->skb = NULL; 1973 } 1974 buffer_info->time_stamp = 0; 1975 /* buffer_info must be completely set up in the transmit path */ 1976 } 1977 1978 /** 1979 * e1000_clean_tx_ring - Free Tx Buffers 1980 * @adapter: board private structure 1981 * @tx_ring: ring to be cleaned 1982 **/ 1983 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1984 struct e1000_tx_ring *tx_ring) 1985 { 1986 struct e1000_hw *hw = &adapter->hw; 1987 struct e1000_tx_buffer *buffer_info; 1988 unsigned long size; 1989 unsigned int i; 1990 1991 /* Free all the Tx ring sk_buffs */ 1992 1993 for (i = 0; i < tx_ring->count; i++) { 1994 buffer_info = &tx_ring->buffer_info[i]; 1995 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1996 } 1997 1998 netdev_reset_queue(adapter->netdev); 1999 size = sizeof(struct e1000_tx_buffer) * tx_ring->count; 2000 memset(tx_ring->buffer_info, 0, size); 2001 2002 /* Zero out the descriptor ring */ 2003 2004 memset(tx_ring->desc, 0, tx_ring->size); 2005 2006 tx_ring->next_to_use = 0; 2007 tx_ring->next_to_clean = 0; 2008 tx_ring->last_tx_tso = false; 2009 2010 writel(0, hw->hw_addr + tx_ring->tdh); 2011 writel(0, hw->hw_addr + tx_ring->tdt); 2012 } 2013 2014 /** 2015 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2016 * @adapter: board private structure 2017 **/ 2018 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2019 { 2020 int i; 2021 2022 for (i = 0; i < adapter->num_tx_queues; i++) 2023 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2024 } 2025 2026 /** 2027 * e1000_free_rx_resources - Free Rx Resources 2028 * @adapter: board private structure 2029 * @rx_ring: ring to clean the resources from 2030 * 2031 * Free all receive software resources 2032 **/ 2033 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2034 struct e1000_rx_ring *rx_ring) 2035 { 2036 struct pci_dev *pdev = adapter->pdev; 2037 2038 e1000_clean_rx_ring(adapter, rx_ring); 2039 2040 vfree(rx_ring->buffer_info); 2041 rx_ring->buffer_info = NULL; 2042 2043 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2044 rx_ring->dma); 2045 2046 rx_ring->desc = NULL; 2047 } 2048 2049 /** 2050 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2051 * @adapter: board private structure 2052 * 2053 * Free all receive software resources 2054 **/ 2055 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2056 { 2057 int i; 2058 2059 for (i = 0; i < adapter->num_rx_queues; i++) 2060 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2061 } 2062 2063 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) 2064 static unsigned int e1000_frag_len(const struct e1000_adapter *a) 2065 { 2066 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + 2067 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2068 } 2069 2070 static void *e1000_alloc_frag(const struct e1000_adapter *a) 2071 { 2072 unsigned int len = e1000_frag_len(a); 2073 u8 *data = netdev_alloc_frag(len); 2074 2075 if (likely(data)) 2076 data += E1000_HEADROOM; 2077 return data; 2078 } 2079 2080 /** 2081 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2082 * @adapter: board private structure 2083 * @rx_ring: ring to free buffers from 2084 **/ 2085 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2086 struct e1000_rx_ring *rx_ring) 2087 { 2088 struct e1000_hw *hw = &adapter->hw; 2089 struct e1000_rx_buffer *buffer_info; 2090 struct pci_dev *pdev = adapter->pdev; 2091 unsigned long size; 2092 unsigned int i; 2093 2094 /* 
Free all the Rx netfrags */ 2095 for (i = 0; i < rx_ring->count; i++) { 2096 buffer_info = &rx_ring->buffer_info[i]; 2097 if (adapter->clean_rx == e1000_clean_rx_irq) { 2098 if (buffer_info->dma) 2099 dma_unmap_single(&pdev->dev, buffer_info->dma, 2100 adapter->rx_buffer_len, 2101 DMA_FROM_DEVICE); 2102 if (buffer_info->rxbuf.data) { 2103 skb_free_frag(buffer_info->rxbuf.data); 2104 buffer_info->rxbuf.data = NULL; 2105 } 2106 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2107 if (buffer_info->dma) 2108 dma_unmap_page(&pdev->dev, buffer_info->dma, 2109 adapter->rx_buffer_len, 2110 DMA_FROM_DEVICE); 2111 if (buffer_info->rxbuf.page) { 2112 put_page(buffer_info->rxbuf.page); 2113 buffer_info->rxbuf.page = NULL; 2114 } 2115 } 2116 2117 buffer_info->dma = 0; 2118 } 2119 2120 /* there also may be some cached data from a chained receive */ 2121 napi_free_frags(&adapter->napi); 2122 rx_ring->rx_skb_top = NULL; 2123 2124 size = sizeof(struct e1000_rx_buffer) * rx_ring->count; 2125 memset(rx_ring->buffer_info, 0, size); 2126 2127 /* Zero out the descriptor ring */ 2128 memset(rx_ring->desc, 0, rx_ring->size); 2129 2130 rx_ring->next_to_clean = 0; 2131 rx_ring->next_to_use = 0; 2132 2133 writel(0, hw->hw_addr + rx_ring->rdh); 2134 writel(0, hw->hw_addr + rx_ring->rdt); 2135 } 2136 2137 /** 2138 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2139 * @adapter: board private structure 2140 **/ 2141 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2142 { 2143 int i; 2144 2145 for (i = 0; i < adapter->num_rx_queues; i++) 2146 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2147 } 2148 2149 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2150 * and memory write and invalidate disabled for certain operations 2151 */ 2152 static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2153 { 2154 struct e1000_hw *hw = &adapter->hw; 2155 struct net_device *netdev = adapter->netdev; 2156 u32 rctl; 2157 2158 e1000_pci_clear_mwi(hw); 2159 2160 rctl = er32(RCTL); 2161 rctl |= E1000_RCTL_RST; 2162 ew32(RCTL, rctl); 2163 E1000_WRITE_FLUSH(); 2164 mdelay(5); 2165 2166 if (netif_running(netdev)) 2167 e1000_clean_all_rx_rings(adapter); 2168 } 2169 2170 static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2171 { 2172 struct e1000_hw *hw = &adapter->hw; 2173 struct net_device *netdev = adapter->netdev; 2174 u32 rctl; 2175 2176 rctl = er32(RCTL); 2177 rctl &= ~E1000_RCTL_RST; 2178 ew32(RCTL, rctl); 2179 E1000_WRITE_FLUSH(); 2180 mdelay(5); 2181 2182 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2183 e1000_pci_set_mwi(hw); 2184 2185 if (netif_running(netdev)) { 2186 /* No need to loop, because 82542 supports only 1 queue */ 2187 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2188 e1000_configure_rx(adapter); 2189 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2190 } 2191 } 2192 2193 /** 2194 * e1000_set_mac - Change the Ethernet Address of the NIC 2195 * @netdev: network interface device structure 2196 * @p: pointer to an address structure 2197 * 2198 * Returns 0 on success, negative on failure 2199 **/ 2200 static int e1000_set_mac(struct net_device *netdev, void *p) 2201 { 2202 struct e1000_adapter *adapter = netdev_priv(netdev); 2203 struct e1000_hw *hw = &adapter->hw; 2204 struct sockaddr *addr = p; 2205 2206 if (!is_valid_ether_addr(addr->sa_data)) 2207 return -EADDRNOTAVAIL; 2208 2209 /* 82542 2.0 needs to be in reset to write receive address registers */ 2210 2211 if (hw->mac_type == e1000_82542_rev2_0) 2212 
e1000_enter_82542_rst(adapter); 2213 2214 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2215 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2216 2217 e1000_rar_set(hw, hw->mac_addr, 0); 2218 2219 if (hw->mac_type == e1000_82542_rev2_0) 2220 e1000_leave_82542_rst(adapter); 2221 2222 return 0; 2223 } 2224 2225 /** 2226 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2227 * @netdev: network interface device structure 2228 * 2229 * The set_rx_mode entry point is called whenever the unicast or multicast 2230 * address lists or the network interface flags are updated. This routine is 2231 * responsible for configuring the hardware for proper unicast, multicast, 2232 * promiscuous mode, and all-multi behavior. 2233 **/ 2234 static void e1000_set_rx_mode(struct net_device *netdev) 2235 { 2236 struct e1000_adapter *adapter = netdev_priv(netdev); 2237 struct e1000_hw *hw = &adapter->hw; 2238 struct netdev_hw_addr *ha; 2239 bool use_uc = false; 2240 u32 rctl; 2241 u32 hash_value; 2242 int i, rar_entries = E1000_RAR_ENTRIES; 2243 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2244 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2245 2246 if (!mcarray) 2247 return; 2248 2249 /* Check for Promiscuous and All Multicast modes */ 2250 2251 rctl = er32(RCTL); 2252 2253 if (netdev->flags & IFF_PROMISC) { 2254 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2255 rctl &= ~E1000_RCTL_VFE; 2256 } else { 2257 if (netdev->flags & IFF_ALLMULTI) 2258 rctl |= E1000_RCTL_MPE; 2259 else 2260 rctl &= ~E1000_RCTL_MPE; 2261 /* Enable VLAN filter if there is a VLAN */ 2262 if (e1000_vlan_used(adapter)) 2263 rctl |= E1000_RCTL_VFE; 2264 } 2265 2266 if (netdev_uc_count(netdev) > rar_entries - 1) { 2267 rctl |= E1000_RCTL_UPE; 2268 } else if (!(netdev->flags & IFF_PROMISC)) { 2269 rctl &= ~E1000_RCTL_UPE; 2270 use_uc = true; 2271 } 2272 2273 ew32(RCTL, rctl); 2274 2275 /* 82542 2.0 needs to be in reset to write receive address registers */ 2276 2277 if (hw->mac_type == e1000_82542_rev2_0) 2278 e1000_enter_82542_rst(adapter); 2279 2280 /* load the first 14 addresses into the exact filters 1-14. Unicast 2281 * addresses take precedence to avoid disabling unicast filtering 2282 * when possible. 
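 * Any addresses beyond the RAR capacity are hashed into the 4096-bit
 * multicast table array instead: bits 11:5 of the hash select one of
 * the 128 32-bit MTA registers and bits 4:0 select the bit within it.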
2283 * 2284 * RAR 0 is used for the station MAC address 2285 * if there are not 14 addresses, go ahead and clear the filters 2286 */ 2287 i = 1; 2288 if (use_uc) 2289 netdev_for_each_uc_addr(ha, netdev) { 2290 if (i == rar_entries) 2291 break; 2292 e1000_rar_set(hw, ha->addr, i++); 2293 } 2294 2295 netdev_for_each_mc_addr(ha, netdev) { 2296 if (i == rar_entries) { 2297 /* load any remaining addresses into the hash table */ 2298 u32 hash_reg, hash_bit, mta; 2299 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2300 hash_reg = (hash_value >> 5) & 0x7F; 2301 hash_bit = hash_value & 0x1F; 2302 mta = (1 << hash_bit); 2303 mcarray[hash_reg] |= mta; 2304 } else { 2305 e1000_rar_set(hw, ha->addr, i++); 2306 } 2307 } 2308 2309 for (; i < rar_entries; i++) { 2310 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2311 E1000_WRITE_FLUSH(); 2312 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2313 E1000_WRITE_FLUSH(); 2314 } 2315 2316 /* write the hash table completely, write from bottom to avoid 2317 * both stupid write combining chipsets, and flushing each write 2318 */ 2319 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2320 /* If we are on an 82544 has an errata where writing odd 2321 * offsets overwrites the previous even offset, but writing 2322 * backwards over the range solves the issue by always 2323 * writing the odd offset first 2324 */ 2325 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2326 } 2327 E1000_WRITE_FLUSH(); 2328 2329 if (hw->mac_type == e1000_82542_rev2_0) 2330 e1000_leave_82542_rst(adapter); 2331 2332 kfree(mcarray); 2333 } 2334 2335 /** 2336 * e1000_update_phy_info_task - get phy info 2337 * @work: work struct contained inside adapter struct 2338 * 2339 * Need to wait a few seconds after link up to get diagnostic information from 2340 * the phy 2341 */ 2342 static void e1000_update_phy_info_task(struct work_struct *work) 2343 { 2344 struct e1000_adapter *adapter = container_of(work, 2345 struct e1000_adapter, 2346 phy_info_task.work); 2347 2348 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2349 } 2350 2351 /** 2352 * e1000_82547_tx_fifo_stall_task - task to complete work 2353 * @work: work struct contained inside adapter struct 2354 **/ 2355 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2356 { 2357 struct e1000_adapter *adapter = container_of(work, 2358 struct e1000_adapter, 2359 fifo_stall_task.work); 2360 struct e1000_hw *hw = &adapter->hw; 2361 struct net_device *netdev = adapter->netdev; 2362 u32 tctl; 2363 2364 if (atomic_read(&adapter->tx_fifo_stall)) { 2365 if ((er32(TDT) == er32(TDH)) && 2366 (er32(TDFT) == er32(TDFH)) && 2367 (er32(TDFTS) == er32(TDFHS))) { 2368 tctl = er32(TCTL); 2369 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2370 ew32(TDFT, adapter->tx_head_addr); 2371 ew32(TDFH, adapter->tx_head_addr); 2372 ew32(TDFTS, adapter->tx_head_addr); 2373 ew32(TDFHS, adapter->tx_head_addr); 2374 ew32(TCTL, tctl); 2375 E1000_WRITE_FLUSH(); 2376 2377 adapter->tx_fifo_head = 0; 2378 atomic_set(&adapter->tx_fifo_stall, 0); 2379 netif_wake_queue(netdev); 2380 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2381 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2382 } 2383 } 2384 } 2385 2386 bool e1000_has_link(struct e1000_adapter *adapter) 2387 { 2388 struct e1000_hw *hw = &adapter->hw; 2389 bool link_active = false; 2390 2391 /* get_link_status is set on LSC (link status) interrupt or rx 2392 * sequence error interrupt (except on intel ce4100). 
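 * On ce4100, get_link_status is forced below so the PHY is
 * re-checked on every call.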
2393 * get_link_status will stay false until the 2394 * e1000_check_for_link establishes link for copper adapters 2395 * ONLY 2396 */ 2397 switch (hw->media_type) { 2398 case e1000_media_type_copper: 2399 if (hw->mac_type == e1000_ce4100) 2400 hw->get_link_status = 1; 2401 if (hw->get_link_status) { 2402 e1000_check_for_link(hw); 2403 link_active = !hw->get_link_status; 2404 } else { 2405 link_active = true; 2406 } 2407 break; 2408 case e1000_media_type_fiber: 2409 e1000_check_for_link(hw); 2410 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2411 break; 2412 case e1000_media_type_internal_serdes: 2413 e1000_check_for_link(hw); 2414 link_active = hw->serdes_has_link; 2415 break; 2416 default: 2417 break; 2418 } 2419 2420 return link_active; 2421 } 2422 2423 /** 2424 * e1000_watchdog - work function 2425 * @work: work struct contained inside adapter struct 2426 **/ 2427 static void e1000_watchdog(struct work_struct *work) 2428 { 2429 struct e1000_adapter *adapter = container_of(work, 2430 struct e1000_adapter, 2431 watchdog_task.work); 2432 struct e1000_hw *hw = &adapter->hw; 2433 struct net_device *netdev = adapter->netdev; 2434 struct e1000_tx_ring *txdr = adapter->tx_ring; 2435 u32 link, tctl; 2436 2437 link = e1000_has_link(adapter); 2438 if ((netif_carrier_ok(netdev)) && link) 2439 goto link_up; 2440 2441 if (link) { 2442 if (!netif_carrier_ok(netdev)) { 2443 u32 ctrl; 2444 /* update snapshot of PHY registers on LSC */ 2445 e1000_get_speed_and_duplex(hw, 2446 &adapter->link_speed, 2447 &adapter->link_duplex); 2448 2449 ctrl = er32(CTRL); 2450 pr_info("%s NIC Link is Up %d Mbps %s, " 2451 "Flow Control: %s\n", 2452 netdev->name, 2453 adapter->link_speed, 2454 adapter->link_duplex == FULL_DUPLEX ? 2455 "Full Duplex" : "Half Duplex", 2456 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2457 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2458 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2459 E1000_CTRL_TFCE) ? "TX" : "None"))); 2460 2461 /* adjust timeout factor according to speed/duplex */ 2462 adapter->tx_timeout_factor = 1; 2463 switch (adapter->link_speed) { 2464 case SPEED_10: 2465 adapter->tx_timeout_factor = 16; 2466 break; 2467 case SPEED_100: 2468 /* maybe add some timeout factor ? 
*/ 2469 break; 2470 } 2471 2472 /* enable transmits in the hardware */ 2473 tctl = er32(TCTL); 2474 tctl |= E1000_TCTL_EN; 2475 ew32(TCTL, tctl); 2476 2477 netif_carrier_on(netdev); 2478 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2479 schedule_delayed_work(&adapter->phy_info_task, 2480 2 * HZ); 2481 adapter->smartspeed = 0; 2482 } 2483 } else { 2484 if (netif_carrier_ok(netdev)) { 2485 adapter->link_speed = 0; 2486 adapter->link_duplex = 0; 2487 pr_info("%s NIC Link is Down\n", 2488 netdev->name); 2489 netif_carrier_off(netdev); 2490 2491 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2492 schedule_delayed_work(&adapter->phy_info_task, 2493 2 * HZ); 2494 } 2495 2496 e1000_smartspeed(adapter); 2497 } 2498 2499 link_up: 2500 e1000_update_stats(adapter); 2501 2502 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2503 adapter->tpt_old = adapter->stats.tpt; 2504 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2505 adapter->colc_old = adapter->stats.colc; 2506 2507 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2508 adapter->gorcl_old = adapter->stats.gorcl; 2509 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2510 adapter->gotcl_old = adapter->stats.gotcl; 2511 2512 e1000_update_adaptive(hw); 2513 2514 if (!netif_carrier_ok(netdev)) { 2515 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2516 /* We've lost link, so the controller stops DMA, 2517 * but we've got queued Tx work that's never going 2518 * to get done, so reset controller to flush Tx. 2519 * (Do the reset outside of interrupt context). 2520 */ 2521 adapter->tx_timeout_count++; 2522 schedule_work(&adapter->reset_task); 2523 /* exit immediately since reset is imminent */ 2524 return; 2525 } 2526 } 2527 2528 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2529 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2530 /* Symmetric Tx/Rx gets a reduced ITR=2000; 2531 * Total asymmetrical Tx or Rx gets ITR=8000; 2532 * everyone else is between 2000-8000. 2533 */ 2534 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2535 u32 dif = (adapter->gotcl > adapter->gorcl ? 2536 adapter->gotcl - adapter->gorcl : 2537 adapter->gorcl - adapter->gotcl) / 10000; 2538 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2539 2540 ew32(ITR, 1000000000 / (itr * 256)); 2541 } 2542 2543 /* Cause software interrupt to ensure rx ring is cleaned */ 2544 ew32(ICS, E1000_ICS_RXDMT0); 2545 2546 /* Force detection of hung controller every watchdog period */ 2547 adapter->detect_tx_hung = true; 2548 2549 /* Reschedule the task */ 2550 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2551 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2552 } 2553 2554 enum latency_range { 2555 lowest_latency = 0, 2556 low_latency = 1, 2557 bulk_latency = 2, 2558 latency_invalid = 255 2559 }; 2560 2561 /** 2562 * e1000_update_itr - update the dynamic ITR value based on statistics 2563 * @adapter: pointer to adapter 2564 * @itr_setting: current adapter->itr 2565 * @packets: the number of packets during this measurement interval 2566 * @bytes: the number of bytes during this measurement interval 2567 * 2568 * Stores a new ITR value based on packets and byte 2569 * counts during the last interrupt. The advantage of per interrupt 2570 * computation is faster updates and more accurate ITR for the current 2571 * traffic pattern. 
Constants in this function were computed 2572 * based on theoretical maximum wire speed and thresholds were set based 2573 * on testing data as well as attempting to minimize response time 2574 * while increasing bulk throughput. 2575 * this functionality is controlled by the InterruptThrottleRate module 2576 * parameter (see e1000_param.c) 2577 **/ 2578 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2579 u16 itr_setting, int packets, int bytes) 2580 { 2581 unsigned int retval = itr_setting; 2582 struct e1000_hw *hw = &adapter->hw; 2583 2584 if (unlikely(hw->mac_type < e1000_82540)) 2585 goto update_itr_done; 2586 2587 if (packets == 0) 2588 goto update_itr_done; 2589 2590 switch (itr_setting) { 2591 case lowest_latency: 2592 /* jumbo frames get bulk treatment*/ 2593 if (bytes/packets > 8000) 2594 retval = bulk_latency; 2595 else if ((packets < 5) && (bytes > 512)) 2596 retval = low_latency; 2597 break; 2598 case low_latency: /* 50 usec aka 20000 ints/s */ 2599 if (bytes > 10000) { 2600 /* jumbo frames need bulk latency setting */ 2601 if (bytes/packets > 8000) 2602 retval = bulk_latency; 2603 else if ((packets < 10) || ((bytes/packets) > 1200)) 2604 retval = bulk_latency; 2605 else if ((packets > 35)) 2606 retval = lowest_latency; 2607 } else if (bytes/packets > 2000) 2608 retval = bulk_latency; 2609 else if (packets <= 2 && bytes < 512) 2610 retval = lowest_latency; 2611 break; 2612 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2613 if (bytes > 25000) { 2614 if (packets > 35) 2615 retval = low_latency; 2616 } else if (bytes < 6000) { 2617 retval = low_latency; 2618 } 2619 break; 2620 } 2621 2622 update_itr_done: 2623 return retval; 2624 } 2625 2626 static void e1000_set_itr(struct e1000_adapter *adapter) 2627 { 2628 struct e1000_hw *hw = &adapter->hw; 2629 u16 current_itr; 2630 u32 new_itr = adapter->itr; 2631 2632 if (unlikely(hw->mac_type < e1000_82540)) 2633 return; 2634 2635 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2636 if (unlikely(adapter->link_speed != SPEED_1000)) { 2637 current_itr = 0; 2638 new_itr = 4000; 2639 goto set_itr_now; 2640 } 2641 2642 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, 2643 adapter->total_tx_packets, 2644 adapter->total_tx_bytes); 2645 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2646 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2647 adapter->tx_itr = low_latency; 2648 2649 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, 2650 adapter->total_rx_packets, 2651 adapter->total_rx_bytes); 2652 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2653 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2654 adapter->rx_itr = low_latency; 2655 2656 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2657 2658 switch (current_itr) { 2659 /* counts and packets in update_itr are dependent on these numbers */ 2660 case lowest_latency: 2661 new_itr = 70000; 2662 break; 2663 case low_latency: 2664 new_itr = 20000; /* aka hwitr = ~200 */ 2665 break; 2666 case bulk_latency: 2667 new_itr = 4000; 2668 break; 2669 default: 2670 break; 2671 } 2672 2673 set_itr_now: 2674 if (new_itr != adapter->itr) { 2675 /* this attempts to bias the interrupt rate towards Bulk 2676 * by adding intermediate steps when interrupt rate is 2677 * increasing 2678 */ 2679 new_itr = new_itr > adapter->itr ? 
2680 min(adapter->itr + (new_itr >> 2), new_itr) : 2681 new_itr; 2682 adapter->itr = new_itr; 2683 ew32(ITR, 1000000000 / (new_itr * 256)); 2684 } 2685 } 2686 2687 #define E1000_TX_FLAGS_CSUM 0x00000001 2688 #define E1000_TX_FLAGS_VLAN 0x00000002 2689 #define E1000_TX_FLAGS_TSO 0x00000004 2690 #define E1000_TX_FLAGS_IPV4 0x00000008 2691 #define E1000_TX_FLAGS_NO_FCS 0x00000010 2692 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2693 #define E1000_TX_FLAGS_VLAN_SHIFT 16 2694 2695 static int e1000_tso(struct e1000_adapter *adapter, 2696 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2697 __be16 protocol) 2698 { 2699 struct e1000_context_desc *context_desc; 2700 struct e1000_tx_buffer *buffer_info; 2701 unsigned int i; 2702 u32 cmd_length = 0; 2703 u16 ipcse = 0, tucse, mss; 2704 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2705 2706 if (skb_is_gso(skb)) { 2707 int err; 2708 2709 err = skb_cow_head(skb, 0); 2710 if (err < 0) 2711 return err; 2712 2713 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2714 mss = skb_shinfo(skb)->gso_size; 2715 if (protocol == htons(ETH_P_IP)) { 2716 struct iphdr *iph = ip_hdr(skb); 2717 iph->tot_len = 0; 2718 iph->check = 0; 2719 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2720 iph->daddr, 0, 2721 IPPROTO_TCP, 2722 0); 2723 cmd_length = E1000_TXD_CMD_IP; 2724 ipcse = skb_transport_offset(skb) - 1; 2725 } else if (skb_is_gso_v6(skb)) { 2726 tcp_v6_gso_csum_prep(skb); 2727 ipcse = 0; 2728 } 2729 ipcss = skb_network_offset(skb); 2730 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2731 tucss = skb_transport_offset(skb); 2732 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2733 tucse = 0; 2734 2735 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2736 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2737 2738 i = tx_ring->next_to_use; 2739 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2740 buffer_info = &tx_ring->buffer_info[i]; 2741 2742 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2743 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2744 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2745 context_desc->upper_setup.tcp_fields.tucss = tucss; 2746 context_desc->upper_setup.tcp_fields.tucso = tucso; 2747 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2748 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2749 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2750 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2751 2752 buffer_info->time_stamp = jiffies; 2753 buffer_info->next_to_watch = i; 2754 2755 if (++i == tx_ring->count) 2756 i = 0; 2757 2758 tx_ring->next_to_use = i; 2759 2760 return true; 2761 } 2762 return false; 2763 } 2764 2765 static bool e1000_tx_csum(struct e1000_adapter *adapter, 2766 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2767 __be16 protocol) 2768 { 2769 struct e1000_context_desc *context_desc; 2770 struct e1000_tx_buffer *buffer_info; 2771 unsigned int i; 2772 u8 css; 2773 u32 cmd_len = E1000_TXD_CMD_DEXT; 2774 2775 if (skb->ip_summed != CHECKSUM_PARTIAL) 2776 return false; 2777 2778 switch (protocol) { 2779 case cpu_to_be16(ETH_P_IP): 2780 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2781 cmd_len |= E1000_TXD_CMD_TCP; 2782 break; 2783 case cpu_to_be16(ETH_P_IPV6): 2784 /* XXX not handling all IPV6 headers */ 2785 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2786 cmd_len |= E1000_TXD_CMD_TCP; 2787 break; 2788 default: 2789 if (unlikely(net_ratelimit())) 2790 e_warn(drv, "checksum_partial proto=%x!\n", 2791 skb->protocol); 2792 break; 2793 
} 2794 2795 css = skb_checksum_start_offset(skb); 2796 2797 i = tx_ring->next_to_use; 2798 buffer_info = &tx_ring->buffer_info[i]; 2799 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2800 2801 context_desc->lower_setup.ip_config = 0; 2802 context_desc->upper_setup.tcp_fields.tucss = css; 2803 context_desc->upper_setup.tcp_fields.tucso = 2804 css + skb->csum_offset; 2805 context_desc->upper_setup.tcp_fields.tucse = 0; 2806 context_desc->tcp_seg_setup.data = 0; 2807 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2808 2809 buffer_info->time_stamp = jiffies; 2810 buffer_info->next_to_watch = i; 2811 2812 if (unlikely(++i == tx_ring->count)) 2813 i = 0; 2814 2815 tx_ring->next_to_use = i; 2816 2817 return true; 2818 } 2819 2820 #define E1000_MAX_TXD_PWR 12 2821 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2822 2823 static int e1000_tx_map(struct e1000_adapter *adapter, 2824 struct e1000_tx_ring *tx_ring, 2825 struct sk_buff *skb, unsigned int first, 2826 unsigned int max_per_txd, unsigned int nr_frags, 2827 unsigned int mss) 2828 { 2829 struct e1000_hw *hw = &adapter->hw; 2830 struct pci_dev *pdev = adapter->pdev; 2831 struct e1000_tx_buffer *buffer_info; 2832 unsigned int len = skb_headlen(skb); 2833 unsigned int offset = 0, size, count = 0, i; 2834 unsigned int f, bytecount, segs; 2835 2836 i = tx_ring->next_to_use; 2837 2838 while (len) { 2839 buffer_info = &tx_ring->buffer_info[i]; 2840 size = min(len, max_per_txd); 2841 /* Workaround for Controller erratum -- 2842 * descriptor for non-tso packet in a linear SKB that follows a 2843 * tso gets written back prematurely before the data is fully 2844 * DMA'd to the controller 2845 */ 2846 if (!skb->data_len && tx_ring->last_tx_tso && 2847 !skb_is_gso(skb)) { 2848 tx_ring->last_tx_tso = false; 2849 size -= 4; 2850 } 2851 2852 /* Workaround for premature desc write-backs 2853 * in TSO mode. Append 4-byte sentinel desc 2854 */ 2855 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2856 size -= 4; 2857 /* work-around for errata 10 and it applies 2858 * to all controllers in PCI-X mode 2859 * The fix is to make sure that the first descriptor of a 2860 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2861 */ 2862 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2863 (size > 2015) && count == 0)) 2864 size = 2015; 2865 2866 /* Workaround for potential 82544 hang in PCI-X. Avoid 2867 * terminating buffers within evenly-aligned dwords. 
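 * The check below trims 4 bytes whenever the buffer's last byte would
 * land at an address with bit 2 clear; the trimmed bytes are picked up
 * by the next descriptor on the following loop iteration.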
2868 */ 2869 if (unlikely(adapter->pcix_82544 && 2870 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2871 size > 4)) 2872 size -= 4; 2873 2874 buffer_info->length = size; 2875 /* set time_stamp *before* dma to help avoid a possible race */ 2876 buffer_info->time_stamp = jiffies; 2877 buffer_info->mapped_as_page = false; 2878 buffer_info->dma = dma_map_single(&pdev->dev, 2879 skb->data + offset, 2880 size, DMA_TO_DEVICE); 2881 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2882 goto dma_error; 2883 buffer_info->next_to_watch = i; 2884 2885 len -= size; 2886 offset += size; 2887 count++; 2888 if (len) { 2889 i++; 2890 if (unlikely(i == tx_ring->count)) 2891 i = 0; 2892 } 2893 } 2894 2895 for (f = 0; f < nr_frags; f++) { 2896 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 2897 2898 len = skb_frag_size(frag); 2899 offset = 0; 2900 2901 while (len) { 2902 unsigned long bufend; 2903 i++; 2904 if (unlikely(i == tx_ring->count)) 2905 i = 0; 2906 2907 buffer_info = &tx_ring->buffer_info[i]; 2908 size = min(len, max_per_txd); 2909 /* Workaround for premature desc write-backs 2910 * in TSO mode. Append 4-byte sentinel desc 2911 */ 2912 if (unlikely(mss && f == (nr_frags-1) && 2913 size == len && size > 8)) 2914 size -= 4; 2915 /* Workaround for potential 82544 hang in PCI-X. 2916 * Avoid terminating buffers within evenly-aligned 2917 * dwords. 2918 */ 2919 bufend = (unsigned long) 2920 page_to_phys(skb_frag_page(frag)); 2921 bufend += offset + size - 1; 2922 if (unlikely(adapter->pcix_82544 && 2923 !(bufend & 4) && 2924 size > 4)) 2925 size -= 4; 2926 2927 buffer_info->length = size; 2928 buffer_info->time_stamp = jiffies; 2929 buffer_info->mapped_as_page = true; 2930 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2931 offset, size, DMA_TO_DEVICE); 2932 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2933 goto dma_error; 2934 buffer_info->next_to_watch = i; 2935 2936 len -= size; 2937 offset += size; 2938 count++; 2939 } 2940 } 2941 2942 segs = skb_shinfo(skb)->gso_segs ?: 1; 2943 /* multiply data chunks by size of headers */ 2944 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2945 2946 tx_ring->buffer_info[i].skb = skb; 2947 tx_ring->buffer_info[i].segs = segs; 2948 tx_ring->buffer_info[i].bytecount = bytecount; 2949 tx_ring->buffer_info[first].next_to_watch = i; 2950 2951 return count; 2952 2953 dma_error: 2954 dev_err(&pdev->dev, "TX DMA map failed\n"); 2955 buffer_info->dma = 0; 2956 if (count) 2957 count--; 2958 2959 while (count--) { 2960 if (i == 0) 2961 i += tx_ring->count; 2962 i--; 2963 buffer_info = &tx_ring->buffer_info[i]; 2964 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2965 } 2966 2967 return 0; 2968 } 2969 2970 static void e1000_tx_queue(struct e1000_adapter *adapter, 2971 struct e1000_tx_ring *tx_ring, int tx_flags, 2972 int count) 2973 { 2974 struct e1000_tx_desc *tx_desc = NULL; 2975 struct e1000_tx_buffer *buffer_info; 2976 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2977 unsigned int i; 2978 2979 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2980 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2981 E1000_TXD_CMD_TSE; 2982 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2983 2984 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2985 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2986 } 2987 2988 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2989 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2990 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2991 } 2992 2993 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2994 txd_lower |= 
E1000_TXD_CMD_VLE; 2995 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2996 } 2997 2998 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 2999 txd_lower &= ~(E1000_TXD_CMD_IFCS); 3000 3001 i = tx_ring->next_to_use; 3002 3003 while (count--) { 3004 buffer_info = &tx_ring->buffer_info[i]; 3005 tx_desc = E1000_TX_DESC(*tx_ring, i); 3006 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3007 tx_desc->lower.data = 3008 cpu_to_le32(txd_lower | buffer_info->length); 3009 tx_desc->upper.data = cpu_to_le32(txd_upper); 3010 if (unlikely(++i == tx_ring->count)) 3011 i = 0; 3012 } 3013 3014 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3015 3016 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 3017 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3018 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 3019 3020 /* Force memory writes to complete before letting h/w 3021 * know there are new descriptors to fetch. (Only 3022 * applicable for weak-ordered memory model archs, 3023 * such as IA-64). 3024 */ 3025 dma_wmb(); 3026 3027 tx_ring->next_to_use = i; 3028 } 3029 3030 /* 82547 workaround to avoid controller hang in half-duplex environment. 3031 * The workaround is to avoid queuing a large packet that would span 3032 * the internal Tx FIFO ring boundary by notifying the stack to resend 3033 * the packet at a later time. This gives the Tx FIFO an opportunity to 3034 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3035 * to the beginning of the Tx FIFO. 3036 */ 3037 3038 #define E1000_FIFO_HDR 0x10 3039 #define E1000_82547_PAD_LEN 0x3E0 3040 3041 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3042 struct sk_buff *skb) 3043 { 3044 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3045 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3046 3047 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3048 3049 if (adapter->link_duplex != HALF_DUPLEX) 3050 goto no_fifo_stall_required; 3051 3052 if (atomic_read(&adapter->tx_fifo_stall)) 3053 return 1; 3054 3055 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3056 atomic_set(&adapter->tx_fifo_stall, 1); 3057 return 1; 3058 } 3059 3060 no_fifo_stall_required: 3061 adapter->tx_fifo_head += skb_fifo_len; 3062 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3063 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3064 return 0; 3065 } 3066 3067 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3068 { 3069 struct e1000_adapter *adapter = netdev_priv(netdev); 3070 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3071 3072 netif_stop_queue(netdev); 3073 /* Herbert's original patch had: 3074 * smp_mb__after_netif_stop_queue(); 3075 * but since that doesn't exist yet, just open code it. 3076 */ 3077 smp_mb(); 3078 3079 /* We need to check again in a case another CPU has just 3080 * made room available. 3081 */ 3082 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3083 return -EBUSY; 3084 3085 /* A reprieve! 
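Room opened up between the caller's check and the re-check
 * above (the completion path freed descriptors), so restart the
 * queue instead of returning -EBUSY.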
*/ 3086 netif_start_queue(netdev); 3087 ++adapter->restart_queue; 3088 return 0; 3089 } 3090 3091 static int e1000_maybe_stop_tx(struct net_device *netdev, 3092 struct e1000_tx_ring *tx_ring, int size) 3093 { 3094 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3095 return 0; 3096 return __e1000_maybe_stop_tx(netdev, size); 3097 } 3098 3099 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) 3100 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3101 struct net_device *netdev) 3102 { 3103 struct e1000_adapter *adapter = netdev_priv(netdev); 3104 struct e1000_hw *hw = &adapter->hw; 3105 struct e1000_tx_ring *tx_ring; 3106 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3107 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3108 unsigned int tx_flags = 0; 3109 unsigned int len = skb_headlen(skb); 3110 unsigned int nr_frags; 3111 unsigned int mss; 3112 int count = 0; 3113 int tso; 3114 unsigned int f; 3115 __be16 protocol = vlan_get_protocol(skb); 3116 3117 /* This goes back to the question of how to logically map a Tx queue 3118 * to a flow. Right now, performance is impacted slightly negatively 3119 * if using multiple Tx queues. If the stack breaks away from a 3120 * single qdisc implementation, we can look at this again. 3121 */ 3122 tx_ring = adapter->tx_ring; 3123 3124 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, 3125 * packets may get corrupted during padding by HW. 3126 * To WA this issue, pad all small packets manually. 3127 */ 3128 if (eth_skb_pad(skb)) 3129 return NETDEV_TX_OK; 3130 3131 mss = skb_shinfo(skb)->gso_size; 3132 /* The controller does a simple calculation to 3133 * make sure there is enough room in the FIFO before 3134 * initiating the DMA for each buffer. The calc is: 3135 * 4 = ceil(buffer len/mss). To make sure we don't 3136 * overrun the FIFO, adjust the max buffer len if mss 3137 * drops. 
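 * For example, an mss of 536 caps each buffer below at
 * min(536 << 2, 4096) = 2144 bytes, while a typical 1448-byte mss
 * leaves the full E1000_MAX_DATA_PER_TXD limit of 4096 in place.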
3138 */ 3139 if (mss) { 3140 u8 hdr_len; 3141 max_per_txd = min(mss << 2, max_per_txd); 3142 max_txd_pwr = fls(max_per_txd) - 1; 3143 3144 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3145 if (skb->data_len && hdr_len == len) { 3146 switch (hw->mac_type) { 3147 case e1000_82544: { 3148 unsigned int pull_size; 3149 3150 /* Make sure we have room to chop off 4 bytes, 3151 * and that the end alignment will work out to 3152 * this hardware's requirements 3153 * NOTE: this is a TSO only workaround 3154 * if end byte alignment not correct move us 3155 * into the next dword 3156 */ 3157 if ((unsigned long)(skb_tail_pointer(skb) - 1) 3158 & 4) 3159 break; 3160 /* fall through */ 3161 pull_size = min((unsigned int)4, skb->data_len); 3162 if (!__pskb_pull_tail(skb, pull_size)) { 3163 e_err(drv, "__pskb_pull_tail " 3164 "failed.\n"); 3165 dev_kfree_skb_any(skb); 3166 return NETDEV_TX_OK; 3167 } 3168 len = skb_headlen(skb); 3169 break; 3170 } 3171 default: 3172 /* do nothing */ 3173 break; 3174 } 3175 } 3176 } 3177 3178 /* reserve a descriptor for the offload context */ 3179 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3180 count++; 3181 count++; 3182 3183 /* Controller Erratum workaround */ 3184 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3185 count++; 3186 3187 count += TXD_USE_COUNT(len, max_txd_pwr); 3188 3189 if (adapter->pcix_82544) 3190 count++; 3191 3192 /* work-around for errata 10 and it applies to all controllers 3193 * in PCI-X mode, so add one more descriptor to the count 3194 */ 3195 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3196 (len > 2015))) 3197 count++; 3198 3199 nr_frags = skb_shinfo(skb)->nr_frags; 3200 for (f = 0; f < nr_frags; f++) 3201 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3202 max_txd_pwr); 3203 if (adapter->pcix_82544) 3204 count += nr_frags; 3205 3206 /* need: count + 2 desc gap to keep tail from touching 3207 * head, otherwise try next time 3208 */ 3209 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3210 return NETDEV_TX_BUSY; 3211 3212 if (unlikely((hw->mac_type == e1000_82547) && 3213 (e1000_82547_fifo_workaround(adapter, skb)))) { 3214 netif_stop_queue(netdev); 3215 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3216 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3217 return NETDEV_TX_BUSY; 3218 } 3219 3220 if (skb_vlan_tag_present(skb)) { 3221 tx_flags |= E1000_TX_FLAGS_VLAN; 3222 tx_flags |= (skb_vlan_tag_get(skb) << 3223 E1000_TX_FLAGS_VLAN_SHIFT); 3224 } 3225 3226 first = tx_ring->next_to_use; 3227 3228 tso = e1000_tso(adapter, tx_ring, skb, protocol); 3229 if (tso < 0) { 3230 dev_kfree_skb_any(skb); 3231 return NETDEV_TX_OK; 3232 } 3233 3234 if (likely(tso)) { 3235 if (likely(hw->mac_type != e1000_82544)) 3236 tx_ring->last_tx_tso = true; 3237 tx_flags |= E1000_TX_FLAGS_TSO; 3238 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) 3239 tx_flags |= E1000_TX_FLAGS_CSUM; 3240 3241 if (protocol == htons(ETH_P_IP)) 3242 tx_flags |= E1000_TX_FLAGS_IPV4; 3243 3244 if (unlikely(skb->no_fcs)) 3245 tx_flags |= E1000_TX_FLAGS_NO_FCS; 3246 3247 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3248 nr_frags, mss); 3249 3250 if (count) { 3251 /* The descriptors needed is higher than other Intel drivers 3252 * due to a number of workarounds. 
The breakdown is below: 3253 * Data descriptors: MAX_SKB_FRAGS + 1 3254 * Context Descriptor: 1 3255 * Keep head from touching tail: 2 3256 * Workarounds: 3 3257 */ 3258 int desc_needed = MAX_SKB_FRAGS + 7; 3259 3260 netdev_sent_queue(netdev, skb->len); 3261 skb_tx_timestamp(skb); 3262 3263 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3264 3265 /* 82544 potentially requires twice as many data descriptors 3266 * in order to guarantee buffers don't end on evenly-aligned 3267 * dwords 3268 */ 3269 if (adapter->pcix_82544) 3270 desc_needed += MAX_SKB_FRAGS + 1; 3271 3272 /* Make sure there is space in the ring for the next send. */ 3273 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); 3274 3275 if (!netdev_xmit_more() || 3276 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3277 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); 3278 } 3279 } else { 3280 dev_kfree_skb_any(skb); 3281 tx_ring->buffer_info[first].time_stamp = 0; 3282 tx_ring->next_to_use = first; 3283 } 3284 3285 return NETDEV_TX_OK; 3286 } 3287 3288 #define NUM_REGS 38 /* 1 based count */ 3289 static void e1000_regdump(struct e1000_adapter *adapter) 3290 { 3291 struct e1000_hw *hw = &adapter->hw; 3292 u32 regs[NUM_REGS]; 3293 u32 *regs_buff = regs; 3294 int i = 0; 3295 3296 static const char * const reg_name[] = { 3297 "CTRL", "STATUS", 3298 "RCTL", "RDLEN", "RDH", "RDT", "RDTR", 3299 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", 3300 "TIDV", "TXDCTL", "TADV", "TARC0", 3301 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", 3302 "TXDCTL1", "TARC1", 3303 "CTRL_EXT", "ERT", "RDBAL", "RDBAH", 3304 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", 3305 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" 3306 }; 3307 3308 regs_buff[0] = er32(CTRL); 3309 regs_buff[1] = er32(STATUS); 3310 3311 regs_buff[2] = er32(RCTL); 3312 regs_buff[3] = er32(RDLEN); 3313 regs_buff[4] = er32(RDH); 3314 regs_buff[5] = er32(RDT); 3315 regs_buff[6] = er32(RDTR); 3316 3317 regs_buff[7] = er32(TCTL); 3318 regs_buff[8] = er32(TDBAL); 3319 regs_buff[9] = er32(TDBAH); 3320 regs_buff[10] = er32(TDLEN); 3321 regs_buff[11] = er32(TDH); 3322 regs_buff[12] = er32(TDT); 3323 regs_buff[13] = er32(TIDV); 3324 regs_buff[14] = er32(TXDCTL); 3325 regs_buff[15] = er32(TADV); 3326 regs_buff[16] = er32(TARC0); 3327 3328 regs_buff[17] = er32(TDBAL1); 3329 regs_buff[18] = er32(TDBAH1); 3330 regs_buff[19] = er32(TDLEN1); 3331 regs_buff[20] = er32(TDH1); 3332 regs_buff[21] = er32(TDT1); 3333 regs_buff[22] = er32(TXDCTL1); 3334 regs_buff[23] = er32(TARC1); 3335 regs_buff[24] = er32(CTRL_EXT); 3336 regs_buff[25] = er32(ERT); 3337 regs_buff[26] = er32(RDBAL0); 3338 regs_buff[27] = er32(RDBAH0); 3339 regs_buff[28] = er32(TDFH); 3340 regs_buff[29] = er32(TDFT); 3341 regs_buff[30] = er32(TDFHS); 3342 regs_buff[31] = er32(TDFTS); 3343 regs_buff[32] = er32(TDFPC); 3344 regs_buff[33] = er32(RDFH); 3345 regs_buff[34] = er32(RDFT); 3346 regs_buff[35] = er32(RDFHS); 3347 regs_buff[36] = er32(RDFTS); 3348 regs_buff[37] = er32(RDFPC); 3349 3350 pr_info("Register dump\n"); 3351 for (i = 0; i < NUM_REGS; i++) 3352 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); 3353 } 3354 3355 /* 3356 * e1000_dump: Print registers, tx ring and rx ring 3357 */ 3358 static void e1000_dump(struct e1000_adapter *adapter) 3359 { 3360 /* this code doesn't handle multiple rings */ 3361 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3362 struct e1000_rx_ring *rx_ring = adapter->rx_ring; 3363 int i; 3364 3365 if (!netif_msg_hw(adapter)) 3366 return; 3367 3368 /* Print Registers */ 3369 
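/* The register and ring dumps below are gated by the hw, tx_done and
 * rx_status bits of adapter->msg_enable, which can be changed at
 * runtime (e.g. via ethtool's msglvl setting).
 */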
e1000_regdump(adapter); 3370 3371 /* transmit dump */ 3372 pr_info("TX Desc ring0 dump\n"); 3373 3374 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 3375 * 3376 * Legacy Transmit Descriptor 3377 * +--------------------------------------------------------------+ 3378 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 3379 * +--------------------------------------------------------------+ 3380 * 8 | Special | CSS | Status | CMD | CSO | Length | 3381 * +--------------------------------------------------------------+ 3382 * 63 48 47 36 35 32 31 24 23 16 15 0 3383 * 3384 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 3385 * 63 48 47 40 39 32 31 16 15 8 7 0 3386 * +----------------------------------------------------------------+ 3387 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 3388 * +----------------------------------------------------------------+ 3389 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 3390 * +----------------------------------------------------------------+ 3391 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3392 * 3393 * Extended Data Descriptor (DTYP=0x1) 3394 * +----------------------------------------------------------------+ 3395 * 0 | Buffer Address [63:0] | 3396 * +----------------------------------------------------------------+ 3397 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 3398 * +----------------------------------------------------------------+ 3399 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3400 */ 3401 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3402 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3403 3404 if (!netif_msg_tx_done(adapter)) 3405 goto rx_ring_summary; 3406 3407 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3408 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3409 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; 3410 struct my_u { __le64 a; __le64 b; }; 3411 struct my_u *u = (struct my_u *)tx_desc; 3412 const char *type; 3413 3414 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 3415 type = "NTC/U"; 3416 else if (i == tx_ring->next_to_use) 3417 type = "NTU"; 3418 else if (i == tx_ring->next_to_clean) 3419 type = "NTC"; 3420 else 3421 type = ""; 3422 3423 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", 3424 ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, 3425 le64_to_cpu(u->a), le64_to_cpu(u->b), 3426 (u64)buffer_info->dma, buffer_info->length, 3427 buffer_info->next_to_watch, 3428 (u64)buffer_info->time_stamp, buffer_info->skb, type); 3429 } 3430 3431 rx_ring_summary: 3432 /* receive dump */ 3433 pr_info("\nRX Desc ring dump\n"); 3434 3435 /* Legacy Receive Descriptor Format 3436 * 3437 * +-----------------------------------------------------+ 3438 * | Buffer Address [63:0] | 3439 * +-----------------------------------------------------+ 3440 * | VLAN Tag | Errors | Status 0 | Packet csum | Length | 3441 * +-----------------------------------------------------+ 3442 * 63 48 47 40 39 32 31 16 15 0 3443 */ 3444 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); 3445 3446 if (!netif_msg_rx_status(adapter)) 3447 goto exit; 3448 3449 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3450 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3451 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; 3452 struct my_u { __le64 a; __le64 b; }; 3453 struct my_u *u = (struct my_u *)rx_desc; 3454 const char *type; 3455 3456 if (i == rx_ring->next_to_use) 3457 type = "NTU"; 3458 else if (i == rx_ring->next_to_clean) 3459 type = "NTC"; 3460 else 3461 type = ""; 3462 3463 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", 3464 i, le64_to_cpu(u->a), le64_to_cpu(u->b), 3465 (u64)buffer_info->dma, buffer_info->rxbuf.data, type); 3466 } /* for */ 3467 3468 /* dump the descriptor caches */ 3469 /* rx */ 3470 pr_info("Rx descriptor cache in 64bit format\n"); 3471 for (i = 0x6000; i <= 0x63FF ; i += 0x10) { 3472 pr_info("R%04X: %08X|%08X %08X|%08X\n", 3473 i, 3474 readl(adapter->hw.hw_addr + i+4), 3475 readl(adapter->hw.hw_addr + i), 3476 readl(adapter->hw.hw_addr + i+12), 3477 readl(adapter->hw.hw_addr + i+8)); 3478 } 3479 /* tx */ 3480 pr_info("Tx descriptor cache in 64bit format\n"); 3481 for (i = 0x7000; i <= 0x73FF ; i += 0x10) { 3482 pr_info("T%04X: %08X|%08X %08X|%08X\n", 3483 i, 3484 readl(adapter->hw.hw_addr + i+4), 3485 readl(adapter->hw.hw_addr + i), 3486 readl(adapter->hw.hw_addr + i+12), 3487 readl(adapter->hw.hw_addr + i+8)); 3488 } 3489 exit: 3490 return; 3491 } 3492 3493 /** 3494 * e1000_tx_timeout - Respond to a Tx Hang 3495 * @netdev: network interface device structure 3496 **/ 3497 static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue) 3498 { 3499 struct e1000_adapter *adapter = netdev_priv(netdev); 3500 3501 /* Do the reset outside of interrupt context */ 3502 adapter->tx_timeout_count++; 3503 schedule_work(&adapter->reset_task); 3504 } 3505 3506 static void e1000_reset_task(struct work_struct *work) 3507 { 3508 struct e1000_adapter *adapter = 3509 container_of(work, struct e1000_adapter, reset_task); 3510 3511 e_err(drv, "Reset adapter\n"); 3512 e1000_reinit_locked(adapter); 3513 } 3514 3515 /** 3516 * e1000_change_mtu - Change the Maximum Transfer Unit 3517 * @netdev: network interface device structure 3518 * @new_mtu: new value for maximum frame size 3519 * 3520 * Returns 0 on success, negative on failure 3521 **/ 3522 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3523 { 3524 struct e1000_adapter *adapter = netdev_priv(netdev); 3525 struct e1000_hw *hw = &adapter->hw; 3526 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3527 3528 /* Adapter-specific max frame size limits. */ 3529 switch (hw->mac_type) { 3530 case e1000_undefined ... 
e1000_82542_rev2_1: 3531 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3532 e_err(probe, "Jumbo Frames not supported.\n"); 3533 return -EINVAL; 3534 } 3535 break; 3536 default: 3537 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3538 break; 3539 } 3540 3541 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3542 msleep(1); 3543 /* e1000_down has a dependency on max_frame_size */ 3544 hw->max_frame_size = max_frame; 3545 if (netif_running(netdev)) { 3546 /* prevent buffers from being reallocated */ 3547 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; 3548 e1000_down(adapter); 3549 } 3550 3551 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3552 * means we reserve 2 more, this pushes us to allocate from the next 3553 * larger slab size. 3554 * i.e. RXBUFFER_2048 --> size-4096 slab 3555 * however with the new *_jumbo_rx* routines, jumbo receives will use 3556 * fragmented skbs 3557 */ 3558 3559 if (max_frame <= E1000_RXBUFFER_2048) 3560 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3561 else 3562 #if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3563 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3564 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3565 adapter->rx_buffer_len = PAGE_SIZE; 3566 #endif 3567 3568 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3569 if (!hw->tbi_compatibility_on && 3570 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3571 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3572 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3573 3574 netdev_dbg(netdev, "changing MTU from %d to %d\n", 3575 netdev->mtu, new_mtu); 3576 netdev->mtu = new_mtu; 3577 3578 if (netif_running(netdev)) 3579 e1000_up(adapter); 3580 else 3581 e1000_reset(adapter); 3582 3583 clear_bit(__E1000_RESETTING, &adapter->flags); 3584 3585 return 0; 3586 } 3587 3588 /** 3589 * e1000_update_stats - Update the board statistics counters 3590 * @adapter: board private structure 3591 **/ 3592 void e1000_update_stats(struct e1000_adapter *adapter) 3593 { 3594 struct net_device *netdev = adapter->netdev; 3595 struct e1000_hw *hw = &adapter->hw; 3596 struct pci_dev *pdev = adapter->pdev; 3597 unsigned long flags; 3598 u16 phy_tmp; 3599 3600 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3601 3602 /* Prevent stats update while adapter is being reset, or if the pci 3603 * connection is down. 
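 * (adapter->link_speed is cleared by the watchdog when carrier is
 * lost, so the first check below also skips updates while there is
 * no link.)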
3604 */ 3605 if (adapter->link_speed == 0) 3606 return; 3607 if (pci_channel_offline(pdev)) 3608 return; 3609 3610 spin_lock_irqsave(&adapter->stats_lock, flags); 3611 3612 /* these counters are modified from e1000_tbi_adjust_stats, 3613 * called from the interrupt context, so they must only 3614 * be written while holding adapter->stats_lock 3615 */ 3616 3617 adapter->stats.crcerrs += er32(CRCERRS); 3618 adapter->stats.gprc += er32(GPRC); 3619 adapter->stats.gorcl += er32(GORCL); 3620 adapter->stats.gorch += er32(GORCH); 3621 adapter->stats.bprc += er32(BPRC); 3622 adapter->stats.mprc += er32(MPRC); 3623 adapter->stats.roc += er32(ROC); 3624 3625 adapter->stats.prc64 += er32(PRC64); 3626 adapter->stats.prc127 += er32(PRC127); 3627 adapter->stats.prc255 += er32(PRC255); 3628 adapter->stats.prc511 += er32(PRC511); 3629 adapter->stats.prc1023 += er32(PRC1023); 3630 adapter->stats.prc1522 += er32(PRC1522); 3631 3632 adapter->stats.symerrs += er32(SYMERRS); 3633 adapter->stats.mpc += er32(MPC); 3634 adapter->stats.scc += er32(SCC); 3635 adapter->stats.ecol += er32(ECOL); 3636 adapter->stats.mcc += er32(MCC); 3637 adapter->stats.latecol += er32(LATECOL); 3638 adapter->stats.dc += er32(DC); 3639 adapter->stats.sec += er32(SEC); 3640 adapter->stats.rlec += er32(RLEC); 3641 adapter->stats.xonrxc += er32(XONRXC); 3642 adapter->stats.xontxc += er32(XONTXC); 3643 adapter->stats.xoffrxc += er32(XOFFRXC); 3644 adapter->stats.xofftxc += er32(XOFFTXC); 3645 adapter->stats.fcruc += er32(FCRUC); 3646 adapter->stats.gptc += er32(GPTC); 3647 adapter->stats.gotcl += er32(GOTCL); 3648 adapter->stats.gotch += er32(GOTCH); 3649 adapter->stats.rnbc += er32(RNBC); 3650 adapter->stats.ruc += er32(RUC); 3651 adapter->stats.rfc += er32(RFC); 3652 adapter->stats.rjc += er32(RJC); 3653 adapter->stats.torl += er32(TORL); 3654 adapter->stats.torh += er32(TORH); 3655 adapter->stats.totl += er32(TOTL); 3656 adapter->stats.toth += er32(TOTH); 3657 adapter->stats.tpr += er32(TPR); 3658 3659 adapter->stats.ptc64 += er32(PTC64); 3660 adapter->stats.ptc127 += er32(PTC127); 3661 adapter->stats.ptc255 += er32(PTC255); 3662 adapter->stats.ptc511 += er32(PTC511); 3663 adapter->stats.ptc1023 += er32(PTC1023); 3664 adapter->stats.ptc1522 += er32(PTC1522); 3665 3666 adapter->stats.mptc += er32(MPTC); 3667 adapter->stats.bptc += er32(BPTC); 3668 3669 /* used for adaptive IFS */ 3670 3671 hw->tx_packet_delta = er32(TPT); 3672 adapter->stats.tpt += hw->tx_packet_delta; 3673 hw->collision_delta = er32(COLC); 3674 adapter->stats.colc += hw->collision_delta; 3675 3676 if (hw->mac_type >= e1000_82543) { 3677 adapter->stats.algnerrc += er32(ALGNERRC); 3678 adapter->stats.rxerrc += er32(RXERRC); 3679 adapter->stats.tncrs += er32(TNCRS); 3680 adapter->stats.cexterr += er32(CEXTERR); 3681 adapter->stats.tsctc += er32(TSCTC); 3682 adapter->stats.tsctfc += er32(TSCTFC); 3683 } 3684 3685 /* Fill out the OS statistics structure */ 3686 netdev->stats.multicast = adapter->stats.mprc; 3687 netdev->stats.collisions = adapter->stats.colc; 3688 3689 /* Rx Errors */ 3690 3691 /* RLEC on some newer hardware can be incorrect so build 3692 * our own version based on RUC and ROC 3693 */ 3694 netdev->stats.rx_errors = adapter->stats.rxerrc + 3695 adapter->stats.crcerrs + adapter->stats.algnerrc + 3696 adapter->stats.ruc + adapter->stats.roc + 3697 adapter->stats.cexterr; 3698 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3699 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3700 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 
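/* algnerrc is only accumulated on 82543 and newer (see above), so
 * rx_frame_errors stays zero on older MACs.
 */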
3701 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3702 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3703 3704 /* Tx Errors */ 3705 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3706 netdev->stats.tx_errors = adapter->stats.txerrc; 3707 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3708 netdev->stats.tx_window_errors = adapter->stats.latecol; 3709 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3710 if (hw->bad_tx_carr_stats_fd && 3711 adapter->link_duplex == FULL_DUPLEX) { 3712 netdev->stats.tx_carrier_errors = 0; 3713 adapter->stats.tncrs = 0; 3714 } 3715 3716 /* Tx Dropped needs to be maintained elsewhere */ 3717 3718 /* Phy Stats */ 3719 if (hw->media_type == e1000_media_type_copper) { 3720 if ((adapter->link_speed == SPEED_1000) && 3721 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3722 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3723 adapter->phy_stats.idle_errors += phy_tmp; 3724 } 3725 3726 if ((hw->mac_type <= e1000_82546) && 3727 (hw->phy_type == e1000_phy_m88) && 3728 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3729 adapter->phy_stats.receive_errors += phy_tmp; 3730 } 3731 3732 /* Management Stats */ 3733 if (hw->has_smbus) { 3734 adapter->stats.mgptc += er32(MGTPTC); 3735 adapter->stats.mgprc += er32(MGTPRC); 3736 adapter->stats.mgpdc += er32(MGTPDC); 3737 } 3738 3739 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3740 } 3741 3742 /** 3743 * e1000_intr - Interrupt Handler 3744 * @irq: interrupt number 3745 * @data: pointer to a network interface device structure 3746 **/ 3747 static irqreturn_t e1000_intr(int irq, void *data) 3748 { 3749 struct net_device *netdev = data; 3750 struct e1000_adapter *adapter = netdev_priv(netdev); 3751 struct e1000_hw *hw = &adapter->hw; 3752 u32 icr = er32(ICR); 3753 3754 if (unlikely((!icr))) 3755 return IRQ_NONE; /* Not our interrupt */ 3756 3757 /* we might have caused the interrupt, but the above 3758 * read cleared it, and just in case the driver is 3759 * down there is nothing to do so return handled 3760 */ 3761 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3762 return IRQ_HANDLED; 3763 3764 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3765 hw->get_link_status = 1; 3766 /* guard against interrupt when we're going down */ 3767 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3768 schedule_delayed_work(&adapter->watchdog_task, 1); 3769 } 3770 3771 /* disable interrupts, without the synchronize_irq bit */ 3772 ew32(IMC, ~0); 3773 E1000_WRITE_FLUSH(); 3774 3775 if (likely(napi_schedule_prep(&adapter->napi))) { 3776 adapter->total_tx_bytes = 0; 3777 adapter->total_tx_packets = 0; 3778 adapter->total_rx_bytes = 0; 3779 adapter->total_rx_packets = 0; 3780 __napi_schedule(&adapter->napi); 3781 } else { 3782 /* this really should not happen! 
if it does it is basically a 3783 * bug, but not a hard error, so enable ints and continue 3784 */ 3785 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3786 e1000_irq_enable(adapter); 3787 } 3788 3789 return IRQ_HANDLED; 3790 } 3791 3792 /** 3793 * e1000_clean - NAPI Rx polling callback 3794 * @adapter: board private structure 3795 **/ 3796 static int e1000_clean(struct napi_struct *napi, int budget) 3797 { 3798 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, 3799 napi); 3800 int tx_clean_complete = 0, work_done = 0; 3801 3802 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3803 3804 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3805 3806 if (!tx_clean_complete || work_done == budget) 3807 return budget; 3808 3809 /* Exit the polling mode, but don't re-enable interrupts if stack might 3810 * poll us due to busy-polling 3811 */ 3812 if (likely(napi_complete_done(napi, work_done))) { 3813 if (likely(adapter->itr_setting & 3)) 3814 e1000_set_itr(adapter); 3815 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3816 e1000_irq_enable(adapter); 3817 } 3818 3819 return work_done; 3820 } 3821 3822 /** 3823 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3824 * @adapter: board private structure 3825 **/ 3826 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3827 struct e1000_tx_ring *tx_ring) 3828 { 3829 struct e1000_hw *hw = &adapter->hw; 3830 struct net_device *netdev = adapter->netdev; 3831 struct e1000_tx_desc *tx_desc, *eop_desc; 3832 struct e1000_tx_buffer *buffer_info; 3833 unsigned int i, eop; 3834 unsigned int count = 0; 3835 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 3836 unsigned int bytes_compl = 0, pkts_compl = 0; 3837 3838 i = tx_ring->next_to_clean; 3839 eop = tx_ring->buffer_info[i].next_to_watch; 3840 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3841 3842 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3843 (count < tx_ring->count)) { 3844 bool cleaned = false; 3845 dma_rmb(); /* read buffer_info after eop_desc */ 3846 for ( ; !cleaned; count++) { 3847 tx_desc = E1000_TX_DESC(*tx_ring, i); 3848 buffer_info = &tx_ring->buffer_info[i]; 3849 cleaned = (i == eop); 3850 3851 if (cleaned) { 3852 total_tx_packets += buffer_info->segs; 3853 total_tx_bytes += buffer_info->bytecount; 3854 if (buffer_info->skb) { 3855 bytes_compl += buffer_info->skb->len; 3856 pkts_compl++; 3857 } 3858 3859 } 3860 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3861 tx_desc->upper.data = 0; 3862 3863 if (unlikely(++i == tx_ring->count)) 3864 i = 0; 3865 } 3866 3867 eop = tx_ring->buffer_info[i].next_to_watch; 3868 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3869 } 3870 3871 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, 3872 * which will reuse the cleaned buffers. 3873 */ 3874 smp_store_release(&tx_ring->next_to_clean, i); 3875 3876 netdev_completed_queue(netdev, pkts_compl, bytes_compl); 3877 3878 #define TX_WAKE_THRESHOLD 32 3879 if (unlikely(count && netif_carrier_ok(netdev) && 3880 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3881 /* Make sure that anybody stopping the queue after this 3882 * sees the new next_to_clean. 
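 *
 * (Editor's sketch, not part of the original comment: the smp_mb() below
 * pairs with a matching barrier on the queue-stop side of the transmit
 * path earlier in this file, which roughly does
 *
 *	netif_stop_queue(netdev);
 *	smp_mb();
 *	if (E1000_DESC_UNUSED(tx_ring) >= size)
 *		netif_start_queue(netdev);
 *
 * so either the stopping CPU observes the updated next_to_clean and
 * un-stops itself, or this CPU observes the stopped queue and wakes it
 * below.)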
3883 */ 3884 smp_mb(); 3885 3886 if (netif_queue_stopped(netdev) && 3887 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3888 netif_wake_queue(netdev); 3889 ++adapter->restart_queue; 3890 } 3891 } 3892 3893 if (adapter->detect_tx_hung) { 3894 /* Detect a transmit hang in hardware, this serializes the 3895 * check with the clearing of time_stamp and movement of i 3896 */ 3897 adapter->detect_tx_hung = false; 3898 if (tx_ring->buffer_info[eop].time_stamp && 3899 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3900 (adapter->tx_timeout_factor * HZ)) && 3901 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3902 3903 /* detected Tx unit hang */ 3904 e_err(drv, "Detected Tx Unit Hang\n" 3905 " Tx Queue <%lu>\n" 3906 " TDH <%x>\n" 3907 " TDT <%x>\n" 3908 " next_to_use <%x>\n" 3909 " next_to_clean <%x>\n" 3910 "buffer_info[next_to_clean]\n" 3911 " time_stamp <%lx>\n" 3912 " next_to_watch <%x>\n" 3913 " jiffies <%lx>\n" 3914 " next_to_watch.status <%x>\n", 3915 (unsigned long)(tx_ring - adapter->tx_ring), 3916 readl(hw->hw_addr + tx_ring->tdh), 3917 readl(hw->hw_addr + tx_ring->tdt), 3918 tx_ring->next_to_use, 3919 tx_ring->next_to_clean, 3920 tx_ring->buffer_info[eop].time_stamp, 3921 eop, 3922 jiffies, 3923 eop_desc->upper.fields.status); 3924 e1000_dump(adapter); 3925 netif_stop_queue(netdev); 3926 } 3927 } 3928 adapter->total_tx_bytes += total_tx_bytes; 3929 adapter->total_tx_packets += total_tx_packets; 3930 netdev->stats.tx_bytes += total_tx_bytes; 3931 netdev->stats.tx_packets += total_tx_packets; 3932 return count < tx_ring->count; 3933 } 3934 3935 /** 3936 * e1000_rx_checksum - Receive Checksum Offload for 82543 3937 * @adapter: board private structure 3938 * @status_err: receive descriptor status and error fields 3939 * @csum: receive descriptor csum field 3940 * @sk_buff: socket buffer with received data 3941 **/ 3942 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3943 u32 csum, struct sk_buff *skb) 3944 { 3945 struct e1000_hw *hw = &adapter->hw; 3946 u16 status = (u16)status_err; 3947 u8 errors = (u8)(status_err >> 24); 3948 3949 skb_checksum_none_assert(skb); 3950 3951 /* 82543 or newer only */ 3952 if (unlikely(hw->mac_type < e1000_82543)) 3953 return; 3954 /* Ignore Checksum bit is set */ 3955 if (unlikely(status & E1000_RXD_STAT_IXSM)) 3956 return; 3957 /* TCP/UDP checksum error bit is set */ 3958 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3959 /* let the stack verify checksum errors */ 3960 adapter->hw_csum_err++; 3961 return; 3962 } 3963 /* TCP/UDP Checksum has not been calculated */ 3964 if (!(status & E1000_RXD_STAT_TCPCS)) 3965 return; 3966 3967 /* It must be a TCP or UDP packet with a valid checksum */ 3968 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3969 /* TCP checksum is good */ 3970 skb->ip_summed = CHECKSUM_UNNECESSARY; 3971 } 3972 adapter->hw_csum_good++; 3973 } 3974 3975 /** 3976 * e1000_consume_page - helper function for jumbo Rx path 3977 **/ 3978 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, 3979 u16 length) 3980 { 3981 bi->rxbuf.page = NULL; 3982 skb->len += length; 3983 skb->data_len += length; 3984 skb->truesize += PAGE_SIZE; 3985 } 3986 3987 /** 3988 * e1000_receive_skb - helper function to handle rx indications 3989 * @adapter: board private structure 3990 * @status: descriptor status field as written by hardware 3991 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3992 * @skb: pointer to sk_buff to be indicated to stack 3993 */ 3994 static void 
e1000_receive_skb(struct e1000_adapter *adapter, u8 status, 3995 __le16 vlan, struct sk_buff *skb) 3996 { 3997 skb->protocol = eth_type_trans(skb, adapter->netdev); 3998 3999 if (status & E1000_RXD_STAT_VP) { 4000 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4001 4002 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4003 } 4004 napi_gro_receive(&adapter->napi, skb); 4005 } 4006 4007 /** 4008 * e1000_tbi_adjust_stats 4009 * @hw: Struct containing variables accessed by shared code 4010 * @frame_len: The length of the frame in question 4011 * @mac_addr: The Ethernet destination address of the frame in question 4012 * 4013 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT 4014 */ 4015 static void e1000_tbi_adjust_stats(struct e1000_hw *hw, 4016 struct e1000_hw_stats *stats, 4017 u32 frame_len, const u8 *mac_addr) 4018 { 4019 u64 carry_bit; 4020 4021 /* First adjust the frame length. */ 4022 frame_len--; 4023 /* We need to adjust the statistics counters, since the hardware 4024 * counters overcount this packet as a CRC error and undercount 4025 * the packet as a good packet 4026 */ 4027 /* This packet should not be counted as a CRC error. */ 4028 stats->crcerrs--; 4029 /* This packet does count as a Good Packet Received. */ 4030 stats->gprc++; 4031 4032 /* Adjust the Good Octets received counters */ 4033 carry_bit = 0x80000000 & stats->gorcl; 4034 stats->gorcl += frame_len; 4035 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4036 * Received Count) was one before the addition, 4037 * AND it is zero after, then we lost the carry out, 4038 * need to add one to Gorch (Good Octets Received Count High). 4039 * This could be simplified if all environments supported 4040 * 64-bit integers. 4041 */ 4042 if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) 4043 stats->gorch++; 4044 /* Is this a broadcast or multicast? Check broadcast first, 4045 * since the test for a multicast frame will test positive on 4046 * a broadcast frame. 4047 */ 4048 if (is_broadcast_ether_addr(mac_addr)) 4049 stats->bprc++; 4050 else if (is_multicast_ether_addr(mac_addr)) 4051 stats->mprc++; 4052 4053 if (frame_len == hw->max_frame_size) { 4054 /* In this case, the hardware has overcounted the number of 4055 * oversize frames. 4056 */ 4057 if (stats->roc > 0) 4058 stats->roc--; 4059 } 4060 4061 /* Adjust the bin counters when the extra byte put the frame in the 4062 * wrong bin. Remember that the frame_len was adjusted above. 
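 *
 * Worked example (editor's illustration): a 64-byte frame accepted with
 * the TBI extra byte is counted by hardware as 65 bytes and lands in the
 * 65..127 bin; after the frame_len-- above it is 64 bytes again, so the
 * code below bumps prc64 and decrements prc127 to move it back into the
 * correct bin.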
4063 */ 4064 if (frame_len == 64) { 4065 stats->prc64++; 4066 stats->prc127--; 4067 } else if (frame_len == 127) { 4068 stats->prc127++; 4069 stats->prc255--; 4070 } else if (frame_len == 255) { 4071 stats->prc255++; 4072 stats->prc511--; 4073 } else if (frame_len == 511) { 4074 stats->prc511++; 4075 stats->prc1023--; 4076 } else if (frame_len == 1023) { 4077 stats->prc1023++; 4078 stats->prc1522--; 4079 } else if (frame_len == 1522) { 4080 stats->prc1522++; 4081 } 4082 } 4083 4084 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, 4085 u8 status, u8 errors, 4086 u32 length, const u8 *data) 4087 { 4088 struct e1000_hw *hw = &adapter->hw; 4089 u8 last_byte = *(data + length - 1); 4090 4091 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { 4092 unsigned long irq_flags; 4093 4094 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 4095 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); 4096 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); 4097 4098 return true; 4099 } 4100 4101 return false; 4102 } 4103 4104 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, 4105 unsigned int bufsz) 4106 { 4107 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); 4108 4109 if (unlikely(!skb)) 4110 adapter->alloc_rx_buff_failed++; 4111 return skb; 4112 } 4113 4114 /** 4115 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 4116 * @adapter: board private structure 4117 * @rx_ring: ring to clean 4118 * @work_done: amount of napi work completed this call 4119 * @work_to_do: max amount of work allowed for this call to do 4120 * 4121 * the return value indicates whether actual cleaning was done, there 4122 * is no guarantee that everything was cleaned 4123 */ 4124 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 4125 struct e1000_rx_ring *rx_ring, 4126 int *work_done, int work_to_do) 4127 { 4128 struct net_device *netdev = adapter->netdev; 4129 struct pci_dev *pdev = adapter->pdev; 4130 struct e1000_rx_desc *rx_desc, *next_rxd; 4131 struct e1000_rx_buffer *buffer_info, *next_buffer; 4132 u32 length; 4133 unsigned int i; 4134 int cleaned_count = 0; 4135 bool cleaned = false; 4136 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4137 4138 i = rx_ring->next_to_clean; 4139 rx_desc = E1000_RX_DESC(*rx_ring, i); 4140 buffer_info = &rx_ring->buffer_info[i]; 4141 4142 while (rx_desc->status & E1000_RXD_STAT_DD) { 4143 struct sk_buff *skb; 4144 u8 status; 4145 4146 if (*work_done >= work_to_do) 4147 break; 4148 (*work_done)++; 4149 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4150 4151 status = rx_desc->status; 4152 4153 if (++i == rx_ring->count) 4154 i = 0; 4155 4156 next_rxd = E1000_RX_DESC(*rx_ring, i); 4157 prefetch(next_rxd); 4158 4159 next_buffer = &rx_ring->buffer_info[i]; 4160 4161 cleaned = true; 4162 cleaned_count++; 4163 dma_unmap_page(&pdev->dev, buffer_info->dma, 4164 adapter->rx_buffer_len, DMA_FROM_DEVICE); 4165 buffer_info->dma = 0; 4166 4167 length = le16_to_cpu(rx_desc->length); 4168 4169 /* errors is only valid for DD + EOP descriptors */ 4170 if (unlikely((status & E1000_RXD_STAT_EOP) && 4171 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 4172 u8 *mapped = page_address(buffer_info->rxbuf.page); 4173 4174 if (e1000_tbi_should_accept(adapter, status, 4175 rx_desc->errors, 4176 length, mapped)) { 4177 length--; 4178 } else if (netdev->features & NETIF_F_RXALL) { 4179 goto process_skb; 4180 } else { 4181 /* an error means any chain goes out the window 4182 * 
too 4183 */ 4184 dev_kfree_skb(rx_ring->rx_skb_top); 4185 rx_ring->rx_skb_top = NULL; 4186 goto next_desc; 4187 } 4188 } 4189 4190 #define rxtop rx_ring->rx_skb_top 4191 process_skb: 4192 if (!(status & E1000_RXD_STAT_EOP)) { 4193 /* this descriptor is only the beginning (or middle) */ 4194 if (!rxtop) { 4195 /* this is the beginning of a chain */ 4196 rxtop = napi_get_frags(&adapter->napi); 4197 if (!rxtop) 4198 break; 4199 4200 skb_fill_page_desc(rxtop, 0, 4201 buffer_info->rxbuf.page, 4202 0, length); 4203 } else { 4204 /* this is the middle of a chain */ 4205 skb_fill_page_desc(rxtop, 4206 skb_shinfo(rxtop)->nr_frags, 4207 buffer_info->rxbuf.page, 0, length); 4208 } 4209 e1000_consume_page(buffer_info, rxtop, length); 4210 goto next_desc; 4211 } else { 4212 if (rxtop) { 4213 /* end of the chain */ 4214 skb_fill_page_desc(rxtop, 4215 skb_shinfo(rxtop)->nr_frags, 4216 buffer_info->rxbuf.page, 0, length); 4217 skb = rxtop; 4218 rxtop = NULL; 4219 e1000_consume_page(buffer_info, skb, length); 4220 } else { 4221 struct page *p; 4222 /* no chain, got EOP, this buf is the packet 4223 * copybreak to save the put_page/alloc_page 4224 */ 4225 p = buffer_info->rxbuf.page; 4226 if (length <= copybreak) { 4227 u8 *vaddr; 4228 4229 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4230 length -= 4; 4231 skb = e1000_alloc_rx_skb(adapter, 4232 length); 4233 if (!skb) 4234 break; 4235 4236 vaddr = kmap_atomic(p); 4237 memcpy(skb_tail_pointer(skb), vaddr, 4238 length); 4239 kunmap_atomic(vaddr); 4240 /* re-use the page, so don't erase 4241 * buffer_info->rxbuf.page 4242 */ 4243 skb_put(skb, length); 4244 e1000_rx_checksum(adapter, 4245 status | rx_desc->errors << 24, 4246 le16_to_cpu(rx_desc->csum), skb); 4247 4248 total_rx_bytes += skb->len; 4249 total_rx_packets++; 4250 4251 e1000_receive_skb(adapter, status, 4252 rx_desc->special, skb); 4253 goto next_desc; 4254 } else { 4255 skb = napi_get_frags(&adapter->napi); 4256 if (!skb) { 4257 adapter->alloc_rx_buff_failed++; 4258 break; 4259 } 4260 skb_fill_page_desc(skb, 0, p, 0, 4261 length); 4262 e1000_consume_page(buffer_info, skb, 4263 length); 4264 } 4265 } 4266 } 4267 4268 /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ 4269 e1000_rx_checksum(adapter, 4270 (u32)(status) | 4271 ((u32)(rx_desc->errors) << 24), 4272 le16_to_cpu(rx_desc->csum), skb); 4273 4274 total_rx_bytes += (skb->len - 4); /* don't count FCS */ 4275 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4276 pskb_trim(skb, skb->len - 4); 4277 total_rx_packets++; 4278 4279 if (status & E1000_RXD_STAT_VP) { 4280 __le16 vlan = rx_desc->special; 4281 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4282 4283 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4284 } 4285 4286 napi_gro_frags(&adapter->napi); 4287 4288 next_desc: 4289 rx_desc->status = 0; 4290 4291 /* return some buffers to hardware, one at a time is too slow */ 4292 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4293 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4294 cleaned_count = 0; 4295 } 4296 4297 /* use prefetched values */ 4298 rx_desc = next_rxd; 4299 buffer_info = next_buffer; 4300 } 4301 rx_ring->next_to_clean = i; 4302 4303 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4304 if (cleaned_count) 4305 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4306 4307 adapter->total_rx_packets += total_rx_packets; 4308 adapter->total_rx_bytes += total_rx_bytes; 4309 netdev->stats.rx_bytes += total_rx_bytes; 4310 netdev->stats.rx_packets += total_rx_packets; 4311 return cleaned; 4312 } 4313 4314 /* this should improve performance for small packets with large amounts 4315 * of reassembly being done in the stack 4316 */ 4317 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, 4318 struct e1000_rx_buffer *buffer_info, 4319 u32 length, const void *data) 4320 { 4321 struct sk_buff *skb; 4322 4323 if (length > copybreak) 4324 return NULL; 4325 4326 skb = e1000_alloc_rx_skb(adapter, length); 4327 if (!skb) 4328 return NULL; 4329 4330 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, 4331 length, DMA_FROM_DEVICE); 4332 4333 skb_put_data(skb, data, length); 4334 4335 return skb; 4336 } 4337 4338 /** 4339 * e1000_clean_rx_irq - Send received data up the network stack; legacy 4340 * @adapter: board private structure 4341 * @rx_ring: ring to clean 4342 * @work_done: amount of napi work completed this call 4343 * @work_to_do: max amount of work allowed for this call to do 4344 */ 4345 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 4346 struct e1000_rx_ring *rx_ring, 4347 int *work_done, int work_to_do) 4348 { 4349 struct net_device *netdev = adapter->netdev; 4350 struct pci_dev *pdev = adapter->pdev; 4351 struct e1000_rx_desc *rx_desc, *next_rxd; 4352 struct e1000_rx_buffer *buffer_info, *next_buffer; 4353 u32 length; 4354 unsigned int i; 4355 int cleaned_count = 0; 4356 bool cleaned = false; 4357 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4358 4359 i = rx_ring->next_to_clean; 4360 rx_desc = E1000_RX_DESC(*rx_ring, i); 4361 buffer_info = &rx_ring->buffer_info[i]; 4362 4363 while (rx_desc->status & E1000_RXD_STAT_DD) { 4364 struct sk_buff *skb; 4365 u8 *data; 4366 u8 status; 4367 4368 if (*work_done >= work_to_do) 4369 break; 4370 (*work_done)++; 4371 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4372 4373 status = rx_desc->status; 4374 length = le16_to_cpu(rx_desc->length); 4375 4376 data = buffer_info->rxbuf.data; 4377 prefetch(data); 4378 skb = e1000_copybreak(adapter, buffer_info, length, data); 4379 if (!skb) { 4380 unsigned int frag_len = e1000_frag_len(adapter); 4381 4382 skb = build_skb(data - E1000_HEADROOM, frag_len); 4383 if (!skb) { 4384 adapter->alloc_rx_buff_failed++; 4385 
break; 4386 } 4387 4388 skb_reserve(skb, E1000_HEADROOM); 4389 dma_unmap_single(&pdev->dev, buffer_info->dma, 4390 adapter->rx_buffer_len, 4391 DMA_FROM_DEVICE); 4392 buffer_info->dma = 0; 4393 buffer_info->rxbuf.data = NULL; 4394 } 4395 4396 if (++i == rx_ring->count) 4397 i = 0; 4398 4399 next_rxd = E1000_RX_DESC(*rx_ring, i); 4400 prefetch(next_rxd); 4401 4402 next_buffer = &rx_ring->buffer_info[i]; 4403 4404 cleaned = true; 4405 cleaned_count++; 4406 4407 /* !EOP means multiple descriptors were used to store a single 4408 * packet, if thats the case we need to toss it. In fact, we 4409 * to toss every packet with the EOP bit clear and the next 4410 * frame that _does_ have the EOP bit set, as it is by 4411 * definition only a frame fragment 4412 */ 4413 if (unlikely(!(status & E1000_RXD_STAT_EOP))) 4414 adapter->discarding = true; 4415 4416 if (adapter->discarding) { 4417 /* All receives must fit into a single buffer */ 4418 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); 4419 dev_kfree_skb(skb); 4420 if (status & E1000_RXD_STAT_EOP) 4421 adapter->discarding = false; 4422 goto next_desc; 4423 } 4424 4425 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 4426 if (e1000_tbi_should_accept(adapter, status, 4427 rx_desc->errors, 4428 length, data)) { 4429 length--; 4430 } else if (netdev->features & NETIF_F_RXALL) { 4431 goto process_skb; 4432 } else { 4433 dev_kfree_skb(skb); 4434 goto next_desc; 4435 } 4436 } 4437 4438 process_skb: 4439 total_rx_bytes += (length - 4); /* don't count FCS */ 4440 total_rx_packets++; 4441 4442 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4443 /* adjust length to remove Ethernet CRC, this must be 4444 * done after the TBI_ACCEPT workaround above 4445 */ 4446 length -= 4; 4447 4448 if (buffer_info->rxbuf.data == NULL) 4449 skb_put(skb, length); 4450 else /* copybreak skb */ 4451 skb_trim(skb, length); 4452 4453 /* Receive Checksum Offload */ 4454 e1000_rx_checksum(adapter, 4455 (u32)(status) | 4456 ((u32)(rx_desc->errors) << 24), 4457 le16_to_cpu(rx_desc->csum), skb); 4458 4459 e1000_receive_skb(adapter, status, rx_desc->special, skb); 4460 4461 next_desc: 4462 rx_desc->status = 0; 4463 4464 /* return some buffers to hardware, one at a time is too slow */ 4465 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4466 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4467 cleaned_count = 0; 4468 } 4469 4470 /* use prefetched values */ 4471 rx_desc = next_rxd; 4472 buffer_info = next_buffer; 4473 } 4474 rx_ring->next_to_clean = i; 4475 4476 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4477 if (cleaned_count) 4478 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4479 4480 adapter->total_rx_packets += total_rx_packets; 4481 adapter->total_rx_bytes += total_rx_bytes; 4482 netdev->stats.rx_bytes += total_rx_bytes; 4483 netdev->stats.rx_packets += total_rx_packets; 4484 return cleaned; 4485 } 4486 4487 /** 4488 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 4489 * @adapter: address of board private structure 4490 * @rx_ring: pointer to receive ring structure 4491 * @cleaned_count: number of buffers to allocate this pass 4492 **/ 4493 static void 4494 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4495 struct e1000_rx_ring *rx_ring, int cleaned_count) 4496 { 4497 struct pci_dev *pdev = adapter->pdev; 4498 struct e1000_rx_desc *rx_desc; 4499 struct e1000_rx_buffer *buffer_info; 4500 unsigned int i; 4501 4502 i = rx_ring->next_to_use; 4503 buffer_info = &rx_ring->buffer_info[i]; 4504 
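	/* Editor's note (not part of the original source): each descriptor on
	 * the jumbo path is backed by a whole page from alloc_page() and is
	 * mapped for rx_buffer_len bytes; e1000_change_mtu() keeps
	 * rx_buffer_len at or below PAGE_SIZE for jumbo MTUs, so one page per
	 * descriptor is always enough here.
	 */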
4505 while (cleaned_count--) { 4506 /* allocate a new page if necessary */ 4507 if (!buffer_info->rxbuf.page) { 4508 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); 4509 if (unlikely(!buffer_info->rxbuf.page)) { 4510 adapter->alloc_rx_buff_failed++; 4511 break; 4512 } 4513 } 4514 4515 if (!buffer_info->dma) { 4516 buffer_info->dma = dma_map_page(&pdev->dev, 4517 buffer_info->rxbuf.page, 0, 4518 adapter->rx_buffer_len, 4519 DMA_FROM_DEVICE); 4520 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4521 put_page(buffer_info->rxbuf.page); 4522 buffer_info->rxbuf.page = NULL; 4523 buffer_info->dma = 0; 4524 adapter->alloc_rx_buff_failed++; 4525 break; 4526 } 4527 } 4528 4529 rx_desc = E1000_RX_DESC(*rx_ring, i); 4530 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4531 4532 if (unlikely(++i == rx_ring->count)) 4533 i = 0; 4534 buffer_info = &rx_ring->buffer_info[i]; 4535 } 4536 4537 if (likely(rx_ring->next_to_use != i)) { 4538 rx_ring->next_to_use = i; 4539 if (unlikely(i-- == 0)) 4540 i = (rx_ring->count - 1); 4541 4542 /* Force memory writes to complete before letting h/w 4543 * know there are new descriptors to fetch. (Only 4544 * applicable for weak-ordered memory model archs, 4545 * such as IA-64). 4546 */ 4547 dma_wmb(); 4548 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4549 } 4550 } 4551 4552 /** 4553 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4554 * @adapter: address of board private structure 4555 **/ 4556 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4557 struct e1000_rx_ring *rx_ring, 4558 int cleaned_count) 4559 { 4560 struct e1000_hw *hw = &adapter->hw; 4561 struct pci_dev *pdev = adapter->pdev; 4562 struct e1000_rx_desc *rx_desc; 4563 struct e1000_rx_buffer *buffer_info; 4564 unsigned int i; 4565 unsigned int bufsz = adapter->rx_buffer_len; 4566 4567 i = rx_ring->next_to_use; 4568 buffer_info = &rx_ring->buffer_info[i]; 4569 4570 while (cleaned_count--) { 4571 void *data; 4572 4573 if (buffer_info->rxbuf.data) 4574 goto skip; 4575 4576 data = e1000_alloc_frag(adapter); 4577 if (!data) { 4578 /* Better luck next round */ 4579 adapter->alloc_rx_buff_failed++; 4580 break; 4581 } 4582 4583 /* Fix for errata 23, can't cross 64kB boundary */ 4584 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4585 void *olddata = data; 4586 e_err(rx_err, "skb align check failed: %u bytes at " 4587 "%p\n", bufsz, data); 4588 /* Try again, without freeing the previous */ 4589 data = e1000_alloc_frag(adapter); 4590 /* Failed allocation, critical failure */ 4591 if (!data) { 4592 skb_free_frag(olddata); 4593 adapter->alloc_rx_buff_failed++; 4594 break; 4595 } 4596 4597 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4598 /* give up */ 4599 skb_free_frag(data); 4600 skb_free_frag(olddata); 4601 adapter->alloc_rx_buff_failed++; 4602 break; 4603 } 4604 4605 /* Use new allocation */ 4606 skb_free_frag(olddata); 4607 } 4608 buffer_info->dma = dma_map_single(&pdev->dev, 4609 data, 4610 adapter->rx_buffer_len, 4611 DMA_FROM_DEVICE); 4612 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4613 skb_free_frag(data); 4614 buffer_info->dma = 0; 4615 adapter->alloc_rx_buff_failed++; 4616 break; 4617 } 4618 4619 /* XXX if it was allocated cleanly it will never map to a 4620 * boundary crossing 4621 */ 4622 4623 /* Fix for errata 23, can't cross 64kB boundary */ 4624 if (!e1000_check_64k_bound(adapter, 4625 (void *)(unsigned long)buffer_info->dma, 4626 adapter->rx_buffer_len)) { 4627 e_err(rx_err, "dma align check failed: %u bytes at " 4628 
"%p\n", adapter->rx_buffer_len, 4629 (void *)(unsigned long)buffer_info->dma); 4630 4631 dma_unmap_single(&pdev->dev, buffer_info->dma, 4632 adapter->rx_buffer_len, 4633 DMA_FROM_DEVICE); 4634 4635 skb_free_frag(data); 4636 buffer_info->rxbuf.data = NULL; 4637 buffer_info->dma = 0; 4638 4639 adapter->alloc_rx_buff_failed++; 4640 break; 4641 } 4642 buffer_info->rxbuf.data = data; 4643 skip: 4644 rx_desc = E1000_RX_DESC(*rx_ring, i); 4645 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4646 4647 if (unlikely(++i == rx_ring->count)) 4648 i = 0; 4649 buffer_info = &rx_ring->buffer_info[i]; 4650 } 4651 4652 if (likely(rx_ring->next_to_use != i)) { 4653 rx_ring->next_to_use = i; 4654 if (unlikely(i-- == 0)) 4655 i = (rx_ring->count - 1); 4656 4657 /* Force memory writes to complete before letting h/w 4658 * know there are new descriptors to fetch. (Only 4659 * applicable for weak-ordered memory model archs, 4660 * such as IA-64). 4661 */ 4662 dma_wmb(); 4663 writel(i, hw->hw_addr + rx_ring->rdt); 4664 } 4665 } 4666 4667 /** 4668 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4669 * @adapter: 4670 **/ 4671 static void e1000_smartspeed(struct e1000_adapter *adapter) 4672 { 4673 struct e1000_hw *hw = &adapter->hw; 4674 u16 phy_status; 4675 u16 phy_ctrl; 4676 4677 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4678 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4679 return; 4680 4681 if (adapter->smartspeed == 0) { 4682 /* If Master/Slave config fault is asserted twice, 4683 * we assume back-to-back 4684 */ 4685 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4686 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4687 return; 4688 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4689 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4690 return; 4691 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4692 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4693 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4694 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4695 phy_ctrl); 4696 adapter->smartspeed++; 4697 if (!e1000_phy_setup_autoneg(hw) && 4698 !e1000_read_phy_reg(hw, PHY_CTRL, 4699 &phy_ctrl)) { 4700 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4701 MII_CR_RESTART_AUTO_NEG); 4702 e1000_write_phy_reg(hw, PHY_CTRL, 4703 phy_ctrl); 4704 } 4705 } 4706 return; 4707 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4708 /* If still no link, perhaps using 2/3 pair cable */ 4709 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4710 phy_ctrl |= CR_1000T_MS_ENABLE; 4711 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4712 if (!e1000_phy_setup_autoneg(hw) && 4713 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4714 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4715 MII_CR_RESTART_AUTO_NEG); 4716 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4717 } 4718 } 4719 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4720 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4721 adapter->smartspeed = 0; 4722 } 4723 4724 /** 4725 * e1000_ioctl - 4726 * @netdev: 4727 * @ifreq: 4728 * @cmd: 4729 **/ 4730 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4731 { 4732 switch (cmd) { 4733 case SIOCGMIIPHY: 4734 case SIOCGMIIREG: 4735 case SIOCSMIIREG: 4736 return e1000_mii_ioctl(netdev, ifr, cmd); 4737 default: 4738 return -EOPNOTSUPP; 4739 } 4740 } 4741 4742 /** 4743 * e1000_mii_ioctl - 4744 * @netdev: 4745 * @ifreq: 4746 * @cmd: 4747 **/ 4748 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4749 int cmd) 4750 { 4751 struct e1000_adapter *adapter = 
netdev_priv(netdev); 4752 struct e1000_hw *hw = &adapter->hw; 4753 struct mii_ioctl_data *data = if_mii(ifr); 4754 int retval; 4755 u16 mii_reg; 4756 unsigned long flags; 4757 4758 if (hw->media_type != e1000_media_type_copper) 4759 return -EOPNOTSUPP; 4760 4761 switch (cmd) { 4762 case SIOCGMIIPHY: 4763 data->phy_id = hw->phy_addr; 4764 break; 4765 case SIOCGMIIREG: 4766 spin_lock_irqsave(&adapter->stats_lock, flags); 4767 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4768 &data->val_out)) { 4769 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4770 return -EIO; 4771 } 4772 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4773 break; 4774 case SIOCSMIIREG: 4775 if (data->reg_num & ~(0x1F)) 4776 return -EFAULT; 4777 mii_reg = data->val_in; 4778 spin_lock_irqsave(&adapter->stats_lock, flags); 4779 if (e1000_write_phy_reg(hw, data->reg_num, 4780 mii_reg)) { 4781 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4782 return -EIO; 4783 } 4784 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4785 if (hw->media_type == e1000_media_type_copper) { 4786 switch (data->reg_num) { 4787 case PHY_CTRL: 4788 if (mii_reg & MII_CR_POWER_DOWN) 4789 break; 4790 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4791 hw->autoneg = 1; 4792 hw->autoneg_advertised = 0x2F; 4793 } else { 4794 u32 speed; 4795 if (mii_reg & 0x40) 4796 speed = SPEED_1000; 4797 else if (mii_reg & 0x2000) 4798 speed = SPEED_100; 4799 else 4800 speed = SPEED_10; 4801 retval = e1000_set_spd_dplx( 4802 adapter, speed, 4803 ((mii_reg & 0x100) 4804 ? DUPLEX_FULL : 4805 DUPLEX_HALF)); 4806 if (retval) 4807 return retval; 4808 } 4809 if (netif_running(adapter->netdev)) 4810 e1000_reinit_locked(adapter); 4811 else 4812 e1000_reset(adapter); 4813 break; 4814 case M88E1000_PHY_SPEC_CTRL: 4815 case M88E1000_EXT_PHY_SPEC_CTRL: 4816 if (e1000_phy_reset(hw)) 4817 return -EIO; 4818 break; 4819 } 4820 } else { 4821 switch (data->reg_num) { 4822 case PHY_CTRL: 4823 if (mii_reg & MII_CR_POWER_DOWN) 4824 break; 4825 if (netif_running(adapter->netdev)) 4826 e1000_reinit_locked(adapter); 4827 else 4828 e1000_reset(adapter); 4829 break; 4830 } 4831 } 4832 break; 4833 default: 4834 return -EOPNOTSUPP; 4835 } 4836 return E1000_SUCCESS; 4837 } 4838 4839 void e1000_pci_set_mwi(struct e1000_hw *hw) 4840 { 4841 struct e1000_adapter *adapter = hw->back; 4842 int ret_val = pci_set_mwi(adapter->pdev); 4843 4844 if (ret_val) 4845 e_err(probe, "Error in setting MWI\n"); 4846 } 4847 4848 void e1000_pci_clear_mwi(struct e1000_hw *hw) 4849 { 4850 struct e1000_adapter *adapter = hw->back; 4851 4852 pci_clear_mwi(adapter->pdev); 4853 } 4854 4855 int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4856 { 4857 struct e1000_adapter *adapter = hw->back; 4858 return pcix_get_mmrbc(adapter->pdev); 4859 } 4860 4861 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4862 { 4863 struct e1000_adapter *adapter = hw->back; 4864 pcix_set_mmrbc(adapter->pdev, mmrbc); 4865 } 4866 4867 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4868 { 4869 outl(value, port); 4870 } 4871 4872 static bool e1000_vlan_used(struct e1000_adapter *adapter) 4873 { 4874 u16 vid; 4875 4876 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4877 return true; 4878 return false; 4879 } 4880 4881 static void __e1000_vlan_mode(struct e1000_adapter *adapter, 4882 netdev_features_t features) 4883 { 4884 struct e1000_hw *hw = &adapter->hw; 4885 u32 ctrl; 4886 4887 ctrl = er32(CTRL); 4888 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 4889 /* enable VLAN tag insert/strip */ 4890 ctrl |= 
E1000_CTRL_VME; 4891 } else { 4892 /* disable VLAN tag insert/strip */ 4893 ctrl &= ~E1000_CTRL_VME; 4894 } 4895 ew32(CTRL, ctrl); 4896 } 4897 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4898 bool filter_on) 4899 { 4900 struct e1000_hw *hw = &adapter->hw; 4901 u32 rctl; 4902 4903 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4904 e1000_irq_disable(adapter); 4905 4906 __e1000_vlan_mode(adapter, adapter->netdev->features); 4907 if (filter_on) { 4908 /* enable VLAN receive filtering */ 4909 rctl = er32(RCTL); 4910 rctl &= ~E1000_RCTL_CFIEN; 4911 if (!(adapter->netdev->flags & IFF_PROMISC)) 4912 rctl |= E1000_RCTL_VFE; 4913 ew32(RCTL, rctl); 4914 e1000_update_mng_vlan(adapter); 4915 } else { 4916 /* disable VLAN receive filtering */ 4917 rctl = er32(RCTL); 4918 rctl &= ~E1000_RCTL_VFE; 4919 ew32(RCTL, rctl); 4920 } 4921 4922 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4923 e1000_irq_enable(adapter); 4924 } 4925 4926 static void e1000_vlan_mode(struct net_device *netdev, 4927 netdev_features_t features) 4928 { 4929 struct e1000_adapter *adapter = netdev_priv(netdev); 4930 4931 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4932 e1000_irq_disable(adapter); 4933 4934 __e1000_vlan_mode(adapter, features); 4935 4936 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4937 e1000_irq_enable(adapter); 4938 } 4939 4940 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 4941 __be16 proto, u16 vid) 4942 { 4943 struct e1000_adapter *adapter = netdev_priv(netdev); 4944 struct e1000_hw *hw = &adapter->hw; 4945 u32 vfta, index; 4946 4947 if ((hw->mng_cookie.status & 4948 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4949 (vid == adapter->mng_vlan_id)) 4950 return 0; 4951 4952 if (!e1000_vlan_used(adapter)) 4953 e1000_vlan_filter_on_off(adapter, true); 4954 4955 /* add VID to filter table */ 4956 index = (vid >> 5) & 0x7F; 4957 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4958 vfta |= (1 << (vid & 0x1F)); 4959 e1000_write_vfta(hw, index, vfta); 4960 4961 set_bit(vid, adapter->active_vlans); 4962 4963 return 0; 4964 } 4965 4966 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 4967 __be16 proto, u16 vid) 4968 { 4969 struct e1000_adapter *adapter = netdev_priv(netdev); 4970 struct e1000_hw *hw = &adapter->hw; 4971 u32 vfta, index; 4972 4973 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4974 e1000_irq_disable(adapter); 4975 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4976 e1000_irq_enable(adapter); 4977 4978 /* remove VID from filter table */ 4979 index = (vid >> 5) & 0x7F; 4980 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4981 vfta &= ~(1 << (vid & 0x1F)); 4982 e1000_write_vfta(hw, index, vfta); 4983 4984 clear_bit(vid, adapter->active_vlans); 4985 4986 if (!e1000_vlan_used(adapter)) 4987 e1000_vlan_filter_on_off(adapter, false); 4988 4989 return 0; 4990 } 4991 4992 static void e1000_restore_vlan(struct e1000_adapter *adapter) 4993 { 4994 u16 vid; 4995 4996 if (!e1000_vlan_used(adapter)) 4997 return; 4998 4999 e1000_vlan_filter_on_off(adapter, true); 5000 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 5001 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 5002 } 5003 5004 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) 5005 { 5006 struct e1000_hw *hw = &adapter->hw; 5007 5008 hw->autoneg = 0; 5009 5010 /* Make sure dplx is at most 1 bit and lsb of speed is not set 5011 * for the switch() below to work 5012 */ 5013 if ((spd & 1) || (dplx & ~1)) 5014 goto err_inval; 5015 5016 /* Fiber NICs only allow 1000 gbps Full duplex 
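 * (i.e. 1000 Mbps, full duplex). Editor's note: the switch further below
 * keys on the arithmetic sum spd + dplx, which the sanity check above
 * keeps unambiguous because duplex is 0 or 1 and the supported speeds
 * (10/100/1000) never have the low bit set; e.g. SPEED_100 (100) +
 * DUPLEX_FULL (1) == 101 selects e1000_100_full.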
*/ 5017 if ((hw->media_type == e1000_media_type_fiber) && 5018 spd != SPEED_1000 && 5019 dplx != DUPLEX_FULL) 5020 goto err_inval; 5021 5022 switch (spd + dplx) { 5023 case SPEED_10 + DUPLEX_HALF: 5024 hw->forced_speed_duplex = e1000_10_half; 5025 break; 5026 case SPEED_10 + DUPLEX_FULL: 5027 hw->forced_speed_duplex = e1000_10_full; 5028 break; 5029 case SPEED_100 + DUPLEX_HALF: 5030 hw->forced_speed_duplex = e1000_100_half; 5031 break; 5032 case SPEED_100 + DUPLEX_FULL: 5033 hw->forced_speed_duplex = e1000_100_full; 5034 break; 5035 case SPEED_1000 + DUPLEX_FULL: 5036 hw->autoneg = 1; 5037 hw->autoneg_advertised = ADVERTISE_1000_FULL; 5038 break; 5039 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5040 default: 5041 goto err_inval; 5042 } 5043 5044 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 5045 hw->mdix = AUTO_ALL_MODES; 5046 5047 return 0; 5048 5049 err_inval: 5050 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 5051 return -EINVAL; 5052 } 5053 5054 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 5055 { 5056 struct net_device *netdev = pci_get_drvdata(pdev); 5057 struct e1000_adapter *adapter = netdev_priv(netdev); 5058 struct e1000_hw *hw = &adapter->hw; 5059 u32 ctrl, ctrl_ext, rctl, status; 5060 u32 wufc = adapter->wol; 5061 5062 netif_device_detach(netdev); 5063 5064 if (netif_running(netdev)) { 5065 int count = E1000_CHECK_RESET_COUNT; 5066 5067 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) 5068 usleep_range(10000, 20000); 5069 5070 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 5071 e1000_down(adapter); 5072 } 5073 5074 status = er32(STATUS); 5075 if (status & E1000_STATUS_LU) 5076 wufc &= ~E1000_WUFC_LNKC; 5077 5078 if (wufc) { 5079 e1000_setup_rctl(adapter); 5080 e1000_set_rx_mode(netdev); 5081 5082 rctl = er32(RCTL); 5083 5084 /* turn on all-multi mode if wake on multicast is enabled */ 5085 if (wufc & E1000_WUFC_MC) 5086 rctl |= E1000_RCTL_MPE; 5087 5088 /* enable receives in the hardware */ 5089 ew32(RCTL, rctl | E1000_RCTL_EN); 5090 5091 if (hw->mac_type >= e1000_82540) { 5092 ctrl = er32(CTRL); 5093 /* advertise wake from D3Cold */ 5094 #define E1000_CTRL_ADVD3WUC 0x00100000 5095 /* phy power management enable */ 5096 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5097 ctrl |= E1000_CTRL_ADVD3WUC | 5098 E1000_CTRL_EN_PHY_PWR_MGMT; 5099 ew32(CTRL, ctrl); 5100 } 5101 5102 if (hw->media_type == e1000_media_type_fiber || 5103 hw->media_type == e1000_media_type_internal_serdes) { 5104 /* keep the laser running in D3 */ 5105 ctrl_ext = er32(CTRL_EXT); 5106 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 5107 ew32(CTRL_EXT, ctrl_ext); 5108 } 5109 5110 ew32(WUC, E1000_WUC_PME_EN); 5111 ew32(WUFC, wufc); 5112 } else { 5113 ew32(WUC, 0); 5114 ew32(WUFC, 0); 5115 } 5116 5117 e1000_release_manageability(adapter); 5118 5119 *enable_wake = !!wufc; 5120 5121 /* make sure adapter isn't asleep if manageability is enabled */ 5122 if (adapter->en_mng_pt) 5123 *enable_wake = true; 5124 5125 if (netif_running(netdev)) 5126 e1000_free_irq(adapter); 5127 5128 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5129 pci_disable_device(pdev); 5130 5131 return 0; 5132 } 5133 5134 static int __maybe_unused e1000_suspend(struct device *dev) 5135 { 5136 int retval; 5137 struct pci_dev *pdev = to_pci_dev(dev); 5138 bool wake; 5139 5140 retval = __e1000_shutdown(pdev, &wake); 5141 device_set_wakeup_enable(dev, wake); 5142 5143 return retval; 5144 } 5145 5146 static int __maybe_unused e1000_resume(struct device *dev) 5147 
{ 5148 struct pci_dev *pdev = to_pci_dev(dev); 5149 struct net_device *netdev = pci_get_drvdata(pdev); 5150 struct e1000_adapter *adapter = netdev_priv(netdev); 5151 struct e1000_hw *hw = &adapter->hw; 5152 u32 err; 5153 5154 if (adapter->need_ioport) 5155 err = pci_enable_device(pdev); 5156 else 5157 err = pci_enable_device_mem(pdev); 5158 if (err) { 5159 pr_err("Cannot enable PCI device from suspend\n"); 5160 return err; 5161 } 5162 5163 /* flush memory to make sure state is correct */ 5164 smp_mb__before_atomic(); 5165 clear_bit(__E1000_DISABLED, &adapter->flags); 5166 pci_set_master(pdev); 5167 5168 pci_enable_wake(pdev, PCI_D3hot, 0); 5169 pci_enable_wake(pdev, PCI_D3cold, 0); 5170 5171 if (netif_running(netdev)) { 5172 err = e1000_request_irq(adapter); 5173 if (err) 5174 return err; 5175 } 5176 5177 e1000_power_up_phy(adapter); 5178 e1000_reset(adapter); 5179 ew32(WUS, ~0); 5180 5181 e1000_init_manageability(adapter); 5182 5183 if (netif_running(netdev)) 5184 e1000_up(adapter); 5185 5186 netif_device_attach(netdev); 5187 5188 return 0; 5189 } 5190 5191 static void e1000_shutdown(struct pci_dev *pdev) 5192 { 5193 bool wake; 5194 5195 __e1000_shutdown(pdev, &wake); 5196 5197 if (system_state == SYSTEM_POWER_OFF) { 5198 pci_wake_from_d3(pdev, wake); 5199 pci_set_power_state(pdev, PCI_D3hot); 5200 } 5201 } 5202 5203 #ifdef CONFIG_NET_POLL_CONTROLLER 5204 /* Polling 'interrupt' - used by things like netconsole to send skbs 5205 * without having to re-enable interrupts. It's not called while 5206 * the interrupt routine is executing. 5207 */ 5208 static void e1000_netpoll(struct net_device *netdev) 5209 { 5210 struct e1000_adapter *adapter = netdev_priv(netdev); 5211 5212 if (disable_hardirq(adapter->pdev->irq)) 5213 e1000_intr(adapter->pdev->irq, netdev); 5214 enable_irq(adapter->pdev->irq); 5215 } 5216 #endif 5217 5218 /** 5219 * e1000_io_error_detected - called when PCI error is detected 5220 * @pdev: Pointer to PCI device 5221 * @state: The current pci connection state 5222 * 5223 * This function is called after a PCI bus error affecting 5224 * this device has been detected. 5225 */ 5226 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 5227 pci_channel_state_t state) 5228 { 5229 struct net_device *netdev = pci_get_drvdata(pdev); 5230 struct e1000_adapter *adapter = netdev_priv(netdev); 5231 5232 netif_device_detach(netdev); 5233 5234 if (state == pci_channel_io_perm_failure) 5235 return PCI_ERS_RESULT_DISCONNECT; 5236 5237 if (netif_running(netdev)) 5238 e1000_down(adapter); 5239 5240 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5241 pci_disable_device(pdev); 5242 5243 /* Request a slot slot reset. */ 5244 return PCI_ERS_RESULT_NEED_RESET; 5245 } 5246 5247 /** 5248 * e1000_io_slot_reset - called after the pci bus has been reset. 5249 * @pdev: Pointer to PCI device 5250 * 5251 * Restart the card from scratch, as if from a cold-boot. Implementation 5252 * resembles the first-half of the e1000_resume routine. 
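 *
 * Editor's note: in the PCI error recovery sequence the PCI core invokes
 * the callbacks this driver provides in order: e1000_io_error_detected()
 * above, then this slot reset callback once the slot or link has been
 * reset, and finally e1000_io_resume() below to let traffic flow again.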
5253 */ 5254 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5255 { 5256 struct net_device *netdev = pci_get_drvdata(pdev); 5257 struct e1000_adapter *adapter = netdev_priv(netdev); 5258 struct e1000_hw *hw = &adapter->hw; 5259 int err; 5260 5261 if (adapter->need_ioport) 5262 err = pci_enable_device(pdev); 5263 else 5264 err = pci_enable_device_mem(pdev); 5265 if (err) { 5266 pr_err("Cannot re-enable PCI device after reset.\n"); 5267 return PCI_ERS_RESULT_DISCONNECT; 5268 } 5269 5270 /* flush memory to make sure state is correct */ 5271 smp_mb__before_atomic(); 5272 clear_bit(__E1000_DISABLED, &adapter->flags); 5273 pci_set_master(pdev); 5274 5275 pci_enable_wake(pdev, PCI_D3hot, 0); 5276 pci_enable_wake(pdev, PCI_D3cold, 0); 5277 5278 e1000_reset(adapter); 5279 ew32(WUS, ~0); 5280 5281 return PCI_ERS_RESULT_RECOVERED; 5282 } 5283 5284 /** 5285 * e1000_io_resume - called when traffic can start flowing again. 5286 * @pdev: Pointer to PCI device 5287 * 5288 * This callback is called when the error recovery driver tells us that 5289 * it's OK to resume normal operation. Implementation resembles the 5290 * second-half of the e1000_resume routine. 5291 */ 5292 static void e1000_io_resume(struct pci_dev *pdev) 5293 { 5294 struct net_device *netdev = pci_get_drvdata(pdev); 5295 struct e1000_adapter *adapter = netdev_priv(netdev); 5296 5297 e1000_init_manageability(adapter); 5298 5299 if (netif_running(netdev)) { 5300 if (e1000_up(adapter)) { 5301 pr_info("can't bring device back up after reset\n"); 5302 return; 5303 } 5304 } 5305 5306 netif_device_attach(netdev); 5307 } 5308 5309 /* e1000_main.c */ 5310
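/* Editor's illustration (not part of the driver): the MII ioctls handled
 * by e1000_mii_ioctl() above are typically exercised from user space
 * roughly as follows (error handling omitted; the interface name "eth0"
 * is an assumption):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// driver fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// mii->val_out holds the register
 */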