1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 1999 - 2006 Intel Corporation. */ 3 4 #include "e1000.h" 5 #include <net/ip6_checksum.h> 6 #include <linux/io.h> 7 #include <linux/prefetch.h> 8 #include <linux/bitops.h> 9 #include <linux/if_vlan.h> 10 11 char e1000_driver_name[] = "e1000"; 12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 13 #define DRV_VERSION "7.3.21-k8-NAPI" 14 const char e1000_driver_version[] = DRV_VERSION; 15 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 16 17 /* e1000_pci_tbl - PCI Device ID Table 18 * 19 * Last entry must be all 0s 20 * 21 * Macro expands to... 22 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} 23 */ 24 static const struct pci_device_id e1000_pci_tbl[] = { 25 INTEL_E1000_ETHERNET_DEVICE(0x1000), 26 INTEL_E1000_ETHERNET_DEVICE(0x1001), 27 INTEL_E1000_ETHERNET_DEVICE(0x1004), 28 INTEL_E1000_ETHERNET_DEVICE(0x1008), 29 INTEL_E1000_ETHERNET_DEVICE(0x1009), 30 INTEL_E1000_ETHERNET_DEVICE(0x100C), 31 INTEL_E1000_ETHERNET_DEVICE(0x100D), 32 INTEL_E1000_ETHERNET_DEVICE(0x100E), 33 INTEL_E1000_ETHERNET_DEVICE(0x100F), 34 INTEL_E1000_ETHERNET_DEVICE(0x1010), 35 INTEL_E1000_ETHERNET_DEVICE(0x1011), 36 INTEL_E1000_ETHERNET_DEVICE(0x1012), 37 INTEL_E1000_ETHERNET_DEVICE(0x1013), 38 INTEL_E1000_ETHERNET_DEVICE(0x1014), 39 INTEL_E1000_ETHERNET_DEVICE(0x1015), 40 INTEL_E1000_ETHERNET_DEVICE(0x1016), 41 INTEL_E1000_ETHERNET_DEVICE(0x1017), 42 INTEL_E1000_ETHERNET_DEVICE(0x1018), 43 INTEL_E1000_ETHERNET_DEVICE(0x1019), 44 INTEL_E1000_ETHERNET_DEVICE(0x101A), 45 INTEL_E1000_ETHERNET_DEVICE(0x101D), 46 INTEL_E1000_ETHERNET_DEVICE(0x101E), 47 INTEL_E1000_ETHERNET_DEVICE(0x1026), 48 INTEL_E1000_ETHERNET_DEVICE(0x1027), 49 INTEL_E1000_ETHERNET_DEVICE(0x1028), 50 INTEL_E1000_ETHERNET_DEVICE(0x1075), 51 INTEL_E1000_ETHERNET_DEVICE(0x1076), 52 INTEL_E1000_ETHERNET_DEVICE(0x1077), 53 INTEL_E1000_ETHERNET_DEVICE(0x1078), 54 INTEL_E1000_ETHERNET_DEVICE(0x1079), 55 INTEL_E1000_ETHERNET_DEVICE(0x107A), 56 INTEL_E1000_ETHERNET_DEVICE(0x107B), 57 INTEL_E1000_ETHERNET_DEVICE(0x107C), 58 INTEL_E1000_ETHERNET_DEVICE(0x108A), 59 INTEL_E1000_ETHERNET_DEVICE(0x1099), 60 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 61 INTEL_E1000_ETHERNET_DEVICE(0x2E6E), 62 /* required last entry */ 63 {0,} 64 }; 65 66 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 67 68 int e1000_up(struct e1000_adapter *adapter); 69 void e1000_down(struct e1000_adapter *adapter); 70 void e1000_reinit_locked(struct e1000_adapter *adapter); 71 void e1000_reset(struct e1000_adapter *adapter); 72 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 73 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 74 void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 75 void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 76 static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 77 struct e1000_tx_ring *txdr); 78 static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 79 struct e1000_rx_ring *rxdr); 80 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 81 struct e1000_tx_ring *tx_ring); 82 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 83 struct e1000_rx_ring *rx_ring); 84 void e1000_update_stats(struct e1000_adapter *adapter); 85 86 static int e1000_init_module(void); 87 static void e1000_exit_module(void); 88 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 89 static void e1000_remove(struct pci_dev *pdev); 90 static int e1000_alloc_queues(struct e1000_adapter *adapter); 91 static int e1000_sw_init(struct e1000_adapter *adapter); 92 int e1000_open(struct net_device *netdev); 93 int e1000_close(struct net_device *netdev); 94 static void e1000_configure_tx(struct e1000_adapter *adapter); 95 static void e1000_configure_rx(struct e1000_adapter *adapter); 96 static void e1000_setup_rctl(struct e1000_adapter *adapter); 97 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); 98 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); 99 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 100 struct e1000_tx_ring *tx_ring); 101 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 102 struct e1000_rx_ring *rx_ring); 103 static void e1000_set_rx_mode(struct net_device *netdev); 104 static void e1000_update_phy_info_task(struct work_struct *work); 105 static void e1000_watchdog(struct work_struct *work); 106 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); 107 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 108 struct net_device *netdev); 109 static int e1000_change_mtu(struct net_device *netdev, int new_mtu); 110 static int e1000_set_mac(struct net_device *netdev, void *p); 111 static irqreturn_t e1000_intr(int irq, void *data); 112 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 113 struct e1000_tx_ring *tx_ring); 114 static int e1000_clean(struct napi_struct *napi, int budget); 115 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 116 struct e1000_rx_ring *rx_ring, 117 int *work_done, int work_to_do); 118 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 119 struct e1000_rx_ring *rx_ring, 120 int *work_done, int work_to_do); 121 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, 122 struct e1000_rx_ring *rx_ring, 123 int cleaned_count) 124 { 125 } 126 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 127 struct e1000_rx_ring *rx_ring, 128 int cleaned_count); 129 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 130 struct e1000_rx_ring *rx_ring, 131 int cleaned_count); 132 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 133 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 134 int cmd); 135 static void e1000_enter_82542_rst(struct e1000_adapter *adapter); 136 static void e1000_leave_82542_rst(struct e1000_adapter *adapter); 137 static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); 138 static void e1000_reset_task(struct work_struct *work); 139 static void e1000_smartspeed(struct e1000_adapter *adapter); 140 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 141 struct sk_buff *skb); 142 143 static bool e1000_vlan_used(struct e1000_adapter *adapter); 144 static void e1000_vlan_mode(struct net_device *netdev, 145 netdev_features_t features); 146 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 147 bool filter_on); 148 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 149 __be16 proto, u16 vid); 150 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 151 __be16 proto, u16 vid); 152 static void e1000_restore_vlan(struct e1000_adapter *adapter); 153 154 #ifdef CONFIG_PM 155 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); 156 static int e1000_resume(struct pci_dev *pdev); 157 #endif 158 static void e1000_shutdown(struct pci_dev *pdev); 159 160 #ifdef CONFIG_NET_POLL_CONTROLLER 161 /* for netdump / net console */ 162 static void e1000_netpoll (struct net_device *netdev); 163 #endif 164 165 #define COPYBREAK_DEFAULT 256 166 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; 167 module_param(copybreak, uint, 0644); 168 MODULE_PARM_DESC(copybreak, 169 "Maximum size of packet that is copied to a new buffer on receive"); 170 171 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 172 pci_channel_state_t state); 173 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); 174 static void e1000_io_resume(struct pci_dev *pdev); 175 176 static const struct pci_error_handlers e1000_err_handler = { 177 .error_detected = e1000_io_error_detected, 178 .slot_reset = e1000_io_slot_reset, 179 .resume = e1000_io_resume, 180 }; 181 182 static struct pci_driver e1000_driver = { 183 .name = e1000_driver_name, 184 .id_table = e1000_pci_tbl, 185 .probe = e1000_probe, 186 .remove = e1000_remove, 187 #ifdef CONFIG_PM 188 /* Power Management Hooks */ 189 .suspend = e1000_suspend, 190 .resume = e1000_resume, 191 #endif 192 .shutdown = e1000_shutdown, 193 .err_handler = &e1000_err_handler 194 }; 195 196 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 197 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); 198 MODULE_LICENSE("GPL v2"); 199 MODULE_VERSION(DRV_VERSION); 200 201 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 202 static int debug = -1; 203 module_param(debug, int, 0); 204 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 205 206 /** 207 * e1000_get_hw_dev - return device 208 * used by hardware layer to print debugging information 209 * 210 **/ 211 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) 212 { 213 struct e1000_adapter *adapter = hw->back; 214 return adapter->netdev; 215 } 216 217 /** 218 * e1000_init_module - Driver Registration Routine 219 * 220 * e1000_init_module is the first routine called when the driver is 221 * loaded. All it does is register with the PCI subsystem. 222 **/ 223 static int __init e1000_init_module(void) 224 { 225 int ret; 226 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); 227 228 pr_info("%s\n", e1000_copyright); 229 230 ret = pci_register_driver(&e1000_driver); 231 if (copybreak != COPYBREAK_DEFAULT) { 232 if (copybreak == 0) 233 pr_info("copybreak disabled\n"); 234 else 235 pr_info("copybreak enabled for " 236 "packets <= %u bytes\n", copybreak); 237 } 238 return ret; 239 } 240 241 module_init(e1000_init_module); 242 243 /** 244 * e1000_exit_module - Driver Exit Cleanup Routine 245 * 246 * e1000_exit_module is called just before the driver is removed 247 * from memory. 248 **/ 249 static void __exit e1000_exit_module(void) 250 { 251 pci_unregister_driver(&e1000_driver); 252 } 253 254 module_exit(e1000_exit_module); 255 256 static int e1000_request_irq(struct e1000_adapter *adapter) 257 { 258 struct net_device *netdev = adapter->netdev; 259 irq_handler_t handler = e1000_intr; 260 int irq_flags = IRQF_SHARED; 261 int err; 262 263 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 264 netdev); 265 if (err) { 266 e_err(probe, "Unable to allocate interrupt Error: %d\n", err); 267 } 268 269 return err; 270 } 271 272 static void e1000_free_irq(struct e1000_adapter *adapter) 273 { 274 struct net_device *netdev = adapter->netdev; 275 276 free_irq(adapter->pdev->irq, netdev); 277 } 278 279 /** 280 * e1000_irq_disable - Mask off interrupt generation on the NIC 281 * @adapter: board private structure 282 **/ 283 static void e1000_irq_disable(struct e1000_adapter *adapter) 284 { 285 struct e1000_hw *hw = &adapter->hw; 286 287 ew32(IMC, ~0); 288 E1000_WRITE_FLUSH(); 289 synchronize_irq(adapter->pdev->irq); 290 } 291 292 /** 293 * e1000_irq_enable - Enable default interrupt generation settings 294 * @adapter: board private structure 295 **/ 296 static void e1000_irq_enable(struct e1000_adapter *adapter) 297 { 298 struct e1000_hw *hw = &adapter->hw; 299 300 ew32(IMS, IMS_ENABLE_MASK); 301 E1000_WRITE_FLUSH(); 302 } 303 304 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 305 { 306 struct e1000_hw *hw = &adapter->hw; 307 struct net_device *netdev = adapter->netdev; 308 u16 vid = hw->mng_cookie.vlan_id; 309 u16 old_vid = adapter->mng_vlan_id; 310 311 if (!e1000_vlan_used(adapter)) 312 return; 313 314 if (!test_bit(vid, adapter->active_vlans)) { 315 if (hw->mng_cookie.status & 316 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 317 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 318 adapter->mng_vlan_id = vid; 319 } else { 320 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 321 } 322 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && 323 (vid != old_vid) && 324 !test_bit(old_vid, adapter->active_vlans)) 325 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 326 old_vid); 327 } else { 328 adapter->mng_vlan_id = vid; 329 } 330 } 331 332 static void e1000_init_manageability(struct e1000_adapter *adapter) 333 { 334 struct e1000_hw *hw = &adapter->hw; 335 336 if (adapter->en_mng_pt) { 337 u32 manc = er32(MANC); 338 339 /* disable hardware interception of ARP */ 340 manc &= ~(E1000_MANC_ARP_EN); 341 342 ew32(MANC, manc); 343 } 344 } 345 346 static void e1000_release_manageability(struct e1000_adapter *adapter) 347 { 348 struct e1000_hw *hw = &adapter->hw; 349 350 if (adapter->en_mng_pt) { 351 u32 manc = er32(MANC); 352 353 /* re-enable hardware interception of ARP */ 354 manc |= E1000_MANC_ARP_EN; 355 356 ew32(MANC, manc); 357 } 358 } 359 360 /** 361 * e1000_configure - configure the hardware for RX and TX 362 * @adapter = private board structure 363 **/ 364 static void e1000_configure(struct e1000_adapter *adapter) 365 { 366 struct net_device *netdev = adapter->netdev; 367 int i; 368 369 e1000_set_rx_mode(netdev); 370 371 e1000_restore_vlan(adapter); 372 e1000_init_manageability(adapter); 373 374 e1000_configure_tx(adapter); 375 e1000_setup_rctl(adapter); 376 e1000_configure_rx(adapter); 377 /* call E1000_DESC_UNUSED which always leaves 378 * at least 1 descriptor unused to make sure 379 * next_to_use != next_to_clean 380 */ 381 for (i = 0; i < adapter->num_rx_queues; i++) { 382 struct e1000_rx_ring *ring = &adapter->rx_ring[i]; 383 adapter->alloc_rx_buf(adapter, ring, 384 E1000_DESC_UNUSED(ring)); 385 } 386 } 387 388 int e1000_up(struct e1000_adapter *adapter) 389 { 390 struct e1000_hw *hw = &adapter->hw; 391 392 /* hardware has been reset, we need to reload some things */ 393 e1000_configure(adapter); 394 395 clear_bit(__E1000_DOWN, &adapter->flags); 396 397 napi_enable(&adapter->napi); 398 399 e1000_irq_enable(adapter); 400 401 netif_wake_queue(adapter->netdev); 402 403 /* fire a link change interrupt to start the watchdog */ 404 ew32(ICS, E1000_ICS_LSC); 405 return 0; 406 } 407 408 /** 409 * e1000_power_up_phy - restore link in case the phy was powered down 410 * @adapter: address of board private structure 411 * 412 * The phy may be powered down to save power and turn off link when the 413 * driver is unloaded and wake on lan is not enabled (among others) 414 * *** this routine MUST be followed by a call to e1000_reset *** 415 **/ 416 void e1000_power_up_phy(struct e1000_adapter *adapter) 417 { 418 struct e1000_hw *hw = &adapter->hw; 419 u16 mii_reg = 0; 420 421 /* Just clear the power down bit to wake the phy back up */ 422 if (hw->media_type == e1000_media_type_copper) { 423 /* according to the manual, the phy will retain its 424 * settings across a power-down/up cycle 425 */ 426 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); 427 mii_reg &= ~MII_CR_POWER_DOWN; 428 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); 429 } 430 } 431 432 static void e1000_power_down_phy(struct e1000_adapter *adapter) 433 { 434 struct e1000_hw *hw = &adapter->hw; 435 436 /* Power down the PHY so no link is implied when interface is down * 437 * The PHY cannot be powered down if any of the following is true * 438 * (a) WoL is enabled 439 * (b) AMT is active 440 * (c) SoL/IDER session is active 441 */ 442 if (!adapter->wol && hw->mac_type >= e1000_82540 && 443 hw->media_type == e1000_media_type_copper) { 444 u16 mii_reg = 0; 445 446 switch (hw->mac_type) { 447 case e1000_82540: 448 case e1000_82545: 449 case e1000_82545_rev_3: 450 case e1000_82546: 451 case e1000_ce4100: 452 case e1000_82546_rev_3: 453 case e1000_82541: 454 case e1000_82541_rev_2: 455 case e1000_82547: 456 case e1000_82547_rev_2: 457 if (er32(MANC) & E1000_MANC_SMBUS_EN) 458 goto out; 459 break; 460 default: 461 goto out; 462 } 463 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); 464 mii_reg |= MII_CR_POWER_DOWN; 465 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); 466 msleep(1); 467 } 468 out: 469 return; 470 } 471 472 static void e1000_down_and_stop(struct e1000_adapter *adapter) 473 { 474 set_bit(__E1000_DOWN, &adapter->flags); 475 476 cancel_delayed_work_sync(&adapter->watchdog_task); 477 478 /* 479 * Since the watchdog task can reschedule other tasks, we should cancel 480 * it first, otherwise we can run into the situation when a work is 481 * still running after the adapter has been turned down. 482 */ 483 484 cancel_delayed_work_sync(&adapter->phy_info_task); 485 cancel_delayed_work_sync(&adapter->fifo_stall_task); 486 487 /* Only kill reset task if adapter is not resetting */ 488 if (!test_bit(__E1000_RESETTING, &adapter->flags)) 489 cancel_work_sync(&adapter->reset_task); 490 } 491 492 void e1000_down(struct e1000_adapter *adapter) 493 { 494 struct e1000_hw *hw = &adapter->hw; 495 struct net_device *netdev = adapter->netdev; 496 u32 rctl, tctl; 497 498 /* disable receives in the hardware */ 499 rctl = er32(RCTL); 500 ew32(RCTL, rctl & ~E1000_RCTL_EN); 501 /* flush and sleep below */ 502 503 netif_tx_disable(netdev); 504 505 /* disable transmits in the hardware */ 506 tctl = er32(TCTL); 507 tctl &= ~E1000_TCTL_EN; 508 ew32(TCTL, tctl); 509 /* flush both disables and wait for them to finish */ 510 E1000_WRITE_FLUSH(); 511 msleep(10); 512 513 /* Set the carrier off after transmits have been disabled in the 514 * hardware, to avoid race conditions with e1000_watchdog() (which 515 * may be running concurrently to us, checking for the carrier 516 * bit to decide whether it should enable transmits again). Such 517 * a race condition would result into transmission being disabled 518 * in the hardware until the next IFF_DOWN+IFF_UP cycle. 519 */ 520 netif_carrier_off(netdev); 521 522 napi_disable(&adapter->napi); 523 524 e1000_irq_disable(adapter); 525 526 /* Setting DOWN must be after irq_disable to prevent 527 * a screaming interrupt. Setting DOWN also prevents 528 * tasks from rescheduling. 529 */ 530 e1000_down_and_stop(adapter); 531 532 adapter->link_speed = 0; 533 adapter->link_duplex = 0; 534 535 e1000_reset(adapter); 536 e1000_clean_all_tx_rings(adapter); 537 e1000_clean_all_rx_rings(adapter); 538 } 539 540 void e1000_reinit_locked(struct e1000_adapter *adapter) 541 { 542 WARN_ON(in_interrupt()); 543 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 544 msleep(1); 545 e1000_down(adapter); 546 e1000_up(adapter); 547 clear_bit(__E1000_RESETTING, &adapter->flags); 548 } 549 550 void e1000_reset(struct e1000_adapter *adapter) 551 { 552 struct e1000_hw *hw = &adapter->hw; 553 u32 pba = 0, tx_space, min_tx_space, min_rx_space; 554 bool legacy_pba_adjust = false; 555 u16 hwm; 556 557 /* Repartition Pba for greater than 9k mtu 558 * To take effect CTRL.RST is required. 559 */ 560 561 switch (hw->mac_type) { 562 case e1000_82542_rev2_0: 563 case e1000_82542_rev2_1: 564 case e1000_82543: 565 case e1000_82544: 566 case e1000_82540: 567 case e1000_82541: 568 case e1000_82541_rev_2: 569 legacy_pba_adjust = true; 570 pba = E1000_PBA_48K; 571 break; 572 case e1000_82545: 573 case e1000_82545_rev_3: 574 case e1000_82546: 575 case e1000_ce4100: 576 case e1000_82546_rev_3: 577 pba = E1000_PBA_48K; 578 break; 579 case e1000_82547: 580 case e1000_82547_rev_2: 581 legacy_pba_adjust = true; 582 pba = E1000_PBA_30K; 583 break; 584 case e1000_undefined: 585 case e1000_num_macs: 586 break; 587 } 588 589 if (legacy_pba_adjust) { 590 if (hw->max_frame_size > E1000_RXBUFFER_8192) 591 pba -= 8; /* allocate more FIFO for Tx */ 592 593 if (hw->mac_type == e1000_82547) { 594 adapter->tx_fifo_head = 0; 595 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 596 adapter->tx_fifo_size = 597 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 598 atomic_set(&adapter->tx_fifo_stall, 0); 599 } 600 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 601 /* adjust PBA for jumbo frames */ 602 ew32(PBA, pba); 603 604 /* To maintain wire speed transmits, the Tx FIFO should be 605 * large enough to accommodate two full transmit packets, 606 * rounded up to the next 1KB and expressed in KB. Likewise, 607 * the Rx FIFO should be large enough to accommodate at least 608 * one full receive packet and is similarly rounded up and 609 * expressed in KB. 610 */ 611 pba = er32(PBA); 612 /* upper 16 bits has Tx packet buffer allocation size in KB */ 613 tx_space = pba >> 16; 614 /* lower 16 bits has Rx packet buffer allocation size in KB */ 615 pba &= 0xffff; 616 /* the Tx fifo also stores 16 bytes of information about the Tx 617 * but don't include ethernet FCS because hardware appends it 618 */ 619 min_tx_space = (hw->max_frame_size + 620 sizeof(struct e1000_tx_desc) - 621 ETH_FCS_LEN) * 2; 622 min_tx_space = ALIGN(min_tx_space, 1024); 623 min_tx_space >>= 10; 624 /* software strips receive CRC, so leave room for it */ 625 min_rx_space = hw->max_frame_size; 626 min_rx_space = ALIGN(min_rx_space, 1024); 627 min_rx_space >>= 10; 628 629 /* If current Tx allocation is less than the min Tx FIFO size, 630 * and the min Tx FIFO size is less than the current Rx FIFO 631 * allocation, take space away from current Rx allocation 632 */ 633 if (tx_space < min_tx_space && 634 ((min_tx_space - tx_space) < pba)) { 635 pba = pba - (min_tx_space - tx_space); 636 637 /* PCI/PCIx hardware has PBA alignment constraints */ 638 switch (hw->mac_type) { 639 case e1000_82545 ... e1000_82546_rev_3: 640 pba &= ~(E1000_PBA_8K - 1); 641 break; 642 default: 643 break; 644 } 645 646 /* if short on Rx space, Rx wins and must trump Tx 647 * adjustment or use Early Receive if available 648 */ 649 if (pba < min_rx_space) 650 pba = min_rx_space; 651 } 652 } 653 654 ew32(PBA, pba); 655 656 /* flow control settings: 657 * The high water mark must be low enough to fit one full frame 658 * (or the size used for early receive) above it in the Rx FIFO. 659 * Set it to the lower of: 660 * - 90% of the Rx FIFO size, and 661 * - the full Rx FIFO size minus the early receive size (for parts 662 * with ERT support assuming ERT set to E1000_ERT_2048), or 663 * - the full Rx FIFO size minus one full frame 664 */ 665 hwm = min(((pba << 10) * 9 / 10), 666 ((pba << 10) - hw->max_frame_size)); 667 668 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ 669 hw->fc_low_water = hw->fc_high_water - 8; 670 hw->fc_pause_time = E1000_FC_PAUSE_TIME; 671 hw->fc_send_xon = 1; 672 hw->fc = hw->original_fc; 673 674 /* Allow time for pending master requests to run */ 675 e1000_reset_hw(hw); 676 if (hw->mac_type >= e1000_82544) 677 ew32(WUC, 0); 678 679 if (e1000_init_hw(hw)) 680 e_dev_err("Hardware Error\n"); 681 e1000_update_mng_vlan(adapter); 682 683 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ 684 if (hw->mac_type >= e1000_82544 && 685 hw->autoneg == 1 && 686 hw->autoneg_advertised == ADVERTISE_1000_FULL) { 687 u32 ctrl = er32(CTRL); 688 /* clear phy power management bit if we are in gig only mode, 689 * which if enabled will attempt negotiation to 100Mb, which 690 * can cause a loss of link at power off or driver unload 691 */ 692 ctrl &= ~E1000_CTRL_SWDPIN3; 693 ew32(CTRL, ctrl); 694 } 695 696 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 697 ew32(VET, ETHERNET_IEEE_VLAN_TYPE); 698 699 e1000_reset_adaptive(hw); 700 e1000_phy_get_info(hw, &adapter->phy_info); 701 702 e1000_release_manageability(adapter); 703 } 704 705 /* Dump the eeprom for users having checksum issues */ 706 static void e1000_dump_eeprom(struct e1000_adapter *adapter) 707 { 708 struct net_device *netdev = adapter->netdev; 709 struct ethtool_eeprom eeprom; 710 const struct ethtool_ops *ops = netdev->ethtool_ops; 711 u8 *data; 712 int i; 713 u16 csum_old, csum_new = 0; 714 715 eeprom.len = ops->get_eeprom_len(netdev); 716 eeprom.offset = 0; 717 718 data = kmalloc(eeprom.len, GFP_KERNEL); 719 if (!data) 720 return; 721 722 ops->get_eeprom(netdev, &eeprom, data); 723 724 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + 725 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); 726 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) 727 csum_new += data[i] + (data[i + 1] << 8); 728 csum_new = EEPROM_SUM - csum_new; 729 730 pr_err("/*********************/\n"); 731 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); 732 pr_err("Calculated : 0x%04x\n", csum_new); 733 734 pr_err("Offset Values\n"); 735 pr_err("======== ======\n"); 736 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); 737 738 pr_err("Include this output when contacting your support provider.\n"); 739 pr_err("This is not a software error! Something bad happened to\n"); 740 pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); 741 pr_err("result in further problems, possibly loss of data,\n"); 742 pr_err("corruption or system hangs!\n"); 743 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); 744 pr_err("which is invalid and requires you to set the proper MAC\n"); 745 pr_err("address manually before continuing to enable this network\n"); 746 pr_err("device. Please inspect the EEPROM dump and report the\n"); 747 pr_err("issue to your hardware vendor or Intel Customer Support.\n"); 748 pr_err("/*********************/\n"); 749 750 kfree(data); 751 } 752 753 /** 754 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not 755 * @pdev: PCI device information struct 756 * 757 * Return true if an adapter needs ioport resources 758 **/ 759 static int e1000_is_need_ioport(struct pci_dev *pdev) 760 { 761 switch (pdev->device) { 762 case E1000_DEV_ID_82540EM: 763 case E1000_DEV_ID_82540EM_LOM: 764 case E1000_DEV_ID_82540EP: 765 case E1000_DEV_ID_82540EP_LOM: 766 case E1000_DEV_ID_82540EP_LP: 767 case E1000_DEV_ID_82541EI: 768 case E1000_DEV_ID_82541EI_MOBILE: 769 case E1000_DEV_ID_82541ER: 770 case E1000_DEV_ID_82541ER_LOM: 771 case E1000_DEV_ID_82541GI: 772 case E1000_DEV_ID_82541GI_LF: 773 case E1000_DEV_ID_82541GI_MOBILE: 774 case E1000_DEV_ID_82544EI_COPPER: 775 case E1000_DEV_ID_82544EI_FIBER: 776 case E1000_DEV_ID_82544GC_COPPER: 777 case E1000_DEV_ID_82544GC_LOM: 778 case E1000_DEV_ID_82545EM_COPPER: 779 case E1000_DEV_ID_82545EM_FIBER: 780 case E1000_DEV_ID_82546EB_COPPER: 781 case E1000_DEV_ID_82546EB_FIBER: 782 case E1000_DEV_ID_82546EB_QUAD_COPPER: 783 return true; 784 default: 785 return false; 786 } 787 } 788 789 static netdev_features_t e1000_fix_features(struct net_device *netdev, 790 netdev_features_t features) 791 { 792 /* Since there is no support for separate Rx/Tx vlan accel 793 * enable/disable make sure Tx flag is always in same state as Rx. 794 */ 795 if (features & NETIF_F_HW_VLAN_CTAG_RX) 796 features |= NETIF_F_HW_VLAN_CTAG_TX; 797 else 798 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 799 800 return features; 801 } 802 803 static int e1000_set_features(struct net_device *netdev, 804 netdev_features_t features) 805 { 806 struct e1000_adapter *adapter = netdev_priv(netdev); 807 netdev_features_t changed = features ^ netdev->features; 808 809 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 810 e1000_vlan_mode(netdev, features); 811 812 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) 813 return 0; 814 815 netdev->features = features; 816 adapter->rx_csum = !!(features & NETIF_F_RXCSUM); 817 818 if (netif_running(netdev)) 819 e1000_reinit_locked(adapter); 820 else 821 e1000_reset(adapter); 822 823 return 1; 824 } 825 826 static const struct net_device_ops e1000_netdev_ops = { 827 .ndo_open = e1000_open, 828 .ndo_stop = e1000_close, 829 .ndo_start_xmit = e1000_xmit_frame, 830 .ndo_set_rx_mode = e1000_set_rx_mode, 831 .ndo_set_mac_address = e1000_set_mac, 832 .ndo_tx_timeout = e1000_tx_timeout, 833 .ndo_change_mtu = e1000_change_mtu, 834 .ndo_do_ioctl = e1000_ioctl, 835 .ndo_validate_addr = eth_validate_addr, 836 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, 837 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, 838 #ifdef CONFIG_NET_POLL_CONTROLLER 839 .ndo_poll_controller = e1000_netpoll, 840 #endif 841 .ndo_fix_features = e1000_fix_features, 842 .ndo_set_features = e1000_set_features, 843 }; 844 845 /** 846 * e1000_init_hw_struct - initialize members of hw struct 847 * @adapter: board private struct 848 * @hw: structure used by e1000_hw.c 849 * 850 * Factors out initialization of the e1000_hw struct to its own function 851 * that can be called very early at init (just after struct allocation). 852 * Fields are initialized based on PCI device information and 853 * OS network device settings (MTU size). 854 * Returns negative error codes if MAC type setup fails. 855 */ 856 static int e1000_init_hw_struct(struct e1000_adapter *adapter, 857 struct e1000_hw *hw) 858 { 859 struct pci_dev *pdev = adapter->pdev; 860 861 /* PCI config space info */ 862 hw->vendor_id = pdev->vendor; 863 hw->device_id = pdev->device; 864 hw->subsystem_vendor_id = pdev->subsystem_vendor; 865 hw->subsystem_id = pdev->subsystem_device; 866 hw->revision_id = pdev->revision; 867 868 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 869 870 hw->max_frame_size = adapter->netdev->mtu + 871 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 872 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; 873 874 /* identify the MAC */ 875 if (e1000_set_mac_type(hw)) { 876 e_err(probe, "Unknown MAC Type\n"); 877 return -EIO; 878 } 879 880 switch (hw->mac_type) { 881 default: 882 break; 883 case e1000_82541: 884 case e1000_82547: 885 case e1000_82541_rev_2: 886 case e1000_82547_rev_2: 887 hw->phy_init_script = 1; 888 break; 889 } 890 891 e1000_set_media_type(hw); 892 e1000_get_bus_info(hw); 893 894 hw->wait_autoneg_complete = false; 895 hw->tbi_compatibility_en = true; 896 hw->adaptive_ifs = true; 897 898 /* Copper options */ 899 900 if (hw->media_type == e1000_media_type_copper) { 901 hw->mdix = AUTO_ALL_MODES; 902 hw->disable_polarity_correction = false; 903 hw->master_slave = E1000_MASTER_SLAVE; 904 } 905 906 return 0; 907 } 908 909 /** 910 * e1000_probe - Device Initialization Routine 911 * @pdev: PCI device information struct 912 * @ent: entry in e1000_pci_tbl 913 * 914 * Returns 0 on success, negative on failure 915 * 916 * e1000_probe initializes an adapter identified by a pci_dev structure. 917 * The OS initialization, configuring of the adapter private structure, 918 * and a hardware reset occur. 919 **/ 920 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 921 { 922 struct net_device *netdev; 923 struct e1000_adapter *adapter = NULL; 924 struct e1000_hw *hw; 925 926 static int cards_found; 927 static int global_quad_port_a; /* global ksp3 port a indication */ 928 int i, err, pci_using_dac; 929 u16 eeprom_data = 0; 930 u16 tmp = 0; 931 u16 eeprom_apme_mask = E1000_EEPROM_APME; 932 int bars, need_ioport; 933 bool disable_dev = false; 934 935 /* do not allocate ioport bars when not needed */ 936 need_ioport = e1000_is_need_ioport(pdev); 937 if (need_ioport) { 938 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 939 err = pci_enable_device(pdev); 940 } else { 941 bars = pci_select_bars(pdev, IORESOURCE_MEM); 942 err = pci_enable_device_mem(pdev); 943 } 944 if (err) 945 return err; 946 947 err = pci_request_selected_regions(pdev, bars, e1000_driver_name); 948 if (err) 949 goto err_pci_reg; 950 951 pci_set_master(pdev); 952 err = pci_save_state(pdev); 953 if (err) 954 goto err_alloc_etherdev; 955 956 err = -ENOMEM; 957 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 958 if (!netdev) 959 goto err_alloc_etherdev; 960 961 SET_NETDEV_DEV(netdev, &pdev->dev); 962 963 pci_set_drvdata(pdev, netdev); 964 adapter = netdev_priv(netdev); 965 adapter->netdev = netdev; 966 adapter->pdev = pdev; 967 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 968 adapter->bars = bars; 969 adapter->need_ioport = need_ioport; 970 971 hw = &adapter->hw; 972 hw->back = adapter; 973 974 err = -EIO; 975 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); 976 if (!hw->hw_addr) 977 goto err_ioremap; 978 979 if (adapter->need_ioport) { 980 for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { 981 if (pci_resource_len(pdev, i) == 0) 982 continue; 983 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { 984 hw->io_base = pci_resource_start(pdev, i); 985 break; 986 } 987 } 988 } 989 990 /* make ready for any if (hw->...) below */ 991 err = e1000_init_hw_struct(adapter, hw); 992 if (err) 993 goto err_sw_init; 994 995 /* there is a workaround being applied below that limits 996 * 64-bit DMA addresses to 64-bit hardware. There are some 997 * 32-bit adapters that Tx hang when given 64-bit DMA addresses 998 */ 999 pci_using_dac = 0; 1000 if ((hw->bus_type == e1000_bus_type_pcix) && 1001 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 1002 pci_using_dac = 1; 1003 } else { 1004 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1005 if (err) { 1006 pr_err("No usable DMA config, aborting\n"); 1007 goto err_dma; 1008 } 1009 } 1010 1011 netdev->netdev_ops = &e1000_netdev_ops; 1012 e1000_set_ethtool_ops(netdev); 1013 netdev->watchdog_timeo = 5 * HZ; 1014 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 1015 1016 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1017 1018 adapter->bd_number = cards_found; 1019 1020 /* setup the private structure */ 1021 1022 err = e1000_sw_init(adapter); 1023 if (err) 1024 goto err_sw_init; 1025 1026 err = -EIO; 1027 if (hw->mac_type == e1000_ce4100) { 1028 hw->ce4100_gbe_mdio_base_virt = 1029 ioremap(pci_resource_start(pdev, BAR_1), 1030 pci_resource_len(pdev, BAR_1)); 1031 1032 if (!hw->ce4100_gbe_mdio_base_virt) 1033 goto err_mdio_ioremap; 1034 } 1035 1036 if (hw->mac_type >= e1000_82543) { 1037 netdev->hw_features = NETIF_F_SG | 1038 NETIF_F_HW_CSUM | 1039 NETIF_F_HW_VLAN_CTAG_RX; 1040 netdev->features = NETIF_F_HW_VLAN_CTAG_TX | 1041 NETIF_F_HW_VLAN_CTAG_FILTER; 1042 } 1043 1044 if ((hw->mac_type >= e1000_82544) && 1045 (hw->mac_type != e1000_82547)) 1046 netdev->hw_features |= NETIF_F_TSO; 1047 1048 netdev->priv_flags |= IFF_SUPP_NOFCS; 1049 1050 netdev->features |= netdev->hw_features; 1051 netdev->hw_features |= (NETIF_F_RXCSUM | 1052 NETIF_F_RXALL | 1053 NETIF_F_RXFCS); 1054 1055 if (pci_using_dac) { 1056 netdev->features |= NETIF_F_HIGHDMA; 1057 netdev->vlan_features |= NETIF_F_HIGHDMA; 1058 } 1059 1060 netdev->vlan_features |= (NETIF_F_TSO | 1061 NETIF_F_HW_CSUM | 1062 NETIF_F_SG); 1063 1064 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ 1065 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || 1066 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) 1067 netdev->priv_flags |= IFF_UNICAST_FLT; 1068 1069 /* MTU range: 46 - 16110 */ 1070 netdev->min_mtu = ETH_ZLEN - ETH_HLEN; 1071 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); 1072 1073 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); 1074 1075 /* initialize eeprom parameters */ 1076 if (e1000_init_eeprom_params(hw)) { 1077 e_err(probe, "EEPROM initialization failed\n"); 1078 goto err_eeprom; 1079 } 1080 1081 /* before reading the EEPROM, reset the controller to 1082 * put the device in a known good starting state 1083 */ 1084 1085 e1000_reset_hw(hw); 1086 1087 /* make sure the EEPROM is good */ 1088 if (e1000_validate_eeprom_checksum(hw) < 0) { 1089 e_err(probe, "The EEPROM Checksum Is Not Valid\n"); 1090 e1000_dump_eeprom(adapter); 1091 /* set MAC address to all zeroes to invalidate and temporary 1092 * disable this device for the user. This blocks regular 1093 * traffic while still permitting ethtool ioctls from reaching 1094 * the hardware as well as allowing the user to run the 1095 * interface after manually setting a hw addr using 1096 * `ip set address` 1097 */ 1098 memset(hw->mac_addr, 0, netdev->addr_len); 1099 } else { 1100 /* copy the MAC address out of the EEPROM */ 1101 if (e1000_read_mac_addr(hw)) 1102 e_err(probe, "EEPROM Read Error\n"); 1103 } 1104 /* don't block initialization here due to bad MAC address */ 1105 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); 1106 1107 if (!is_valid_ether_addr(netdev->dev_addr)) 1108 e_err(probe, "Invalid MAC Address\n"); 1109 1110 1111 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); 1112 INIT_DELAYED_WORK(&adapter->fifo_stall_task, 1113 e1000_82547_tx_fifo_stall_task); 1114 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); 1115 INIT_WORK(&adapter->reset_task, e1000_reset_task); 1116 1117 e1000_check_options(adapter); 1118 1119 /* Initial Wake on LAN setting 1120 * If APM wake is enabled in the EEPROM, 1121 * enable the ACPI Magic Packet filter 1122 */ 1123 1124 switch (hw->mac_type) { 1125 case e1000_82542_rev2_0: 1126 case e1000_82542_rev2_1: 1127 case e1000_82543: 1128 break; 1129 case e1000_82544: 1130 e1000_read_eeprom(hw, 1131 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); 1132 eeprom_apme_mask = E1000_EEPROM_82544_APM; 1133 break; 1134 case e1000_82546: 1135 case e1000_82546_rev_3: 1136 if (er32(STATUS) & E1000_STATUS_FUNC_1) { 1137 e1000_read_eeprom(hw, 1138 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 1139 break; 1140 } 1141 /* Fall Through */ 1142 default: 1143 e1000_read_eeprom(hw, 1144 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 1145 break; 1146 } 1147 if (eeprom_data & eeprom_apme_mask) 1148 adapter->eeprom_wol |= E1000_WUFC_MAG; 1149 1150 /* now that we have the eeprom settings, apply the special cases 1151 * where the eeprom may be wrong or the board simply won't support 1152 * wake on lan on a particular port 1153 */ 1154 switch (pdev->device) { 1155 case E1000_DEV_ID_82546GB_PCIE: 1156 adapter->eeprom_wol = 0; 1157 break; 1158 case E1000_DEV_ID_82546EB_FIBER: 1159 case E1000_DEV_ID_82546GB_FIBER: 1160 /* Wake events only supported on port A for dual fiber 1161 * regardless of eeprom setting 1162 */ 1163 if (er32(STATUS) & E1000_STATUS_FUNC_1) 1164 adapter->eeprom_wol = 0; 1165 break; 1166 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1167 /* if quad port adapter, disable WoL on all but port A */ 1168 if (global_quad_port_a != 0) 1169 adapter->eeprom_wol = 0; 1170 else 1171 adapter->quad_port_a = true; 1172 /* Reset for multiple quad port adapters */ 1173 if (++global_quad_port_a == 4) 1174 global_quad_port_a = 0; 1175 break; 1176 } 1177 1178 /* initialize the wol settings based on the eeprom settings */ 1179 adapter->wol = adapter->eeprom_wol; 1180 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1181 1182 /* Auto detect PHY address */ 1183 if (hw->mac_type == e1000_ce4100) { 1184 for (i = 0; i < 32; i++) { 1185 hw->phy_addr = i; 1186 e1000_read_phy_reg(hw, PHY_ID2, &tmp); 1187 1188 if (tmp != 0 && tmp != 0xFF) 1189 break; 1190 } 1191 1192 if (i >= 32) 1193 goto err_eeprom; 1194 } 1195 1196 /* reset the hardware with the new settings */ 1197 e1000_reset(adapter); 1198 1199 strcpy(netdev->name, "eth%d"); 1200 err = register_netdev(netdev); 1201 if (err) 1202 goto err_register; 1203 1204 e1000_vlan_filter_on_off(adapter, false); 1205 1206 /* print bus type/speed/width info */ 1207 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", 1208 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), 1209 ((hw->bus_speed == e1000_bus_speed_133) ? 133 : 1210 (hw->bus_speed == e1000_bus_speed_120) ? 120 : 1211 (hw->bus_speed == e1000_bus_speed_100) ? 100 : 1212 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), 1213 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), 1214 netdev->dev_addr); 1215 1216 /* carrier off reporting is important to ethtool even BEFORE open */ 1217 netif_carrier_off(netdev); 1218 1219 e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); 1220 1221 cards_found++; 1222 return 0; 1223 1224 err_register: 1225 err_eeprom: 1226 e1000_phy_hw_reset(hw); 1227 1228 if (hw->flash_address) 1229 iounmap(hw->flash_address); 1230 kfree(adapter->tx_ring); 1231 kfree(adapter->rx_ring); 1232 err_dma: 1233 err_sw_init: 1234 err_mdio_ioremap: 1235 iounmap(hw->ce4100_gbe_mdio_base_virt); 1236 iounmap(hw->hw_addr); 1237 err_ioremap: 1238 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); 1239 free_netdev(netdev); 1240 err_alloc_etherdev: 1241 pci_release_selected_regions(pdev, bars); 1242 err_pci_reg: 1243 if (!adapter || disable_dev) 1244 pci_disable_device(pdev); 1245 return err; 1246 } 1247 1248 /** 1249 * e1000_remove - Device Removal Routine 1250 * @pdev: PCI device information struct 1251 * 1252 * e1000_remove is called by the PCI subsystem to alert the driver 1253 * that it should release a PCI device. That could be caused by a 1254 * Hot-Plug event, or because the driver is going to be removed from 1255 * memory. 1256 **/ 1257 static void e1000_remove(struct pci_dev *pdev) 1258 { 1259 struct net_device *netdev = pci_get_drvdata(pdev); 1260 struct e1000_adapter *adapter = netdev_priv(netdev); 1261 struct e1000_hw *hw = &adapter->hw; 1262 bool disable_dev; 1263 1264 e1000_down_and_stop(adapter); 1265 e1000_release_manageability(adapter); 1266 1267 unregister_netdev(netdev); 1268 1269 e1000_phy_hw_reset(hw); 1270 1271 kfree(adapter->tx_ring); 1272 kfree(adapter->rx_ring); 1273 1274 if (hw->mac_type == e1000_ce4100) 1275 iounmap(hw->ce4100_gbe_mdio_base_virt); 1276 iounmap(hw->hw_addr); 1277 if (hw->flash_address) 1278 iounmap(hw->flash_address); 1279 pci_release_selected_regions(pdev, adapter->bars); 1280 1281 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); 1282 free_netdev(netdev); 1283 1284 if (disable_dev) 1285 pci_disable_device(pdev); 1286 } 1287 1288 /** 1289 * e1000_sw_init - Initialize general software structures (struct e1000_adapter) 1290 * @adapter: board private structure to initialize 1291 * 1292 * e1000_sw_init initializes the Adapter private data structure. 1293 * e1000_init_hw_struct MUST be called before this function 1294 **/ 1295 static int e1000_sw_init(struct e1000_adapter *adapter) 1296 { 1297 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1298 1299 adapter->num_tx_queues = 1; 1300 adapter->num_rx_queues = 1; 1301 1302 if (e1000_alloc_queues(adapter)) { 1303 e_err(probe, "Unable to allocate memory for queues\n"); 1304 return -ENOMEM; 1305 } 1306 1307 /* Explicitly disable IRQ since the NIC can be in any state. */ 1308 e1000_irq_disable(adapter); 1309 1310 spin_lock_init(&adapter->stats_lock); 1311 1312 set_bit(__E1000_DOWN, &adapter->flags); 1313 1314 return 0; 1315 } 1316 1317 /** 1318 * e1000_alloc_queues - Allocate memory for all rings 1319 * @adapter: board private structure to initialize 1320 * 1321 * We allocate one ring per queue at run-time since we don't know the 1322 * number of queues at compile-time. 1323 **/ 1324 static int e1000_alloc_queues(struct e1000_adapter *adapter) 1325 { 1326 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1327 sizeof(struct e1000_tx_ring), GFP_KERNEL); 1328 if (!adapter->tx_ring) 1329 return -ENOMEM; 1330 1331 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1332 sizeof(struct e1000_rx_ring), GFP_KERNEL); 1333 if (!adapter->rx_ring) { 1334 kfree(adapter->tx_ring); 1335 return -ENOMEM; 1336 } 1337 1338 return E1000_SUCCESS; 1339 } 1340 1341 /** 1342 * e1000_open - Called when a network interface is made active 1343 * @netdev: network interface device structure 1344 * 1345 * Returns 0 on success, negative value on failure 1346 * 1347 * The open entry point is called when a network interface is made 1348 * active by the system (IFF_UP). At this point all resources needed 1349 * for transmit and receive operations are allocated, the interrupt 1350 * handler is registered with the OS, the watchdog task is started, 1351 * and the stack is notified that the interface is ready. 1352 **/ 1353 int e1000_open(struct net_device *netdev) 1354 { 1355 struct e1000_adapter *adapter = netdev_priv(netdev); 1356 struct e1000_hw *hw = &adapter->hw; 1357 int err; 1358 1359 /* disallow open during test */ 1360 if (test_bit(__E1000_TESTING, &adapter->flags)) 1361 return -EBUSY; 1362 1363 netif_carrier_off(netdev); 1364 1365 /* allocate transmit descriptors */ 1366 err = e1000_setup_all_tx_resources(adapter); 1367 if (err) 1368 goto err_setup_tx; 1369 1370 /* allocate receive descriptors */ 1371 err = e1000_setup_all_rx_resources(adapter); 1372 if (err) 1373 goto err_setup_rx; 1374 1375 e1000_power_up_phy(adapter); 1376 1377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1378 if ((hw->mng_cookie.status & 1379 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1380 e1000_update_mng_vlan(adapter); 1381 } 1382 1383 /* before we allocate an interrupt, we must be ready to handle it. 1384 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1385 * as soon as we call pci_request_irq, so we have to setup our 1386 * clean_rx handler before we do so. 1387 */ 1388 e1000_configure(adapter); 1389 1390 err = e1000_request_irq(adapter); 1391 if (err) 1392 goto err_req_irq; 1393 1394 /* From here on the code is the same as e1000_up() */ 1395 clear_bit(__E1000_DOWN, &adapter->flags); 1396 1397 napi_enable(&adapter->napi); 1398 1399 e1000_irq_enable(adapter); 1400 1401 netif_start_queue(netdev); 1402 1403 /* fire a link status change interrupt to start the watchdog */ 1404 ew32(ICS, E1000_ICS_LSC); 1405 1406 return E1000_SUCCESS; 1407 1408 err_req_irq: 1409 e1000_power_down_phy(adapter); 1410 e1000_free_all_rx_resources(adapter); 1411 err_setup_rx: 1412 e1000_free_all_tx_resources(adapter); 1413 err_setup_tx: 1414 e1000_reset(adapter); 1415 1416 return err; 1417 } 1418 1419 /** 1420 * e1000_close - Disables a network interface 1421 * @netdev: network interface device structure 1422 * 1423 * Returns 0, this is not allowed to fail 1424 * 1425 * The close entry point is called when an interface is de-activated 1426 * by the OS. The hardware is still under the drivers control, but 1427 * needs to be disabled. A global MAC reset is issued to stop the 1428 * hardware, and all transmit and receive resources are freed. 1429 **/ 1430 int e1000_close(struct net_device *netdev) 1431 { 1432 struct e1000_adapter *adapter = netdev_priv(netdev); 1433 struct e1000_hw *hw = &adapter->hw; 1434 int count = E1000_CHECK_RESET_COUNT; 1435 1436 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) 1437 usleep_range(10000, 20000); 1438 1439 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 1440 e1000_down(adapter); 1441 e1000_power_down_phy(adapter); 1442 e1000_free_irq(adapter); 1443 1444 e1000_free_all_tx_resources(adapter); 1445 e1000_free_all_rx_resources(adapter); 1446 1447 /* kill manageability vlan ID if supported, but not if a vlan with 1448 * the same ID is registered on the host OS (let 8021q kill it) 1449 */ 1450 if ((hw->mng_cookie.status & 1451 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1452 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { 1453 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 1454 adapter->mng_vlan_id); 1455 } 1456 1457 return 0; 1458 } 1459 1460 /** 1461 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 1462 * @adapter: address of board private structure 1463 * @start: address of beginning of memory 1464 * @len: length of memory 1465 **/ 1466 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, 1467 unsigned long len) 1468 { 1469 struct e1000_hw *hw = &adapter->hw; 1470 unsigned long begin = (unsigned long)start; 1471 unsigned long end = begin + len; 1472 1473 /* First rev 82545 and 82546 need to not allow any memory 1474 * write location to cross 64k boundary due to errata 23 1475 */ 1476 if (hw->mac_type == e1000_82545 || 1477 hw->mac_type == e1000_ce4100 || 1478 hw->mac_type == e1000_82546) { 1479 return ((begin ^ (end - 1)) >> 16) == 0; 1480 } 1481 1482 return true; 1483 } 1484 1485 /** 1486 * e1000_setup_tx_resources - allocate Tx resources (Descriptors) 1487 * @adapter: board private structure 1488 * @txdr: tx descriptor ring (for a specific queue) to setup 1489 * 1490 * Return 0 on success, negative on failure 1491 **/ 1492 static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 1493 struct e1000_tx_ring *txdr) 1494 { 1495 struct pci_dev *pdev = adapter->pdev; 1496 int size; 1497 1498 size = sizeof(struct e1000_tx_buffer) * txdr->count; 1499 txdr->buffer_info = vzalloc(size); 1500 if (!txdr->buffer_info) 1501 return -ENOMEM; 1502 1503 /* round up to nearest 4K */ 1504 1505 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1506 txdr->size = ALIGN(txdr->size, 4096); 1507 1508 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 1509 GFP_KERNEL); 1510 if (!txdr->desc) { 1511 setup_tx_desc_die: 1512 vfree(txdr->buffer_info); 1513 return -ENOMEM; 1514 } 1515 1516 /* Fix for errata 23, can't cross 64kB boundary */ 1517 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1518 void *olddesc = txdr->desc; 1519 dma_addr_t olddma = txdr->dma; 1520 e_err(tx_err, "txdr align check failed: %u bytes at %p\n", 1521 txdr->size, txdr->desc); 1522 /* Try again, without freeing the previous */ 1523 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, 1524 &txdr->dma, GFP_KERNEL); 1525 /* Failed allocation, critical failure */ 1526 if (!txdr->desc) { 1527 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1528 olddma); 1529 goto setup_tx_desc_die; 1530 } 1531 1532 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1533 /* give up */ 1534 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, 1535 txdr->dma); 1536 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1537 olddma); 1538 e_err(probe, "Unable to allocate aligned memory " 1539 "for the transmit descriptor ring\n"); 1540 vfree(txdr->buffer_info); 1541 return -ENOMEM; 1542 } else { 1543 /* Free old allocation, new allocation was successful */ 1544 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1545 olddma); 1546 } 1547 } 1548 memset(txdr->desc, 0, txdr->size); 1549 1550 txdr->next_to_use = 0; 1551 txdr->next_to_clean = 0; 1552 1553 return 0; 1554 } 1555 1556 /** 1557 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources 1558 * (Descriptors) for all queues 1559 * @adapter: board private structure 1560 * 1561 * Return 0 on success, negative on failure 1562 **/ 1563 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) 1564 { 1565 int i, err = 0; 1566 1567 for (i = 0; i < adapter->num_tx_queues; i++) { 1568 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1569 if (err) { 1570 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 1571 for (i-- ; i >= 0; i--) 1572 e1000_free_tx_resources(adapter, 1573 &adapter->tx_ring[i]); 1574 break; 1575 } 1576 } 1577 1578 return err; 1579 } 1580 1581 /** 1582 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset 1583 * @adapter: board private structure 1584 * 1585 * Configure the Tx unit of the MAC after a reset. 1586 **/ 1587 static void e1000_configure_tx(struct e1000_adapter *adapter) 1588 { 1589 u64 tdba; 1590 struct e1000_hw *hw = &adapter->hw; 1591 u32 tdlen, tctl, tipg; 1592 u32 ipgr1, ipgr2; 1593 1594 /* Setup the HW Tx Head and Tail descriptor pointers */ 1595 1596 switch (adapter->num_tx_queues) { 1597 case 1: 1598 default: 1599 tdba = adapter->tx_ring[0].dma; 1600 tdlen = adapter->tx_ring[0].count * 1601 sizeof(struct e1000_tx_desc); 1602 ew32(TDLEN, tdlen); 1603 ew32(TDBAH, (tdba >> 32)); 1604 ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); 1605 ew32(TDT, 0); 1606 ew32(TDH, 0); 1607 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? 1608 E1000_TDH : E1000_82542_TDH); 1609 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? 1610 E1000_TDT : E1000_82542_TDT); 1611 break; 1612 } 1613 1614 /* Set the default values for the Tx Inter Packet Gap timer */ 1615 if ((hw->media_type == e1000_media_type_fiber || 1616 hw->media_type == e1000_media_type_internal_serdes)) 1617 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1618 else 1619 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 1620 1621 switch (hw->mac_type) { 1622 case e1000_82542_rev2_0: 1623 case e1000_82542_rev2_1: 1624 tipg = DEFAULT_82542_TIPG_IPGT; 1625 ipgr1 = DEFAULT_82542_TIPG_IPGR1; 1626 ipgr2 = DEFAULT_82542_TIPG_IPGR2; 1627 break; 1628 default: 1629 ipgr1 = DEFAULT_82543_TIPG_IPGR1; 1630 ipgr2 = DEFAULT_82543_TIPG_IPGR2; 1631 break; 1632 } 1633 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; 1634 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; 1635 ew32(TIPG, tipg); 1636 1637 /* Set the Tx Interrupt Delay register */ 1638 1639 ew32(TIDV, adapter->tx_int_delay); 1640 if (hw->mac_type >= e1000_82540) 1641 ew32(TADV, adapter->tx_abs_int_delay); 1642 1643 /* Program the Transmit Control Register */ 1644 1645 tctl = er32(TCTL); 1646 tctl &= ~E1000_TCTL_CT; 1647 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 1648 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1649 1650 e1000_config_collision_dist(hw); 1651 1652 /* Setup Transmit Descriptor Settings for eop descriptor */ 1653 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 1654 1655 /* only set IDE if we are delaying interrupts using the timers */ 1656 if (adapter->tx_int_delay) 1657 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 1658 1659 if (hw->mac_type < e1000_82543) 1660 adapter->txd_cmd |= E1000_TXD_CMD_RPS; 1661 else 1662 adapter->txd_cmd |= E1000_TXD_CMD_RS; 1663 1664 /* Cache if we're 82544 running in PCI-X because we'll 1665 * need this to apply a workaround later in the send path. 1666 */ 1667 if (hw->mac_type == e1000_82544 && 1668 hw->bus_type == e1000_bus_type_pcix) 1669 adapter->pcix_82544 = true; 1670 1671 ew32(TCTL, tctl); 1672 1673 } 1674 1675 /** 1676 * e1000_setup_rx_resources - allocate Rx resources (Descriptors) 1677 * @adapter: board private structure 1678 * @rxdr: rx descriptor ring (for a specific queue) to setup 1679 * 1680 * Returns 0 on success, negative on failure 1681 **/ 1682 static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 1683 struct e1000_rx_ring *rxdr) 1684 { 1685 struct pci_dev *pdev = adapter->pdev; 1686 int size, desc_len; 1687 1688 size = sizeof(struct e1000_rx_buffer) * rxdr->count; 1689 rxdr->buffer_info = vzalloc(size); 1690 if (!rxdr->buffer_info) 1691 return -ENOMEM; 1692 1693 desc_len = sizeof(struct e1000_rx_desc); 1694 1695 /* Round up to nearest 4K */ 1696 1697 rxdr->size = rxdr->count * desc_len; 1698 rxdr->size = ALIGN(rxdr->size, 4096); 1699 1700 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1701 GFP_KERNEL); 1702 if (!rxdr->desc) { 1703 setup_rx_desc_die: 1704 vfree(rxdr->buffer_info); 1705 return -ENOMEM; 1706 } 1707 1708 /* Fix for errata 23, can't cross 64kB boundary */ 1709 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1710 void *olddesc = rxdr->desc; 1711 dma_addr_t olddma = rxdr->dma; 1712 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", 1713 rxdr->size, rxdr->desc); 1714 /* Try again, without freeing the previous */ 1715 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, 1716 &rxdr->dma, GFP_KERNEL); 1717 /* Failed allocation, critical failure */ 1718 if (!rxdr->desc) { 1719 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1720 olddma); 1721 goto setup_rx_desc_die; 1722 } 1723 1724 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1725 /* give up */ 1726 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, 1727 rxdr->dma); 1728 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1729 olddma); 1730 e_err(probe, "Unable to allocate aligned memory for " 1731 "the Rx descriptor ring\n"); 1732 goto setup_rx_desc_die; 1733 } else { 1734 /* Free old allocation, new allocation was successful */ 1735 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1736 olddma); 1737 } 1738 } 1739 memset(rxdr->desc, 0, rxdr->size); 1740 1741 rxdr->next_to_clean = 0; 1742 rxdr->next_to_use = 0; 1743 rxdr->rx_skb_top = NULL; 1744 1745 return 0; 1746 } 1747 1748 /** 1749 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources 1750 * (Descriptors) for all queues 1751 * @adapter: board private structure 1752 * 1753 * Return 0 on success, negative on failure 1754 **/ 1755 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) 1756 { 1757 int i, err = 0; 1758 1759 for (i = 0; i < adapter->num_rx_queues; i++) { 1760 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1761 if (err) { 1762 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 1763 for (i-- ; i >= 0; i--) 1764 e1000_free_rx_resources(adapter, 1765 &adapter->rx_ring[i]); 1766 break; 1767 } 1768 } 1769 1770 return err; 1771 } 1772 1773 /** 1774 * e1000_setup_rctl - configure the receive control registers 1775 * @adapter: Board private structure 1776 **/ 1777 static void e1000_setup_rctl(struct e1000_adapter *adapter) 1778 { 1779 struct e1000_hw *hw = &adapter->hw; 1780 u32 rctl; 1781 1782 rctl = er32(RCTL); 1783 1784 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 1785 1786 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 1787 E1000_RCTL_RDMTS_HALF | 1788 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); 1789 1790 if (hw->tbi_compatibility_on == 1) 1791 rctl |= E1000_RCTL_SBP; 1792 else 1793 rctl &= ~E1000_RCTL_SBP; 1794 1795 if (adapter->netdev->mtu <= ETH_DATA_LEN) 1796 rctl &= ~E1000_RCTL_LPE; 1797 else 1798 rctl |= E1000_RCTL_LPE; 1799 1800 /* Setup buffer sizes */ 1801 rctl &= ~E1000_RCTL_SZ_4096; 1802 rctl |= E1000_RCTL_BSEX; 1803 switch (adapter->rx_buffer_len) { 1804 case E1000_RXBUFFER_2048: 1805 default: 1806 rctl |= E1000_RCTL_SZ_2048; 1807 rctl &= ~E1000_RCTL_BSEX; 1808 break; 1809 case E1000_RXBUFFER_4096: 1810 rctl |= E1000_RCTL_SZ_4096; 1811 break; 1812 case E1000_RXBUFFER_8192: 1813 rctl |= E1000_RCTL_SZ_8192; 1814 break; 1815 case E1000_RXBUFFER_16384: 1816 rctl |= E1000_RCTL_SZ_16384; 1817 break; 1818 } 1819 1820 /* This is useful for sniffing bad packets. */ 1821 if (adapter->netdev->features & NETIF_F_RXALL) { 1822 /* UPE and MPE will be handled by normal PROMISC logic 1823 * in e1000e_set_rx_mode 1824 */ 1825 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 1826 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 1827 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 1828 1829 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 1830 E1000_RCTL_DPF | /* Allow filtered pause */ 1831 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 1832 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 1833 * and that breaks VLANs. 1834 */ 1835 } 1836 1837 ew32(RCTL, rctl); 1838 } 1839 1840 /** 1841 * e1000_configure_rx - Configure 8254x Receive Unit after Reset 1842 * @adapter: board private structure 1843 * 1844 * Configure the Rx unit of the MAC after a reset. 1845 **/ 1846 static void e1000_configure_rx(struct e1000_adapter *adapter) 1847 { 1848 u64 rdba; 1849 struct e1000_hw *hw = &adapter->hw; 1850 u32 rdlen, rctl, rxcsum; 1851 1852 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1853 rdlen = adapter->rx_ring[0].count * 1854 sizeof(struct e1000_rx_desc); 1855 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1856 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1857 } else { 1858 rdlen = adapter->rx_ring[0].count * 1859 sizeof(struct e1000_rx_desc); 1860 adapter->clean_rx = e1000_clean_rx_irq; 1861 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1862 } 1863 1864 /* disable receives while setting up the descriptors */ 1865 rctl = er32(RCTL); 1866 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1867 1868 /* set the Receive Delay Timer Register */ 1869 ew32(RDTR, adapter->rx_int_delay); 1870 1871 if (hw->mac_type >= e1000_82540) { 1872 ew32(RADV, adapter->rx_abs_int_delay); 1873 if (adapter->itr_setting != 0) 1874 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1875 } 1876 1877 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1878 * the Base and Length of the Rx Descriptor Ring 1879 */ 1880 switch (adapter->num_rx_queues) { 1881 case 1: 1882 default: 1883 rdba = adapter->rx_ring[0].dma; 1884 ew32(RDLEN, rdlen); 1885 ew32(RDBAH, (rdba >> 32)); 1886 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1887 ew32(RDT, 0); 1888 ew32(RDH, 0); 1889 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? 1890 E1000_RDH : E1000_82542_RDH); 1891 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 1892 E1000_RDT : E1000_82542_RDT); 1893 break; 1894 } 1895 1896 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1897 if (hw->mac_type >= e1000_82543) { 1898 rxcsum = er32(RXCSUM); 1899 if (adapter->rx_csum) 1900 rxcsum |= E1000_RXCSUM_TUOFL; 1901 else 1902 /* don't need to clear IPPCSE as it defaults to 0 */ 1903 rxcsum &= ~E1000_RXCSUM_TUOFL; 1904 ew32(RXCSUM, rxcsum); 1905 } 1906 1907 /* Enable Receives */ 1908 ew32(RCTL, rctl | E1000_RCTL_EN); 1909 } 1910 1911 /** 1912 * e1000_free_tx_resources - Free Tx Resources per Queue 1913 * @adapter: board private structure 1914 * @tx_ring: Tx descriptor ring for a specific queue 1915 * 1916 * Free all transmit software resources 1917 **/ 1918 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1919 struct e1000_tx_ring *tx_ring) 1920 { 1921 struct pci_dev *pdev = adapter->pdev; 1922 1923 e1000_clean_tx_ring(adapter, tx_ring); 1924 1925 vfree(tx_ring->buffer_info); 1926 tx_ring->buffer_info = NULL; 1927 1928 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1929 tx_ring->dma); 1930 1931 tx_ring->desc = NULL; 1932 } 1933 1934 /** 1935 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1936 * @adapter: board private structure 1937 * 1938 * Free all transmit software resources 1939 **/ 1940 void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1941 { 1942 int i; 1943 1944 for (i = 0; i < adapter->num_tx_queues; i++) 1945 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1946 } 1947 1948 static void 1949 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1950 struct e1000_tx_buffer *buffer_info) 1951 { 1952 if (buffer_info->dma) { 1953 if (buffer_info->mapped_as_page) 1954 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1955 buffer_info->length, DMA_TO_DEVICE); 1956 else 1957 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1958 buffer_info->length, 1959 DMA_TO_DEVICE); 1960 buffer_info->dma = 0; 1961 } 1962 if (buffer_info->skb) { 1963 dev_kfree_skb_any(buffer_info->skb); 1964 buffer_info->skb = NULL; 1965 } 1966 buffer_info->time_stamp = 0; 1967 /* buffer_info must be completely set up in the transmit path */ 1968 } 1969 1970 /** 1971 * e1000_clean_tx_ring - Free Tx Buffers 1972 * @adapter: board private structure 1973 * @tx_ring: ring to be cleaned 1974 **/ 1975 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1976 struct e1000_tx_ring *tx_ring) 1977 { 1978 struct e1000_hw *hw = &adapter->hw; 1979 struct e1000_tx_buffer *buffer_info; 1980 unsigned long size; 1981 unsigned int i; 1982 1983 /* Free all the Tx ring sk_buffs */ 1984 1985 for (i = 0; i < tx_ring->count; i++) { 1986 buffer_info = &tx_ring->buffer_info[i]; 1987 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1988 } 1989 1990 netdev_reset_queue(adapter->netdev); 1991 size = sizeof(struct e1000_tx_buffer) * tx_ring->count; 1992 memset(tx_ring->buffer_info, 0, size); 1993 1994 /* Zero out the descriptor ring */ 1995 1996 memset(tx_ring->desc, 0, tx_ring->size); 1997 1998 tx_ring->next_to_use = 0; 1999 tx_ring->next_to_clean = 0; 2000 tx_ring->last_tx_tso = false; 2001 2002 writel(0, hw->hw_addr + tx_ring->tdh); 2003 writel(0, hw->hw_addr + tx_ring->tdt); 2004 } 2005 2006 /** 2007 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2008 * @adapter: board private structure 2009 **/ 2010 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2011 { 2012 int i; 2013 2014 for (i = 0; i < adapter->num_tx_queues; i++) 2015 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2016 } 2017 2018 /** 2019 * e1000_free_rx_resources - Free Rx Resources 2020 * @adapter: board private structure 2021 * @rx_ring: ring to clean the resources from 2022 * 2023 * Free all receive software resources 2024 **/ 2025 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2026 struct e1000_rx_ring *rx_ring) 2027 { 2028 struct pci_dev *pdev = adapter->pdev; 2029 2030 e1000_clean_rx_ring(adapter, rx_ring); 2031 2032 vfree(rx_ring->buffer_info); 2033 rx_ring->buffer_info = NULL; 2034 2035 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2036 rx_ring->dma); 2037 2038 rx_ring->desc = NULL; 2039 } 2040 2041 /** 2042 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2043 * @adapter: board private structure 2044 * 2045 * Free all receive software resources 2046 **/ 2047 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2048 { 2049 int i; 2050 2051 for (i = 0; i < adapter->num_rx_queues; i++) 2052 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2053 } 2054 2055 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) 2056 static unsigned int e1000_frag_len(const struct e1000_adapter *a) 2057 { 2058 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + 2059 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2060 } 2061 2062 static void *e1000_alloc_frag(const struct e1000_adapter *a) 2063 { 2064 unsigned int len = e1000_frag_len(a); 2065 u8 *data = netdev_alloc_frag(len); 2066 2067 if (likely(data)) 2068 data += E1000_HEADROOM; 2069 return data; 2070 } 2071 2072 /** 2073 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2074 * @adapter: board private structure 2075 * @rx_ring: ring to free buffers from 2076 **/ 2077 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2078 struct e1000_rx_ring *rx_ring) 2079 { 2080 struct e1000_hw *hw = &adapter->hw; 2081 struct e1000_rx_buffer *buffer_info; 2082 struct pci_dev *pdev = adapter->pdev; 2083 unsigned long size; 2084 unsigned int i; 2085 2086 /* Free all the Rx netfrags */ 2087 for (i = 0; i < rx_ring->count; i++) { 2088 buffer_info = &rx_ring->buffer_info[i]; 2089 if (adapter->clean_rx == e1000_clean_rx_irq) { 2090 if (buffer_info->dma) 2091 dma_unmap_single(&pdev->dev, buffer_info->dma, 2092 adapter->rx_buffer_len, 2093 DMA_FROM_DEVICE); 2094 if (buffer_info->rxbuf.data) { 2095 skb_free_frag(buffer_info->rxbuf.data); 2096 buffer_info->rxbuf.data = NULL; 2097 } 2098 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2099 if (buffer_info->dma) 2100 dma_unmap_page(&pdev->dev, buffer_info->dma, 2101 adapter->rx_buffer_len, 2102 DMA_FROM_DEVICE); 2103 if (buffer_info->rxbuf.page) { 2104 put_page(buffer_info->rxbuf.page); 2105 buffer_info->rxbuf.page = NULL; 2106 } 2107 } 2108 2109 buffer_info->dma = 0; 2110 } 2111 2112 /* there also may be some cached data from a chained receive */ 2113 napi_free_frags(&adapter->napi); 2114 rx_ring->rx_skb_top = NULL; 2115 2116 size = sizeof(struct e1000_rx_buffer) * rx_ring->count; 2117 memset(rx_ring->buffer_info, 0, size); 2118 2119 /* Zero out the descriptor ring */ 2120 memset(rx_ring->desc, 0, rx_ring->size); 2121 2122 rx_ring->next_to_clean = 0; 2123 rx_ring->next_to_use = 0; 2124 2125 writel(0, hw->hw_addr + rx_ring->rdh); 2126 writel(0, hw->hw_addr + rx_ring->rdt); 2127 } 2128 2129 /** 2130 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2131 * @adapter: board private structure 2132 **/ 2133 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2134 { 2135 int i; 2136 2137 for (i = 0; i < adapter->num_rx_queues; i++) 2138 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2139 } 2140 2141 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2142 * and memory write and invalidate disabled for certain operations 2143 */ 2144 static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2145 { 2146 struct e1000_hw *hw = &adapter->hw; 2147 struct net_device *netdev = adapter->netdev; 2148 u32 rctl; 2149 2150 e1000_pci_clear_mwi(hw); 2151 2152 rctl = er32(RCTL); 2153 rctl |= E1000_RCTL_RST; 2154 ew32(RCTL, rctl); 2155 E1000_WRITE_FLUSH(); 2156 mdelay(5); 2157 2158 if (netif_running(netdev)) 2159 e1000_clean_all_rx_rings(adapter); 2160 } 2161 2162 static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2163 { 2164 struct e1000_hw *hw = &adapter->hw; 2165 struct net_device *netdev = adapter->netdev; 2166 u32 rctl; 2167 2168 rctl = er32(RCTL); 2169 rctl &= ~E1000_RCTL_RST; 2170 ew32(RCTL, rctl); 2171 E1000_WRITE_FLUSH(); 2172 mdelay(5); 2173 2174 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2175 e1000_pci_set_mwi(hw); 2176 2177 if (netif_running(netdev)) { 2178 /* No need to loop, because 82542 supports only 1 queue */ 2179 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2180 e1000_configure_rx(adapter); 2181 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2182 } 2183 } 2184 2185 /** 2186 * e1000_set_mac - Change the Ethernet Address of the NIC 2187 * @netdev: network interface device structure 2188 * @p: pointer to an address structure 2189 * 2190 * Returns 0 on success, negative on failure 2191 **/ 2192 static int e1000_set_mac(struct net_device *netdev, void *p) 2193 { 2194 struct e1000_adapter *adapter = netdev_priv(netdev); 2195 struct e1000_hw *hw = &adapter->hw; 2196 struct sockaddr *addr = p; 2197 2198 if (!is_valid_ether_addr(addr->sa_data)) 2199 return -EADDRNOTAVAIL; 2200 2201 /* 82542 2.0 needs to be in reset to write receive address registers */ 2202 2203 if (hw->mac_type == e1000_82542_rev2_0) 2204 e1000_enter_82542_rst(adapter); 2205 2206 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2207 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2208 2209 e1000_rar_set(hw, hw->mac_addr, 0); 2210 2211 if (hw->mac_type == e1000_82542_rev2_0) 2212 e1000_leave_82542_rst(adapter); 2213 2214 return 0; 2215 } 2216 2217 /** 2218 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2219 * @netdev: network interface device structure 2220 * 2221 * The set_rx_mode entry point is called whenever the unicast or multicast 2222 * address lists or the network interface flags are updated. This routine is 2223 * responsible for configuring the hardware for proper unicast, multicast, 2224 * promiscuous mode, and all-multi behavior. 2225 **/ 2226 static void e1000_set_rx_mode(struct net_device *netdev) 2227 { 2228 struct e1000_adapter *adapter = netdev_priv(netdev); 2229 struct e1000_hw *hw = &adapter->hw; 2230 struct netdev_hw_addr *ha; 2231 bool use_uc = false; 2232 u32 rctl; 2233 u32 hash_value; 2234 int i, rar_entries = E1000_RAR_ENTRIES; 2235 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2236 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2237 2238 if (!mcarray) 2239 return; 2240 2241 /* Check for Promiscuous and All Multicast modes */ 2242 2243 rctl = er32(RCTL); 2244 2245 if (netdev->flags & IFF_PROMISC) { 2246 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2247 rctl &= ~E1000_RCTL_VFE; 2248 } else { 2249 if (netdev->flags & IFF_ALLMULTI) 2250 rctl |= E1000_RCTL_MPE; 2251 else 2252 rctl &= ~E1000_RCTL_MPE; 2253 /* Enable VLAN filter if there is a VLAN */ 2254 if (e1000_vlan_used(adapter)) 2255 rctl |= E1000_RCTL_VFE; 2256 } 2257 2258 if (netdev_uc_count(netdev) > rar_entries - 1) { 2259 rctl |= E1000_RCTL_UPE; 2260 } else if (!(netdev->flags & IFF_PROMISC)) { 2261 rctl &= ~E1000_RCTL_UPE; 2262 use_uc = true; 2263 } 2264 2265 ew32(RCTL, rctl); 2266 2267 /* 82542 2.0 needs to be in reset to write receive address registers */ 2268 2269 if (hw->mac_type == e1000_82542_rev2_0) 2270 e1000_enter_82542_rst(adapter); 2271 2272 /* load the first 14 addresses into the exact filters 1-14. Unicast 2273 * addresses take precedence to avoid disabling unicast filtering 2274 * when possible. 2275 * 2276 * RAR 0 is used for the station MAC address 2277 * if there are not 14 addresses, go ahead and clear the filters 2278 */ 2279 i = 1; 2280 if (use_uc) 2281 netdev_for_each_uc_addr(ha, netdev) { 2282 if (i == rar_entries) 2283 break; 2284 e1000_rar_set(hw, ha->addr, i++); 2285 } 2286 2287 netdev_for_each_mc_addr(ha, netdev) { 2288 if (i == rar_entries) { 2289 /* load any remaining addresses into the hash table */ 2290 u32 hash_reg, hash_bit, mta; 2291 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2292 hash_reg = (hash_value >> 5) & 0x7F; 2293 hash_bit = hash_value & 0x1F; 2294 mta = (1 << hash_bit); 2295 mcarray[hash_reg] |= mta; 2296 } else { 2297 e1000_rar_set(hw, ha->addr, i++); 2298 } 2299 } 2300 2301 for (; i < rar_entries; i++) { 2302 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2303 E1000_WRITE_FLUSH(); 2304 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2305 E1000_WRITE_FLUSH(); 2306 } 2307 2308 /* write the hash table completely, write from bottom to avoid 2309 * both stupid write combining chipsets, and flushing each write 2310 */ 2311 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2312 /* If we are on an 82544 has an errata where writing odd 2313 * offsets overwrites the previous even offset, but writing 2314 * backwards over the range solves the issue by always 2315 * writing the odd offset first 2316 */ 2317 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2318 } 2319 E1000_WRITE_FLUSH(); 2320 2321 if (hw->mac_type == e1000_82542_rev2_0) 2322 e1000_leave_82542_rst(adapter); 2323 2324 kfree(mcarray); 2325 } 2326 2327 /** 2328 * e1000_update_phy_info_task - get phy info 2329 * @work: work struct contained inside adapter struct 2330 * 2331 * Need to wait a few seconds after link up to get diagnostic information from 2332 * the phy 2333 */ 2334 static void e1000_update_phy_info_task(struct work_struct *work) 2335 { 2336 struct e1000_adapter *adapter = container_of(work, 2337 struct e1000_adapter, 2338 phy_info_task.work); 2339 2340 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2341 } 2342 2343 /** 2344 * e1000_82547_tx_fifo_stall_task - task to complete work 2345 * @work: work struct contained inside adapter struct 2346 **/ 2347 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2348 { 2349 struct e1000_adapter *adapter = container_of(work, 2350 struct e1000_adapter, 2351 fifo_stall_task.work); 2352 struct e1000_hw *hw = &adapter->hw; 2353 struct net_device *netdev = adapter->netdev; 2354 u32 tctl; 2355 2356 if (atomic_read(&adapter->tx_fifo_stall)) { 2357 if ((er32(TDT) == er32(TDH)) && 2358 (er32(TDFT) == er32(TDFH)) && 2359 (er32(TDFTS) == er32(TDFHS))) { 2360 tctl = er32(TCTL); 2361 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2362 ew32(TDFT, adapter->tx_head_addr); 2363 ew32(TDFH, adapter->tx_head_addr); 2364 ew32(TDFTS, adapter->tx_head_addr); 2365 ew32(TDFHS, adapter->tx_head_addr); 2366 ew32(TCTL, tctl); 2367 E1000_WRITE_FLUSH(); 2368 2369 adapter->tx_fifo_head = 0; 2370 atomic_set(&adapter->tx_fifo_stall, 0); 2371 netif_wake_queue(netdev); 2372 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2373 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2374 } 2375 } 2376 } 2377 2378 bool e1000_has_link(struct e1000_adapter *adapter) 2379 { 2380 struct e1000_hw *hw = &adapter->hw; 2381 bool link_active = false; 2382 2383 /* get_link_status is set on LSC (link status) interrupt or rx 2384 * sequence error interrupt (except on intel ce4100). 2385 * get_link_status will stay false until the 2386 * e1000_check_for_link establishes link for copper adapters 2387 * ONLY 2388 */ 2389 switch (hw->media_type) { 2390 case e1000_media_type_copper: 2391 if (hw->mac_type == e1000_ce4100) 2392 hw->get_link_status = 1; 2393 if (hw->get_link_status) { 2394 e1000_check_for_link(hw); 2395 link_active = !hw->get_link_status; 2396 } else { 2397 link_active = true; 2398 } 2399 break; 2400 case e1000_media_type_fiber: 2401 e1000_check_for_link(hw); 2402 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2403 break; 2404 case e1000_media_type_internal_serdes: 2405 e1000_check_for_link(hw); 2406 link_active = hw->serdes_has_link; 2407 break; 2408 default: 2409 break; 2410 } 2411 2412 return link_active; 2413 } 2414 2415 /** 2416 * e1000_watchdog - work function 2417 * @work: work struct contained inside adapter struct 2418 **/ 2419 static void e1000_watchdog(struct work_struct *work) 2420 { 2421 struct e1000_adapter *adapter = container_of(work, 2422 struct e1000_adapter, 2423 watchdog_task.work); 2424 struct e1000_hw *hw = &adapter->hw; 2425 struct net_device *netdev = adapter->netdev; 2426 struct e1000_tx_ring *txdr = adapter->tx_ring; 2427 u32 link, tctl; 2428 2429 link = e1000_has_link(adapter); 2430 if ((netif_carrier_ok(netdev)) && link) 2431 goto link_up; 2432 2433 if (link) { 2434 if (!netif_carrier_ok(netdev)) { 2435 u32 ctrl; 2436 /* update snapshot of PHY registers on LSC */ 2437 e1000_get_speed_and_duplex(hw, 2438 &adapter->link_speed, 2439 &adapter->link_duplex); 2440 2441 ctrl = er32(CTRL); 2442 pr_info("%s NIC Link is Up %d Mbps %s, " 2443 "Flow Control: %s\n", 2444 netdev->name, 2445 adapter->link_speed, 2446 adapter->link_duplex == FULL_DUPLEX ? 2447 "Full Duplex" : "Half Duplex", 2448 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2449 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2450 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2451 E1000_CTRL_TFCE) ? "TX" : "None"))); 2452 2453 /* adjust timeout factor according to speed/duplex */ 2454 adapter->tx_timeout_factor = 1; 2455 switch (adapter->link_speed) { 2456 case SPEED_10: 2457 adapter->tx_timeout_factor = 16; 2458 break; 2459 case SPEED_100: 2460 /* maybe add some timeout factor ? */ 2461 break; 2462 } 2463 2464 /* enable transmits in the hardware */ 2465 tctl = er32(TCTL); 2466 tctl |= E1000_TCTL_EN; 2467 ew32(TCTL, tctl); 2468 2469 netif_carrier_on(netdev); 2470 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2471 schedule_delayed_work(&adapter->phy_info_task, 2472 2 * HZ); 2473 adapter->smartspeed = 0; 2474 } 2475 } else { 2476 if (netif_carrier_ok(netdev)) { 2477 adapter->link_speed = 0; 2478 adapter->link_duplex = 0; 2479 pr_info("%s NIC Link is Down\n", 2480 netdev->name); 2481 netif_carrier_off(netdev); 2482 2483 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2484 schedule_delayed_work(&adapter->phy_info_task, 2485 2 * HZ); 2486 } 2487 2488 e1000_smartspeed(adapter); 2489 } 2490 2491 link_up: 2492 e1000_update_stats(adapter); 2493 2494 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2495 adapter->tpt_old = adapter->stats.tpt; 2496 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2497 adapter->colc_old = adapter->stats.colc; 2498 2499 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2500 adapter->gorcl_old = adapter->stats.gorcl; 2501 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2502 adapter->gotcl_old = adapter->stats.gotcl; 2503 2504 e1000_update_adaptive(hw); 2505 2506 if (!netif_carrier_ok(netdev)) { 2507 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2508 /* We've lost link, so the controller stops DMA, 2509 * but we've got queued Tx work that's never going 2510 * to get done, so reset controller to flush Tx. 2511 * (Do the reset outside of interrupt context). 2512 */ 2513 adapter->tx_timeout_count++; 2514 schedule_work(&adapter->reset_task); 2515 /* exit immediately since reset is imminent */ 2516 return; 2517 } 2518 } 2519 2520 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2521 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2522 /* Symmetric Tx/Rx gets a reduced ITR=2000; 2523 * Total asymmetrical Tx or Rx gets ITR=8000; 2524 * everyone else is between 2000-8000. 2525 */ 2526 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2527 u32 dif = (adapter->gotcl > adapter->gorcl ? 2528 adapter->gotcl - adapter->gorcl : 2529 adapter->gorcl - adapter->gotcl) / 10000; 2530 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2531 2532 ew32(ITR, 1000000000 / (itr * 256)); 2533 } 2534 2535 /* Cause software interrupt to ensure rx ring is cleaned */ 2536 ew32(ICS, E1000_ICS_RXDMT0); 2537 2538 /* Force detection of hung controller every watchdog period */ 2539 adapter->detect_tx_hung = true; 2540 2541 /* Reschedule the task */ 2542 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2543 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2544 } 2545 2546 enum latency_range { 2547 lowest_latency = 0, 2548 low_latency = 1, 2549 bulk_latency = 2, 2550 latency_invalid = 255 2551 }; 2552 2553 /** 2554 * e1000_update_itr - update the dynamic ITR value based on statistics 2555 * @adapter: pointer to adapter 2556 * @itr_setting: current adapter->itr 2557 * @packets: the number of packets during this measurement interval 2558 * @bytes: the number of bytes during this measurement interval 2559 * 2560 * Stores a new ITR value based on packets and byte 2561 * counts during the last interrupt. The advantage of per interrupt 2562 * computation is faster updates and more accurate ITR for the current 2563 * traffic pattern. Constants in this function were computed 2564 * based on theoretical maximum wire speed and thresholds were set based 2565 * on testing data as well as attempting to minimize response time 2566 * while increasing bulk throughput. 2567 * this functionality is controlled by the InterruptThrottleRate module 2568 * parameter (see e1000_param.c) 2569 **/ 2570 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2571 u16 itr_setting, int packets, int bytes) 2572 { 2573 unsigned int retval = itr_setting; 2574 struct e1000_hw *hw = &adapter->hw; 2575 2576 if (unlikely(hw->mac_type < e1000_82540)) 2577 goto update_itr_done; 2578 2579 if (packets == 0) 2580 goto update_itr_done; 2581 2582 switch (itr_setting) { 2583 case lowest_latency: 2584 /* jumbo frames get bulk treatment*/ 2585 if (bytes/packets > 8000) 2586 retval = bulk_latency; 2587 else if ((packets < 5) && (bytes > 512)) 2588 retval = low_latency; 2589 break; 2590 case low_latency: /* 50 usec aka 20000 ints/s */ 2591 if (bytes > 10000) { 2592 /* jumbo frames need bulk latency setting */ 2593 if (bytes/packets > 8000) 2594 retval = bulk_latency; 2595 else if ((packets < 10) || ((bytes/packets) > 1200)) 2596 retval = bulk_latency; 2597 else if ((packets > 35)) 2598 retval = lowest_latency; 2599 } else if (bytes/packets > 2000) 2600 retval = bulk_latency; 2601 else if (packets <= 2 && bytes < 512) 2602 retval = lowest_latency; 2603 break; 2604 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2605 if (bytes > 25000) { 2606 if (packets > 35) 2607 retval = low_latency; 2608 } else if (bytes < 6000) { 2609 retval = low_latency; 2610 } 2611 break; 2612 } 2613 2614 update_itr_done: 2615 return retval; 2616 } 2617 2618 static void e1000_set_itr(struct e1000_adapter *adapter) 2619 { 2620 struct e1000_hw *hw = &adapter->hw; 2621 u16 current_itr; 2622 u32 new_itr = adapter->itr; 2623 2624 if (unlikely(hw->mac_type < e1000_82540)) 2625 return; 2626 2627 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2628 if (unlikely(adapter->link_speed != SPEED_1000)) { 2629 current_itr = 0; 2630 new_itr = 4000; 2631 goto set_itr_now; 2632 } 2633 2634 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, 2635 adapter->total_tx_packets, 2636 adapter->total_tx_bytes); 2637 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2638 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2639 adapter->tx_itr = low_latency; 2640 2641 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, 2642 adapter->total_rx_packets, 2643 adapter->total_rx_bytes); 2644 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2645 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2646 adapter->rx_itr = low_latency; 2647 2648 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2649 2650 switch (current_itr) { 2651 /* counts and packets in update_itr are dependent on these numbers */ 2652 case lowest_latency: 2653 new_itr = 70000; 2654 break; 2655 case low_latency: 2656 new_itr = 20000; /* aka hwitr = ~200 */ 2657 break; 2658 case bulk_latency: 2659 new_itr = 4000; 2660 break; 2661 default: 2662 break; 2663 } 2664 2665 set_itr_now: 2666 if (new_itr != adapter->itr) { 2667 /* this attempts to bias the interrupt rate towards Bulk 2668 * by adding intermediate steps when interrupt rate is 2669 * increasing 2670 */ 2671 new_itr = new_itr > adapter->itr ? 2672 min(adapter->itr + (new_itr >> 2), new_itr) : 2673 new_itr; 2674 adapter->itr = new_itr; 2675 ew32(ITR, 1000000000 / (new_itr * 256)); 2676 } 2677 } 2678 2679 #define E1000_TX_FLAGS_CSUM 0x00000001 2680 #define E1000_TX_FLAGS_VLAN 0x00000002 2681 #define E1000_TX_FLAGS_TSO 0x00000004 2682 #define E1000_TX_FLAGS_IPV4 0x00000008 2683 #define E1000_TX_FLAGS_NO_FCS 0x00000010 2684 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2685 #define E1000_TX_FLAGS_VLAN_SHIFT 16 2686 2687 static int e1000_tso(struct e1000_adapter *adapter, 2688 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2689 __be16 protocol) 2690 { 2691 struct e1000_context_desc *context_desc; 2692 struct e1000_tx_buffer *buffer_info; 2693 unsigned int i; 2694 u32 cmd_length = 0; 2695 u16 ipcse = 0, tucse, mss; 2696 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2697 2698 if (skb_is_gso(skb)) { 2699 int err; 2700 2701 err = skb_cow_head(skb, 0); 2702 if (err < 0) 2703 return err; 2704 2705 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2706 mss = skb_shinfo(skb)->gso_size; 2707 if (protocol == htons(ETH_P_IP)) { 2708 struct iphdr *iph = ip_hdr(skb); 2709 iph->tot_len = 0; 2710 iph->check = 0; 2711 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2712 iph->daddr, 0, 2713 IPPROTO_TCP, 2714 0); 2715 cmd_length = E1000_TXD_CMD_IP; 2716 ipcse = skb_transport_offset(skb) - 1; 2717 } else if (skb_is_gso_v6(skb)) { 2718 tcp_v6_gso_csum_prep(skb); 2719 ipcse = 0; 2720 } 2721 ipcss = skb_network_offset(skb); 2722 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2723 tucss = skb_transport_offset(skb); 2724 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2725 tucse = 0; 2726 2727 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2728 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2729 2730 i = tx_ring->next_to_use; 2731 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2732 buffer_info = &tx_ring->buffer_info[i]; 2733 2734 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2735 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2736 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2737 context_desc->upper_setup.tcp_fields.tucss = tucss; 2738 context_desc->upper_setup.tcp_fields.tucso = tucso; 2739 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2740 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2741 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2742 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2743 2744 buffer_info->time_stamp = jiffies; 2745 buffer_info->next_to_watch = i; 2746 2747 if (++i == tx_ring->count) 2748 i = 0; 2749 2750 tx_ring->next_to_use = i; 2751 2752 return true; 2753 } 2754 return false; 2755 } 2756 2757 static bool e1000_tx_csum(struct e1000_adapter *adapter, 2758 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2759 __be16 protocol) 2760 { 2761 struct e1000_context_desc *context_desc; 2762 struct e1000_tx_buffer *buffer_info; 2763 unsigned int i; 2764 u8 css; 2765 u32 cmd_len = E1000_TXD_CMD_DEXT; 2766 2767 if (skb->ip_summed != CHECKSUM_PARTIAL) 2768 return false; 2769 2770 switch (protocol) { 2771 case cpu_to_be16(ETH_P_IP): 2772 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2773 cmd_len |= E1000_TXD_CMD_TCP; 2774 break; 2775 case cpu_to_be16(ETH_P_IPV6): 2776 /* XXX not handling all IPV6 headers */ 2777 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2778 cmd_len |= E1000_TXD_CMD_TCP; 2779 break; 2780 default: 2781 if (unlikely(net_ratelimit())) 2782 e_warn(drv, "checksum_partial proto=%x!\n", 2783 skb->protocol); 2784 break; 2785 } 2786 2787 css = skb_checksum_start_offset(skb); 2788 2789 i = tx_ring->next_to_use; 2790 buffer_info = &tx_ring->buffer_info[i]; 2791 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2792 2793 context_desc->lower_setup.ip_config = 0; 2794 context_desc->upper_setup.tcp_fields.tucss = css; 2795 context_desc->upper_setup.tcp_fields.tucso = 2796 css + skb->csum_offset; 2797 context_desc->upper_setup.tcp_fields.tucse = 0; 2798 context_desc->tcp_seg_setup.data = 0; 2799 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2800 2801 buffer_info->time_stamp = jiffies; 2802 buffer_info->next_to_watch = i; 2803 2804 if (unlikely(++i == tx_ring->count)) 2805 i = 0; 2806 2807 tx_ring->next_to_use = i; 2808 2809 return true; 2810 } 2811 2812 #define E1000_MAX_TXD_PWR 12 2813 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2814 2815 static int e1000_tx_map(struct e1000_adapter *adapter, 2816 struct e1000_tx_ring *tx_ring, 2817 struct sk_buff *skb, unsigned int first, 2818 unsigned int max_per_txd, unsigned int nr_frags, 2819 unsigned int mss) 2820 { 2821 struct e1000_hw *hw = &adapter->hw; 2822 struct pci_dev *pdev = adapter->pdev; 2823 struct e1000_tx_buffer *buffer_info; 2824 unsigned int len = skb_headlen(skb); 2825 unsigned int offset = 0, size, count = 0, i; 2826 unsigned int f, bytecount, segs; 2827 2828 i = tx_ring->next_to_use; 2829 2830 while (len) { 2831 buffer_info = &tx_ring->buffer_info[i]; 2832 size = min(len, max_per_txd); 2833 /* Workaround for Controller erratum -- 2834 * descriptor for non-tso packet in a linear SKB that follows a 2835 * tso gets written back prematurely before the data is fully 2836 * DMA'd to the controller 2837 */ 2838 if (!skb->data_len && tx_ring->last_tx_tso && 2839 !skb_is_gso(skb)) { 2840 tx_ring->last_tx_tso = false; 2841 size -= 4; 2842 } 2843 2844 /* Workaround for premature desc write-backs 2845 * in TSO mode. Append 4-byte sentinel desc 2846 */ 2847 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2848 size -= 4; 2849 /* work-around for errata 10 and it applies 2850 * to all controllers in PCI-X mode 2851 * The fix is to make sure that the first descriptor of a 2852 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2853 */ 2854 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2855 (size > 2015) && count == 0)) 2856 size = 2015; 2857 2858 /* Workaround for potential 82544 hang in PCI-X. Avoid 2859 * terminating buffers within evenly-aligned dwords. 2860 */ 2861 if (unlikely(adapter->pcix_82544 && 2862 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2863 size > 4)) 2864 size -= 4; 2865 2866 buffer_info->length = size; 2867 /* set time_stamp *before* dma to help avoid a possible race */ 2868 buffer_info->time_stamp = jiffies; 2869 buffer_info->mapped_as_page = false; 2870 buffer_info->dma = dma_map_single(&pdev->dev, 2871 skb->data + offset, 2872 size, DMA_TO_DEVICE); 2873 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2874 goto dma_error; 2875 buffer_info->next_to_watch = i; 2876 2877 len -= size; 2878 offset += size; 2879 count++; 2880 if (len) { 2881 i++; 2882 if (unlikely(i == tx_ring->count)) 2883 i = 0; 2884 } 2885 } 2886 2887 for (f = 0; f < nr_frags; f++) { 2888 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 2889 2890 len = skb_frag_size(frag); 2891 offset = 0; 2892 2893 while (len) { 2894 unsigned long bufend; 2895 i++; 2896 if (unlikely(i == tx_ring->count)) 2897 i = 0; 2898 2899 buffer_info = &tx_ring->buffer_info[i]; 2900 size = min(len, max_per_txd); 2901 /* Workaround for premature desc write-backs 2902 * in TSO mode. Append 4-byte sentinel desc 2903 */ 2904 if (unlikely(mss && f == (nr_frags-1) && 2905 size == len && size > 8)) 2906 size -= 4; 2907 /* Workaround for potential 82544 hang in PCI-X. 2908 * Avoid terminating buffers within evenly-aligned 2909 * dwords. 2910 */ 2911 bufend = (unsigned long) 2912 page_to_phys(skb_frag_page(frag)); 2913 bufend += offset + size - 1; 2914 if (unlikely(adapter->pcix_82544 && 2915 !(bufend & 4) && 2916 size > 4)) 2917 size -= 4; 2918 2919 buffer_info->length = size; 2920 buffer_info->time_stamp = jiffies; 2921 buffer_info->mapped_as_page = true; 2922 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2923 offset, size, DMA_TO_DEVICE); 2924 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2925 goto dma_error; 2926 buffer_info->next_to_watch = i; 2927 2928 len -= size; 2929 offset += size; 2930 count++; 2931 } 2932 } 2933 2934 segs = skb_shinfo(skb)->gso_segs ?: 1; 2935 /* multiply data chunks by size of headers */ 2936 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2937 2938 tx_ring->buffer_info[i].skb = skb; 2939 tx_ring->buffer_info[i].segs = segs; 2940 tx_ring->buffer_info[i].bytecount = bytecount; 2941 tx_ring->buffer_info[first].next_to_watch = i; 2942 2943 return count; 2944 2945 dma_error: 2946 dev_err(&pdev->dev, "TX DMA map failed\n"); 2947 buffer_info->dma = 0; 2948 if (count) 2949 count--; 2950 2951 while (count--) { 2952 if (i == 0) 2953 i += tx_ring->count; 2954 i--; 2955 buffer_info = &tx_ring->buffer_info[i]; 2956 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2957 } 2958 2959 return 0; 2960 } 2961 2962 static void e1000_tx_queue(struct e1000_adapter *adapter, 2963 struct e1000_tx_ring *tx_ring, int tx_flags, 2964 int count) 2965 { 2966 struct e1000_tx_desc *tx_desc = NULL; 2967 struct e1000_tx_buffer *buffer_info; 2968 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2969 unsigned int i; 2970 2971 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2972 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2973 E1000_TXD_CMD_TSE; 2974 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2975 2976 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2977 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2978 } 2979 2980 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2981 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2982 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2983 } 2984 2985 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2986 txd_lower |= E1000_TXD_CMD_VLE; 2987 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2988 } 2989 2990 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 2991 txd_lower &= ~(E1000_TXD_CMD_IFCS); 2992 2993 i = tx_ring->next_to_use; 2994 2995 while (count--) { 2996 buffer_info = &tx_ring->buffer_info[i]; 2997 tx_desc = E1000_TX_DESC(*tx_ring, i); 2998 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2999 tx_desc->lower.data = 3000 cpu_to_le32(txd_lower | buffer_info->length); 3001 tx_desc->upper.data = cpu_to_le32(txd_upper); 3002 if (unlikely(++i == tx_ring->count)) 3003 i = 0; 3004 } 3005 3006 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3007 3008 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 3009 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3010 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 3011 3012 /* Force memory writes to complete before letting h/w 3013 * know there are new descriptors to fetch. (Only 3014 * applicable for weak-ordered memory model archs, 3015 * such as IA-64). 3016 */ 3017 dma_wmb(); 3018 3019 tx_ring->next_to_use = i; 3020 } 3021 3022 /* 82547 workaround to avoid controller hang in half-duplex environment. 3023 * The workaround is to avoid queuing a large packet that would span 3024 * the internal Tx FIFO ring boundary by notifying the stack to resend 3025 * the packet at a later time. This gives the Tx FIFO an opportunity to 3026 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3027 * to the beginning of the Tx FIFO. 3028 */ 3029 3030 #define E1000_FIFO_HDR 0x10 3031 #define E1000_82547_PAD_LEN 0x3E0 3032 3033 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3034 struct sk_buff *skb) 3035 { 3036 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3037 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3038 3039 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3040 3041 if (adapter->link_duplex != HALF_DUPLEX) 3042 goto no_fifo_stall_required; 3043 3044 if (atomic_read(&adapter->tx_fifo_stall)) 3045 return 1; 3046 3047 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3048 atomic_set(&adapter->tx_fifo_stall, 1); 3049 return 1; 3050 } 3051 3052 no_fifo_stall_required: 3053 adapter->tx_fifo_head += skb_fifo_len; 3054 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3055 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3056 return 0; 3057 } 3058 3059 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3060 { 3061 struct e1000_adapter *adapter = netdev_priv(netdev); 3062 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3063 3064 netif_stop_queue(netdev); 3065 /* Herbert's original patch had: 3066 * smp_mb__after_netif_stop_queue(); 3067 * but since that doesn't exist yet, just open code it. 3068 */ 3069 smp_mb(); 3070 3071 /* We need to check again in a case another CPU has just 3072 * made room available. 3073 */ 3074 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3075 return -EBUSY; 3076 3077 /* A reprieve! */ 3078 netif_start_queue(netdev); 3079 ++adapter->restart_queue; 3080 return 0; 3081 } 3082 3083 static int e1000_maybe_stop_tx(struct net_device *netdev, 3084 struct e1000_tx_ring *tx_ring, int size) 3085 { 3086 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3087 return 0; 3088 return __e1000_maybe_stop_tx(netdev, size); 3089 } 3090 3091 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) 3092 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3093 struct net_device *netdev) 3094 { 3095 struct e1000_adapter *adapter = netdev_priv(netdev); 3096 struct e1000_hw *hw = &adapter->hw; 3097 struct e1000_tx_ring *tx_ring; 3098 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3099 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3100 unsigned int tx_flags = 0; 3101 unsigned int len = skb_headlen(skb); 3102 unsigned int nr_frags; 3103 unsigned int mss; 3104 int count = 0; 3105 int tso; 3106 unsigned int f; 3107 __be16 protocol = vlan_get_protocol(skb); 3108 3109 /* This goes back to the question of how to logically map a Tx queue 3110 * to a flow. Right now, performance is impacted slightly negatively 3111 * if using multiple Tx queues. If the stack breaks away from a 3112 * single qdisc implementation, we can look at this again. 3113 */ 3114 tx_ring = adapter->tx_ring; 3115 3116 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, 3117 * packets may get corrupted during padding by HW. 3118 * To WA this issue, pad all small packets manually. 3119 */ 3120 if (eth_skb_pad(skb)) 3121 return NETDEV_TX_OK; 3122 3123 mss = skb_shinfo(skb)->gso_size; 3124 /* The controller does a simple calculation to 3125 * make sure there is enough room in the FIFO before 3126 * initiating the DMA for each buffer. The calc is: 3127 * 4 = ceil(buffer len/mss). To make sure we don't 3128 * overrun the FIFO, adjust the max buffer len if mss 3129 * drops. 3130 */ 3131 if (mss) { 3132 u8 hdr_len; 3133 max_per_txd = min(mss << 2, max_per_txd); 3134 max_txd_pwr = fls(max_per_txd) - 1; 3135 3136 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3137 if (skb->data_len && hdr_len == len) { 3138 switch (hw->mac_type) { 3139 unsigned int pull_size; 3140 case e1000_82544: 3141 /* Make sure we have room to chop off 4 bytes, 3142 * and that the end alignment will work out to 3143 * this hardware's requirements 3144 * NOTE: this is a TSO only workaround 3145 * if end byte alignment not correct move us 3146 * into the next dword 3147 */ 3148 if ((unsigned long)(skb_tail_pointer(skb) - 1) 3149 & 4) 3150 break; 3151 /* fall through */ 3152 pull_size = min((unsigned int)4, skb->data_len); 3153 if (!__pskb_pull_tail(skb, pull_size)) { 3154 e_err(drv, "__pskb_pull_tail " 3155 "failed.\n"); 3156 dev_kfree_skb_any(skb); 3157 return NETDEV_TX_OK; 3158 } 3159 len = skb_headlen(skb); 3160 break; 3161 default: 3162 /* do nothing */ 3163 break; 3164 } 3165 } 3166 } 3167 3168 /* reserve a descriptor for the offload context */ 3169 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3170 count++; 3171 count++; 3172 3173 /* Controller Erratum workaround */ 3174 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3175 count++; 3176 3177 count += TXD_USE_COUNT(len, max_txd_pwr); 3178 3179 if (adapter->pcix_82544) 3180 count++; 3181 3182 /* work-around for errata 10 and it applies to all controllers 3183 * in PCI-X mode, so add one more descriptor to the count 3184 */ 3185 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3186 (len > 2015))) 3187 count++; 3188 3189 nr_frags = skb_shinfo(skb)->nr_frags; 3190 for (f = 0; f < nr_frags; f++) 3191 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3192 max_txd_pwr); 3193 if (adapter->pcix_82544) 3194 count += nr_frags; 3195 3196 /* need: count + 2 desc gap to keep tail from touching 3197 * head, otherwise try next time 3198 */ 3199 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3200 return NETDEV_TX_BUSY; 3201 3202 if (unlikely((hw->mac_type == e1000_82547) && 3203 (e1000_82547_fifo_workaround(adapter, skb)))) { 3204 netif_stop_queue(netdev); 3205 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3206 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3207 return NETDEV_TX_BUSY; 3208 } 3209 3210 if (skb_vlan_tag_present(skb)) { 3211 tx_flags |= E1000_TX_FLAGS_VLAN; 3212 tx_flags |= (skb_vlan_tag_get(skb) << 3213 E1000_TX_FLAGS_VLAN_SHIFT); 3214 } 3215 3216 first = tx_ring->next_to_use; 3217 3218 tso = e1000_tso(adapter, tx_ring, skb, protocol); 3219 if (tso < 0) { 3220 dev_kfree_skb_any(skb); 3221 return NETDEV_TX_OK; 3222 } 3223 3224 if (likely(tso)) { 3225 if (likely(hw->mac_type != e1000_82544)) 3226 tx_ring->last_tx_tso = true; 3227 tx_flags |= E1000_TX_FLAGS_TSO; 3228 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) 3229 tx_flags |= E1000_TX_FLAGS_CSUM; 3230 3231 if (protocol == htons(ETH_P_IP)) 3232 tx_flags |= E1000_TX_FLAGS_IPV4; 3233 3234 if (unlikely(skb->no_fcs)) 3235 tx_flags |= E1000_TX_FLAGS_NO_FCS; 3236 3237 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3238 nr_frags, mss); 3239 3240 if (count) { 3241 /* The descriptors needed is higher than other Intel drivers 3242 * due to a number of workarounds. The breakdown is below: 3243 * Data descriptors: MAX_SKB_FRAGS + 1 3244 * Context Descriptor: 1 3245 * Keep head from touching tail: 2 3246 * Workarounds: 3 3247 */ 3248 int desc_needed = MAX_SKB_FRAGS + 7; 3249 3250 netdev_sent_queue(netdev, skb->len); 3251 skb_tx_timestamp(skb); 3252 3253 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3254 3255 /* 82544 potentially requires twice as many data descriptors 3256 * in order to guarantee buffers don't end on evenly-aligned 3257 * dwords 3258 */ 3259 if (adapter->pcix_82544) 3260 desc_needed += MAX_SKB_FRAGS + 1; 3261 3262 /* Make sure there is space in the ring for the next send. */ 3263 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); 3264 3265 if (!netdev_xmit_more() || 3266 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3267 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); 3268 } 3269 } else { 3270 dev_kfree_skb_any(skb); 3271 tx_ring->buffer_info[first].time_stamp = 0; 3272 tx_ring->next_to_use = first; 3273 } 3274 3275 return NETDEV_TX_OK; 3276 } 3277 3278 #define NUM_REGS 38 /* 1 based count */ 3279 static void e1000_regdump(struct e1000_adapter *adapter) 3280 { 3281 struct e1000_hw *hw = &adapter->hw; 3282 u32 regs[NUM_REGS]; 3283 u32 *regs_buff = regs; 3284 int i = 0; 3285 3286 static const char * const reg_name[] = { 3287 "CTRL", "STATUS", 3288 "RCTL", "RDLEN", "RDH", "RDT", "RDTR", 3289 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", 3290 "TIDV", "TXDCTL", "TADV", "TARC0", 3291 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", 3292 "TXDCTL1", "TARC1", 3293 "CTRL_EXT", "ERT", "RDBAL", "RDBAH", 3294 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", 3295 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" 3296 }; 3297 3298 regs_buff[0] = er32(CTRL); 3299 regs_buff[1] = er32(STATUS); 3300 3301 regs_buff[2] = er32(RCTL); 3302 regs_buff[3] = er32(RDLEN); 3303 regs_buff[4] = er32(RDH); 3304 regs_buff[5] = er32(RDT); 3305 regs_buff[6] = er32(RDTR); 3306 3307 regs_buff[7] = er32(TCTL); 3308 regs_buff[8] = er32(TDBAL); 3309 regs_buff[9] = er32(TDBAH); 3310 regs_buff[10] = er32(TDLEN); 3311 regs_buff[11] = er32(TDH); 3312 regs_buff[12] = er32(TDT); 3313 regs_buff[13] = er32(TIDV); 3314 regs_buff[14] = er32(TXDCTL); 3315 regs_buff[15] = er32(TADV); 3316 regs_buff[16] = er32(TARC0); 3317 3318 regs_buff[17] = er32(TDBAL1); 3319 regs_buff[18] = er32(TDBAH1); 3320 regs_buff[19] = er32(TDLEN1); 3321 regs_buff[20] = er32(TDH1); 3322 regs_buff[21] = er32(TDT1); 3323 regs_buff[22] = er32(TXDCTL1); 3324 regs_buff[23] = er32(TARC1); 3325 regs_buff[24] = er32(CTRL_EXT); 3326 regs_buff[25] = er32(ERT); 3327 regs_buff[26] = er32(RDBAL0); 3328 regs_buff[27] = er32(RDBAH0); 3329 regs_buff[28] = er32(TDFH); 3330 regs_buff[29] = er32(TDFT); 3331 regs_buff[30] = er32(TDFHS); 3332 regs_buff[31] = er32(TDFTS); 3333 regs_buff[32] = er32(TDFPC); 3334 regs_buff[33] = er32(RDFH); 3335 regs_buff[34] = er32(RDFT); 3336 regs_buff[35] = er32(RDFHS); 3337 regs_buff[36] = er32(RDFTS); 3338 regs_buff[37] = er32(RDFPC); 3339 3340 pr_info("Register dump\n"); 3341 for (i = 0; i < NUM_REGS; i++) 3342 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); 3343 } 3344 3345 /* 3346 * e1000_dump: Print registers, tx ring and rx ring 3347 */ 3348 static void e1000_dump(struct e1000_adapter *adapter) 3349 { 3350 /* this code doesn't handle multiple rings */ 3351 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3352 struct e1000_rx_ring *rx_ring = adapter->rx_ring; 3353 int i; 3354 3355 if (!netif_msg_hw(adapter)) 3356 return; 3357 3358 /* Print Registers */ 3359 e1000_regdump(adapter); 3360 3361 /* transmit dump */ 3362 pr_info("TX Desc ring0 dump\n"); 3363 3364 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 3365 * 3366 * Legacy Transmit Descriptor 3367 * +--------------------------------------------------------------+ 3368 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 3369 * +--------------------------------------------------------------+ 3370 * 8 | Special | CSS | Status | CMD | CSO | Length | 3371 * +--------------------------------------------------------------+ 3372 * 63 48 47 36 35 32 31 24 23 16 15 0 3373 * 3374 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 3375 * 63 48 47 40 39 32 31 16 15 8 7 0 3376 * +----------------------------------------------------------------+ 3377 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 3378 * +----------------------------------------------------------------+ 3379 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 3380 * +----------------------------------------------------------------+ 3381 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3382 * 3383 * Extended Data Descriptor (DTYP=0x1) 3384 * +----------------------------------------------------------------+ 3385 * 0 | Buffer Address [63:0] | 3386 * +----------------------------------------------------------------+ 3387 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 3388 * +----------------------------------------------------------------+ 3389 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3390 */ 3391 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3392 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3393 3394 if (!netif_msg_tx_done(adapter)) 3395 goto rx_ring_summary; 3396 3397 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3398 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3399 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; 3400 struct my_u { __le64 a; __le64 b; }; 3401 struct my_u *u = (struct my_u *)tx_desc; 3402 const char *type; 3403 3404 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 3405 type = "NTC/U"; 3406 else if (i == tx_ring->next_to_use) 3407 type = "NTU"; 3408 else if (i == tx_ring->next_to_clean) 3409 type = "NTC"; 3410 else 3411 type = ""; 3412 3413 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", 3414 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, 3415 le64_to_cpu(u->a), le64_to_cpu(u->b), 3416 (u64)buffer_info->dma, buffer_info->length, 3417 buffer_info->next_to_watch, 3418 (u64)buffer_info->time_stamp, buffer_info->skb, type); 3419 } 3420 3421 rx_ring_summary: 3422 /* receive dump */ 3423 pr_info("\nRX Desc ring dump\n"); 3424 3425 /* Legacy Receive Descriptor Format 3426 * 3427 * +-----------------------------------------------------+ 3428 * | Buffer Address [63:0] | 3429 * +-----------------------------------------------------+ 3430 * | VLAN Tag | Errors | Status 0 | Packet csum | Length | 3431 * +-----------------------------------------------------+ 3432 * 63 48 47 40 39 32 31 16 15 0 3433 */ 3434 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); 3435 3436 if (!netif_msg_rx_status(adapter)) 3437 goto exit; 3438 3439 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3440 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3441 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; 3442 struct my_u { __le64 a; __le64 b; }; 3443 struct my_u *u = (struct my_u *)rx_desc; 3444 const char *type; 3445 3446 if (i == rx_ring->next_to_use) 3447 type = "NTU"; 3448 else if (i == rx_ring->next_to_clean) 3449 type = "NTC"; 3450 else 3451 type = ""; 3452 3453 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", 3454 i, le64_to_cpu(u->a), le64_to_cpu(u->b), 3455 (u64)buffer_info->dma, buffer_info->rxbuf.data, type); 3456 } /* for */ 3457 3458 /* dump the descriptor caches */ 3459 /* rx */ 3460 pr_info("Rx descriptor cache in 64bit format\n"); 3461 for (i = 0x6000; i <= 0x63FF ; i += 0x10) { 3462 pr_info("R%04X: %08X|%08X %08X|%08X\n", 3463 i, 3464 readl(adapter->hw.hw_addr + i+4), 3465 readl(adapter->hw.hw_addr + i), 3466 readl(adapter->hw.hw_addr + i+12), 3467 readl(adapter->hw.hw_addr + i+8)); 3468 } 3469 /* tx */ 3470 pr_info("Tx descriptor cache in 64bit format\n"); 3471 for (i = 0x7000; i <= 0x73FF ; i += 0x10) { 3472 pr_info("T%04X: %08X|%08X %08X|%08X\n", 3473 i, 3474 readl(adapter->hw.hw_addr + i+4), 3475 readl(adapter->hw.hw_addr + i), 3476 readl(adapter->hw.hw_addr + i+12), 3477 readl(adapter->hw.hw_addr + i+8)); 3478 } 3479 exit: 3480 return; 3481 } 3482 3483 /** 3484 * e1000_tx_timeout - Respond to a Tx Hang 3485 * @netdev: network interface device structure 3486 **/ 3487 static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue) 3488 { 3489 struct e1000_adapter *adapter = netdev_priv(netdev); 3490 3491 /* Do the reset outside of interrupt context */ 3492 adapter->tx_timeout_count++; 3493 schedule_work(&adapter->reset_task); 3494 } 3495 3496 static void e1000_reset_task(struct work_struct *work) 3497 { 3498 struct e1000_adapter *adapter = 3499 container_of(work, struct e1000_adapter, reset_task); 3500 3501 e_err(drv, "Reset adapter\n"); 3502 e1000_reinit_locked(adapter); 3503 } 3504 3505 /** 3506 * e1000_change_mtu - Change the Maximum Transfer Unit 3507 * @netdev: network interface device structure 3508 * @new_mtu: new value for maximum frame size 3509 * 3510 * Returns 0 on success, negative on failure 3511 **/ 3512 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3513 { 3514 struct e1000_adapter *adapter = netdev_priv(netdev); 3515 struct e1000_hw *hw = &adapter->hw; 3516 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3517 3518 /* Adapter-specific max frame size limits. */ 3519 switch (hw->mac_type) { 3520 case e1000_undefined ... e1000_82542_rev2_1: 3521 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3522 e_err(probe, "Jumbo Frames not supported.\n"); 3523 return -EINVAL; 3524 } 3525 break; 3526 default: 3527 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3528 break; 3529 } 3530 3531 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3532 msleep(1); 3533 /* e1000_down has a dependency on max_frame_size */ 3534 hw->max_frame_size = max_frame; 3535 if (netif_running(netdev)) { 3536 /* prevent buffers from being reallocated */ 3537 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; 3538 e1000_down(adapter); 3539 } 3540 3541 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3542 * means we reserve 2 more, this pushes us to allocate from the next 3543 * larger slab size. 3544 * i.e. RXBUFFER_2048 --> size-4096 slab 3545 * however with the new *_jumbo_rx* routines, jumbo receives will use 3546 * fragmented skbs 3547 */ 3548 3549 if (max_frame <= E1000_RXBUFFER_2048) 3550 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3551 else 3552 #if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3553 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3554 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3555 adapter->rx_buffer_len = PAGE_SIZE; 3556 #endif 3557 3558 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3559 if (!hw->tbi_compatibility_on && 3560 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3561 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3562 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3563 3564 netdev_dbg(netdev, "changing MTU from %d to %d\n", 3565 netdev->mtu, new_mtu); 3566 netdev->mtu = new_mtu; 3567 3568 if (netif_running(netdev)) 3569 e1000_up(adapter); 3570 else 3571 e1000_reset(adapter); 3572 3573 clear_bit(__E1000_RESETTING, &adapter->flags); 3574 3575 return 0; 3576 } 3577 3578 /** 3579 * e1000_update_stats - Update the board statistics counters 3580 * @adapter: board private structure 3581 **/ 3582 void e1000_update_stats(struct e1000_adapter *adapter) 3583 { 3584 struct net_device *netdev = adapter->netdev; 3585 struct e1000_hw *hw = &adapter->hw; 3586 struct pci_dev *pdev = adapter->pdev; 3587 unsigned long flags; 3588 u16 phy_tmp; 3589 3590 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3591 3592 /* Prevent stats update while adapter is being reset, or if the pci 3593 * connection is down. 3594 */ 3595 if (adapter->link_speed == 0) 3596 return; 3597 if (pci_channel_offline(pdev)) 3598 return; 3599 3600 spin_lock_irqsave(&adapter->stats_lock, flags); 3601 3602 /* these counters are modified from e1000_tbi_adjust_stats, 3603 * called from the interrupt context, so they must only 3604 * be written while holding adapter->stats_lock 3605 */ 3606 3607 adapter->stats.crcerrs += er32(CRCERRS); 3608 adapter->stats.gprc += er32(GPRC); 3609 adapter->stats.gorcl += er32(GORCL); 3610 adapter->stats.gorch += er32(GORCH); 3611 adapter->stats.bprc += er32(BPRC); 3612 adapter->stats.mprc += er32(MPRC); 3613 adapter->stats.roc += er32(ROC); 3614 3615 adapter->stats.prc64 += er32(PRC64); 3616 adapter->stats.prc127 += er32(PRC127); 3617 adapter->stats.prc255 += er32(PRC255); 3618 adapter->stats.prc511 += er32(PRC511); 3619 adapter->stats.prc1023 += er32(PRC1023); 3620 adapter->stats.prc1522 += er32(PRC1522); 3621 3622 adapter->stats.symerrs += er32(SYMERRS); 3623 adapter->stats.mpc += er32(MPC); 3624 adapter->stats.scc += er32(SCC); 3625 adapter->stats.ecol += er32(ECOL); 3626 adapter->stats.mcc += er32(MCC); 3627 adapter->stats.latecol += er32(LATECOL); 3628 adapter->stats.dc += er32(DC); 3629 adapter->stats.sec += er32(SEC); 3630 adapter->stats.rlec += er32(RLEC); 3631 adapter->stats.xonrxc += er32(XONRXC); 3632 adapter->stats.xontxc += er32(XONTXC); 3633 adapter->stats.xoffrxc += er32(XOFFRXC); 3634 adapter->stats.xofftxc += er32(XOFFTXC); 3635 adapter->stats.fcruc += er32(FCRUC); 3636 adapter->stats.gptc += er32(GPTC); 3637 adapter->stats.gotcl += er32(GOTCL); 3638 adapter->stats.gotch += er32(GOTCH); 3639 adapter->stats.rnbc += er32(RNBC); 3640 adapter->stats.ruc += er32(RUC); 3641 adapter->stats.rfc += er32(RFC); 3642 adapter->stats.rjc += er32(RJC); 3643 adapter->stats.torl += er32(TORL); 3644 adapter->stats.torh += er32(TORH); 3645 adapter->stats.totl += er32(TOTL); 3646 adapter->stats.toth += er32(TOTH); 3647 adapter->stats.tpr += er32(TPR); 3648 3649 adapter->stats.ptc64 += er32(PTC64); 3650 adapter->stats.ptc127 += er32(PTC127); 3651 adapter->stats.ptc255 += er32(PTC255); 3652 adapter->stats.ptc511 += er32(PTC511); 3653 adapter->stats.ptc1023 += er32(PTC1023); 3654 adapter->stats.ptc1522 += er32(PTC1522); 3655 3656 adapter->stats.mptc += er32(MPTC); 3657 adapter->stats.bptc += er32(BPTC); 3658 3659 /* used for adaptive IFS */ 3660 3661 hw->tx_packet_delta = er32(TPT); 3662 adapter->stats.tpt += hw->tx_packet_delta; 3663 hw->collision_delta = er32(COLC); 3664 adapter->stats.colc += hw->collision_delta; 3665 3666 if (hw->mac_type >= e1000_82543) { 3667 adapter->stats.algnerrc += er32(ALGNERRC); 3668 adapter->stats.rxerrc += er32(RXERRC); 3669 adapter->stats.tncrs += er32(TNCRS); 3670 adapter->stats.cexterr += er32(CEXTERR); 3671 adapter->stats.tsctc += er32(TSCTC); 3672 adapter->stats.tsctfc += er32(TSCTFC); 3673 } 3674 3675 /* Fill out the OS statistics structure */ 3676 netdev->stats.multicast = adapter->stats.mprc; 3677 netdev->stats.collisions = adapter->stats.colc; 3678 3679 /* Rx Errors */ 3680 3681 /* RLEC on some newer hardware can be incorrect so build 3682 * our own version based on RUC and ROC 3683 */ 3684 netdev->stats.rx_errors = adapter->stats.rxerrc + 3685 adapter->stats.crcerrs + adapter->stats.algnerrc + 3686 adapter->stats.ruc + adapter->stats.roc + 3687 adapter->stats.cexterr; 3688 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3689 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3690 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3691 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3692 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3693 3694 /* Tx Errors */ 3695 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3696 netdev->stats.tx_errors = adapter->stats.txerrc; 3697 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3698 netdev->stats.tx_window_errors = adapter->stats.latecol; 3699 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3700 if (hw->bad_tx_carr_stats_fd && 3701 adapter->link_duplex == FULL_DUPLEX) { 3702 netdev->stats.tx_carrier_errors = 0; 3703 adapter->stats.tncrs = 0; 3704 } 3705 3706 /* Tx Dropped needs to be maintained elsewhere */ 3707 3708 /* Phy Stats */ 3709 if (hw->media_type == e1000_media_type_copper) { 3710 if ((adapter->link_speed == SPEED_1000) && 3711 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3712 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3713 adapter->phy_stats.idle_errors += phy_tmp; 3714 } 3715 3716 if ((hw->mac_type <= e1000_82546) && 3717 (hw->phy_type == e1000_phy_m88) && 3718 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3719 adapter->phy_stats.receive_errors += phy_tmp; 3720 } 3721 3722 /* Management Stats */ 3723 if (hw->has_smbus) { 3724 adapter->stats.mgptc += er32(MGTPTC); 3725 adapter->stats.mgprc += er32(MGTPRC); 3726 adapter->stats.mgpdc += er32(MGTPDC); 3727 } 3728 3729 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3730 } 3731 3732 /** 3733 * e1000_intr - Interrupt Handler 3734 * @irq: interrupt number 3735 * @data: pointer to a network interface device structure 3736 **/ 3737 static irqreturn_t e1000_intr(int irq, void *data) 3738 { 3739 struct net_device *netdev = data; 3740 struct e1000_adapter *adapter = netdev_priv(netdev); 3741 struct e1000_hw *hw = &adapter->hw; 3742 u32 icr = er32(ICR); 3743 3744 if (unlikely((!icr))) 3745 return IRQ_NONE; /* Not our interrupt */ 3746 3747 /* we might have caused the interrupt, but the above 3748 * read cleared it, and just in case the driver is 3749 * down there is nothing to do so return handled 3750 */ 3751 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3752 return IRQ_HANDLED; 3753 3754 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3755 hw->get_link_status = 1; 3756 /* guard against interrupt when we're going down */ 3757 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3758 schedule_delayed_work(&adapter->watchdog_task, 1); 3759 } 3760 3761 /* disable interrupts, without the synchronize_irq bit */ 3762 ew32(IMC, ~0); 3763 E1000_WRITE_FLUSH(); 3764 3765 if (likely(napi_schedule_prep(&adapter->napi))) { 3766 adapter->total_tx_bytes = 0; 3767 adapter->total_tx_packets = 0; 3768 adapter->total_rx_bytes = 0; 3769 adapter->total_rx_packets = 0; 3770 __napi_schedule(&adapter->napi); 3771 } else { 3772 /* this really should not happen! if it does it is basically a 3773 * bug, but not a hard error, so enable ints and continue 3774 */ 3775 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3776 e1000_irq_enable(adapter); 3777 } 3778 3779 return IRQ_HANDLED; 3780 } 3781 3782 /** 3783 * e1000_clean - NAPI Rx polling callback 3784 * @adapter: board private structure 3785 **/ 3786 static int e1000_clean(struct napi_struct *napi, int budget) 3787 { 3788 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, 3789 napi); 3790 int tx_clean_complete = 0, work_done = 0; 3791 3792 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3793 3794 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3795 3796 if (!tx_clean_complete || work_done == budget) 3797 return budget; 3798 3799 /* Exit the polling mode, but don't re-enable interrupts if stack might 3800 * poll us due to busy-polling 3801 */ 3802 if (likely(napi_complete_done(napi, work_done))) { 3803 if (likely(adapter->itr_setting & 3)) 3804 e1000_set_itr(adapter); 3805 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3806 e1000_irq_enable(adapter); 3807 } 3808 3809 return work_done; 3810 } 3811 3812 /** 3813 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3814 * @adapter: board private structure 3815 **/ 3816 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3817 struct e1000_tx_ring *tx_ring) 3818 { 3819 struct e1000_hw *hw = &adapter->hw; 3820 struct net_device *netdev = adapter->netdev; 3821 struct e1000_tx_desc *tx_desc, *eop_desc; 3822 struct e1000_tx_buffer *buffer_info; 3823 unsigned int i, eop; 3824 unsigned int count = 0; 3825 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 3826 unsigned int bytes_compl = 0, pkts_compl = 0; 3827 3828 i = tx_ring->next_to_clean; 3829 eop = tx_ring->buffer_info[i].next_to_watch; 3830 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3831 3832 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3833 (count < tx_ring->count)) { 3834 bool cleaned = false; 3835 dma_rmb(); /* read buffer_info after eop_desc */ 3836 for ( ; !cleaned; count++) { 3837 tx_desc = E1000_TX_DESC(*tx_ring, i); 3838 buffer_info = &tx_ring->buffer_info[i]; 3839 cleaned = (i == eop); 3840 3841 if (cleaned) { 3842 total_tx_packets += buffer_info->segs; 3843 total_tx_bytes += buffer_info->bytecount; 3844 if (buffer_info->skb) { 3845 bytes_compl += buffer_info->skb->len; 3846 pkts_compl++; 3847 } 3848 3849 } 3850 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3851 tx_desc->upper.data = 0; 3852 3853 if (unlikely(++i == tx_ring->count)) 3854 i = 0; 3855 } 3856 3857 eop = tx_ring->buffer_info[i].next_to_watch; 3858 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3859 } 3860 3861 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, 3862 * which will reuse the cleaned buffers. 3863 */ 3864 smp_store_release(&tx_ring->next_to_clean, i); 3865 3866 netdev_completed_queue(netdev, pkts_compl, bytes_compl); 3867 3868 #define TX_WAKE_THRESHOLD 32 3869 if (unlikely(count && netif_carrier_ok(netdev) && 3870 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3871 /* Make sure that anybody stopping the queue after this 3872 * sees the new next_to_clean. 3873 */ 3874 smp_mb(); 3875 3876 if (netif_queue_stopped(netdev) && 3877 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3878 netif_wake_queue(netdev); 3879 ++adapter->restart_queue; 3880 } 3881 } 3882 3883 if (adapter->detect_tx_hung) { 3884 /* Detect a transmit hang in hardware, this serializes the 3885 * check with the clearing of time_stamp and movement of i 3886 */ 3887 adapter->detect_tx_hung = false; 3888 if (tx_ring->buffer_info[eop].time_stamp && 3889 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3890 (adapter->tx_timeout_factor * HZ)) && 3891 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3892 3893 /* detected Tx unit hang */ 3894 e_err(drv, "Detected Tx Unit Hang\n" 3895 " Tx Queue <%lu>\n" 3896 " TDH <%x>\n" 3897 " TDT <%x>\n" 3898 " next_to_use <%x>\n" 3899 " next_to_clean <%x>\n" 3900 "buffer_info[next_to_clean]\n" 3901 " time_stamp <%lx>\n" 3902 " next_to_watch <%x>\n" 3903 " jiffies <%lx>\n" 3904 " next_to_watch.status <%x>\n", 3905 (unsigned long)(tx_ring - adapter->tx_ring), 3906 readl(hw->hw_addr + tx_ring->tdh), 3907 readl(hw->hw_addr + tx_ring->tdt), 3908 tx_ring->next_to_use, 3909 tx_ring->next_to_clean, 3910 tx_ring->buffer_info[eop].time_stamp, 3911 eop, 3912 jiffies, 3913 eop_desc->upper.fields.status); 3914 e1000_dump(adapter); 3915 netif_stop_queue(netdev); 3916 } 3917 } 3918 adapter->total_tx_bytes += total_tx_bytes; 3919 adapter->total_tx_packets += total_tx_packets; 3920 netdev->stats.tx_bytes += total_tx_bytes; 3921 netdev->stats.tx_packets += total_tx_packets; 3922 return count < tx_ring->count; 3923 } 3924 3925 /** 3926 * e1000_rx_checksum - Receive Checksum Offload for 82543 3927 * @adapter: board private structure 3928 * @status_err: receive descriptor status and error fields 3929 * @csum: receive descriptor csum field 3930 * @sk_buff: socket buffer with received data 3931 **/ 3932 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3933 u32 csum, struct sk_buff *skb) 3934 { 3935 struct e1000_hw *hw = &adapter->hw; 3936 u16 status = (u16)status_err; 3937 u8 errors = (u8)(status_err >> 24); 3938 3939 skb_checksum_none_assert(skb); 3940 3941 /* 82543 or newer only */ 3942 if (unlikely(hw->mac_type < e1000_82543)) 3943 return; 3944 /* Ignore Checksum bit is set */ 3945 if (unlikely(status & E1000_RXD_STAT_IXSM)) 3946 return; 3947 /* TCP/UDP checksum error bit is set */ 3948 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3949 /* let the stack verify checksum errors */ 3950 adapter->hw_csum_err++; 3951 return; 3952 } 3953 /* TCP/UDP Checksum has not been calculated */ 3954 if (!(status & E1000_RXD_STAT_TCPCS)) 3955 return; 3956 3957 /* It must be a TCP or UDP packet with a valid checksum */ 3958 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3959 /* TCP checksum is good */ 3960 skb->ip_summed = CHECKSUM_UNNECESSARY; 3961 } 3962 adapter->hw_csum_good++; 3963 } 3964 3965 /** 3966 * e1000_consume_page - helper function for jumbo Rx path 3967 **/ 3968 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, 3969 u16 length) 3970 { 3971 bi->rxbuf.page = NULL; 3972 skb->len += length; 3973 skb->data_len += length; 3974 skb->truesize += PAGE_SIZE; 3975 } 3976 3977 /** 3978 * e1000_receive_skb - helper function to handle rx indications 3979 * @adapter: board private structure 3980 * @status: descriptor status field as written by hardware 3981 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3982 * @skb: pointer to sk_buff to be indicated to stack 3983 */ 3984 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, 3985 __le16 vlan, struct sk_buff *skb) 3986 { 3987 skb->protocol = eth_type_trans(skb, adapter->netdev); 3988 3989 if (status & E1000_RXD_STAT_VP) { 3990 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 3991 3992 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 3993 } 3994 napi_gro_receive(&adapter->napi, skb); 3995 } 3996 3997 /** 3998 * e1000_tbi_adjust_stats 3999 * @hw: Struct containing variables accessed by shared code 4000 * @frame_len: The length of the frame in question 4001 * @mac_addr: The Ethernet destination address of the frame in question 4002 * 4003 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT 4004 */ 4005 static void e1000_tbi_adjust_stats(struct e1000_hw *hw, 4006 struct e1000_hw_stats *stats, 4007 u32 frame_len, const u8 *mac_addr) 4008 { 4009 u64 carry_bit; 4010 4011 /* First adjust the frame length. */ 4012 frame_len--; 4013 /* We need to adjust the statistics counters, since the hardware 4014 * counters overcount this packet as a CRC error and undercount 4015 * the packet as a good packet 4016 */ 4017 /* This packet should not be counted as a CRC error. */ 4018 stats->crcerrs--; 4019 /* This packet does count as a Good Packet Received. */ 4020 stats->gprc++; 4021 4022 /* Adjust the Good Octets received counters */ 4023 carry_bit = 0x80000000 & stats->gorcl; 4024 stats->gorcl += frame_len; 4025 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4026 * Received Count) was one before the addition, 4027 * AND it is zero after, then we lost the carry out, 4028 * need to add one to Gorch (Good Octets Received Count High). 4029 * This could be simplified if all environments supported 4030 * 64-bit integers. 4031 */ 4032 if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) 4033 stats->gorch++; 4034 /* Is this a broadcast or multicast? Check broadcast first, 4035 * since the test for a multicast frame will test positive on 4036 * a broadcast frame. 4037 */ 4038 if (is_broadcast_ether_addr(mac_addr)) 4039 stats->bprc++; 4040 else if (is_multicast_ether_addr(mac_addr)) 4041 stats->mprc++; 4042 4043 if (frame_len == hw->max_frame_size) { 4044 /* In this case, the hardware has overcounted the number of 4045 * oversize frames. 4046 */ 4047 if (stats->roc > 0) 4048 stats->roc--; 4049 } 4050 4051 /* Adjust the bin counters when the extra byte put the frame in the 4052 * wrong bin. Remember that the frame_len was adjusted above. 4053 */ 4054 if (frame_len == 64) { 4055 stats->prc64++; 4056 stats->prc127--; 4057 } else if (frame_len == 127) { 4058 stats->prc127++; 4059 stats->prc255--; 4060 } else if (frame_len == 255) { 4061 stats->prc255++; 4062 stats->prc511--; 4063 } else if (frame_len == 511) { 4064 stats->prc511++; 4065 stats->prc1023--; 4066 } else if (frame_len == 1023) { 4067 stats->prc1023++; 4068 stats->prc1522--; 4069 } else if (frame_len == 1522) { 4070 stats->prc1522++; 4071 } 4072 } 4073 4074 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, 4075 u8 status, u8 errors, 4076 u32 length, const u8 *data) 4077 { 4078 struct e1000_hw *hw = &adapter->hw; 4079 u8 last_byte = *(data + length - 1); 4080 4081 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { 4082 unsigned long irq_flags; 4083 4084 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 4085 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); 4086 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); 4087 4088 return true; 4089 } 4090 4091 return false; 4092 } 4093 4094 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, 4095 unsigned int bufsz) 4096 { 4097 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); 4098 4099 if (unlikely(!skb)) 4100 adapter->alloc_rx_buff_failed++; 4101 return skb; 4102 } 4103 4104 /** 4105 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 4106 * @adapter: board private structure 4107 * @rx_ring: ring to clean 4108 * @work_done: amount of napi work completed this call 4109 * @work_to_do: max amount of work allowed for this call to do 4110 * 4111 * the return value indicates whether actual cleaning was done, there 4112 * is no guarantee that everything was cleaned 4113 */ 4114 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 4115 struct e1000_rx_ring *rx_ring, 4116 int *work_done, int work_to_do) 4117 { 4118 struct net_device *netdev = adapter->netdev; 4119 struct pci_dev *pdev = adapter->pdev; 4120 struct e1000_rx_desc *rx_desc, *next_rxd; 4121 struct e1000_rx_buffer *buffer_info, *next_buffer; 4122 u32 length; 4123 unsigned int i; 4124 int cleaned_count = 0; 4125 bool cleaned = false; 4126 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4127 4128 i = rx_ring->next_to_clean; 4129 rx_desc = E1000_RX_DESC(*rx_ring, i); 4130 buffer_info = &rx_ring->buffer_info[i]; 4131 4132 while (rx_desc->status & E1000_RXD_STAT_DD) { 4133 struct sk_buff *skb; 4134 u8 status; 4135 4136 if (*work_done >= work_to_do) 4137 break; 4138 (*work_done)++; 4139 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4140 4141 status = rx_desc->status; 4142 4143 if (++i == rx_ring->count) 4144 i = 0; 4145 4146 next_rxd = E1000_RX_DESC(*rx_ring, i); 4147 prefetch(next_rxd); 4148 4149 next_buffer = &rx_ring->buffer_info[i]; 4150 4151 cleaned = true; 4152 cleaned_count++; 4153 dma_unmap_page(&pdev->dev, buffer_info->dma, 4154 adapter->rx_buffer_len, DMA_FROM_DEVICE); 4155 buffer_info->dma = 0; 4156 4157 length = le16_to_cpu(rx_desc->length); 4158 4159 /* errors is only valid for DD + EOP descriptors */ 4160 if (unlikely((status & E1000_RXD_STAT_EOP) && 4161 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 4162 u8 *mapped = page_address(buffer_info->rxbuf.page); 4163 4164 if (e1000_tbi_should_accept(adapter, status, 4165 rx_desc->errors, 4166 length, mapped)) { 4167 length--; 4168 } else if (netdev->features & NETIF_F_RXALL) { 4169 goto process_skb; 4170 } else { 4171 /* an error means any chain goes out the window 4172 * too 4173 */ 4174 dev_kfree_skb(rx_ring->rx_skb_top); 4175 rx_ring->rx_skb_top = NULL; 4176 goto next_desc; 4177 } 4178 } 4179 4180 #define rxtop rx_ring->rx_skb_top 4181 process_skb: 4182 if (!(status & E1000_RXD_STAT_EOP)) { 4183 /* this descriptor is only the beginning (or middle) */ 4184 if (!rxtop) { 4185 /* this is the beginning of a chain */ 4186 rxtop = napi_get_frags(&adapter->napi); 4187 if (!rxtop) 4188 break; 4189 4190 skb_fill_page_desc(rxtop, 0, 4191 buffer_info->rxbuf.page, 4192 0, length); 4193 } else { 4194 /* this is the middle of a chain */ 4195 skb_fill_page_desc(rxtop, 4196 skb_shinfo(rxtop)->nr_frags, 4197 buffer_info->rxbuf.page, 0, length); 4198 } 4199 e1000_consume_page(buffer_info, rxtop, length); 4200 goto next_desc; 4201 } else { 4202 if (rxtop) { 4203 /* end of the chain */ 4204 skb_fill_page_desc(rxtop, 4205 skb_shinfo(rxtop)->nr_frags, 4206 buffer_info->rxbuf.page, 0, length); 4207 skb = rxtop; 4208 rxtop = NULL; 4209 e1000_consume_page(buffer_info, skb, length); 4210 } else { 4211 struct page *p; 4212 /* no chain, got EOP, this buf is the packet 4213 * copybreak to save the put_page/alloc_page 4214 */ 4215 p = buffer_info->rxbuf.page; 4216 if (length <= copybreak) { 4217 u8 *vaddr; 4218 4219 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4220 length -= 4; 4221 skb = e1000_alloc_rx_skb(adapter, 4222 length); 4223 if (!skb) 4224 break; 4225 4226 vaddr = kmap_atomic(p); 4227 memcpy(skb_tail_pointer(skb), vaddr, 4228 length); 4229 kunmap_atomic(vaddr); 4230 /* re-use the page, so don't erase 4231 * buffer_info->rxbuf.page 4232 */ 4233 skb_put(skb, length); 4234 e1000_rx_checksum(adapter, 4235 status | rx_desc->errors << 24, 4236 le16_to_cpu(rx_desc->csum), skb); 4237 4238 total_rx_bytes += skb->len; 4239 total_rx_packets++; 4240 4241 e1000_receive_skb(adapter, status, 4242 rx_desc->special, skb); 4243 goto next_desc; 4244 } else { 4245 skb = napi_get_frags(&adapter->napi); 4246 if (!skb) { 4247 adapter->alloc_rx_buff_failed++; 4248 break; 4249 } 4250 skb_fill_page_desc(skb, 0, p, 0, 4251 length); 4252 e1000_consume_page(buffer_info, skb, 4253 length); 4254 } 4255 } 4256 } 4257 4258 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 4259 e1000_rx_checksum(adapter, 4260 (u32)(status) | 4261 ((u32)(rx_desc->errors) << 24), 4262 le16_to_cpu(rx_desc->csum), skb); 4263 4264 total_rx_bytes += (skb->len - 4); /* don't count FCS */ 4265 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4266 pskb_trim(skb, skb->len - 4); 4267 total_rx_packets++; 4268 4269 if (status & E1000_RXD_STAT_VP) { 4270 __le16 vlan = rx_desc->special; 4271 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4272 4273 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4274 } 4275 4276 napi_gro_frags(&adapter->napi); 4277 4278 next_desc: 4279 rx_desc->status = 0; 4280 4281 /* return some buffers to hardware, one at a time is too slow */ 4282 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4283 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4284 cleaned_count = 0; 4285 } 4286 4287 /* use prefetched values */ 4288 rx_desc = next_rxd; 4289 buffer_info = next_buffer; 4290 } 4291 rx_ring->next_to_clean = i; 4292 4293 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4294 if (cleaned_count) 4295 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4296 4297 adapter->total_rx_packets += total_rx_packets; 4298 adapter->total_rx_bytes += total_rx_bytes; 4299 netdev->stats.rx_bytes += total_rx_bytes; 4300 netdev->stats.rx_packets += total_rx_packets; 4301 return cleaned; 4302 } 4303 4304 /* this should improve performance for small packets with large amounts 4305 * of reassembly being done in the stack 4306 */ 4307 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, 4308 struct e1000_rx_buffer *buffer_info, 4309 u32 length, const void *data) 4310 { 4311 struct sk_buff *skb; 4312 4313 if (length > copybreak) 4314 return NULL; 4315 4316 skb = e1000_alloc_rx_skb(adapter, length); 4317 if (!skb) 4318 return NULL; 4319 4320 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, 4321 length, DMA_FROM_DEVICE); 4322 4323 skb_put_data(skb, data, length); 4324 4325 return skb; 4326 } 4327 4328 /** 4329 * e1000_clean_rx_irq - Send received data up the network stack; legacy 4330 * @adapter: board private structure 4331 * @rx_ring: ring to clean 4332 * @work_done: amount of napi work completed this call 4333 * @work_to_do: max amount of work allowed for this call to do 4334 */ 4335 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 4336 struct e1000_rx_ring *rx_ring, 4337 int *work_done, int work_to_do) 4338 { 4339 struct net_device *netdev = adapter->netdev; 4340 struct pci_dev *pdev = adapter->pdev; 4341 struct e1000_rx_desc *rx_desc, *next_rxd; 4342 struct e1000_rx_buffer *buffer_info, *next_buffer; 4343 u32 length; 4344 unsigned int i; 4345 int cleaned_count = 0; 4346 bool cleaned = false; 4347 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4348 4349 i = rx_ring->next_to_clean; 4350 rx_desc = E1000_RX_DESC(*rx_ring, i); 4351 buffer_info = &rx_ring->buffer_info[i]; 4352 4353 while (rx_desc->status & E1000_RXD_STAT_DD) { 4354 struct sk_buff *skb; 4355 u8 *data; 4356 u8 status; 4357 4358 if (*work_done >= work_to_do) 4359 break; 4360 (*work_done)++; 4361 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4362 4363 status = rx_desc->status; 4364 length = le16_to_cpu(rx_desc->length); 4365 4366 data = buffer_info->rxbuf.data; 4367 prefetch(data); 4368 skb = e1000_copybreak(adapter, buffer_info, length, data); 4369 if (!skb) { 4370 unsigned int frag_len = e1000_frag_len(adapter); 4371 4372 skb = build_skb(data - E1000_HEADROOM, frag_len); 4373 if (!skb) { 4374 adapter->alloc_rx_buff_failed++; 4375 break; 4376 } 4377 4378 skb_reserve(skb, E1000_HEADROOM); 4379 dma_unmap_single(&pdev->dev, buffer_info->dma, 4380 adapter->rx_buffer_len, 4381 DMA_FROM_DEVICE); 4382 buffer_info->dma = 0; 4383 buffer_info->rxbuf.data = NULL; 4384 } 4385 4386 if (++i == rx_ring->count) 4387 i = 0; 4388 4389 next_rxd = E1000_RX_DESC(*rx_ring, i); 4390 prefetch(next_rxd); 4391 4392 next_buffer = &rx_ring->buffer_info[i]; 4393 4394 cleaned = true; 4395 cleaned_count++; 4396 4397 /* !EOP means multiple descriptors were used to store a single 4398 * packet, if thats the case we need to toss it. In fact, we 4399 * to toss every packet with the EOP bit clear and the next 4400 * frame that _does_ have the EOP bit set, as it is by 4401 * definition only a frame fragment 4402 */ 4403 if (unlikely(!(status & E1000_RXD_STAT_EOP))) 4404 adapter->discarding = true; 4405 4406 if (adapter->discarding) { 4407 /* All receives must fit into a single buffer */ 4408 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); 4409 dev_kfree_skb(skb); 4410 if (status & E1000_RXD_STAT_EOP) 4411 adapter->discarding = false; 4412 goto next_desc; 4413 } 4414 4415 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 4416 if (e1000_tbi_should_accept(adapter, status, 4417 rx_desc->errors, 4418 length, data)) { 4419 length--; 4420 } else if (netdev->features & NETIF_F_RXALL) { 4421 goto process_skb; 4422 } else { 4423 dev_kfree_skb(skb); 4424 goto next_desc; 4425 } 4426 } 4427 4428 process_skb: 4429 total_rx_bytes += (length - 4); /* don't count FCS */ 4430 total_rx_packets++; 4431 4432 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4433 /* adjust length to remove Ethernet CRC, this must be 4434 * done after the TBI_ACCEPT workaround above 4435 */ 4436 length -= 4; 4437 4438 if (buffer_info->rxbuf.data == NULL) 4439 skb_put(skb, length); 4440 else /* copybreak skb */ 4441 skb_trim(skb, length); 4442 4443 /* Receive Checksum Offload */ 4444 e1000_rx_checksum(adapter, 4445 (u32)(status) | 4446 ((u32)(rx_desc->errors) << 24), 4447 le16_to_cpu(rx_desc->csum), skb); 4448 4449 e1000_receive_skb(adapter, status, rx_desc->special, skb); 4450 4451 next_desc: 4452 rx_desc->status = 0; 4453 4454 /* return some buffers to hardware, one at a time is too slow */ 4455 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4456 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4457 cleaned_count = 0; 4458 } 4459 4460 /* use prefetched values */ 4461 rx_desc = next_rxd; 4462 buffer_info = next_buffer; 4463 } 4464 rx_ring->next_to_clean = i; 4465 4466 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4467 if (cleaned_count) 4468 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4469 4470 adapter->total_rx_packets += total_rx_packets; 4471 adapter->total_rx_bytes += total_rx_bytes; 4472 netdev->stats.rx_bytes += total_rx_bytes; 4473 netdev->stats.rx_packets += total_rx_packets; 4474 return cleaned; 4475 } 4476 4477 /** 4478 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 4479 * @adapter: address of board private structure 4480 * @rx_ring: pointer to receive ring structure 4481 * @cleaned_count: number of buffers to allocate this pass 4482 **/ 4483 static void 4484 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4485 struct e1000_rx_ring *rx_ring, int cleaned_count) 4486 { 4487 struct pci_dev *pdev = adapter->pdev; 4488 struct e1000_rx_desc *rx_desc; 4489 struct e1000_rx_buffer *buffer_info; 4490 unsigned int i; 4491 4492 i = rx_ring->next_to_use; 4493 buffer_info = &rx_ring->buffer_info[i]; 4494 4495 while (cleaned_count--) { 4496 /* allocate a new page if necessary */ 4497 if (!buffer_info->rxbuf.page) { 4498 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); 4499 if (unlikely(!buffer_info->rxbuf.page)) { 4500 adapter->alloc_rx_buff_failed++; 4501 break; 4502 } 4503 } 4504 4505 if (!buffer_info->dma) { 4506 buffer_info->dma = dma_map_page(&pdev->dev, 4507 buffer_info->rxbuf.page, 0, 4508 adapter->rx_buffer_len, 4509 DMA_FROM_DEVICE); 4510 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4511 put_page(buffer_info->rxbuf.page); 4512 buffer_info->rxbuf.page = NULL; 4513 buffer_info->dma = 0; 4514 adapter->alloc_rx_buff_failed++; 4515 break; 4516 } 4517 } 4518 4519 rx_desc = E1000_RX_DESC(*rx_ring, i); 4520 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4521 4522 if (unlikely(++i == rx_ring->count)) 4523 i = 0; 4524 buffer_info = &rx_ring->buffer_info[i]; 4525 } 4526 4527 if (likely(rx_ring->next_to_use != i)) { 4528 rx_ring->next_to_use = i; 4529 if (unlikely(i-- == 0)) 4530 i = (rx_ring->count - 1); 4531 4532 /* Force memory writes to complete before letting h/w 4533 * know there are new descriptors to fetch. (Only 4534 * applicable for weak-ordered memory model archs, 4535 * such as IA-64). 4536 */ 4537 dma_wmb(); 4538 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4539 } 4540 } 4541 4542 /** 4543 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4544 * @adapter: address of board private structure 4545 **/ 4546 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4547 struct e1000_rx_ring *rx_ring, 4548 int cleaned_count) 4549 { 4550 struct e1000_hw *hw = &adapter->hw; 4551 struct pci_dev *pdev = adapter->pdev; 4552 struct e1000_rx_desc *rx_desc; 4553 struct e1000_rx_buffer *buffer_info; 4554 unsigned int i; 4555 unsigned int bufsz = adapter->rx_buffer_len; 4556 4557 i = rx_ring->next_to_use; 4558 buffer_info = &rx_ring->buffer_info[i]; 4559 4560 while (cleaned_count--) { 4561 void *data; 4562 4563 if (buffer_info->rxbuf.data) 4564 goto skip; 4565 4566 data = e1000_alloc_frag(adapter); 4567 if (!data) { 4568 /* Better luck next round */ 4569 adapter->alloc_rx_buff_failed++; 4570 break; 4571 } 4572 4573 /* Fix for errata 23, can't cross 64kB boundary */ 4574 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4575 void *olddata = data; 4576 e_err(rx_err, "skb align check failed: %u bytes at " 4577 "%p\n", bufsz, data); 4578 /* Try again, without freeing the previous */ 4579 data = e1000_alloc_frag(adapter); 4580 /* Failed allocation, critical failure */ 4581 if (!data) { 4582 skb_free_frag(olddata); 4583 adapter->alloc_rx_buff_failed++; 4584 break; 4585 } 4586 4587 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4588 /* give up */ 4589 skb_free_frag(data); 4590 skb_free_frag(olddata); 4591 adapter->alloc_rx_buff_failed++; 4592 break; 4593 } 4594 4595 /* Use new allocation */ 4596 skb_free_frag(olddata); 4597 } 4598 buffer_info->dma = dma_map_single(&pdev->dev, 4599 data, 4600 adapter->rx_buffer_len, 4601 DMA_FROM_DEVICE); 4602 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4603 skb_free_frag(data); 4604 buffer_info->dma = 0; 4605 adapter->alloc_rx_buff_failed++; 4606 break; 4607 } 4608 4609 /* XXX if it was allocated cleanly it will never map to a 4610 * boundary crossing 4611 */ 4612 4613 /* Fix for errata 23, can't cross 64kB boundary */ 4614 if (!e1000_check_64k_bound(adapter, 4615 (void *)(unsigned long)buffer_info->dma, 4616 adapter->rx_buffer_len)) { 4617 e_err(rx_err, "dma align check failed: %u bytes at " 4618 "%p\n", adapter->rx_buffer_len, 4619 (void *)(unsigned long)buffer_info->dma); 4620 4621 dma_unmap_single(&pdev->dev, buffer_info->dma, 4622 adapter->rx_buffer_len, 4623 DMA_FROM_DEVICE); 4624 4625 skb_free_frag(data); 4626 buffer_info->rxbuf.data = NULL; 4627 buffer_info->dma = 0; 4628 4629 adapter->alloc_rx_buff_failed++; 4630 break; 4631 } 4632 buffer_info->rxbuf.data = data; 4633 skip: 4634 rx_desc = E1000_RX_DESC(*rx_ring, i); 4635 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4636 4637 if (unlikely(++i == rx_ring->count)) 4638 i = 0; 4639 buffer_info = &rx_ring->buffer_info[i]; 4640 } 4641 4642 if (likely(rx_ring->next_to_use != i)) { 4643 rx_ring->next_to_use = i; 4644 if (unlikely(i-- == 0)) 4645 i = (rx_ring->count - 1); 4646 4647 /* Force memory writes to complete before letting h/w 4648 * know there are new descriptors to fetch. (Only 4649 * applicable for weak-ordered memory model archs, 4650 * such as IA-64). 4651 */ 4652 dma_wmb(); 4653 writel(i, hw->hw_addr + rx_ring->rdt); 4654 } 4655 } 4656 4657 /** 4658 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4659 * @adapter: 4660 **/ 4661 static void e1000_smartspeed(struct e1000_adapter *adapter) 4662 { 4663 struct e1000_hw *hw = &adapter->hw; 4664 u16 phy_status; 4665 u16 phy_ctrl; 4666 4667 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4668 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4669 return; 4670 4671 if (adapter->smartspeed == 0) { 4672 /* If Master/Slave config fault is asserted twice, 4673 * we assume back-to-back 4674 */ 4675 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4676 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4677 return; 4678 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4679 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4680 return; 4681 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4682 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4683 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4684 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4685 phy_ctrl); 4686 adapter->smartspeed++; 4687 if (!e1000_phy_setup_autoneg(hw) && 4688 !e1000_read_phy_reg(hw, PHY_CTRL, 4689 &phy_ctrl)) { 4690 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4691 MII_CR_RESTART_AUTO_NEG); 4692 e1000_write_phy_reg(hw, PHY_CTRL, 4693 phy_ctrl); 4694 } 4695 } 4696 return; 4697 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4698 /* If still no link, perhaps using 2/3 pair cable */ 4699 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4700 phy_ctrl |= CR_1000T_MS_ENABLE; 4701 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4702 if (!e1000_phy_setup_autoneg(hw) && 4703 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4704 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4705 MII_CR_RESTART_AUTO_NEG); 4706 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4707 } 4708 } 4709 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4710 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4711 adapter->smartspeed = 0; 4712 } 4713 4714 /** 4715 * e1000_ioctl - 4716 * @netdev: 4717 * @ifreq: 4718 * @cmd: 4719 **/ 4720 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4721 { 4722 switch (cmd) { 4723 case SIOCGMIIPHY: 4724 case SIOCGMIIREG: 4725 case SIOCSMIIREG: 4726 return e1000_mii_ioctl(netdev, ifr, cmd); 4727 default: 4728 return -EOPNOTSUPP; 4729 } 4730 } 4731 4732 /** 4733 * e1000_mii_ioctl - 4734 * @netdev: 4735 * @ifreq: 4736 * @cmd: 4737 **/ 4738 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4739 int cmd) 4740 { 4741 struct e1000_adapter *adapter = netdev_priv(netdev); 4742 struct e1000_hw *hw = &adapter->hw; 4743 struct mii_ioctl_data *data = if_mii(ifr); 4744 int retval; 4745 u16 mii_reg; 4746 unsigned long flags; 4747 4748 if (hw->media_type != e1000_media_type_copper) 4749 return -EOPNOTSUPP; 4750 4751 switch (cmd) { 4752 case SIOCGMIIPHY: 4753 data->phy_id = hw->phy_addr; 4754 break; 4755 case SIOCGMIIREG: 4756 spin_lock_irqsave(&adapter->stats_lock, flags); 4757 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4758 &data->val_out)) { 4759 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4760 return -EIO; 4761 } 4762 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4763 break; 4764 case SIOCSMIIREG: 4765 if (data->reg_num & ~(0x1F)) 4766 return -EFAULT; 4767 mii_reg = data->val_in; 4768 spin_lock_irqsave(&adapter->stats_lock, flags); 4769 if (e1000_write_phy_reg(hw, data->reg_num, 4770 mii_reg)) { 4771 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4772 return -EIO; 4773 } 4774 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4775 if (hw->media_type == e1000_media_type_copper) { 4776 switch (data->reg_num) { 4777 case PHY_CTRL: 4778 if (mii_reg & MII_CR_POWER_DOWN) 4779 break; 4780 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4781 hw->autoneg = 1; 4782 hw->autoneg_advertised = 0x2F; 4783 } else { 4784 u32 speed; 4785 if (mii_reg & 0x40) 4786 speed = SPEED_1000; 4787 else if (mii_reg & 0x2000) 4788 speed = SPEED_100; 4789 else 4790 speed = SPEED_10; 4791 retval = e1000_set_spd_dplx( 4792 adapter, speed, 4793 ((mii_reg & 0x100) 4794 ? DUPLEX_FULL : 4795 DUPLEX_HALF)); 4796 if (retval) 4797 return retval; 4798 } 4799 if (netif_running(adapter->netdev)) 4800 e1000_reinit_locked(adapter); 4801 else 4802 e1000_reset(adapter); 4803 break; 4804 case M88E1000_PHY_SPEC_CTRL: 4805 case M88E1000_EXT_PHY_SPEC_CTRL: 4806 if (e1000_phy_reset(hw)) 4807 return -EIO; 4808 break; 4809 } 4810 } else { 4811 switch (data->reg_num) { 4812 case PHY_CTRL: 4813 if (mii_reg & MII_CR_POWER_DOWN) 4814 break; 4815 if (netif_running(adapter->netdev)) 4816 e1000_reinit_locked(adapter); 4817 else 4818 e1000_reset(adapter); 4819 break; 4820 } 4821 } 4822 break; 4823 default: 4824 return -EOPNOTSUPP; 4825 } 4826 return E1000_SUCCESS; 4827 } 4828 4829 void e1000_pci_set_mwi(struct e1000_hw *hw) 4830 { 4831 struct e1000_adapter *adapter = hw->back; 4832 int ret_val = pci_set_mwi(adapter->pdev); 4833 4834 if (ret_val) 4835 e_err(probe, "Error in setting MWI\n"); 4836 } 4837 4838 void e1000_pci_clear_mwi(struct e1000_hw *hw) 4839 { 4840 struct e1000_adapter *adapter = hw->back; 4841 4842 pci_clear_mwi(adapter->pdev); 4843 } 4844 4845 int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4846 { 4847 struct e1000_adapter *adapter = hw->back; 4848 return pcix_get_mmrbc(adapter->pdev); 4849 } 4850 4851 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4852 { 4853 struct e1000_adapter *adapter = hw->back; 4854 pcix_set_mmrbc(adapter->pdev, mmrbc); 4855 } 4856 4857 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4858 { 4859 outl(value, port); 4860 } 4861 4862 static bool e1000_vlan_used(struct e1000_adapter *adapter) 4863 { 4864 u16 vid; 4865 4866 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4867 return true; 4868 return false; 4869 } 4870 4871 static void __e1000_vlan_mode(struct e1000_adapter *adapter, 4872 netdev_features_t features) 4873 { 4874 struct e1000_hw *hw = &adapter->hw; 4875 u32 ctrl; 4876 4877 ctrl = er32(CTRL); 4878 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 4879 /* enable VLAN tag insert/strip */ 4880 ctrl |= E1000_CTRL_VME; 4881 } else { 4882 /* disable VLAN tag insert/strip */ 4883 ctrl &= ~E1000_CTRL_VME; 4884 } 4885 ew32(CTRL, ctrl); 4886 } 4887 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4888 bool filter_on) 4889 { 4890 struct e1000_hw *hw = &adapter->hw; 4891 u32 rctl; 4892 4893 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4894 e1000_irq_disable(adapter); 4895 4896 __e1000_vlan_mode(adapter, adapter->netdev->features); 4897 if (filter_on) { 4898 /* enable VLAN receive filtering */ 4899 rctl = er32(RCTL); 4900 rctl &= ~E1000_RCTL_CFIEN; 4901 if (!(adapter->netdev->flags & IFF_PROMISC)) 4902 rctl |= E1000_RCTL_VFE; 4903 ew32(RCTL, rctl); 4904 e1000_update_mng_vlan(adapter); 4905 } else { 4906 /* disable VLAN receive filtering */ 4907 rctl = er32(RCTL); 4908 rctl &= ~E1000_RCTL_VFE; 4909 ew32(RCTL, rctl); 4910 } 4911 4912 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4913 e1000_irq_enable(adapter); 4914 } 4915 4916 static void e1000_vlan_mode(struct net_device *netdev, 4917 netdev_features_t features) 4918 { 4919 struct e1000_adapter *adapter = netdev_priv(netdev); 4920 4921 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4922 e1000_irq_disable(adapter); 4923 4924 __e1000_vlan_mode(adapter, features); 4925 4926 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4927 e1000_irq_enable(adapter); 4928 } 4929 4930 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 4931 __be16 proto, u16 vid) 4932 { 4933 struct e1000_adapter *adapter = netdev_priv(netdev); 4934 struct e1000_hw *hw = &adapter->hw; 4935 u32 vfta, index; 4936 4937 if ((hw->mng_cookie.status & 4938 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4939 (vid == adapter->mng_vlan_id)) 4940 return 0; 4941 4942 if (!e1000_vlan_used(adapter)) 4943 e1000_vlan_filter_on_off(adapter, true); 4944 4945 /* add VID to filter table */ 4946 index = (vid >> 5) & 0x7F; 4947 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4948 vfta |= (1 << (vid & 0x1F)); 4949 e1000_write_vfta(hw, index, vfta); 4950 4951 set_bit(vid, adapter->active_vlans); 4952 4953 return 0; 4954 } 4955 4956 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 4957 __be16 proto, u16 vid) 4958 { 4959 struct e1000_adapter *adapter = netdev_priv(netdev); 4960 struct e1000_hw *hw = &adapter->hw; 4961 u32 vfta, index; 4962 4963 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4964 e1000_irq_disable(adapter); 4965 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4966 e1000_irq_enable(adapter); 4967 4968 /* remove VID from filter table */ 4969 index = (vid >> 5) & 0x7F; 4970 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4971 vfta &= ~(1 << (vid & 0x1F)); 4972 e1000_write_vfta(hw, index, vfta); 4973 4974 clear_bit(vid, adapter->active_vlans); 4975 4976 if (!e1000_vlan_used(adapter)) 4977 e1000_vlan_filter_on_off(adapter, false); 4978 4979 return 0; 4980 } 4981 4982 static void e1000_restore_vlan(struct e1000_adapter *adapter) 4983 { 4984 u16 vid; 4985 4986 if (!e1000_vlan_used(adapter)) 4987 return; 4988 4989 e1000_vlan_filter_on_off(adapter, true); 4990 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4991 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 4992 } 4993 4994 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) 4995 { 4996 struct e1000_hw *hw = &adapter->hw; 4997 4998 hw->autoneg = 0; 4999 5000 /* Make sure dplx is at most 1 bit and lsb of speed is not set 5001 * for the switch() below to work 5002 */ 5003 if ((spd & 1) || (dplx & ~1)) 5004 goto err_inval; 5005 5006 /* Fiber NICs only allow 1000 gbps Full duplex */ 5007 if ((hw->media_type == e1000_media_type_fiber) && 5008 spd != SPEED_1000 && 5009 dplx != DUPLEX_FULL) 5010 goto err_inval; 5011 5012 switch (spd + dplx) { 5013 case SPEED_10 + DUPLEX_HALF: 5014 hw->forced_speed_duplex = e1000_10_half; 5015 break; 5016 case SPEED_10 + DUPLEX_FULL: 5017 hw->forced_speed_duplex = e1000_10_full; 5018 break; 5019 case SPEED_100 + DUPLEX_HALF: 5020 hw->forced_speed_duplex = e1000_100_half; 5021 break; 5022 case SPEED_100 + DUPLEX_FULL: 5023 hw->forced_speed_duplex = e1000_100_full; 5024 break; 5025 case SPEED_1000 + DUPLEX_FULL: 5026 hw->autoneg = 1; 5027 hw->autoneg_advertised = ADVERTISE_1000_FULL; 5028 break; 5029 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5030 default: 5031 goto err_inval; 5032 } 5033 5034 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 5035 hw->mdix = AUTO_ALL_MODES; 5036 5037 return 0; 5038 5039 err_inval: 5040 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 5041 return -EINVAL; 5042 } 5043 5044 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 5045 { 5046 struct net_device *netdev = pci_get_drvdata(pdev); 5047 struct e1000_adapter *adapter = netdev_priv(netdev); 5048 struct e1000_hw *hw = &adapter->hw; 5049 u32 ctrl, ctrl_ext, rctl, status; 5050 u32 wufc = adapter->wol; 5051 #ifdef CONFIG_PM 5052 int retval = 0; 5053 #endif 5054 5055 netif_device_detach(netdev); 5056 5057 if (netif_running(netdev)) { 5058 int count = E1000_CHECK_RESET_COUNT; 5059 5060 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) 5061 usleep_range(10000, 20000); 5062 5063 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 5064 e1000_down(adapter); 5065 } 5066 5067 #ifdef CONFIG_PM 5068 retval = pci_save_state(pdev); 5069 if (retval) 5070 return retval; 5071 #endif 5072 5073 status = er32(STATUS); 5074 if (status & E1000_STATUS_LU) 5075 wufc &= ~E1000_WUFC_LNKC; 5076 5077 if (wufc) { 5078 e1000_setup_rctl(adapter); 5079 e1000_set_rx_mode(netdev); 5080 5081 rctl = er32(RCTL); 5082 5083 /* turn on all-multi mode if wake on multicast is enabled */ 5084 if (wufc & E1000_WUFC_MC) 5085 rctl |= E1000_RCTL_MPE; 5086 5087 /* enable receives in the hardware */ 5088 ew32(RCTL, rctl | E1000_RCTL_EN); 5089 5090 if (hw->mac_type >= e1000_82540) { 5091 ctrl = er32(CTRL); 5092 /* advertise wake from D3Cold */ 5093 #define E1000_CTRL_ADVD3WUC 0x00100000 5094 /* phy power management enable */ 5095 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5096 ctrl |= E1000_CTRL_ADVD3WUC | 5097 E1000_CTRL_EN_PHY_PWR_MGMT; 5098 ew32(CTRL, ctrl); 5099 } 5100 5101 if (hw->media_type == e1000_media_type_fiber || 5102 hw->media_type == e1000_media_type_internal_serdes) { 5103 /* keep the laser running in D3 */ 5104 ctrl_ext = er32(CTRL_EXT); 5105 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 5106 ew32(CTRL_EXT, ctrl_ext); 5107 } 5108 5109 ew32(WUC, E1000_WUC_PME_EN); 5110 ew32(WUFC, wufc); 5111 } else { 5112 ew32(WUC, 0); 5113 ew32(WUFC, 0); 5114 } 5115 5116 e1000_release_manageability(adapter); 5117 5118 *enable_wake = !!wufc; 5119 5120 /* make sure adapter isn't asleep if manageability is enabled */ 5121 if (adapter->en_mng_pt) 5122 *enable_wake = true; 5123 5124 if (netif_running(netdev)) 5125 e1000_free_irq(adapter); 5126 5127 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5128 pci_disable_device(pdev); 5129 5130 return 0; 5131 } 5132 5133 #ifdef CONFIG_PM 5134 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 5135 { 5136 int retval; 5137 bool wake; 5138 5139 retval = __e1000_shutdown(pdev, &wake); 5140 if (retval) 5141 return retval; 5142 5143 if (wake) { 5144 pci_prepare_to_sleep(pdev); 5145 } else { 5146 pci_wake_from_d3(pdev, false); 5147 pci_set_power_state(pdev, PCI_D3hot); 5148 } 5149 5150 return 0; 5151 } 5152 5153 static int e1000_resume(struct pci_dev *pdev) 5154 { 5155 struct net_device *netdev = pci_get_drvdata(pdev); 5156 struct e1000_adapter *adapter = netdev_priv(netdev); 5157 struct e1000_hw *hw = &adapter->hw; 5158 u32 err; 5159 5160 pci_set_power_state(pdev, PCI_D0); 5161 pci_restore_state(pdev); 5162 pci_save_state(pdev); 5163 5164 if (adapter->need_ioport) 5165 err = pci_enable_device(pdev); 5166 else 5167 err = pci_enable_device_mem(pdev); 5168 if (err) { 5169 pr_err("Cannot enable PCI device from suspend\n"); 5170 return err; 5171 } 5172 5173 /* flush memory to make sure state is correct */ 5174 smp_mb__before_atomic(); 5175 clear_bit(__E1000_DISABLED, &adapter->flags); 5176 pci_set_master(pdev); 5177 5178 pci_enable_wake(pdev, PCI_D3hot, 0); 5179 pci_enable_wake(pdev, PCI_D3cold, 0); 5180 5181 if (netif_running(netdev)) { 5182 err = e1000_request_irq(adapter); 5183 if (err) 5184 return err; 5185 } 5186 5187 e1000_power_up_phy(adapter); 5188 e1000_reset(adapter); 5189 ew32(WUS, ~0); 5190 5191 e1000_init_manageability(adapter); 5192 5193 if (netif_running(netdev)) 5194 e1000_up(adapter); 5195 5196 netif_device_attach(netdev); 5197 5198 return 0; 5199 } 5200 #endif 5201 5202 static void e1000_shutdown(struct pci_dev *pdev) 5203 { 5204 bool wake; 5205 5206 __e1000_shutdown(pdev, &wake); 5207 5208 if (system_state == SYSTEM_POWER_OFF) { 5209 pci_wake_from_d3(pdev, wake); 5210 pci_set_power_state(pdev, PCI_D3hot); 5211 } 5212 } 5213 5214 #ifdef CONFIG_NET_POLL_CONTROLLER 5215 /* Polling 'interrupt' - used by things like netconsole to send skbs 5216 * without having to re-enable interrupts. It's not called while 5217 * the interrupt routine is executing. 5218 */ 5219 static void e1000_netpoll(struct net_device *netdev) 5220 { 5221 struct e1000_adapter *adapter = netdev_priv(netdev); 5222 5223 if (disable_hardirq(adapter->pdev->irq)) 5224 e1000_intr(adapter->pdev->irq, netdev); 5225 enable_irq(adapter->pdev->irq); 5226 } 5227 #endif 5228 5229 /** 5230 * e1000_io_error_detected - called when PCI error is detected 5231 * @pdev: Pointer to PCI device 5232 * @state: The current pci connection state 5233 * 5234 * This function is called after a PCI bus error affecting 5235 * this device has been detected. 5236 */ 5237 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 5238 pci_channel_state_t state) 5239 { 5240 struct net_device *netdev = pci_get_drvdata(pdev); 5241 struct e1000_adapter *adapter = netdev_priv(netdev); 5242 5243 netif_device_detach(netdev); 5244 5245 if (state == pci_channel_io_perm_failure) 5246 return PCI_ERS_RESULT_DISCONNECT; 5247 5248 if (netif_running(netdev)) 5249 e1000_down(adapter); 5250 5251 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5252 pci_disable_device(pdev); 5253 5254 /* Request a slot slot reset. */ 5255 return PCI_ERS_RESULT_NEED_RESET; 5256 } 5257 5258 /** 5259 * e1000_io_slot_reset - called after the pci bus has been reset. 5260 * @pdev: Pointer to PCI device 5261 * 5262 * Restart the card from scratch, as if from a cold-boot. Implementation 5263 * resembles the first-half of the e1000_resume routine. 5264 */ 5265 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5266 { 5267 struct net_device *netdev = pci_get_drvdata(pdev); 5268 struct e1000_adapter *adapter = netdev_priv(netdev); 5269 struct e1000_hw *hw = &adapter->hw; 5270 int err; 5271 5272 if (adapter->need_ioport) 5273 err = pci_enable_device(pdev); 5274 else 5275 err = pci_enable_device_mem(pdev); 5276 if (err) { 5277 pr_err("Cannot re-enable PCI device after reset.\n"); 5278 return PCI_ERS_RESULT_DISCONNECT; 5279 } 5280 5281 /* flush memory to make sure state is correct */ 5282 smp_mb__before_atomic(); 5283 clear_bit(__E1000_DISABLED, &adapter->flags); 5284 pci_set_master(pdev); 5285 5286 pci_enable_wake(pdev, PCI_D3hot, 0); 5287 pci_enable_wake(pdev, PCI_D3cold, 0); 5288 5289 e1000_reset(adapter); 5290 ew32(WUS, ~0); 5291 5292 return PCI_ERS_RESULT_RECOVERED; 5293 } 5294 5295 /** 5296 * e1000_io_resume - called when traffic can start flowing again. 5297 * @pdev: Pointer to PCI device 5298 * 5299 * This callback is called when the error recovery driver tells us that 5300 * its OK to resume normal operation. Implementation resembles the 5301 * second-half of the e1000_resume routine. 5302 */ 5303 static void e1000_io_resume(struct pci_dev *pdev) 5304 { 5305 struct net_device *netdev = pci_get_drvdata(pdev); 5306 struct e1000_adapter *adapter = netdev_priv(netdev); 5307 5308 e1000_init_manageability(adapter); 5309 5310 if (netif_running(netdev)) { 5311 if (e1000_up(adapter)) { 5312 pr_info("can't bring device back up after reset\n"); 5313 return; 5314 } 5315 } 5316 5317 netif_device_attach(netdev); 5318 } 5319 5320 /* e1000_main.c */ 5321