/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
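
/* Usage note (illustrative, not part of the driver logic): the module
 * parameters declared above can be given at load time, for example
 *
 *	modprobe e1000 copybreak=128 debug=3
 *
 * copybreak (mode 0644) can also be read or changed at runtime through
 * /sys/module/e1000/parameters/copybreak; debug (mode 0) is load-time only
 * and selects the netif_msg verbosity passed to netif_msg_init().
 */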

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	netif_carrier_off(netdev);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
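
/* Worked example (illustrative numbers, not driver code): with the common
 * 48 KB packet buffer (E1000_PBA_48K) and a standard 1518-byte maximum
 * frame, the high-water mark computed in e1000_reset() above is
 *
 *	hwm = min(49152 * 9 / 10, 49152 - 1518) = min(44236, 47634) = 44236
 *
 * which is then rounded down to 8-byte granularity, giving
 * fc_high_water = 44232 (0xACC8) and fc_low_water = 44224 bytes.
 */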

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
				ioremap(pci_resource_start(pdev, BAR_1),
					pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");


	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}
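
/* Illustration (hypothetical addresses, not driver code): the check above
 * compares the upper bits of the first and last byte of the region. For
 * begin = 0x0000f000 and len = 0x2000, end - 1 = 0x00010fff, so
 * (begin ^ (end - 1)) >> 16 is non-zero and the buffer is reported as
 * crossing a 64 KiB boundary (return false); the same length starting at
 * 0x00010000 stays within one 64 KiB region and passes (return true).
 */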

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);

}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
1857 **/ 1858 static void e1000_configure_rx(struct e1000_adapter *adapter) 1859 { 1860 u64 rdba; 1861 struct e1000_hw *hw = &adapter->hw; 1862 u32 rdlen, rctl, rxcsum; 1863 1864 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1865 rdlen = adapter->rx_ring[0].count * 1866 sizeof(struct e1000_rx_desc); 1867 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1868 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1869 } else { 1870 rdlen = adapter->rx_ring[0].count * 1871 sizeof(struct e1000_rx_desc); 1872 adapter->clean_rx = e1000_clean_rx_irq; 1873 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1874 } 1875 1876 /* disable receives while setting up the descriptors */ 1877 rctl = er32(RCTL); 1878 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1879 1880 /* set the Receive Delay Timer Register */ 1881 ew32(RDTR, adapter->rx_int_delay); 1882 1883 if (hw->mac_type >= e1000_82540) { 1884 ew32(RADV, adapter->rx_abs_int_delay); 1885 if (adapter->itr_setting != 0) 1886 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1887 } 1888 1889 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1890 * the Base and Length of the Rx Descriptor Ring 1891 */ 1892 switch (adapter->num_rx_queues) { 1893 case 1: 1894 default: 1895 rdba = adapter->rx_ring[0].dma; 1896 ew32(RDLEN, rdlen); 1897 ew32(RDBAH, (rdba >> 32)); 1898 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1899 ew32(RDT, 0); 1900 ew32(RDH, 0); 1901 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? 1902 E1000_RDH : E1000_82542_RDH); 1903 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 1904 E1000_RDT : E1000_82542_RDT); 1905 break; 1906 } 1907 1908 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1909 if (hw->mac_type >= e1000_82543) { 1910 rxcsum = er32(RXCSUM); 1911 if (adapter->rx_csum) 1912 rxcsum |= E1000_RXCSUM_TUOFL; 1913 else 1914 /* don't need to clear IPPCSE as it defaults to 0 */ 1915 rxcsum &= ~E1000_RXCSUM_TUOFL; 1916 ew32(RXCSUM, rxcsum); 1917 } 1918 1919 /* Enable Receives */ 1920 ew32(RCTL, rctl | E1000_RCTL_EN); 1921 } 1922 1923 /** 1924 * e1000_free_tx_resources - Free Tx Resources per Queue 1925 * @adapter: board private structure 1926 * @tx_ring: Tx descriptor ring for a specific queue 1927 * 1928 * Free all transmit software resources 1929 **/ 1930 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1931 struct e1000_tx_ring *tx_ring) 1932 { 1933 struct pci_dev *pdev = adapter->pdev; 1934 1935 e1000_clean_tx_ring(adapter, tx_ring); 1936 1937 vfree(tx_ring->buffer_info); 1938 tx_ring->buffer_info = NULL; 1939 1940 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1941 tx_ring->dma); 1942 1943 tx_ring->desc = NULL; 1944 } 1945 1946 /** 1947 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1948 * @adapter: board private structure 1949 * 1950 * Free all transmit software resources 1951 **/ 1952 void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1953 { 1954 int i; 1955 1956 for (i = 0; i < adapter->num_tx_queues; i++) 1957 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1958 } 1959 1960 static void 1961 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1962 struct e1000_tx_buffer *buffer_info) 1963 { 1964 if (buffer_info->dma) { 1965 if (buffer_info->mapped_as_page) 1966 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1967 buffer_info->length, DMA_TO_DEVICE); 1968 else 1969 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1970 buffer_info->length, 1971 DMA_TO_DEVICE); 1972 buffer_info->dma = 0; 1973 } 1974 if (buffer_info->skb) { 1975 
dev_kfree_skb_any(buffer_info->skb); 1976 buffer_info->skb = NULL; 1977 } 1978 buffer_info->time_stamp = 0; 1979 /* buffer_info must be completely set up in the transmit path */ 1980 } 1981 1982 /** 1983 * e1000_clean_tx_ring - Free Tx Buffers 1984 * @adapter: board private structure 1985 * @tx_ring: ring to be cleaned 1986 **/ 1987 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1988 struct e1000_tx_ring *tx_ring) 1989 { 1990 struct e1000_hw *hw = &adapter->hw; 1991 struct e1000_tx_buffer *buffer_info; 1992 unsigned long size; 1993 unsigned int i; 1994 1995 /* Free all the Tx ring sk_buffs */ 1996 1997 for (i = 0; i < tx_ring->count; i++) { 1998 buffer_info = &tx_ring->buffer_info[i]; 1999 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2000 } 2001 2002 netdev_reset_queue(adapter->netdev); 2003 size = sizeof(struct e1000_tx_buffer) * tx_ring->count; 2004 memset(tx_ring->buffer_info, 0, size); 2005 2006 /* Zero out the descriptor ring */ 2007 2008 memset(tx_ring->desc, 0, tx_ring->size); 2009 2010 tx_ring->next_to_use = 0; 2011 tx_ring->next_to_clean = 0; 2012 tx_ring->last_tx_tso = false; 2013 2014 writel(0, hw->hw_addr + tx_ring->tdh); 2015 writel(0, hw->hw_addr + tx_ring->tdt); 2016 } 2017 2018 /** 2019 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2020 * @adapter: board private structure 2021 **/ 2022 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2023 { 2024 int i; 2025 2026 for (i = 0; i < adapter->num_tx_queues; i++) 2027 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2028 } 2029 2030 /** 2031 * e1000_free_rx_resources - Free Rx Resources 2032 * @adapter: board private structure 2033 * @rx_ring: ring to clean the resources from 2034 * 2035 * Free all receive software resources 2036 **/ 2037 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2038 struct e1000_rx_ring *rx_ring) 2039 { 2040 struct pci_dev *pdev = adapter->pdev; 2041 2042 e1000_clean_rx_ring(adapter, rx_ring); 2043 2044 vfree(rx_ring->buffer_info); 2045 rx_ring->buffer_info = NULL; 2046 2047 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2048 rx_ring->dma); 2049 2050 rx_ring->desc = NULL; 2051 } 2052 2053 /** 2054 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2055 * @adapter: board private structure 2056 * 2057 * Free all receive software resources 2058 **/ 2059 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2060 { 2061 int i; 2062 2063 for (i = 0; i < adapter->num_rx_queues; i++) 2064 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2065 } 2066 2067 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) 2068 static unsigned int e1000_frag_len(const struct e1000_adapter *a) 2069 { 2070 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + 2071 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2072 } 2073 2074 static void *e1000_alloc_frag(const struct e1000_adapter *a) 2075 { 2076 unsigned int len = e1000_frag_len(a); 2077 u8 *data = netdev_alloc_frag(len); 2078 2079 if (likely(data)) 2080 data += E1000_HEADROOM; 2081 return data; 2082 } 2083 2084 /** 2085 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2086 * @adapter: board private structure 2087 * @rx_ring: ring to free buffers from 2088 **/ 2089 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2090 struct e1000_rx_ring *rx_ring) 2091 { 2092 struct e1000_hw *hw = &adapter->hw; 2093 struct e1000_rx_buffer *buffer_info; 2094 struct pci_dev *pdev = adapter->pdev; 2095 unsigned long size; 2096 unsigned int i; 2097 2098 /* 
Free all the Rx netfrags */ 2099 for (i = 0; i < rx_ring->count; i++) { 2100 buffer_info = &rx_ring->buffer_info[i]; 2101 if (adapter->clean_rx == e1000_clean_rx_irq) { 2102 if (buffer_info->dma) 2103 dma_unmap_single(&pdev->dev, buffer_info->dma, 2104 adapter->rx_buffer_len, 2105 DMA_FROM_DEVICE); 2106 if (buffer_info->rxbuf.data) { 2107 skb_free_frag(buffer_info->rxbuf.data); 2108 buffer_info->rxbuf.data = NULL; 2109 } 2110 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2111 if (buffer_info->dma) 2112 dma_unmap_page(&pdev->dev, buffer_info->dma, 2113 adapter->rx_buffer_len, 2114 DMA_FROM_DEVICE); 2115 if (buffer_info->rxbuf.page) { 2116 put_page(buffer_info->rxbuf.page); 2117 buffer_info->rxbuf.page = NULL; 2118 } 2119 } 2120 2121 buffer_info->dma = 0; 2122 } 2123 2124 /* there also may be some cached data from a chained receive */ 2125 napi_free_frags(&adapter->napi); 2126 rx_ring->rx_skb_top = NULL; 2127 2128 size = sizeof(struct e1000_rx_buffer) * rx_ring->count; 2129 memset(rx_ring->buffer_info, 0, size); 2130 2131 /* Zero out the descriptor ring */ 2132 memset(rx_ring->desc, 0, rx_ring->size); 2133 2134 rx_ring->next_to_clean = 0; 2135 rx_ring->next_to_use = 0; 2136 2137 writel(0, hw->hw_addr + rx_ring->rdh); 2138 writel(0, hw->hw_addr + rx_ring->rdt); 2139 } 2140 2141 /** 2142 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2143 * @adapter: board private structure 2144 **/ 2145 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2146 { 2147 int i; 2148 2149 for (i = 0; i < adapter->num_rx_queues; i++) 2150 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2151 } 2152 2153 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2154 * and memory write and invalidate disabled for certain operations 2155 */ 2156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2157 { 2158 struct e1000_hw *hw = &adapter->hw; 2159 struct net_device *netdev = adapter->netdev; 2160 u32 rctl; 2161 2162 e1000_pci_clear_mwi(hw); 2163 2164 rctl = er32(RCTL); 2165 rctl |= E1000_RCTL_RST; 2166 ew32(RCTL, rctl); 2167 E1000_WRITE_FLUSH(); 2168 mdelay(5); 2169 2170 if (netif_running(netdev)) 2171 e1000_clean_all_rx_rings(adapter); 2172 } 2173 2174 static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2175 { 2176 struct e1000_hw *hw = &adapter->hw; 2177 struct net_device *netdev = adapter->netdev; 2178 u32 rctl; 2179 2180 rctl = er32(RCTL); 2181 rctl &= ~E1000_RCTL_RST; 2182 ew32(RCTL, rctl); 2183 E1000_WRITE_FLUSH(); 2184 mdelay(5); 2185 2186 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2187 e1000_pci_set_mwi(hw); 2188 2189 if (netif_running(netdev)) { 2190 /* No need to loop, because 82542 supports only 1 queue */ 2191 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2192 e1000_configure_rx(adapter); 2193 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2194 } 2195 } 2196 2197 /** 2198 * e1000_set_mac - Change the Ethernet Address of the NIC 2199 * @netdev: network interface device structure 2200 * @p: pointer to an address structure 2201 * 2202 * Returns 0 on success, negative on failure 2203 **/ 2204 static int e1000_set_mac(struct net_device *netdev, void *p) 2205 { 2206 struct e1000_adapter *adapter = netdev_priv(netdev); 2207 struct e1000_hw *hw = &adapter->hw; 2208 struct sockaddr *addr = p; 2209 2210 if (!is_valid_ether_addr(addr->sa_data)) 2211 return -EADDRNOTAVAIL; 2212 2213 /* 82542 2.0 needs to be in reset to write receive address registers */ 2214 2215 if (hw->mac_type == e1000_82542_rev2_0) 2216 
e1000_enter_82542_rst(adapter); 2217 2218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2219 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2220 2221 e1000_rar_set(hw, hw->mac_addr, 0); 2222 2223 if (hw->mac_type == e1000_82542_rev2_0) 2224 e1000_leave_82542_rst(adapter); 2225 2226 return 0; 2227 } 2228 2229 /** 2230 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2231 * @netdev: network interface device structure 2232 * 2233 * The set_rx_mode entry point is called whenever the unicast or multicast 2234 * address lists or the network interface flags are updated. This routine is 2235 * responsible for configuring the hardware for proper unicast, multicast, 2236 * promiscuous mode, and all-multi behavior. 2237 **/ 2238 static void e1000_set_rx_mode(struct net_device *netdev) 2239 { 2240 struct e1000_adapter *adapter = netdev_priv(netdev); 2241 struct e1000_hw *hw = &adapter->hw; 2242 struct netdev_hw_addr *ha; 2243 bool use_uc = false; 2244 u32 rctl; 2245 u32 hash_value; 2246 int i, rar_entries = E1000_RAR_ENTRIES; 2247 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2248 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2249 2250 if (!mcarray) 2251 return; 2252 2253 /* Check for Promiscuous and All Multicast modes */ 2254 2255 rctl = er32(RCTL); 2256 2257 if (netdev->flags & IFF_PROMISC) { 2258 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2259 rctl &= ~E1000_RCTL_VFE; 2260 } else { 2261 if (netdev->flags & IFF_ALLMULTI) 2262 rctl |= E1000_RCTL_MPE; 2263 else 2264 rctl &= ~E1000_RCTL_MPE; 2265 /* Enable VLAN filter if there is a VLAN */ 2266 if (e1000_vlan_used(adapter)) 2267 rctl |= E1000_RCTL_VFE; 2268 } 2269 2270 if (netdev_uc_count(netdev) > rar_entries - 1) { 2271 rctl |= E1000_RCTL_UPE; 2272 } else if (!(netdev->flags & IFF_PROMISC)) { 2273 rctl &= ~E1000_RCTL_UPE; 2274 use_uc = true; 2275 } 2276 2277 ew32(RCTL, rctl); 2278 2279 /* 82542 2.0 needs to be in reset to write receive address registers */ 2280 2281 if (hw->mac_type == e1000_82542_rev2_0) 2282 e1000_enter_82542_rst(adapter); 2283 2284 /* load the first 14 addresses into the exact filters 1-14. Unicast 2285 * addresses take precedence to avoid disabling unicast filtering 2286 * when possible. 
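 * Addresses that do not fit in the RARs are hashed into the multicast
 * table instead: e1000_hash_mc_addr() produces a value whose bits
 * [11:5] select one of the MTA registers and bits [4:0] select the
 * bit within it (see the hash fallback in the loop below).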
2287 * 2288 * RAR 0 is used for the station MAC address 2289 * if there are not 14 addresses, go ahead and clear the filters 2290 */ 2291 i = 1; 2292 if (use_uc) 2293 netdev_for_each_uc_addr(ha, netdev) { 2294 if (i == rar_entries) 2295 break; 2296 e1000_rar_set(hw, ha->addr, i++); 2297 } 2298 2299 netdev_for_each_mc_addr(ha, netdev) { 2300 if (i == rar_entries) { 2301 /* load any remaining addresses into the hash table */ 2302 u32 hash_reg, hash_bit, mta; 2303 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2304 hash_reg = (hash_value >> 5) & 0x7F; 2305 hash_bit = hash_value & 0x1F; 2306 mta = (1 << hash_bit); 2307 mcarray[hash_reg] |= mta; 2308 } else { 2309 e1000_rar_set(hw, ha->addr, i++); 2310 } 2311 } 2312 2313 for (; i < rar_entries; i++) { 2314 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2315 E1000_WRITE_FLUSH(); 2316 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2317 E1000_WRITE_FLUSH(); 2318 } 2319 2320 /* write the hash table completely, write from bottom to avoid 2321 * both stupid write combining chipsets, and flushing each write 2322 */ 2323 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2324 /* If we are on an 82544 has an errata where writing odd 2325 * offsets overwrites the previous even offset, but writing 2326 * backwards over the range solves the issue by always 2327 * writing the odd offset first 2328 */ 2329 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2330 } 2331 E1000_WRITE_FLUSH(); 2332 2333 if (hw->mac_type == e1000_82542_rev2_0) 2334 e1000_leave_82542_rst(adapter); 2335 2336 kfree(mcarray); 2337 } 2338 2339 /** 2340 * e1000_update_phy_info_task - get phy info 2341 * @work: work struct contained inside adapter struct 2342 * 2343 * Need to wait a few seconds after link up to get diagnostic information from 2344 * the phy 2345 */ 2346 static void e1000_update_phy_info_task(struct work_struct *work) 2347 { 2348 struct e1000_adapter *adapter = container_of(work, 2349 struct e1000_adapter, 2350 phy_info_task.work); 2351 2352 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2353 } 2354 2355 /** 2356 * e1000_82547_tx_fifo_stall_task - task to complete work 2357 * @work: work struct contained inside adapter struct 2358 **/ 2359 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2360 { 2361 struct e1000_adapter *adapter = container_of(work, 2362 struct e1000_adapter, 2363 fifo_stall_task.work); 2364 struct e1000_hw *hw = &adapter->hw; 2365 struct net_device *netdev = adapter->netdev; 2366 u32 tctl; 2367 2368 if (atomic_read(&adapter->tx_fifo_stall)) { 2369 if ((er32(TDT) == er32(TDH)) && 2370 (er32(TDFT) == er32(TDFH)) && 2371 (er32(TDFTS) == er32(TDFHS))) { 2372 tctl = er32(TCTL); 2373 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2374 ew32(TDFT, adapter->tx_head_addr); 2375 ew32(TDFH, adapter->tx_head_addr); 2376 ew32(TDFTS, adapter->tx_head_addr); 2377 ew32(TDFHS, adapter->tx_head_addr); 2378 ew32(TCTL, tctl); 2379 E1000_WRITE_FLUSH(); 2380 2381 adapter->tx_fifo_head = 0; 2382 atomic_set(&adapter->tx_fifo_stall, 0); 2383 netif_wake_queue(netdev); 2384 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2385 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2386 } 2387 } 2388 } 2389 2390 bool e1000_has_link(struct e1000_adapter *adapter) 2391 { 2392 struct e1000_hw *hw = &adapter->hw; 2393 bool link_active = false; 2394 2395 /* get_link_status is set on LSC (link status) interrupt or rx 2396 * sequence error interrupt (except on intel ce4100). 
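 * For the ce4100 copper MAC the flag is forced on every call below,
 * so the PHY link state is re-checked each time.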
2397 * get_link_status will stay false until the 2398 * e1000_check_for_link establishes link for copper adapters 2399 * ONLY 2400 */ 2401 switch (hw->media_type) { 2402 case e1000_media_type_copper: 2403 if (hw->mac_type == e1000_ce4100) 2404 hw->get_link_status = 1; 2405 if (hw->get_link_status) { 2406 e1000_check_for_link(hw); 2407 link_active = !hw->get_link_status; 2408 } else { 2409 link_active = true; 2410 } 2411 break; 2412 case e1000_media_type_fiber: 2413 e1000_check_for_link(hw); 2414 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2415 break; 2416 case e1000_media_type_internal_serdes: 2417 e1000_check_for_link(hw); 2418 link_active = hw->serdes_has_link; 2419 break; 2420 default: 2421 break; 2422 } 2423 2424 return link_active; 2425 } 2426 2427 /** 2428 * e1000_watchdog - work function 2429 * @work: work struct contained inside adapter struct 2430 **/ 2431 static void e1000_watchdog(struct work_struct *work) 2432 { 2433 struct e1000_adapter *adapter = container_of(work, 2434 struct e1000_adapter, 2435 watchdog_task.work); 2436 struct e1000_hw *hw = &adapter->hw; 2437 struct net_device *netdev = adapter->netdev; 2438 struct e1000_tx_ring *txdr = adapter->tx_ring; 2439 u32 link, tctl; 2440 2441 link = e1000_has_link(adapter); 2442 if ((netif_carrier_ok(netdev)) && link) 2443 goto link_up; 2444 2445 if (link) { 2446 if (!netif_carrier_ok(netdev)) { 2447 u32 ctrl; 2448 bool txb2b = true; 2449 /* update snapshot of PHY registers on LSC */ 2450 e1000_get_speed_and_duplex(hw, 2451 &adapter->link_speed, 2452 &adapter->link_duplex); 2453 2454 ctrl = er32(CTRL); 2455 pr_info("%s NIC Link is Up %d Mbps %s, " 2456 "Flow Control: %s\n", 2457 netdev->name, 2458 adapter->link_speed, 2459 adapter->link_duplex == FULL_DUPLEX ? 2460 "Full Duplex" : "Half Duplex", 2461 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2462 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2463 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2464 E1000_CTRL_TFCE) ? "TX" : "None"))); 2465 2466 /* adjust timeout factor according to speed/duplex */ 2467 adapter->tx_timeout_factor = 1; 2468 switch (adapter->link_speed) { 2469 case SPEED_10: 2470 txb2b = false; 2471 adapter->tx_timeout_factor = 16; 2472 break; 2473 case SPEED_100: 2474 txb2b = false; 2475 /* maybe add some timeout factor ? 
*/ 2476 break; 2477 } 2478 2479 /* enable transmits in the hardware */ 2480 tctl = er32(TCTL); 2481 tctl |= E1000_TCTL_EN; 2482 ew32(TCTL, tctl); 2483 2484 netif_carrier_on(netdev); 2485 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2486 schedule_delayed_work(&adapter->phy_info_task, 2487 2 * HZ); 2488 adapter->smartspeed = 0; 2489 } 2490 } else { 2491 if (netif_carrier_ok(netdev)) { 2492 adapter->link_speed = 0; 2493 adapter->link_duplex = 0; 2494 pr_info("%s NIC Link is Down\n", 2495 netdev->name); 2496 netif_carrier_off(netdev); 2497 2498 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2499 schedule_delayed_work(&adapter->phy_info_task, 2500 2 * HZ); 2501 } 2502 2503 e1000_smartspeed(adapter); 2504 } 2505 2506 link_up: 2507 e1000_update_stats(adapter); 2508 2509 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2510 adapter->tpt_old = adapter->stats.tpt; 2511 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2512 adapter->colc_old = adapter->stats.colc; 2513 2514 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2515 adapter->gorcl_old = adapter->stats.gorcl; 2516 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2517 adapter->gotcl_old = adapter->stats.gotcl; 2518 2519 e1000_update_adaptive(hw); 2520 2521 if (!netif_carrier_ok(netdev)) { 2522 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2523 /* We've lost link, so the controller stops DMA, 2524 * but we've got queued Tx work that's never going 2525 * to get done, so reset controller to flush Tx. 2526 * (Do the reset outside of interrupt context). 2527 */ 2528 adapter->tx_timeout_count++; 2529 schedule_work(&adapter->reset_task); 2530 /* exit immediately since reset is imminent */ 2531 return; 2532 } 2533 } 2534 2535 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2536 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2537 /* Symmetric Tx/Rx gets a reduced ITR=2000; 2538 * Total asymmetrical Tx or Rx gets ITR=8000; 2539 * everyone else is between 2000-8000. 2540 */ 2541 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2542 u32 dif = (adapter->gotcl > adapter->gorcl ? 2543 adapter->gotcl - adapter->gorcl : 2544 adapter->gorcl - adapter->gotcl) / 10000; 2545 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2546 2547 ew32(ITR, 1000000000 / (itr * 256)); 2548 } 2549 2550 /* Cause software interrupt to ensure rx ring is cleaned */ 2551 ew32(ICS, E1000_ICS_RXDMT0); 2552 2553 /* Force detection of hung controller every watchdog period */ 2554 adapter->detect_tx_hung = true; 2555 2556 /* Reschedule the task */ 2557 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2558 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2559 } 2560 2561 enum latency_range { 2562 lowest_latency = 0, 2563 low_latency = 1, 2564 bulk_latency = 2, 2565 latency_invalid = 255 2566 }; 2567 2568 /** 2569 * e1000_update_itr - update the dynamic ITR value based on statistics 2570 * @adapter: pointer to adapter 2571 * @itr_setting: current adapter->itr 2572 * @packets: the number of packets during this measurement interval 2573 * @bytes: the number of bytes during this measurement interval 2574 * 2575 * Stores a new ITR value based on packets and byte 2576 * counts during the last interrupt. The advantage of per interrupt 2577 * computation is faster updates and more accurate ITR for the current 2578 * traffic pattern. 
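 * The latency range returned here is translated into an
 * interrupts/sec target by e1000_set_itr() below
 * (lowest_latency -> 70000, low_latency -> 20000, bulk_latency -> 4000).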
Constants in this function were computed 2579 * based on theoretical maximum wire speed and thresholds were set based 2580 * on testing data as well as attempting to minimize response time 2581 * while increasing bulk throughput. 2582 * this functionality is controlled by the InterruptThrottleRate module 2583 * parameter (see e1000_param.c) 2584 **/ 2585 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2586 u16 itr_setting, int packets, int bytes) 2587 { 2588 unsigned int retval = itr_setting; 2589 struct e1000_hw *hw = &adapter->hw; 2590 2591 if (unlikely(hw->mac_type < e1000_82540)) 2592 goto update_itr_done; 2593 2594 if (packets == 0) 2595 goto update_itr_done; 2596 2597 switch (itr_setting) { 2598 case lowest_latency: 2599 /* jumbo frames get bulk treatment*/ 2600 if (bytes/packets > 8000) 2601 retval = bulk_latency; 2602 else if ((packets < 5) && (bytes > 512)) 2603 retval = low_latency; 2604 break; 2605 case low_latency: /* 50 usec aka 20000 ints/s */ 2606 if (bytes > 10000) { 2607 /* jumbo frames need bulk latency setting */ 2608 if (bytes/packets > 8000) 2609 retval = bulk_latency; 2610 else if ((packets < 10) || ((bytes/packets) > 1200)) 2611 retval = bulk_latency; 2612 else if ((packets > 35)) 2613 retval = lowest_latency; 2614 } else if (bytes/packets > 2000) 2615 retval = bulk_latency; 2616 else if (packets <= 2 && bytes < 512) 2617 retval = lowest_latency; 2618 break; 2619 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2620 if (bytes > 25000) { 2621 if (packets > 35) 2622 retval = low_latency; 2623 } else if (bytes < 6000) { 2624 retval = low_latency; 2625 } 2626 break; 2627 } 2628 2629 update_itr_done: 2630 return retval; 2631 } 2632 2633 static void e1000_set_itr(struct e1000_adapter *adapter) 2634 { 2635 struct e1000_hw *hw = &adapter->hw; 2636 u16 current_itr; 2637 u32 new_itr = adapter->itr; 2638 2639 if (unlikely(hw->mac_type < e1000_82540)) 2640 return; 2641 2642 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2643 if (unlikely(adapter->link_speed != SPEED_1000)) { 2644 current_itr = 0; 2645 new_itr = 4000; 2646 goto set_itr_now; 2647 } 2648 2649 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, 2650 adapter->total_tx_packets, 2651 adapter->total_tx_bytes); 2652 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2653 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2654 adapter->tx_itr = low_latency; 2655 2656 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, 2657 adapter->total_rx_packets, 2658 adapter->total_rx_bytes); 2659 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2660 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2661 adapter->rx_itr = low_latency; 2662 2663 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2664 2665 switch (current_itr) { 2666 /* counts and packets in update_itr are dependent on these numbers */ 2667 case lowest_latency: 2668 new_itr = 70000; 2669 break; 2670 case low_latency: 2671 new_itr = 20000; /* aka hwitr = ~200 */ 2672 break; 2673 case bulk_latency: 2674 new_itr = 4000; 2675 break; 2676 default: 2677 break; 2678 } 2679 2680 set_itr_now: 2681 if (new_itr != adapter->itr) { 2682 /* this attempts to bias the interrupt rate towards Bulk 2683 * by adding intermediate steps when interrupt rate is 2684 * increasing 2685 */ 2686 new_itr = new_itr > adapter->itr ? 
2687 min(adapter->itr + (new_itr >> 2), new_itr) : 2688 new_itr; 2689 adapter->itr = new_itr; 2690 ew32(ITR, 1000000000 / (new_itr * 256)); 2691 } 2692 } 2693 2694 #define E1000_TX_FLAGS_CSUM 0x00000001 2695 #define E1000_TX_FLAGS_VLAN 0x00000002 2696 #define E1000_TX_FLAGS_TSO 0x00000004 2697 #define E1000_TX_FLAGS_IPV4 0x00000008 2698 #define E1000_TX_FLAGS_NO_FCS 0x00000010 2699 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2700 #define E1000_TX_FLAGS_VLAN_SHIFT 16 2701 2702 static int e1000_tso(struct e1000_adapter *adapter, 2703 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2704 __be16 protocol) 2705 { 2706 struct e1000_context_desc *context_desc; 2707 struct e1000_tx_buffer *buffer_info; 2708 unsigned int i; 2709 u32 cmd_length = 0; 2710 u16 ipcse = 0, tucse, mss; 2711 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2712 2713 if (skb_is_gso(skb)) { 2714 int err; 2715 2716 err = skb_cow_head(skb, 0); 2717 if (err < 0) 2718 return err; 2719 2720 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2721 mss = skb_shinfo(skb)->gso_size; 2722 if (protocol == htons(ETH_P_IP)) { 2723 struct iphdr *iph = ip_hdr(skb); 2724 iph->tot_len = 0; 2725 iph->check = 0; 2726 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2727 iph->daddr, 0, 2728 IPPROTO_TCP, 2729 0); 2730 cmd_length = E1000_TXD_CMD_IP; 2731 ipcse = skb_transport_offset(skb) - 1; 2732 } else if (skb_is_gso_v6(skb)) { 2733 ipv6_hdr(skb)->payload_len = 0; 2734 tcp_hdr(skb)->check = 2735 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2736 &ipv6_hdr(skb)->daddr, 2737 0, IPPROTO_TCP, 0); 2738 ipcse = 0; 2739 } 2740 ipcss = skb_network_offset(skb); 2741 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2742 tucss = skb_transport_offset(skb); 2743 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2744 tucse = 0; 2745 2746 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2747 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2748 2749 i = tx_ring->next_to_use; 2750 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2751 buffer_info = &tx_ring->buffer_info[i]; 2752 2753 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2754 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2755 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2756 context_desc->upper_setup.tcp_fields.tucss = tucss; 2757 context_desc->upper_setup.tcp_fields.tucso = tucso; 2758 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2759 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2760 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2761 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2762 2763 buffer_info->time_stamp = jiffies; 2764 buffer_info->next_to_watch = i; 2765 2766 if (++i == tx_ring->count) 2767 i = 0; 2768 2769 tx_ring->next_to_use = i; 2770 2771 return true; 2772 } 2773 return false; 2774 } 2775 2776 static bool e1000_tx_csum(struct e1000_adapter *adapter, 2777 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2778 __be16 protocol) 2779 { 2780 struct e1000_context_desc *context_desc; 2781 struct e1000_tx_buffer *buffer_info; 2782 unsigned int i; 2783 u8 css; 2784 u32 cmd_len = E1000_TXD_CMD_DEXT; 2785 2786 if (skb->ip_summed != CHECKSUM_PARTIAL) 2787 return false; 2788 2789 switch (protocol) { 2790 case cpu_to_be16(ETH_P_IP): 2791 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2792 cmd_len |= E1000_TXD_CMD_TCP; 2793 break; 2794 case cpu_to_be16(ETH_P_IPV6): 2795 /* XXX not handling all IPV6 headers */ 2796 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2797 cmd_len |= E1000_TXD_CMD_TCP; 2798 break; 2799 
default: 2800 if (unlikely(net_ratelimit())) 2801 e_warn(drv, "checksum_partial proto=%x!\n", 2802 skb->protocol); 2803 break; 2804 } 2805 2806 css = skb_checksum_start_offset(skb); 2807 2808 i = tx_ring->next_to_use; 2809 buffer_info = &tx_ring->buffer_info[i]; 2810 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2811 2812 context_desc->lower_setup.ip_config = 0; 2813 context_desc->upper_setup.tcp_fields.tucss = css; 2814 context_desc->upper_setup.tcp_fields.tucso = 2815 css + skb->csum_offset; 2816 context_desc->upper_setup.tcp_fields.tucse = 0; 2817 context_desc->tcp_seg_setup.data = 0; 2818 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2819 2820 buffer_info->time_stamp = jiffies; 2821 buffer_info->next_to_watch = i; 2822 2823 if (unlikely(++i == tx_ring->count)) 2824 i = 0; 2825 2826 tx_ring->next_to_use = i; 2827 2828 return true; 2829 } 2830 2831 #define E1000_MAX_TXD_PWR 12 2832 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2833 2834 static int e1000_tx_map(struct e1000_adapter *adapter, 2835 struct e1000_tx_ring *tx_ring, 2836 struct sk_buff *skb, unsigned int first, 2837 unsigned int max_per_txd, unsigned int nr_frags, 2838 unsigned int mss) 2839 { 2840 struct e1000_hw *hw = &adapter->hw; 2841 struct pci_dev *pdev = adapter->pdev; 2842 struct e1000_tx_buffer *buffer_info; 2843 unsigned int len = skb_headlen(skb); 2844 unsigned int offset = 0, size, count = 0, i; 2845 unsigned int f, bytecount, segs; 2846 2847 i = tx_ring->next_to_use; 2848 2849 while (len) { 2850 buffer_info = &tx_ring->buffer_info[i]; 2851 size = min(len, max_per_txd); 2852 /* Workaround for Controller erratum -- 2853 * descriptor for non-tso packet in a linear SKB that follows a 2854 * tso gets written back prematurely before the data is fully 2855 * DMA'd to the controller 2856 */ 2857 if (!skb->data_len && tx_ring->last_tx_tso && 2858 !skb_is_gso(skb)) { 2859 tx_ring->last_tx_tso = false; 2860 size -= 4; 2861 } 2862 2863 /* Workaround for premature desc write-backs 2864 * in TSO mode. Append 4-byte sentinel desc 2865 */ 2866 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2867 size -= 4; 2868 /* work-around for errata 10 and it applies 2869 * to all controllers in PCI-X mode 2870 * The fix is to make sure that the first descriptor of a 2871 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2872 */ 2873 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2874 (size > 2015) && count == 0)) 2875 size = 2015; 2876 2877 /* Workaround for potential 82544 hang in PCI-X. Avoid 2878 * terminating buffers within evenly-aligned dwords. 
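 * The check below trims 4 bytes from the mapping whenever the final
 * byte of the buffer would land with address bit 2 clear (and the
 * buffer is larger than 4 bytes), so the buffer ends a dword earlier;
 * the remaining bytes are carried into the next descriptor on the
 * following loop iteration.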
2879 */ 2880 if (unlikely(adapter->pcix_82544 && 2881 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2882 size > 4)) 2883 size -= 4; 2884 2885 buffer_info->length = size; 2886 /* set time_stamp *before* dma to help avoid a possible race */ 2887 buffer_info->time_stamp = jiffies; 2888 buffer_info->mapped_as_page = false; 2889 buffer_info->dma = dma_map_single(&pdev->dev, 2890 skb->data + offset, 2891 size, DMA_TO_DEVICE); 2892 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2893 goto dma_error; 2894 buffer_info->next_to_watch = i; 2895 2896 len -= size; 2897 offset += size; 2898 count++; 2899 if (len) { 2900 i++; 2901 if (unlikely(i == tx_ring->count)) 2902 i = 0; 2903 } 2904 } 2905 2906 for (f = 0; f < nr_frags; f++) { 2907 const struct skb_frag_struct *frag; 2908 2909 frag = &skb_shinfo(skb)->frags[f]; 2910 len = skb_frag_size(frag); 2911 offset = 0; 2912 2913 while (len) { 2914 unsigned long bufend; 2915 i++; 2916 if (unlikely(i == tx_ring->count)) 2917 i = 0; 2918 2919 buffer_info = &tx_ring->buffer_info[i]; 2920 size = min(len, max_per_txd); 2921 /* Workaround for premature desc write-backs 2922 * in TSO mode. Append 4-byte sentinel desc 2923 */ 2924 if (unlikely(mss && f == (nr_frags-1) && 2925 size == len && size > 8)) 2926 size -= 4; 2927 /* Workaround for potential 82544 hang in PCI-X. 2928 * Avoid terminating buffers within evenly-aligned 2929 * dwords. 2930 */ 2931 bufend = (unsigned long) 2932 page_to_phys(skb_frag_page(frag)); 2933 bufend += offset + size - 1; 2934 if (unlikely(adapter->pcix_82544 && 2935 !(bufend & 4) && 2936 size > 4)) 2937 size -= 4; 2938 2939 buffer_info->length = size; 2940 buffer_info->time_stamp = jiffies; 2941 buffer_info->mapped_as_page = true; 2942 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2943 offset, size, DMA_TO_DEVICE); 2944 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2945 goto dma_error; 2946 buffer_info->next_to_watch = i; 2947 2948 len -= size; 2949 offset += size; 2950 count++; 2951 } 2952 } 2953 2954 segs = skb_shinfo(skb)->gso_segs ?: 1; 2955 /* multiply data chunks by size of headers */ 2956 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2957 2958 tx_ring->buffer_info[i].skb = skb; 2959 tx_ring->buffer_info[i].segs = segs; 2960 tx_ring->buffer_info[i].bytecount = bytecount; 2961 tx_ring->buffer_info[first].next_to_watch = i; 2962 2963 return count; 2964 2965 dma_error: 2966 dev_err(&pdev->dev, "TX DMA map failed\n"); 2967 buffer_info->dma = 0; 2968 if (count) 2969 count--; 2970 2971 while (count--) { 2972 if (i == 0) 2973 i += tx_ring->count; 2974 i--; 2975 buffer_info = &tx_ring->buffer_info[i]; 2976 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2977 } 2978 2979 return 0; 2980 } 2981 2982 static void e1000_tx_queue(struct e1000_adapter *adapter, 2983 struct e1000_tx_ring *tx_ring, int tx_flags, 2984 int count) 2985 { 2986 struct e1000_tx_desc *tx_desc = NULL; 2987 struct e1000_tx_buffer *buffer_info; 2988 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2989 unsigned int i; 2990 2991 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2992 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2993 E1000_TXD_CMD_TSE; 2994 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2995 2996 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2997 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2998 } 2999 3000 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 3001 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 3002 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 3003 } 3004 3005 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 3006 
txd_lower |= E1000_TXD_CMD_VLE; 3007 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 3008 } 3009 3010 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3011 txd_lower &= ~(E1000_TXD_CMD_IFCS); 3012 3013 i = tx_ring->next_to_use; 3014 3015 while (count--) { 3016 buffer_info = &tx_ring->buffer_info[i]; 3017 tx_desc = E1000_TX_DESC(*tx_ring, i); 3018 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3019 tx_desc->lower.data = 3020 cpu_to_le32(txd_lower | buffer_info->length); 3021 tx_desc->upper.data = cpu_to_le32(txd_upper); 3022 if (unlikely(++i == tx_ring->count)) 3023 i = 0; 3024 } 3025 3026 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3027 3028 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 3029 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3030 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 3031 3032 /* Force memory writes to complete before letting h/w 3033 * know there are new descriptors to fetch. (Only 3034 * applicable for weak-ordered memory model archs, 3035 * such as IA-64). 3036 */ 3037 wmb(); 3038 3039 tx_ring->next_to_use = i; 3040 } 3041 3042 /* 82547 workaround to avoid controller hang in half-duplex environment. 3043 * The workaround is to avoid queuing a large packet that would span 3044 * the internal Tx FIFO ring boundary by notifying the stack to resend 3045 * the packet at a later time. This gives the Tx FIFO an opportunity to 3046 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3047 * to the beginning of the Tx FIFO. 3048 */ 3049 3050 #define E1000_FIFO_HDR 0x10 3051 #define E1000_82547_PAD_LEN 0x3E0 3052 3053 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3054 struct sk_buff *skb) 3055 { 3056 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3057 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3058 3059 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3060 3061 if (adapter->link_duplex != HALF_DUPLEX) 3062 goto no_fifo_stall_required; 3063 3064 if (atomic_read(&adapter->tx_fifo_stall)) 3065 return 1; 3066 3067 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3068 atomic_set(&adapter->tx_fifo_stall, 1); 3069 return 1; 3070 } 3071 3072 no_fifo_stall_required: 3073 adapter->tx_fifo_head += skb_fifo_len; 3074 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3075 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3076 return 0; 3077 } 3078 3079 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3080 { 3081 struct e1000_adapter *adapter = netdev_priv(netdev); 3082 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3083 3084 netif_stop_queue(netdev); 3085 /* Herbert's original patch had: 3086 * smp_mb__after_netif_stop_queue(); 3087 * but since that doesn't exist yet, just open code it. 3088 */ 3089 smp_mb(); 3090 3091 /* We need to check again in a case another CPU has just 3092 * made room available. 3093 */ 3094 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3095 return -EBUSY; 3096 3097 /* A reprieve! 
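 * Another CPU made room available between the stop above and this
 * re-check, so restart the queue.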
*/ 3098 netif_start_queue(netdev); 3099 ++adapter->restart_queue; 3100 return 0; 3101 } 3102 3103 static int e1000_maybe_stop_tx(struct net_device *netdev, 3104 struct e1000_tx_ring *tx_ring, int size) 3105 { 3106 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3107 return 0; 3108 return __e1000_maybe_stop_tx(netdev, size); 3109 } 3110 3111 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) 3112 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3113 struct net_device *netdev) 3114 { 3115 struct e1000_adapter *adapter = netdev_priv(netdev); 3116 struct e1000_hw *hw = &adapter->hw; 3117 struct e1000_tx_ring *tx_ring; 3118 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3119 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3120 unsigned int tx_flags = 0; 3121 unsigned int len = skb_headlen(skb); 3122 unsigned int nr_frags; 3123 unsigned int mss; 3124 int count = 0; 3125 int tso; 3126 unsigned int f; 3127 __be16 protocol = vlan_get_protocol(skb); 3128 3129 /* This goes back to the question of how to logically map a Tx queue 3130 * to a flow. Right now, performance is impacted slightly negatively 3131 * if using multiple Tx queues. If the stack breaks away from a 3132 * single qdisc implementation, we can look at this again. 3133 */ 3134 tx_ring = adapter->tx_ring; 3135 3136 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, 3137 * packets may get corrupted during padding by HW. 3138 * To WA this issue, pad all small packets manually. 3139 */ 3140 if (eth_skb_pad(skb)) 3141 return NETDEV_TX_OK; 3142 3143 mss = skb_shinfo(skb)->gso_size; 3144 /* The controller does a simple calculation to 3145 * make sure there is enough room in the FIFO before 3146 * initiating the DMA for each buffer. The calc is: 3147 * 4 = ceil(buffer len/mss). To make sure we don't 3148 * overrun the FIFO, adjust the max buffer len if mss 3149 * drops. 
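 * (Illustrative numbers: with an MSS of 536 the cap below becomes
 * min(536 << 2, 4096) = 2144 bytes per buffer, i.e. at most four
 * segments each; with the common 1460-byte MSS the existing 4096-byte
 * E1000_MAX_DATA_PER_TXD limit is already the smaller of the two.)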
3150 */ 3151 if (mss) { 3152 u8 hdr_len; 3153 max_per_txd = min(mss << 2, max_per_txd); 3154 max_txd_pwr = fls(max_per_txd) - 1; 3155 3156 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3157 if (skb->data_len && hdr_len == len) { 3158 switch (hw->mac_type) { 3159 unsigned int pull_size; 3160 case e1000_82544: 3161 /* Make sure we have room to chop off 4 bytes, 3162 * and that the end alignment will work out to 3163 * this hardware's requirements 3164 * NOTE: this is a TSO only workaround 3165 * if end byte alignment not correct move us 3166 * into the next dword 3167 */ 3168 if ((unsigned long)(skb_tail_pointer(skb) - 1) 3169 & 4) 3170 break; 3171 /* fall through */ 3172 pull_size = min((unsigned int)4, skb->data_len); 3173 if (!__pskb_pull_tail(skb, pull_size)) { 3174 e_err(drv, "__pskb_pull_tail " 3175 "failed.\n"); 3176 dev_kfree_skb_any(skb); 3177 return NETDEV_TX_OK; 3178 } 3179 len = skb_headlen(skb); 3180 break; 3181 default: 3182 /* do nothing */ 3183 break; 3184 } 3185 } 3186 } 3187 3188 /* reserve a descriptor for the offload context */ 3189 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3190 count++; 3191 count++; 3192 3193 /* Controller Erratum workaround */ 3194 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3195 count++; 3196 3197 count += TXD_USE_COUNT(len, max_txd_pwr); 3198 3199 if (adapter->pcix_82544) 3200 count++; 3201 3202 /* work-around for errata 10 and it applies to all controllers 3203 * in PCI-X mode, so add one more descriptor to the count 3204 */ 3205 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3206 (len > 2015))) 3207 count++; 3208 3209 nr_frags = skb_shinfo(skb)->nr_frags; 3210 for (f = 0; f < nr_frags; f++) 3211 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3212 max_txd_pwr); 3213 if (adapter->pcix_82544) 3214 count += nr_frags; 3215 3216 /* need: count + 2 desc gap to keep tail from touching 3217 * head, otherwise try next time 3218 */ 3219 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3220 return NETDEV_TX_BUSY; 3221 3222 if (unlikely((hw->mac_type == e1000_82547) && 3223 (e1000_82547_fifo_workaround(adapter, skb)))) { 3224 netif_stop_queue(netdev); 3225 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3226 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3227 return NETDEV_TX_BUSY; 3228 } 3229 3230 if (skb_vlan_tag_present(skb)) { 3231 tx_flags |= E1000_TX_FLAGS_VLAN; 3232 tx_flags |= (skb_vlan_tag_get(skb) << 3233 E1000_TX_FLAGS_VLAN_SHIFT); 3234 } 3235 3236 first = tx_ring->next_to_use; 3237 3238 tso = e1000_tso(adapter, tx_ring, skb, protocol); 3239 if (tso < 0) { 3240 dev_kfree_skb_any(skb); 3241 return NETDEV_TX_OK; 3242 } 3243 3244 if (likely(tso)) { 3245 if (likely(hw->mac_type != e1000_82544)) 3246 tx_ring->last_tx_tso = true; 3247 tx_flags |= E1000_TX_FLAGS_TSO; 3248 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) 3249 tx_flags |= E1000_TX_FLAGS_CSUM; 3250 3251 if (protocol == htons(ETH_P_IP)) 3252 tx_flags |= E1000_TX_FLAGS_IPV4; 3253 3254 if (unlikely(skb->no_fcs)) 3255 tx_flags |= E1000_TX_FLAGS_NO_FCS; 3256 3257 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3258 nr_frags, mss); 3259 3260 if (count) { 3261 /* The descriptors needed is higher than other Intel drivers 3262 * due to a number of workarounds. 
The breakdown is below: 3263 * Data descriptors: MAX_SKB_FRAGS + 1 3264 * Context Descriptor: 1 3265 * Keep head from touching tail: 2 3266 * Workarounds: 3 3267 */ 3268 int desc_needed = MAX_SKB_FRAGS + 7; 3269 3270 netdev_sent_queue(netdev, skb->len); 3271 skb_tx_timestamp(skb); 3272 3273 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3274 3275 /* 82544 potentially requires twice as many data descriptors 3276 * in order to guarantee buffers don't end on evenly-aligned 3277 * dwords 3278 */ 3279 if (adapter->pcix_82544) 3280 desc_needed += MAX_SKB_FRAGS + 1; 3281 3282 /* Make sure there is space in the ring for the next send. */ 3283 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); 3284 3285 if (!skb->xmit_more || 3286 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3287 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); 3288 /* we need this if more than one processor can write to 3289 * our tail at a time, it synchronizes IO on IA64/Altix 3290 * systems 3291 */ 3292 mmiowb(); 3293 } 3294 } else { 3295 dev_kfree_skb_any(skb); 3296 tx_ring->buffer_info[first].time_stamp = 0; 3297 tx_ring->next_to_use = first; 3298 } 3299 3300 return NETDEV_TX_OK; 3301 } 3302 3303 #define NUM_REGS 38 /* 1 based count */ 3304 static void e1000_regdump(struct e1000_adapter *adapter) 3305 { 3306 struct e1000_hw *hw = &adapter->hw; 3307 u32 regs[NUM_REGS]; 3308 u32 *regs_buff = regs; 3309 int i = 0; 3310 3311 static const char * const reg_name[] = { 3312 "CTRL", "STATUS", 3313 "RCTL", "RDLEN", "RDH", "RDT", "RDTR", 3314 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", 3315 "TIDV", "TXDCTL", "TADV", "TARC0", 3316 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", 3317 "TXDCTL1", "TARC1", 3318 "CTRL_EXT", "ERT", "RDBAL", "RDBAH", 3319 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", 3320 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" 3321 }; 3322 3323 regs_buff[0] = er32(CTRL); 3324 regs_buff[1] = er32(STATUS); 3325 3326 regs_buff[2] = er32(RCTL); 3327 regs_buff[3] = er32(RDLEN); 3328 regs_buff[4] = er32(RDH); 3329 regs_buff[5] = er32(RDT); 3330 regs_buff[6] = er32(RDTR); 3331 3332 regs_buff[7] = er32(TCTL); 3333 regs_buff[8] = er32(TDBAL); 3334 regs_buff[9] = er32(TDBAH); 3335 regs_buff[10] = er32(TDLEN); 3336 regs_buff[11] = er32(TDH); 3337 regs_buff[12] = er32(TDT); 3338 regs_buff[13] = er32(TIDV); 3339 regs_buff[14] = er32(TXDCTL); 3340 regs_buff[15] = er32(TADV); 3341 regs_buff[16] = er32(TARC0); 3342 3343 regs_buff[17] = er32(TDBAL1); 3344 regs_buff[18] = er32(TDBAH1); 3345 regs_buff[19] = er32(TDLEN1); 3346 regs_buff[20] = er32(TDH1); 3347 regs_buff[21] = er32(TDT1); 3348 regs_buff[22] = er32(TXDCTL1); 3349 regs_buff[23] = er32(TARC1); 3350 regs_buff[24] = er32(CTRL_EXT); 3351 regs_buff[25] = er32(ERT); 3352 regs_buff[26] = er32(RDBAL0); 3353 regs_buff[27] = er32(RDBAH0); 3354 regs_buff[28] = er32(TDFH); 3355 regs_buff[29] = er32(TDFT); 3356 regs_buff[30] = er32(TDFHS); 3357 regs_buff[31] = er32(TDFTS); 3358 regs_buff[32] = er32(TDFPC); 3359 regs_buff[33] = er32(RDFH); 3360 regs_buff[34] = er32(RDFT); 3361 regs_buff[35] = er32(RDFHS); 3362 regs_buff[36] = er32(RDFTS); 3363 regs_buff[37] = er32(RDFPC); 3364 3365 pr_info("Register dump\n"); 3366 for (i = 0; i < NUM_REGS; i++) 3367 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); 3368 } 3369 3370 /* 3371 * e1000_dump: Print registers, tx ring and rx ring 3372 */ 3373 static void e1000_dump(struct e1000_adapter *adapter) 3374 { 3375 /* this code doesn't handle multiple rings */ 3376 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3377 struct 
e1000_rx_ring *rx_ring = adapter->rx_ring; 3378 int i; 3379 3380 if (!netif_msg_hw(adapter)) 3381 return; 3382 3383 /* Print Registers */ 3384 e1000_regdump(adapter); 3385 3386 /* transmit dump */ 3387 pr_info("TX Desc ring0 dump\n"); 3388 3389 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 3390 * 3391 * Legacy Transmit Descriptor 3392 * +--------------------------------------------------------------+ 3393 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 3394 * +--------------------------------------------------------------+ 3395 * 8 | Special | CSS | Status | CMD | CSO | Length | 3396 * +--------------------------------------------------------------+ 3397 * 63 48 47 36 35 32 31 24 23 16 15 0 3398 * 3399 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 3400 * 63 48 47 40 39 32 31 16 15 8 7 0 3401 * +----------------------------------------------------------------+ 3402 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 3403 * +----------------------------------------------------------------+ 3404 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 3405 * +----------------------------------------------------------------+ 3406 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3407 * 3408 * Extended Data Descriptor (DTYP=0x1) 3409 * +----------------------------------------------------------------+ 3410 * 0 | Buffer Address [63:0] | 3411 * +----------------------------------------------------------------+ 3412 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 3413 * +----------------------------------------------------------------+ 3414 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3415 */ 3416 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3417 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3418 3419 if (!netif_msg_tx_done(adapter)) 3420 goto rx_ring_summary; 3421 3422 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3423 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3424 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; 3425 struct my_u { __le64 a; __le64 b; }; 3426 struct my_u *u = (struct my_u *)tx_desc; 3427 const char *type; 3428 3429 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 3430 type = "NTC/U"; 3431 else if (i == tx_ring->next_to_use) 3432 type = "NTU"; 3433 else if (i == tx_ring->next_to_clean) 3434 type = "NTC"; 3435 else 3436 type = ""; 3437 3438 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", 3439 ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, 3440 le64_to_cpu(u->a), le64_to_cpu(u->b), 3441 (u64)buffer_info->dma, buffer_info->length, 3442 buffer_info->next_to_watch, 3443 (u64)buffer_info->time_stamp, buffer_info->skb, type); 3444 } 3445 3446 rx_ring_summary: 3447 /* receive dump */ 3448 pr_info("\nRX Desc ring dump\n"); 3449 3450 /* Legacy Receive Descriptor Format 3451 * 3452 * +-----------------------------------------------------+ 3453 * | Buffer Address [63:0] | 3454 * +-----------------------------------------------------+ 3455 * | VLAN Tag | Errors | Status 0 | Packet csum | Length | 3456 * +-----------------------------------------------------+ 3457 * 63 48 47 40 39 32 31 16 15 0 3458 */ 3459 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); 3460 3461 if (!netif_msg_rx_status(adapter)) 3462 goto exit; 3463 3464 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3465 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3466 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; 3467 struct my_u { __le64 a; __le64 b; }; 3468 struct my_u *u = (struct my_u *)rx_desc; 3469 const char *type; 3470 3471 if (i == rx_ring->next_to_use) 3472 type = "NTU"; 3473 else if (i == rx_ring->next_to_clean) 3474 type = "NTC"; 3475 else 3476 type = ""; 3477 3478 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", 3479 i, le64_to_cpu(u->a), le64_to_cpu(u->b), 3480 (u64)buffer_info->dma, buffer_info->rxbuf.data, type); 3481 } /* for */ 3482 3483 /* dump the descriptor caches */ 3484 /* rx */ 3485 pr_info("Rx descriptor cache in 64bit format\n"); 3486 for (i = 0x6000; i <= 0x63FF ; i += 0x10) { 3487 pr_info("R%04X: %08X|%08X %08X|%08X\n", 3488 i, 3489 readl(adapter->hw.hw_addr + i+4), 3490 readl(adapter->hw.hw_addr + i), 3491 readl(adapter->hw.hw_addr + i+12), 3492 readl(adapter->hw.hw_addr + i+8)); 3493 } 3494 /* tx */ 3495 pr_info("Tx descriptor cache in 64bit format\n"); 3496 for (i = 0x7000; i <= 0x73FF ; i += 0x10) { 3497 pr_info("T%04X: %08X|%08X %08X|%08X\n", 3498 i, 3499 readl(adapter->hw.hw_addr + i+4), 3500 readl(adapter->hw.hw_addr + i), 3501 readl(adapter->hw.hw_addr + i+12), 3502 readl(adapter->hw.hw_addr + i+8)); 3503 } 3504 exit: 3505 return; 3506 } 3507 3508 /** 3509 * e1000_tx_timeout - Respond to a Tx Hang 3510 * @netdev: network interface device structure 3511 **/ 3512 static void e1000_tx_timeout(struct net_device *netdev) 3513 { 3514 struct e1000_adapter *adapter = netdev_priv(netdev); 3515 3516 /* Do the reset outside of interrupt context */ 3517 adapter->tx_timeout_count++; 3518 schedule_work(&adapter->reset_task); 3519 } 3520 3521 static void e1000_reset_task(struct work_struct *work) 3522 { 3523 struct e1000_adapter *adapter = 3524 container_of(work, struct e1000_adapter, reset_task); 3525 3526 e_err(drv, "Reset adapter\n"); 3527 e1000_reinit_locked(adapter); 3528 } 3529 3530 /** 3531 * e1000_change_mtu - Change the Maximum Transfer Unit 3532 * @netdev: network interface device structure 3533 * @new_mtu: new value for maximum frame size 3534 * 3535 * Returns 0 on success, negative on failure 3536 **/ 3537 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3538 { 3539 struct e1000_adapter *adapter = netdev_priv(netdev); 3540 struct e1000_hw *hw = &adapter->hw; 3541 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3542 3543 /* Adapter-specific max frame size limits. */ 3544 switch (hw->mac_type) { 3545 case e1000_undefined ... 
e1000_82542_rev2_1: 3546 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3547 e_err(probe, "Jumbo Frames not supported.\n"); 3548 return -EINVAL; 3549 } 3550 break; 3551 default: 3552 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3553 break; 3554 } 3555 3556 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3557 msleep(1); 3558 /* e1000_down has a dependency on max_frame_size */ 3559 hw->max_frame_size = max_frame; 3560 if (netif_running(netdev)) { 3561 /* prevent buffers from being reallocated */ 3562 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; 3563 e1000_down(adapter); 3564 } 3565 3566 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3567 * means we reserve 2 more, this pushes us to allocate from the next 3568 * larger slab size. 3569 * i.e. RXBUFFER_2048 --> size-4096 slab 3570 * however with the new *_jumbo_rx* routines, jumbo receives will use 3571 * fragmented skbs 3572 */ 3573 3574 if (max_frame <= E1000_RXBUFFER_2048) 3575 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3576 else 3577 #if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3578 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3579 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3580 adapter->rx_buffer_len = PAGE_SIZE; 3581 #endif 3582 3583 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3584 if (!hw->tbi_compatibility_on && 3585 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3586 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3587 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3588 3589 pr_info("%s changing MTU from %d to %d\n", 3590 netdev->name, netdev->mtu, new_mtu); 3591 netdev->mtu = new_mtu; 3592 3593 if (netif_running(netdev)) 3594 e1000_up(adapter); 3595 else 3596 e1000_reset(adapter); 3597 3598 clear_bit(__E1000_RESETTING, &adapter->flags); 3599 3600 return 0; 3601 } 3602 3603 /** 3604 * e1000_update_stats - Update the board statistics counters 3605 * @adapter: board private structure 3606 **/ 3607 void e1000_update_stats(struct e1000_adapter *adapter) 3608 { 3609 struct net_device *netdev = adapter->netdev; 3610 struct e1000_hw *hw = &adapter->hw; 3611 struct pci_dev *pdev = adapter->pdev; 3612 unsigned long flags; 3613 u16 phy_tmp; 3614 3615 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3616 3617 /* Prevent stats update while adapter is being reset, or if the pci 3618 * connection is down. 
3619 */ 3620 if (adapter->link_speed == 0) 3621 return; 3622 if (pci_channel_offline(pdev)) 3623 return; 3624 3625 spin_lock_irqsave(&adapter->stats_lock, flags); 3626 3627 /* these counters are modified from e1000_tbi_adjust_stats, 3628 * called from the interrupt context, so they must only 3629 * be written while holding adapter->stats_lock 3630 */ 3631 3632 adapter->stats.crcerrs += er32(CRCERRS); 3633 adapter->stats.gprc += er32(GPRC); 3634 adapter->stats.gorcl += er32(GORCL); 3635 adapter->stats.gorch += er32(GORCH); 3636 adapter->stats.bprc += er32(BPRC); 3637 adapter->stats.mprc += er32(MPRC); 3638 adapter->stats.roc += er32(ROC); 3639 3640 adapter->stats.prc64 += er32(PRC64); 3641 adapter->stats.prc127 += er32(PRC127); 3642 adapter->stats.prc255 += er32(PRC255); 3643 adapter->stats.prc511 += er32(PRC511); 3644 adapter->stats.prc1023 += er32(PRC1023); 3645 adapter->stats.prc1522 += er32(PRC1522); 3646 3647 adapter->stats.symerrs += er32(SYMERRS); 3648 adapter->stats.mpc += er32(MPC); 3649 adapter->stats.scc += er32(SCC); 3650 adapter->stats.ecol += er32(ECOL); 3651 adapter->stats.mcc += er32(MCC); 3652 adapter->stats.latecol += er32(LATECOL); 3653 adapter->stats.dc += er32(DC); 3654 adapter->stats.sec += er32(SEC); 3655 adapter->stats.rlec += er32(RLEC); 3656 adapter->stats.xonrxc += er32(XONRXC); 3657 adapter->stats.xontxc += er32(XONTXC); 3658 adapter->stats.xoffrxc += er32(XOFFRXC); 3659 adapter->stats.xofftxc += er32(XOFFTXC); 3660 adapter->stats.fcruc += er32(FCRUC); 3661 adapter->stats.gptc += er32(GPTC); 3662 adapter->stats.gotcl += er32(GOTCL); 3663 adapter->stats.gotch += er32(GOTCH); 3664 adapter->stats.rnbc += er32(RNBC); 3665 adapter->stats.ruc += er32(RUC); 3666 adapter->stats.rfc += er32(RFC); 3667 adapter->stats.rjc += er32(RJC); 3668 adapter->stats.torl += er32(TORL); 3669 adapter->stats.torh += er32(TORH); 3670 adapter->stats.totl += er32(TOTL); 3671 adapter->stats.toth += er32(TOTH); 3672 adapter->stats.tpr += er32(TPR); 3673 3674 adapter->stats.ptc64 += er32(PTC64); 3675 adapter->stats.ptc127 += er32(PTC127); 3676 adapter->stats.ptc255 += er32(PTC255); 3677 adapter->stats.ptc511 += er32(PTC511); 3678 adapter->stats.ptc1023 += er32(PTC1023); 3679 adapter->stats.ptc1522 += er32(PTC1522); 3680 3681 adapter->stats.mptc += er32(MPTC); 3682 adapter->stats.bptc += er32(BPTC); 3683 3684 /* used for adaptive IFS */ 3685 3686 hw->tx_packet_delta = er32(TPT); 3687 adapter->stats.tpt += hw->tx_packet_delta; 3688 hw->collision_delta = er32(COLC); 3689 adapter->stats.colc += hw->collision_delta; 3690 3691 if (hw->mac_type >= e1000_82543) { 3692 adapter->stats.algnerrc += er32(ALGNERRC); 3693 adapter->stats.rxerrc += er32(RXERRC); 3694 adapter->stats.tncrs += er32(TNCRS); 3695 adapter->stats.cexterr += er32(CEXTERR); 3696 adapter->stats.tsctc += er32(TSCTC); 3697 adapter->stats.tsctfc += er32(TSCTFC); 3698 } 3699 3700 /* Fill out the OS statistics structure */ 3701 netdev->stats.multicast = adapter->stats.mprc; 3702 netdev->stats.collisions = adapter->stats.colc; 3703 3704 /* Rx Errors */ 3705 3706 /* RLEC on some newer hardware can be incorrect so build 3707 * our own version based on RUC and ROC 3708 */ 3709 netdev->stats.rx_errors = adapter->stats.rxerrc + 3710 adapter->stats.crcerrs + adapter->stats.algnerrc + 3711 adapter->stats.ruc + adapter->stats.roc + 3712 adapter->stats.cexterr; 3713 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3714 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3715 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 
3716 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3717 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3718 3719 /* Tx Errors */ 3720 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3721 netdev->stats.tx_errors = adapter->stats.txerrc; 3722 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3723 netdev->stats.tx_window_errors = adapter->stats.latecol; 3724 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3725 if (hw->bad_tx_carr_stats_fd && 3726 adapter->link_duplex == FULL_DUPLEX) { 3727 netdev->stats.tx_carrier_errors = 0; 3728 adapter->stats.tncrs = 0; 3729 } 3730 3731 /* Tx Dropped needs to be maintained elsewhere */ 3732 3733 /* Phy Stats */ 3734 if (hw->media_type == e1000_media_type_copper) { 3735 if ((adapter->link_speed == SPEED_1000) && 3736 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3737 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3738 adapter->phy_stats.idle_errors += phy_tmp; 3739 } 3740 3741 if ((hw->mac_type <= e1000_82546) && 3742 (hw->phy_type == e1000_phy_m88) && 3743 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3744 adapter->phy_stats.receive_errors += phy_tmp; 3745 } 3746 3747 /* Management Stats */ 3748 if (hw->has_smbus) { 3749 adapter->stats.mgptc += er32(MGTPTC); 3750 adapter->stats.mgprc += er32(MGTPRC); 3751 adapter->stats.mgpdc += er32(MGTPDC); 3752 } 3753 3754 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3755 } 3756 3757 /** 3758 * e1000_intr - Interrupt Handler 3759 * @irq: interrupt number 3760 * @data: pointer to a network interface device structure 3761 **/ 3762 static irqreturn_t e1000_intr(int irq, void *data) 3763 { 3764 struct net_device *netdev = data; 3765 struct e1000_adapter *adapter = netdev_priv(netdev); 3766 struct e1000_hw *hw = &adapter->hw; 3767 u32 icr = er32(ICR); 3768 3769 if (unlikely((!icr))) 3770 return IRQ_NONE; /* Not our interrupt */ 3771 3772 /* we might have caused the interrupt, but the above 3773 * read cleared it, and just in case the driver is 3774 * down there is nothing to do so return handled 3775 */ 3776 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3777 return IRQ_HANDLED; 3778 3779 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3780 hw->get_link_status = 1; 3781 /* guard against interrupt when we're going down */ 3782 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3783 schedule_delayed_work(&adapter->watchdog_task, 1); 3784 } 3785 3786 /* disable interrupts, without the synchronize_irq bit */ 3787 ew32(IMC, ~0); 3788 E1000_WRITE_FLUSH(); 3789 3790 if (likely(napi_schedule_prep(&adapter->napi))) { 3791 adapter->total_tx_bytes = 0; 3792 adapter->total_tx_packets = 0; 3793 adapter->total_rx_bytes = 0; 3794 adapter->total_rx_packets = 0; 3795 __napi_schedule(&adapter->napi); 3796 } else { 3797 /* this really should not happen! 
if it does it is basically a 3798 * bug, but not a hard error, so enable ints and continue 3799 */ 3800 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3801 e1000_irq_enable(adapter); 3802 } 3803 3804 return IRQ_HANDLED; 3805 } 3806 3807 /** 3808 * e1000_clean - NAPI Rx polling callback 3809 * @adapter: board private structure 3810 **/ 3811 static int e1000_clean(struct napi_struct *napi, int budget) 3812 { 3813 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, 3814 napi); 3815 int tx_clean_complete = 0, work_done = 0; 3816 3817 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3818 3819 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3820 3821 if (!tx_clean_complete) 3822 work_done = budget; 3823 3824 /* If budget not fully consumed, exit the polling mode */ 3825 if (work_done < budget) { 3826 if (likely(adapter->itr_setting & 3)) 3827 e1000_set_itr(adapter); 3828 napi_complete_done(napi, work_done); 3829 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3830 e1000_irq_enable(adapter); 3831 } 3832 3833 return work_done; 3834 } 3835 3836 /** 3837 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3838 * @adapter: board private structure 3839 **/ 3840 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3841 struct e1000_tx_ring *tx_ring) 3842 { 3843 struct e1000_hw *hw = &adapter->hw; 3844 struct net_device *netdev = adapter->netdev; 3845 struct e1000_tx_desc *tx_desc, *eop_desc; 3846 struct e1000_tx_buffer *buffer_info; 3847 unsigned int i, eop; 3848 unsigned int count = 0; 3849 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 3850 unsigned int bytes_compl = 0, pkts_compl = 0; 3851 3852 i = tx_ring->next_to_clean; 3853 eop = tx_ring->buffer_info[i].next_to_watch; 3854 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3855 3856 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3857 (count < tx_ring->count)) { 3858 bool cleaned = false; 3859 dma_rmb(); /* read buffer_info after eop_desc */ 3860 for ( ; !cleaned; count++) { 3861 tx_desc = E1000_TX_DESC(*tx_ring, i); 3862 buffer_info = &tx_ring->buffer_info[i]; 3863 cleaned = (i == eop); 3864 3865 if (cleaned) { 3866 total_tx_packets += buffer_info->segs; 3867 total_tx_bytes += buffer_info->bytecount; 3868 if (buffer_info->skb) { 3869 bytes_compl += buffer_info->skb->len; 3870 pkts_compl++; 3871 } 3872 3873 } 3874 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3875 tx_desc->upper.data = 0; 3876 3877 if (unlikely(++i == tx_ring->count)) 3878 i = 0; 3879 } 3880 3881 eop = tx_ring->buffer_info[i].next_to_watch; 3882 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3883 } 3884 3885 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, 3886 * which will reuse the cleaned buffers. 3887 */ 3888 smp_store_release(&tx_ring->next_to_clean, i); 3889 3890 netdev_completed_queue(netdev, pkts_compl, bytes_compl); 3891 3892 #define TX_WAKE_THRESHOLD 32 3893 if (unlikely(count && netif_carrier_ok(netdev) && 3894 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3895 /* Make sure that anybody stopping the queue after this 3896 * sees the new next_to_clean. 
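 * The barrier pairs with the one in the transmit path's stop-queue check, so a concurrent e1000_xmit_frame() either sees the freed descriptors or leaves the queue stopped for the wake below.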
3897 */ 3898 smp_mb(); 3899 3900 if (netif_queue_stopped(netdev) && 3901 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3902 netif_wake_queue(netdev); 3903 ++adapter->restart_queue; 3904 } 3905 } 3906 3907 if (adapter->detect_tx_hung) { 3908 /* Detect a transmit hang in hardware, this serializes the 3909 * check with the clearing of time_stamp and movement of i 3910 */ 3911 adapter->detect_tx_hung = false; 3912 if (tx_ring->buffer_info[eop].time_stamp && 3913 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3914 (adapter->tx_timeout_factor * HZ)) && 3915 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3916 3917 /* detected Tx unit hang */ 3918 e_err(drv, "Detected Tx Unit Hang\n" 3919 " Tx Queue <%lu>\n" 3920 " TDH <%x>\n" 3921 " TDT <%x>\n" 3922 " next_to_use <%x>\n" 3923 " next_to_clean <%x>\n" 3924 "buffer_info[next_to_clean]\n" 3925 " time_stamp <%lx>\n" 3926 " next_to_watch <%x>\n" 3927 " jiffies <%lx>\n" 3928 " next_to_watch.status <%x>\n", 3929 (unsigned long)(tx_ring - adapter->tx_ring), 3930 readl(hw->hw_addr + tx_ring->tdh), 3931 readl(hw->hw_addr + tx_ring->tdt), 3932 tx_ring->next_to_use, 3933 tx_ring->next_to_clean, 3934 tx_ring->buffer_info[eop].time_stamp, 3935 eop, 3936 jiffies, 3937 eop_desc->upper.fields.status); 3938 e1000_dump(adapter); 3939 netif_stop_queue(netdev); 3940 } 3941 } 3942 adapter->total_tx_bytes += total_tx_bytes; 3943 adapter->total_tx_packets += total_tx_packets; 3944 netdev->stats.tx_bytes += total_tx_bytes; 3945 netdev->stats.tx_packets += total_tx_packets; 3946 return count < tx_ring->count; 3947 } 3948 3949 /** 3950 * e1000_rx_checksum - Receive Checksum Offload for 82543 3951 * @adapter: board private structure 3952 * @status_err: receive descriptor status and error fields 3953 * @csum: receive descriptor csum field 3954 * @sk_buff: socket buffer with received data 3955 **/ 3956 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3957 u32 csum, struct sk_buff *skb) 3958 { 3959 struct e1000_hw *hw = &adapter->hw; 3960 u16 status = (u16)status_err; 3961 u8 errors = (u8)(status_err >> 24); 3962 3963 skb_checksum_none_assert(skb); 3964 3965 /* 82543 or newer only */ 3966 if (unlikely(hw->mac_type < e1000_82543)) 3967 return; 3968 /* Ignore Checksum bit is set */ 3969 if (unlikely(status & E1000_RXD_STAT_IXSM)) 3970 return; 3971 /* TCP/UDP checksum error bit is set */ 3972 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3973 /* let the stack verify checksum errors */ 3974 adapter->hw_csum_err++; 3975 return; 3976 } 3977 /* TCP/UDP Checksum has not been calculated */ 3978 if (!(status & E1000_RXD_STAT_TCPCS)) 3979 return; 3980 3981 /* It must be a TCP or UDP packet with a valid checksum */ 3982 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3983 /* TCP checksum is good */ 3984 skb->ip_summed = CHECKSUM_UNNECESSARY; 3985 } 3986 adapter->hw_csum_good++; 3987 } 3988 3989 /** 3990 * e1000_consume_page - helper function for jumbo Rx path 3991 **/ 3992 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, 3993 u16 length) 3994 { 3995 bi->rxbuf.page = NULL; 3996 skb->len += length; 3997 skb->data_len += length; 3998 skb->truesize += PAGE_SIZE; 3999 } 4000 4001 /** 4002 * e1000_receive_skb - helper function to handle rx indications 4003 * @adapter: board private structure 4004 * @status: descriptor status field as written by hardware 4005 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 4006 * @skb: pointer to sk_buff to be indicated to stack 4007 */ 4008 static void 
e1000_receive_skb(struct e1000_adapter *adapter, u8 status, 4009 __le16 vlan, struct sk_buff *skb) 4010 { 4011 skb->protocol = eth_type_trans(skb, adapter->netdev); 4012 4013 if (status & E1000_RXD_STAT_VP) { 4014 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4015 4016 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4017 } 4018 napi_gro_receive(&adapter->napi, skb); 4019 } 4020 4021 /** 4022 * e1000_tbi_adjust_stats 4023 * @hw: Struct containing variables accessed by shared code 4024 * @frame_len: The length of the frame in question 4025 * @mac_addr: The Ethernet destination address of the frame in question 4026 * 4027 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT 4028 */ 4029 static void e1000_tbi_adjust_stats(struct e1000_hw *hw, 4030 struct e1000_hw_stats *stats, 4031 u32 frame_len, const u8 *mac_addr) 4032 { 4033 u64 carry_bit; 4034 4035 /* First adjust the frame length. */ 4036 frame_len--; 4037 /* We need to adjust the statistics counters, since the hardware 4038 * counters overcount this packet as a CRC error and undercount 4039 * the packet as a good packet 4040 */ 4041 /* This packet should not be counted as a CRC error. */ 4042 stats->crcerrs--; 4043 /* This packet does count as a Good Packet Received. */ 4044 stats->gprc++; 4045 4046 /* Adjust the Good Octets received counters */ 4047 carry_bit = 0x80000000 & stats->gorcl; 4048 stats->gorcl += frame_len; 4049 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4050 * Received Count) was one before the addition, 4051 * AND it is zero after, then we lost the carry out, 4052 * need to add one to Gorch (Good Octets Received Count High). 4053 * This could be simplified if all environments supported 4054 * 64-bit integers. 4055 */ 4056 if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) 4057 stats->gorch++; 4058 /* Is this a broadcast or multicast? Check broadcast first, 4059 * since the test for a multicast frame will test positive on 4060 * a broadcast frame. 4061 */ 4062 if (is_broadcast_ether_addr(mac_addr)) 4063 stats->bprc++; 4064 else if (is_multicast_ether_addr(mac_addr)) 4065 stats->mprc++; 4066 4067 if (frame_len == hw->max_frame_size) { 4068 /* In this case, the hardware has overcounted the number of 4069 * oversize frames. 4070 */ 4071 if (stats->roc > 0) 4072 stats->roc--; 4073 } 4074 4075 /* Adjust the bin counters when the extra byte put the frame in the 4076 * wrong bin. Remember that the frame_len was adjusted above. 
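 * For example, a frame the hardware counted as 65 bytes was binned in prc127; after the off-by-one correction frame_len is 64, so it is moved back into prc64.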
4077 */ 4078 if (frame_len == 64) { 4079 stats->prc64++; 4080 stats->prc127--; 4081 } else if (frame_len == 127) { 4082 stats->prc127++; 4083 stats->prc255--; 4084 } else if (frame_len == 255) { 4085 stats->prc255++; 4086 stats->prc511--; 4087 } else if (frame_len == 511) { 4088 stats->prc511++; 4089 stats->prc1023--; 4090 } else if (frame_len == 1023) { 4091 stats->prc1023++; 4092 stats->prc1522--; 4093 } else if (frame_len == 1522) { 4094 stats->prc1522++; 4095 } 4096 } 4097 4098 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, 4099 u8 status, u8 errors, 4100 u32 length, const u8 *data) 4101 { 4102 struct e1000_hw *hw = &adapter->hw; 4103 u8 last_byte = *(data + length - 1); 4104 4105 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { 4106 unsigned long irq_flags; 4107 4108 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 4109 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); 4110 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); 4111 4112 return true; 4113 } 4114 4115 return false; 4116 } 4117 4118 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, 4119 unsigned int bufsz) 4120 { 4121 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); 4122 4123 if (unlikely(!skb)) 4124 adapter->alloc_rx_buff_failed++; 4125 return skb; 4126 } 4127 4128 /** 4129 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 4130 * @adapter: board private structure 4131 * @rx_ring: ring to clean 4132 * @work_done: amount of napi work completed this call 4133 * @work_to_do: max amount of work allowed for this call to do 4134 * 4135 * the return value indicates whether actual cleaning was done, there 4136 * is no guarantee that everything was cleaned 4137 */ 4138 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 4139 struct e1000_rx_ring *rx_ring, 4140 int *work_done, int work_to_do) 4141 { 4142 struct net_device *netdev = adapter->netdev; 4143 struct pci_dev *pdev = adapter->pdev; 4144 struct e1000_rx_desc *rx_desc, *next_rxd; 4145 struct e1000_rx_buffer *buffer_info, *next_buffer; 4146 u32 length; 4147 unsigned int i; 4148 int cleaned_count = 0; 4149 bool cleaned = false; 4150 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4151 4152 i = rx_ring->next_to_clean; 4153 rx_desc = E1000_RX_DESC(*rx_ring, i); 4154 buffer_info = &rx_ring->buffer_info[i]; 4155 4156 while (rx_desc->status & E1000_RXD_STAT_DD) { 4157 struct sk_buff *skb; 4158 u8 status; 4159 4160 if (*work_done >= work_to_do) 4161 break; 4162 (*work_done)++; 4163 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4164 4165 status = rx_desc->status; 4166 4167 if (++i == rx_ring->count) 4168 i = 0; 4169 4170 next_rxd = E1000_RX_DESC(*rx_ring, i); 4171 prefetch(next_rxd); 4172 4173 next_buffer = &rx_ring->buffer_info[i]; 4174 4175 cleaned = true; 4176 cleaned_count++; 4177 dma_unmap_page(&pdev->dev, buffer_info->dma, 4178 adapter->rx_buffer_len, DMA_FROM_DEVICE); 4179 buffer_info->dma = 0; 4180 4181 length = le16_to_cpu(rx_desc->length); 4182 4183 /* errors is only valid for DD + EOP descriptors */ 4184 if (unlikely((status & E1000_RXD_STAT_EOP) && 4185 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 4186 u8 *mapped = page_address(buffer_info->rxbuf.page); 4187 4188 if (e1000_tbi_should_accept(adapter, status, 4189 rx_desc->errors, 4190 length, mapped)) { 4191 length--; 4192 } else if (netdev->features & NETIF_F_RXALL) { 4193 goto process_skb; 4194 } else { 4195 /* an error means any chain goes out the window 4196 * 
too 4197 */ 4198 if (rx_ring->rx_skb_top) 4199 dev_kfree_skb(rx_ring->rx_skb_top); 4200 rx_ring->rx_skb_top = NULL; 4201 goto next_desc; 4202 } 4203 } 4204 4205 #define rxtop rx_ring->rx_skb_top 4206 process_skb: 4207 if (!(status & E1000_RXD_STAT_EOP)) { 4208 /* this descriptor is only the beginning (or middle) */ 4209 if (!rxtop) { 4210 /* this is the beginning of a chain */ 4211 rxtop = napi_get_frags(&adapter->napi); 4212 if (!rxtop) 4213 break; 4214 4215 skb_fill_page_desc(rxtop, 0, 4216 buffer_info->rxbuf.page, 4217 0, length); 4218 } else { 4219 /* this is the middle of a chain */ 4220 skb_fill_page_desc(rxtop, 4221 skb_shinfo(rxtop)->nr_frags, 4222 buffer_info->rxbuf.page, 0, length); 4223 } 4224 e1000_consume_page(buffer_info, rxtop, length); 4225 goto next_desc; 4226 } else { 4227 if (rxtop) { 4228 /* end of the chain */ 4229 skb_fill_page_desc(rxtop, 4230 skb_shinfo(rxtop)->nr_frags, 4231 buffer_info->rxbuf.page, 0, length); 4232 skb = rxtop; 4233 rxtop = NULL; 4234 e1000_consume_page(buffer_info, skb, length); 4235 } else { 4236 struct page *p; 4237 /* no chain, got EOP, this buf is the packet 4238 * copybreak to save the put_page/alloc_page 4239 */ 4240 p = buffer_info->rxbuf.page; 4241 if (length <= copybreak) { 4242 u8 *vaddr; 4243 4244 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4245 length -= 4; 4246 skb = e1000_alloc_rx_skb(adapter, 4247 length); 4248 if (!skb) 4249 break; 4250 4251 vaddr = kmap_atomic(p); 4252 memcpy(skb_tail_pointer(skb), vaddr, 4253 length); 4254 kunmap_atomic(vaddr); 4255 /* re-use the page, so don't erase 4256 * buffer_info->rxbuf.page 4257 */ 4258 skb_put(skb, length); 4259 e1000_rx_checksum(adapter, 4260 status | rx_desc->errors << 24, 4261 le16_to_cpu(rx_desc->csum), skb); 4262 4263 total_rx_bytes += skb->len; 4264 total_rx_packets++; 4265 4266 e1000_receive_skb(adapter, status, 4267 rx_desc->special, skb); 4268 goto next_desc; 4269 } else { 4270 skb = napi_get_frags(&adapter->napi); 4271 if (!skb) { 4272 adapter->alloc_rx_buff_failed++; 4273 break; 4274 } 4275 skb_fill_page_desc(skb, 0, p, 0, 4276 length); 4277 e1000_consume_page(buffer_info, skb, 4278 length); 4279 } 4280 } 4281 } 4282 4283 /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ 4284 e1000_rx_checksum(adapter, 4285 (u32)(status) | 4286 ((u32)(rx_desc->errors) << 24), 4287 le16_to_cpu(rx_desc->csum), skb); 4288 4289 total_rx_bytes += (skb->len - 4); /* don't count FCS */ 4290 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4291 pskb_trim(skb, skb->len - 4); 4292 total_rx_packets++; 4293 4294 if (status & E1000_RXD_STAT_VP) { 4295 __le16 vlan = rx_desc->special; 4296 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4297 4298 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4299 } 4300 4301 napi_gro_frags(&adapter->napi); 4302 4303 next_desc: 4304 rx_desc->status = 0; 4305 4306 /* return some buffers to hardware, one at a time is too slow */ 4307 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4308 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4309 cleaned_count = 0; 4310 } 4311 4312 /* use prefetched values */ 4313 rx_desc = next_rxd; 4314 buffer_info = next_buffer; 4315 } 4316 rx_ring->next_to_clean = i; 4317 4318 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4319 if (cleaned_count) 4320 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4321 4322 adapter->total_rx_packets += total_rx_packets; 4323 adapter->total_rx_bytes += total_rx_bytes; 4324 netdev->stats.rx_bytes += total_rx_bytes; 4325 netdev->stats.rx_packets += total_rx_packets; 4326 return cleaned; 4327 } 4328 4329 /* this should improve performance for small packets with large amounts 4330 * of reassembly being done in the stack 4331 */ 4332 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, 4333 struct e1000_rx_buffer *buffer_info, 4334 u32 length, const void *data) 4335 { 4336 struct sk_buff *skb; 4337 4338 if (length > copybreak) 4339 return NULL; 4340 4341 skb = e1000_alloc_rx_skb(adapter, length); 4342 if (!skb) 4343 return NULL; 4344 4345 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, 4346 length, DMA_FROM_DEVICE); 4347 4348 skb_put_data(skb, data, length); 4349 4350 return skb; 4351 } 4352 4353 /** 4354 * e1000_clean_rx_irq - Send received data up the network stack; legacy 4355 * @adapter: board private structure 4356 * @rx_ring: ring to clean 4357 * @work_done: amount of napi work completed this call 4358 * @work_to_do: max amount of work allowed for this call to do 4359 */ 4360 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 4361 struct e1000_rx_ring *rx_ring, 4362 int *work_done, int work_to_do) 4363 { 4364 struct net_device *netdev = adapter->netdev; 4365 struct pci_dev *pdev = adapter->pdev; 4366 struct e1000_rx_desc *rx_desc, *next_rxd; 4367 struct e1000_rx_buffer *buffer_info, *next_buffer; 4368 u32 length; 4369 unsigned int i; 4370 int cleaned_count = 0; 4371 bool cleaned = false; 4372 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4373 4374 i = rx_ring->next_to_clean; 4375 rx_desc = E1000_RX_DESC(*rx_ring, i); 4376 buffer_info = &rx_ring->buffer_info[i]; 4377 4378 while (rx_desc->status & E1000_RXD_STAT_DD) { 4379 struct sk_buff *skb; 4380 u8 *data; 4381 u8 status; 4382 4383 if (*work_done >= work_to_do) 4384 break; 4385 (*work_done)++; 4386 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4387 4388 status = rx_desc->status; 4389 length = le16_to_cpu(rx_desc->length); 4390 4391 data = buffer_info->rxbuf.data; 4392 prefetch(data); 4393 skb = e1000_copybreak(adapter, buffer_info, length, data); 4394 if (!skb) { 4395 unsigned int frag_len = e1000_frag_len(adapter); 4396 4397 skb = build_skb(data - E1000_HEADROOM, frag_len); 4398 if (!skb) { 4399 adapter->alloc_rx_buff_failed++; 4400 
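/* build_skb() failed: stop cleaning; next_to_clean has not advanced past this descriptor, so it is retried on the next poll */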
break; 4401 } 4402 4403 skb_reserve(skb, E1000_HEADROOM); 4404 dma_unmap_single(&pdev->dev, buffer_info->dma, 4405 adapter->rx_buffer_len, 4406 DMA_FROM_DEVICE); 4407 buffer_info->dma = 0; 4408 buffer_info->rxbuf.data = NULL; 4409 } 4410 4411 if (++i == rx_ring->count) 4412 i = 0; 4413 4414 next_rxd = E1000_RX_DESC(*rx_ring, i); 4415 prefetch(next_rxd); 4416 4417 next_buffer = &rx_ring->buffer_info[i]; 4418 4419 cleaned = true; 4420 cleaned_count++; 4421 4422 /* !EOP means multiple descriptors were used to store a single 4423 * packet, if that's the case we need to toss it. In fact, we need 4424 * to toss every packet with the EOP bit clear and the next 4425 * frame that _does_ have the EOP bit set, as it is by 4426 * definition only a frame fragment 4427 */ 4428 if (unlikely(!(status & E1000_RXD_STAT_EOP))) 4429 adapter->discarding = true; 4430 4431 if (adapter->discarding) { 4432 /* All receives must fit into a single buffer */ 4433 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); 4434 dev_kfree_skb(skb); 4435 if (status & E1000_RXD_STAT_EOP) 4436 adapter->discarding = false; 4437 goto next_desc; 4438 } 4439 4440 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 4441 if (e1000_tbi_should_accept(adapter, status, 4442 rx_desc->errors, 4443 length, data)) { 4444 length--; 4445 } else if (netdev->features & NETIF_F_RXALL) { 4446 goto process_skb; 4447 } else { 4448 dev_kfree_skb(skb); 4449 goto next_desc; 4450 } 4451 } 4452 4453 process_skb: 4454 total_rx_bytes += (length - 4); /* don't count FCS */ 4455 total_rx_packets++; 4456 4457 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4458 /* adjust length to remove Ethernet CRC, this must be 4459 * done after the TBI_ACCEPT workaround above 4460 */ 4461 length -= 4; 4462 4463 if (buffer_info->rxbuf.data == NULL) 4464 skb_put(skb, length); 4465 else /* copybreak skb */ 4466 skb_trim(skb, length); 4467 4468 /* Receive Checksum Offload */ 4469 e1000_rx_checksum(adapter, 4470 (u32)(status) | 4471 ((u32)(rx_desc->errors) << 24), 4472 le16_to_cpu(rx_desc->csum), skb); 4473 4474 e1000_receive_skb(adapter, status, rx_desc->special, skb); 4475 4476 next_desc: 4477 rx_desc->status = 0; 4478 4479 /* return some buffers to hardware, one at a time is too slow */ 4480 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4481 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4482 cleaned_count = 0; 4483 } 4484 4485 /* use prefetched values */ 4486 rx_desc = next_rxd; 4487 buffer_info = next_buffer; 4488 } 4489 rx_ring->next_to_clean = i; 4490 4491 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4492 if (cleaned_count) 4493 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4494 4495 adapter->total_rx_packets += total_rx_packets; 4496 adapter->total_rx_bytes += total_rx_bytes; 4497 netdev->stats.rx_bytes += total_rx_bytes; 4498 netdev->stats.rx_packets += total_rx_packets; 4499 return cleaned; 4500 } 4501 4502 /** 4503 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 4504 * @adapter: address of board private structure 4505 * @rx_ring: pointer to receive ring structure 4506 * @cleaned_count: number of buffers to allocate this pass 4507 **/ 4508 static void 4509 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4510 struct e1000_rx_ring *rx_ring, int cleaned_count) 4511 { 4512 struct pci_dev *pdev = adapter->pdev; 4513 struct e1000_rx_desc *rx_desc; 4514 struct e1000_rx_buffer *buffer_info; 4515 unsigned int i; 4516 4517 i = rx_ring->next_to_use; 4518 buffer_info = &rx_ring->buffer_info[i]; 4519
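/* Refill up to cleaned_count descriptors starting at next_to_use, reusing any page and DMA mapping still attached to a buffer */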
4520 while (cleaned_count--) { 4521 /* allocate a new page if necessary */ 4522 if (!buffer_info->rxbuf.page) { 4523 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); 4524 if (unlikely(!buffer_info->rxbuf.page)) { 4525 adapter->alloc_rx_buff_failed++; 4526 break; 4527 } 4528 } 4529 4530 if (!buffer_info->dma) { 4531 buffer_info->dma = dma_map_page(&pdev->dev, 4532 buffer_info->rxbuf.page, 0, 4533 adapter->rx_buffer_len, 4534 DMA_FROM_DEVICE); 4535 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4536 put_page(buffer_info->rxbuf.page); 4537 buffer_info->rxbuf.page = NULL; 4538 buffer_info->dma = 0; 4539 adapter->alloc_rx_buff_failed++; 4540 break; 4541 } 4542 } 4543 4544 rx_desc = E1000_RX_DESC(*rx_ring, i); 4545 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4546 4547 if (unlikely(++i == rx_ring->count)) 4548 i = 0; 4549 buffer_info = &rx_ring->buffer_info[i]; 4550 } 4551 4552 if (likely(rx_ring->next_to_use != i)) { 4553 rx_ring->next_to_use = i; 4554 if (unlikely(i-- == 0)) 4555 i = (rx_ring->count - 1); 4556 4557 /* Force memory writes to complete before letting h/w 4558 * know there are new descriptors to fetch. (Only 4559 * applicable for weak-ordered memory model archs, 4560 * such as IA-64). 4561 */ 4562 wmb(); 4563 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4564 } 4565 } 4566 4567 /** 4568 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4569 * @adapter: address of board private structure 4570 **/ 4571 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4572 struct e1000_rx_ring *rx_ring, 4573 int cleaned_count) 4574 { 4575 struct e1000_hw *hw = &adapter->hw; 4576 struct pci_dev *pdev = adapter->pdev; 4577 struct e1000_rx_desc *rx_desc; 4578 struct e1000_rx_buffer *buffer_info; 4579 unsigned int i; 4580 unsigned int bufsz = adapter->rx_buffer_len; 4581 4582 i = rx_ring->next_to_use; 4583 buffer_info = &rx_ring->buffer_info[i]; 4584 4585 while (cleaned_count--) { 4586 void *data; 4587 4588 if (buffer_info->rxbuf.data) 4589 goto skip; 4590 4591 data = e1000_alloc_frag(adapter); 4592 if (!data) { 4593 /* Better luck next round */ 4594 adapter->alloc_rx_buff_failed++; 4595 break; 4596 } 4597 4598 /* Fix for errata 23, can't cross 64kB boundary */ 4599 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4600 void *olddata = data; 4601 e_err(rx_err, "skb align check failed: %u bytes at " 4602 "%p\n", bufsz, data); 4603 /* Try again, without freeing the previous */ 4604 data = e1000_alloc_frag(adapter); 4605 /* Failed allocation, critical failure */ 4606 if (!data) { 4607 skb_free_frag(olddata); 4608 adapter->alloc_rx_buff_failed++; 4609 break; 4610 } 4611 4612 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4613 /* give up */ 4614 skb_free_frag(data); 4615 skb_free_frag(olddata); 4616 adapter->alloc_rx_buff_failed++; 4617 break; 4618 } 4619 4620 /* Use new allocation */ 4621 skb_free_frag(olddata); 4622 } 4623 buffer_info->dma = dma_map_single(&pdev->dev, 4624 data, 4625 adapter->rx_buffer_len, 4626 DMA_FROM_DEVICE); 4627 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4628 skb_free_frag(data); 4629 buffer_info->dma = 0; 4630 adapter->alloc_rx_buff_failed++; 4631 break; 4632 } 4633 4634 /* XXX if it was allocated cleanly it will never map to a 4635 * boundary crossing 4636 */ 4637 4638 /* Fix for errata 23, can't cross 64kB boundary */ 4639 if (!e1000_check_64k_bound(adapter, 4640 (void *)(unsigned long)buffer_info->dma, 4641 adapter->rx_buffer_len)) { 4642 e_err(rx_err, "dma align check failed: %u bytes at " 4643 
"%p\n", adapter->rx_buffer_len, 4644 (void *)(unsigned long)buffer_info->dma); 4645 4646 dma_unmap_single(&pdev->dev, buffer_info->dma, 4647 adapter->rx_buffer_len, 4648 DMA_FROM_DEVICE); 4649 4650 skb_free_frag(data); 4651 buffer_info->rxbuf.data = NULL; 4652 buffer_info->dma = 0; 4653 4654 adapter->alloc_rx_buff_failed++; 4655 break; 4656 } 4657 buffer_info->rxbuf.data = data; 4658 skip: 4659 rx_desc = E1000_RX_DESC(*rx_ring, i); 4660 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4661 4662 if (unlikely(++i == rx_ring->count)) 4663 i = 0; 4664 buffer_info = &rx_ring->buffer_info[i]; 4665 } 4666 4667 if (likely(rx_ring->next_to_use != i)) { 4668 rx_ring->next_to_use = i; 4669 if (unlikely(i-- == 0)) 4670 i = (rx_ring->count - 1); 4671 4672 /* Force memory writes to complete before letting h/w 4673 * know there are new descriptors to fetch. (Only 4674 * applicable for weak-ordered memory model archs, 4675 * such as IA-64). 4676 */ 4677 wmb(); 4678 writel(i, hw->hw_addr + rx_ring->rdt); 4679 } 4680 } 4681 4682 /** 4683 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4684 * @adapter: 4685 **/ 4686 static void e1000_smartspeed(struct e1000_adapter *adapter) 4687 { 4688 struct e1000_hw *hw = &adapter->hw; 4689 u16 phy_status; 4690 u16 phy_ctrl; 4691 4692 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4693 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4694 return; 4695 4696 if (adapter->smartspeed == 0) { 4697 /* If Master/Slave config fault is asserted twice, 4698 * we assume back-to-back 4699 */ 4700 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4701 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4702 return; 4703 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4704 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4705 return; 4706 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4707 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4708 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4709 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4710 phy_ctrl); 4711 adapter->smartspeed++; 4712 if (!e1000_phy_setup_autoneg(hw) && 4713 !e1000_read_phy_reg(hw, PHY_CTRL, 4714 &phy_ctrl)) { 4715 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4716 MII_CR_RESTART_AUTO_NEG); 4717 e1000_write_phy_reg(hw, PHY_CTRL, 4718 phy_ctrl); 4719 } 4720 } 4721 return; 4722 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4723 /* If still no link, perhaps using 2/3 pair cable */ 4724 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4725 phy_ctrl |= CR_1000T_MS_ENABLE; 4726 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4727 if (!e1000_phy_setup_autoneg(hw) && 4728 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4729 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4730 MII_CR_RESTART_AUTO_NEG); 4731 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4732 } 4733 } 4734 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4735 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4736 adapter->smartspeed = 0; 4737 } 4738 4739 /** 4740 * e1000_ioctl - 4741 * @netdev: 4742 * @ifreq: 4743 * @cmd: 4744 **/ 4745 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4746 { 4747 switch (cmd) { 4748 case SIOCGMIIPHY: 4749 case SIOCGMIIREG: 4750 case SIOCSMIIREG: 4751 return e1000_mii_ioctl(netdev, ifr, cmd); 4752 default: 4753 return -EOPNOTSUPP; 4754 } 4755 } 4756 4757 /** 4758 * e1000_mii_ioctl - 4759 * @netdev: 4760 * @ifreq: 4761 * @cmd: 4762 **/ 4763 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4764 int cmd) 4765 { 4766 struct e1000_adapter *adapter = 
netdev_priv(netdev); 4767 struct e1000_hw *hw = &adapter->hw; 4768 struct mii_ioctl_data *data = if_mii(ifr); 4769 int retval; 4770 u16 mii_reg; 4771 unsigned long flags; 4772 4773 if (hw->media_type != e1000_media_type_copper) 4774 return -EOPNOTSUPP; 4775 4776 switch (cmd) { 4777 case SIOCGMIIPHY: 4778 data->phy_id = hw->phy_addr; 4779 break; 4780 case SIOCGMIIREG: 4781 spin_lock_irqsave(&adapter->stats_lock, flags); 4782 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4783 &data->val_out)) { 4784 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4785 return -EIO; 4786 } 4787 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4788 break; 4789 case SIOCSMIIREG: 4790 if (data->reg_num & ~(0x1F)) 4791 return -EFAULT; 4792 mii_reg = data->val_in; 4793 spin_lock_irqsave(&adapter->stats_lock, flags); 4794 if (e1000_write_phy_reg(hw, data->reg_num, 4795 mii_reg)) { 4796 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4797 return -EIO; 4798 } 4799 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4800 if (hw->media_type == e1000_media_type_copper) { 4801 switch (data->reg_num) { 4802 case PHY_CTRL: 4803 if (mii_reg & MII_CR_POWER_DOWN) 4804 break; 4805 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4806 hw->autoneg = 1; 4807 hw->autoneg_advertised = 0x2F; 4808 } else { 4809 u32 speed; 4810 if (mii_reg & 0x40) 4811 speed = SPEED_1000; 4812 else if (mii_reg & 0x2000) 4813 speed = SPEED_100; 4814 else 4815 speed = SPEED_10; 4816 retval = e1000_set_spd_dplx( 4817 adapter, speed, 4818 ((mii_reg & 0x100) 4819 ? DUPLEX_FULL : 4820 DUPLEX_HALF)); 4821 if (retval) 4822 return retval; 4823 } 4824 if (netif_running(adapter->netdev)) 4825 e1000_reinit_locked(adapter); 4826 else 4827 e1000_reset(adapter); 4828 break; 4829 case M88E1000_PHY_SPEC_CTRL: 4830 case M88E1000_EXT_PHY_SPEC_CTRL: 4831 if (e1000_phy_reset(hw)) 4832 return -EIO; 4833 break; 4834 } 4835 } else { 4836 switch (data->reg_num) { 4837 case PHY_CTRL: 4838 if (mii_reg & MII_CR_POWER_DOWN) 4839 break; 4840 if (netif_running(adapter->netdev)) 4841 e1000_reinit_locked(adapter); 4842 else 4843 e1000_reset(adapter); 4844 break; 4845 } 4846 } 4847 break; 4848 default: 4849 return -EOPNOTSUPP; 4850 } 4851 return E1000_SUCCESS; 4852 } 4853 4854 void e1000_pci_set_mwi(struct e1000_hw *hw) 4855 { 4856 struct e1000_adapter *adapter = hw->back; 4857 int ret_val = pci_set_mwi(adapter->pdev); 4858 4859 if (ret_val) 4860 e_err(probe, "Error in setting MWI\n"); 4861 } 4862 4863 void e1000_pci_clear_mwi(struct e1000_hw *hw) 4864 { 4865 struct e1000_adapter *adapter = hw->back; 4866 4867 pci_clear_mwi(adapter->pdev); 4868 } 4869 4870 int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4871 { 4872 struct e1000_adapter *adapter = hw->back; 4873 return pcix_get_mmrbc(adapter->pdev); 4874 } 4875 4876 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4877 { 4878 struct e1000_adapter *adapter = hw->back; 4879 pcix_set_mmrbc(adapter->pdev, mmrbc); 4880 } 4881 4882 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4883 { 4884 outl(value, port); 4885 } 4886 4887 static bool e1000_vlan_used(struct e1000_adapter *adapter) 4888 { 4889 u16 vid; 4890 4891 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4892 return true; 4893 return false; 4894 } 4895 4896 static void __e1000_vlan_mode(struct e1000_adapter *adapter, 4897 netdev_features_t features) 4898 { 4899 struct e1000_hw *hw = &adapter->hw; 4900 u32 ctrl; 4901 4902 ctrl = er32(CTRL); 4903 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 4904 /* enable VLAN tag insert/strip */ 4905 ctrl |= 
E1000_CTRL_VME; 4906 } else { 4907 /* disable VLAN tag insert/strip */ 4908 ctrl &= ~E1000_CTRL_VME; 4909 } 4910 ew32(CTRL, ctrl); 4911 } 4912 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4913 bool filter_on) 4914 { 4915 struct e1000_hw *hw = &adapter->hw; 4916 u32 rctl; 4917 4918 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4919 e1000_irq_disable(adapter); 4920 4921 __e1000_vlan_mode(adapter, adapter->netdev->features); 4922 if (filter_on) { 4923 /* enable VLAN receive filtering */ 4924 rctl = er32(RCTL); 4925 rctl &= ~E1000_RCTL_CFIEN; 4926 if (!(adapter->netdev->flags & IFF_PROMISC)) 4927 rctl |= E1000_RCTL_VFE; 4928 ew32(RCTL, rctl); 4929 e1000_update_mng_vlan(adapter); 4930 } else { 4931 /* disable VLAN receive filtering */ 4932 rctl = er32(RCTL); 4933 rctl &= ~E1000_RCTL_VFE; 4934 ew32(RCTL, rctl); 4935 } 4936 4937 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4938 e1000_irq_enable(adapter); 4939 } 4940 4941 static void e1000_vlan_mode(struct net_device *netdev, 4942 netdev_features_t features) 4943 { 4944 struct e1000_adapter *adapter = netdev_priv(netdev); 4945 4946 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4947 e1000_irq_disable(adapter); 4948 4949 __e1000_vlan_mode(adapter, features); 4950 4951 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4952 e1000_irq_enable(adapter); 4953 } 4954 4955 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 4956 __be16 proto, u16 vid) 4957 { 4958 struct e1000_adapter *adapter = netdev_priv(netdev); 4959 struct e1000_hw *hw = &adapter->hw; 4960 u32 vfta, index; 4961 4962 if ((hw->mng_cookie.status & 4963 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4964 (vid == adapter->mng_vlan_id)) 4965 return 0; 4966 4967 if (!e1000_vlan_used(adapter)) 4968 e1000_vlan_filter_on_off(adapter, true); 4969 4970 /* add VID to filter table */ 4971 index = (vid >> 5) & 0x7F; 4972 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4973 vfta |= (1 << (vid & 0x1F)); 4974 e1000_write_vfta(hw, index, vfta); 4975 4976 set_bit(vid, adapter->active_vlans); 4977 4978 return 0; 4979 } 4980 4981 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 4982 __be16 proto, u16 vid) 4983 { 4984 struct e1000_adapter *adapter = netdev_priv(netdev); 4985 struct e1000_hw *hw = &adapter->hw; 4986 u32 vfta, index; 4987 4988 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4989 e1000_irq_disable(adapter); 4990 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4991 e1000_irq_enable(adapter); 4992 4993 /* remove VID from filter table */ 4994 index = (vid >> 5) & 0x7F; 4995 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4996 vfta &= ~(1 << (vid & 0x1F)); 4997 e1000_write_vfta(hw, index, vfta); 4998 4999 clear_bit(vid, adapter->active_vlans); 5000 5001 if (!e1000_vlan_used(adapter)) 5002 e1000_vlan_filter_on_off(adapter, false); 5003 5004 return 0; 5005 } 5006 5007 static void e1000_restore_vlan(struct e1000_adapter *adapter) 5008 { 5009 u16 vid; 5010 5011 if (!e1000_vlan_used(adapter)) 5012 return; 5013 5014 e1000_vlan_filter_on_off(adapter, true); 5015 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 5016 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 5017 } 5018 5019 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) 5020 { 5021 struct e1000_hw *hw = &adapter->hw; 5022 5023 hw->autoneg = 0; 5024 5025 /* Make sure dplx is at most 1 bit and lsb of speed is not set 5026 * for the switch() below to work 5027 */ 5028 if ((spd & 1) || (dplx & ~1)) 5029 goto err_inval; 5030 5031 /* Fiber NICs only allow 1000 Mbps Full duplex
*/ 5032 if ((hw->media_type == e1000_media_type_fiber) && 5033 spd != SPEED_1000 && 5034 dplx != DUPLEX_FULL) 5035 goto err_inval; 5036 5037 switch (spd + dplx) { 5038 case SPEED_10 + DUPLEX_HALF: 5039 hw->forced_speed_duplex = e1000_10_half; 5040 break; 5041 case SPEED_10 + DUPLEX_FULL: 5042 hw->forced_speed_duplex = e1000_10_full; 5043 break; 5044 case SPEED_100 + DUPLEX_HALF: 5045 hw->forced_speed_duplex = e1000_100_half; 5046 break; 5047 case SPEED_100 + DUPLEX_FULL: 5048 hw->forced_speed_duplex = e1000_100_full; 5049 break; 5050 case SPEED_1000 + DUPLEX_FULL: 5051 hw->autoneg = 1; 5052 hw->autoneg_advertised = ADVERTISE_1000_FULL; 5053 break; 5054 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5055 default: 5056 goto err_inval; 5057 } 5058 5059 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 5060 hw->mdix = AUTO_ALL_MODES; 5061 5062 return 0; 5063 5064 err_inval: 5065 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 5066 return -EINVAL; 5067 } 5068 5069 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 5070 { 5071 struct net_device *netdev = pci_get_drvdata(pdev); 5072 struct e1000_adapter *adapter = netdev_priv(netdev); 5073 struct e1000_hw *hw = &adapter->hw; 5074 u32 ctrl, ctrl_ext, rctl, status; 5075 u32 wufc = adapter->wol; 5076 #ifdef CONFIG_PM 5077 int retval = 0; 5078 #endif 5079 5080 netif_device_detach(netdev); 5081 5082 if (netif_running(netdev)) { 5083 int count = E1000_CHECK_RESET_COUNT; 5084 5085 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) 5086 usleep_range(10000, 20000); 5087 5088 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 5089 e1000_down(adapter); 5090 } 5091 5092 #ifdef CONFIG_PM 5093 retval = pci_save_state(pdev); 5094 if (retval) 5095 return retval; 5096 #endif 5097 5098 status = er32(STATUS); 5099 if (status & E1000_STATUS_LU) 5100 wufc &= ~E1000_WUFC_LNKC; 5101 5102 if (wufc) { 5103 e1000_setup_rctl(adapter); 5104 e1000_set_rx_mode(netdev); 5105 5106 rctl = er32(RCTL); 5107 5108 /* turn on all-multi mode if wake on multicast is enabled */ 5109 if (wufc & E1000_WUFC_MC) 5110 rctl |= E1000_RCTL_MPE; 5111 5112 /* enable receives in the hardware */ 5113 ew32(RCTL, rctl | E1000_RCTL_EN); 5114 5115 if (hw->mac_type >= e1000_82540) { 5116 ctrl = er32(CTRL); 5117 /* advertise wake from D3Cold */ 5118 #define E1000_CTRL_ADVD3WUC 0x00100000 5119 /* phy power management enable */ 5120 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5121 ctrl |= E1000_CTRL_ADVD3WUC | 5122 E1000_CTRL_EN_PHY_PWR_MGMT; 5123 ew32(CTRL, ctrl); 5124 } 5125 5126 if (hw->media_type == e1000_media_type_fiber || 5127 hw->media_type == e1000_media_type_internal_serdes) { 5128 /* keep the laser running in D3 */ 5129 ctrl_ext = er32(CTRL_EXT); 5130 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 5131 ew32(CTRL_EXT, ctrl_ext); 5132 } 5133 5134 ew32(WUC, E1000_WUC_PME_EN); 5135 ew32(WUFC, wufc); 5136 } else { 5137 ew32(WUC, 0); 5138 ew32(WUFC, 0); 5139 } 5140 5141 e1000_release_manageability(adapter); 5142 5143 *enable_wake = !!wufc; 5144 5145 /* make sure adapter isn't asleep if manageability is enabled */ 5146 if (adapter->en_mng_pt) 5147 *enable_wake = true; 5148 5149 if (netif_running(netdev)) 5150 e1000_free_irq(adapter); 5151 5152 pci_disable_device(pdev); 5153 5154 return 0; 5155 } 5156 5157 #ifdef CONFIG_PM 5158 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 5159 { 5160 int retval; 5161 bool wake; 5162 5163 retval = __e1000_shutdown(pdev, &wake); 5164 if (retval) 5165 return retval; 5166 5167 if (wake) { 
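/* Wake-on-LAN is armed: let the PCI core pick a wake-capable low-power state */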
5168 pci_prepare_to_sleep(pdev); 5169 } else { 5170 pci_wake_from_d3(pdev, false); 5171 pci_set_power_state(pdev, PCI_D3hot); 5172 } 5173 5174 return 0; 5175 } 5176 5177 static int e1000_resume(struct pci_dev *pdev) 5178 { 5179 struct net_device *netdev = pci_get_drvdata(pdev); 5180 struct e1000_adapter *adapter = netdev_priv(netdev); 5181 struct e1000_hw *hw = &adapter->hw; 5182 u32 err; 5183 5184 pci_set_power_state(pdev, PCI_D0); 5185 pci_restore_state(pdev); 5186 pci_save_state(pdev); 5187 5188 if (adapter->need_ioport) 5189 err = pci_enable_device(pdev); 5190 else 5191 err = pci_enable_device_mem(pdev); 5192 if (err) { 5193 pr_err("Cannot enable PCI device from suspend\n"); 5194 return err; 5195 } 5196 pci_set_master(pdev); 5197 5198 pci_enable_wake(pdev, PCI_D3hot, 0); 5199 pci_enable_wake(pdev, PCI_D3cold, 0); 5200 5201 if (netif_running(netdev)) { 5202 err = e1000_request_irq(adapter); 5203 if (err) 5204 return err; 5205 } 5206 5207 e1000_power_up_phy(adapter); 5208 e1000_reset(adapter); 5209 ew32(WUS, ~0); 5210 5211 e1000_init_manageability(adapter); 5212 5213 if (netif_running(netdev)) 5214 e1000_up(adapter); 5215 5216 netif_device_attach(netdev); 5217 5218 return 0; 5219 } 5220 #endif 5221 5222 static void e1000_shutdown(struct pci_dev *pdev) 5223 { 5224 bool wake; 5225 5226 __e1000_shutdown(pdev, &wake); 5227 5228 if (system_state == SYSTEM_POWER_OFF) { 5229 pci_wake_from_d3(pdev, wake); 5230 pci_set_power_state(pdev, PCI_D3hot); 5231 } 5232 } 5233 5234 #ifdef CONFIG_NET_POLL_CONTROLLER 5235 /* Polling 'interrupt' - used by things like netconsole to send skbs 5236 * without having to re-enable interrupts. It's not called while 5237 * the interrupt routine is executing. 5238 */ 5239 static void e1000_netpoll(struct net_device *netdev) 5240 { 5241 struct e1000_adapter *adapter = netdev_priv(netdev); 5242 5243 if (disable_hardirq(adapter->pdev->irq)) 5244 e1000_intr(adapter->pdev->irq, netdev); 5245 enable_irq(adapter->pdev->irq); 5246 } 5247 #endif 5248 5249 /** 5250 * e1000_io_error_detected - called when PCI error is detected 5251 * @pdev: Pointer to PCI device 5252 * @state: The current pci connection state 5253 * 5254 * This function is called after a PCI bus error affecting 5255 * this device has been detected. 5256 */ 5257 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 5258 pci_channel_state_t state) 5259 { 5260 struct net_device *netdev = pci_get_drvdata(pdev); 5261 struct e1000_adapter *adapter = netdev_priv(netdev); 5262 5263 netif_device_detach(netdev); 5264 5265 if (state == pci_channel_io_perm_failure) 5266 return PCI_ERS_RESULT_DISCONNECT; 5267 5268 if (netif_running(netdev)) 5269 e1000_down(adapter); 5270 pci_disable_device(pdev); 5271 5272 /* Request a slot reset. */ 5273 return PCI_ERS_RESULT_NEED_RESET; 5274 } 5275 5276 /** 5277 * e1000_io_slot_reset - called after the pci bus has been reset. 5278 * @pdev: Pointer to PCI device 5279 * 5280 * Restart the card from scratch, as if from a cold-boot. Implementation 5281 * resembles the first-half of the e1000_resume routine.
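 * Returns PCI_ERS_RESULT_RECOVERED on success, or PCI_ERS_RESULT_DISCONNECT if the device could not be re-enabled.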
5282 */ 5283 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5284 { 5285 struct net_device *netdev = pci_get_drvdata(pdev); 5286 struct e1000_adapter *adapter = netdev_priv(netdev); 5287 struct e1000_hw *hw = &adapter->hw; 5288 int err; 5289 5290 if (adapter->need_ioport) 5291 err = pci_enable_device(pdev); 5292 else 5293 err = pci_enable_device_mem(pdev); 5294 if (err) { 5295 pr_err("Cannot re-enable PCI device after reset.\n"); 5296 return PCI_ERS_RESULT_DISCONNECT; 5297 } 5298 pci_set_master(pdev); 5299 5300 pci_enable_wake(pdev, PCI_D3hot, 0); 5301 pci_enable_wake(pdev, PCI_D3cold, 0); 5302 5303 e1000_reset(adapter); 5304 ew32(WUS, ~0); 5305 5306 return PCI_ERS_RESULT_RECOVERED; 5307 } 5308 5309 /** 5310 * e1000_io_resume - called when traffic can start flowing again. 5311 * @pdev: Pointer to PCI device 5312 * 5313 * This callback is called when the error recovery driver tells us that 5314 * its OK to resume normal operation. Implementation resembles the 5315 * second-half of the e1000_resume routine. 5316 */ 5317 static void e1000_io_resume(struct pci_dev *pdev) 5318 { 5319 struct net_device *netdev = pci_get_drvdata(pdev); 5320 struct e1000_adapter *adapter = netdev_priv(netdev); 5321 5322 e1000_init_manageability(adapter); 5323 5324 if (netif_running(netdev)) { 5325 if (e1000_up(adapter)) { 5326 pr_info("can't bring device back up after reset\n"); 5327 return; 5328 } 5329 } 5330 5331 netif_device_attach(netdev); 5332 } 5333 5334 /* e1000_main.c */ 5335