/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 *	Linux NICS <linux.nics@intel.com>
 *	e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 *	Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571] = &e1000_82571_info,
	[board_82572] = &e1000_82572_info,
	[board_82573] = &e1000_82573_info,
	[board_82574] = &e1000_82574_info,
	[board_82583] = &e1000_82583_info,
	[board_80003es2lan] = &e1000_es2_info,
	[board_ich8lan] = &e1000_ich8_info,
	[board_ich9lan] = &e1000_ich9_info,
	[board_ich10lan] = &e1000_ich10_info,
	[board_pchlan] = &e1000_pch_info,
	[board_pch2lan] = &e1000_pch2_info,
	[board_pch_lpt] = &e1000_pch_lpt_info,
	[board_pch_spt] = &e1000_pch_spt_info,
	[board_pch_cnp] = &e1000_pch_cnp_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * When updating the MAC CSR registers, the Manageability Engine (ME) could
 * be accessing the registers at the same time. Normally, this is handled in
 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
 * accesses later than it should which could result in the register having an
 * incorrect value. Work around this by checking the FWSM register which has
 * bit 24 set while ME is accessing MAC CSR registers, wait if it is set and
 * try again a number of times.
 **/
s32 __ew32_prepare(struct e1000_hw *hw)
{
	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;

	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
		udelay(50);

	return i;
}

void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		__ew32_prepare(hw);

	writel(val, hw->hw_addr + reg);
}

/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
				 struct e1000_buffer *bi)
{
	int i;
	struct e1000_ps_page *ps_page;

	for (i = 0; i < adapter->rx_ps_pages; i++) {
		ps_page = &bi->ps_pages[i];

		if (ps_page->page) {
			pr_info("packet dump for ps_page %d:\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, page_address(ps_page->page),
				       PAGE_SIZE, true);
		}
	}
}

/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start\n"); 245 pr_info("%-15s %016lX %016lX\n", netdev->name, 246 netdev->state, dev_trans_start(netdev)); 247 } 248 249 /* Print Registers */ 250 dev_info(&adapter->pdev->dev, "Register Dump\n"); 251 pr_info(" Register Name Value\n"); 252 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; 253 reginfo->name; reginfo++) { 254 e1000_regdump(hw, reginfo); 255 } 256 257 /* Print Tx Ring Summary */ 258 if (!netdev || !netif_running(netdev)) 259 return; 260 261 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); 262 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); 263 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 264 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", 265 0, tx_ring->next_to_use, tx_ring->next_to_clean, 266 (unsigned long long)buffer_info->dma, 267 buffer_info->length, 268 buffer_info->next_to_watch, 269 (unsigned long long)buffer_info->time_stamp); 270 271 /* Print Tx Ring */ 272 if (!netif_msg_tx_done(adapter)) 273 goto rx_ring_summary; 274 275 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); 276 277 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 278 * 279 * Legacy Transmit Descriptor 280 * +--------------------------------------------------------------+ 281 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 282 * +--------------------------------------------------------------+ 283 * 8 | Special | CSS | Status | CMD | CSO | Length | 284 * +--------------------------------------------------------------+ 285 * 63 48 47 36 35 32 31 24 23 16 15 0 286 * 287 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 288 * 63 48 47 40 39 32 31 16 15 8 7 0 289 * +----------------------------------------------------------------+ 290 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 291 * +----------------------------------------------------------------+ 292 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 293 * +----------------------------------------------------------------+ 294 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 295 * 296 * Extended Data Descriptor (DTYP=0x1) 297 * +----------------------------------------------------------------+ 298 * 0 | Buffer Address [63:0] | 299 * +----------------------------------------------------------------+ 300 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 301 * +----------------------------------------------------------------+ 302 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 303 */ 304 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); 305 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); 306 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); 307 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 308 const char *next_desc; 309 tx_desc = E1000_TX_DESC(*tx_ring, i); 310 buffer_info = &tx_ring->buffer_info[i]; 311 u0 = (struct my_u0 *)tx_desc; 312 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 313 next_desc = " NTC/U"; 314 else if (i == tx_ring->next_to_use) 315 next_desc = " NTU"; 316 else if (i == tx_ring->next_to_clean) 317 next_desc = " NTC"; 318 else 319 next_desc = ""; 320 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", 321 (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : 322 ((le64_to_cpu(u0->b) & BIT(20)) ? 
'd' : 'c')), 323 i, 324 (unsigned long long)le64_to_cpu(u0->a), 325 (unsigned long long)le64_to_cpu(u0->b), 326 (unsigned long long)buffer_info->dma, 327 buffer_info->length, buffer_info->next_to_watch, 328 (unsigned long long)buffer_info->time_stamp, 329 buffer_info->skb, next_desc); 330 331 if (netif_msg_pktdata(adapter) && buffer_info->skb) 332 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 333 16, 1, buffer_info->skb->data, 334 buffer_info->skb->len, true); 335 } 336 337 /* Print Rx Ring Summary */ 338 rx_ring_summary: 339 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); 340 pr_info("Queue [NTU] [NTC]\n"); 341 pr_info(" %5d %5X %5X\n", 342 0, rx_ring->next_to_use, rx_ring->next_to_clean); 343 344 /* Print Rx Ring */ 345 if (!netif_msg_rx_status(adapter)) 346 return; 347 348 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); 349 switch (adapter->rx_ps_pages) { 350 case 1: 351 case 2: 352 case 3: 353 /* [Extended] Packet Split Receive Descriptor Format 354 * 355 * +-----------------------------------------------------+ 356 * 0 | Buffer Address 0 [63:0] | 357 * +-----------------------------------------------------+ 358 * 8 | Buffer Address 1 [63:0] | 359 * +-----------------------------------------------------+ 360 * 16 | Buffer Address 2 [63:0] | 361 * +-----------------------------------------------------+ 362 * 24 | Buffer Address 3 [63:0] | 363 * +-----------------------------------------------------+ 364 */ 365 pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); 366 /* [Extended] Receive Descriptor (Write-Back) Format 367 * 368 * 63 48 47 32 31 13 12 8 7 4 3 0 369 * +------------------------------------------------------+ 370 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | 371 * | Checksum | Ident | | Queue | | Type | 372 * +------------------------------------------------------+ 373 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 374 * +------------------------------------------------------+ 375 * 63 48 47 32 31 20 19 0 376 */ 377 pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); 378 for (i = 0; i < rx_ring->count; i++) { 379 const char *next_desc; 380 buffer_info = &rx_ring->buffer_info[i]; 381 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); 382 u1 = (struct my_u1 *)rx_desc_ps; 383 staterr = 384 le32_to_cpu(rx_desc_ps->wb.middle.status_error); 385 386 if (i == rx_ring->next_to_use) 387 next_desc = " NTU"; 388 else if (i == rx_ring->next_to_clean) 389 next_desc = " NTC"; 390 else 391 next_desc = ""; 392 393 if (staterr & E1000_RXD_STAT_DD) { 394 /* Descriptor Done */ 395 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", 396 "RWB", i, 397 (unsigned long long)le64_to_cpu(u1->a), 398 (unsigned long long)le64_to_cpu(u1->b), 399 (unsigned long long)le64_to_cpu(u1->c), 400 (unsigned long long)le64_to_cpu(u1->d), 401 buffer_info->skb, next_desc); 402 } else { 403 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", 404 "R ", i, 405 (unsigned long long)le64_to_cpu(u1->a), 406 (unsigned long long)le64_to_cpu(u1->b), 407 (unsigned long long)le64_to_cpu(u1->c), 408 (unsigned long long)le64_to_cpu(u1->d), 409 (unsigned long long)buffer_info->dma, 410 buffer_info->skb, next_desc); 411 412 if (netif_msg_pktdata(adapter)) 413 e1000e_dump_ps_pages(adapter, 414 buffer_info); 415 } 416 } 417 break; 418 default: 419 case 0: 420 /* Extended Receive Descriptor (Read) Format 
421 * 422 * +-----------------------------------------------------+ 423 * 0 | Buffer Address [63:0] | 424 * +-----------------------------------------------------+ 425 * 8 | Reserved | 426 * +-----------------------------------------------------+ 427 */ 428 pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); 429 /* Extended Receive Descriptor (Write-Back) Format 430 * 431 * 63 48 47 32 31 24 23 4 3 0 432 * +------------------------------------------------------+ 433 * | RSS Hash | | | | 434 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | 435 * | Packet | IP | | | Type | 436 * | Checksum | Ident | | | | 437 * +------------------------------------------------------+ 438 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 439 * +------------------------------------------------------+ 440 * 63 48 47 32 31 20 19 0 441 */ 442 pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); 443 444 for (i = 0; i < rx_ring->count; i++) { 445 const char *next_desc; 446 447 buffer_info = &rx_ring->buffer_info[i]; 448 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 449 u1 = (struct my_u1 *)rx_desc; 450 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 451 452 if (i == rx_ring->next_to_use) 453 next_desc = " NTU"; 454 else if (i == rx_ring->next_to_clean) 455 next_desc = " NTC"; 456 else 457 next_desc = ""; 458 459 if (staterr & E1000_RXD_STAT_DD) { 460 /* Descriptor Done */ 461 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", 462 "RWB", i, 463 (unsigned long long)le64_to_cpu(u1->a), 464 (unsigned long long)le64_to_cpu(u1->b), 465 buffer_info->skb, next_desc); 466 } else { 467 pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", 468 "R ", i, 469 (unsigned long long)le64_to_cpu(u1->a), 470 (unsigned long long)le64_to_cpu(u1->b), 471 (unsigned long long)buffer_info->dma, 472 buffer_info->skb, next_desc); 473 474 if (netif_msg_pktdata(adapter) && 475 buffer_info->skb) 476 print_hex_dump(KERN_INFO, "", 477 DUMP_PREFIX_ADDRESS, 16, 478 1, 479 buffer_info->skb->data, 480 adapter->rx_buffer_len, 481 true); 482 } 483 } 484 } 485 } 486 487 /** 488 * e1000_desc_unused - calculate if we have unused descriptors 489 **/ 490 static int e1000_desc_unused(struct e1000_ring *ring) 491 { 492 if (ring->next_to_clean > ring->next_to_use) 493 return ring->next_to_clean - ring->next_to_use - 1; 494 495 return ring->count + ring->next_to_clean - ring->next_to_use - 1; 496 } 497 498 /** 499 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp 500 * @adapter: board private structure 501 * @hwtstamps: time stamp structure to update 502 * @systim: unsigned 64bit system time value. 503 * 504 * Convert the system time value stored in the RX/TXSTMP registers into a 505 * hwtstamp which can be used by the upper level time stamping functions. 506 * 507 * The 'systim_lock' spinlock is used to protect the consistency of the 508 * system time value. This is needed because reading the 64 bit time 509 * value involves reading two 32 bit registers. The first read latches the 510 * value. 
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
				      struct skb_shared_hwtstamps *hwtstamps,
				      u64 systim)
{
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
	    !(status & E1000_RXDEXT_STATERR_TST) ||
	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	/* The Rx time stamp registers contain the time stamp. No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read. Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * compared.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status &
(E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 617 return; 618 619 /* It must be a TCP or UDP packet with a valid checksum */ 620 skb->ip_summed = CHECKSUM_UNNECESSARY; 621 adapter->hw_csum_good++; 622 } 623 624 static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) 625 { 626 struct e1000_adapter *adapter = rx_ring->adapter; 627 struct e1000_hw *hw = &adapter->hw; 628 s32 ret_val = __ew32_prepare(hw); 629 630 writel(i, rx_ring->tail); 631 632 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { 633 u32 rctl = er32(RCTL); 634 635 ew32(RCTL, rctl & ~E1000_RCTL_EN); 636 e_err("ME firmware caused invalid RDT - resetting\n"); 637 schedule_work(&adapter->reset_task); 638 } 639 } 640 641 static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) 642 { 643 struct e1000_adapter *adapter = tx_ring->adapter; 644 struct e1000_hw *hw = &adapter->hw; 645 s32 ret_val = __ew32_prepare(hw); 646 647 writel(i, tx_ring->tail); 648 649 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { 650 u32 tctl = er32(TCTL); 651 652 ew32(TCTL, tctl & ~E1000_TCTL_EN); 653 e_err("ME firmware caused invalid TDT - resetting\n"); 654 schedule_work(&adapter->reset_task); 655 } 656 } 657 658 /** 659 * e1000_alloc_rx_buffers - Replace used receive buffers 660 * @rx_ring: Rx descriptor ring 661 **/ 662 static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, 663 int cleaned_count, gfp_t gfp) 664 { 665 struct e1000_adapter *adapter = rx_ring->adapter; 666 struct net_device *netdev = adapter->netdev; 667 struct pci_dev *pdev = adapter->pdev; 668 union e1000_rx_desc_extended *rx_desc; 669 struct e1000_buffer *buffer_info; 670 struct sk_buff *skb; 671 unsigned int i; 672 unsigned int bufsz = adapter->rx_buffer_len; 673 674 i = rx_ring->next_to_use; 675 buffer_info = &rx_ring->buffer_info[i]; 676 677 while (cleaned_count--) { 678 skb = buffer_info->skb; 679 if (skb) { 680 skb_trim(skb, 0); 681 goto map_skb; 682 } 683 684 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); 685 if (!skb) { 686 /* Better luck next round */ 687 adapter->alloc_rx_buff_failed++; 688 break; 689 } 690 691 buffer_info->skb = skb; 692 map_skb: 693 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 694 adapter->rx_buffer_len, 695 DMA_FROM_DEVICE); 696 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 697 dev_err(&pdev->dev, "Rx DMA map failed\n"); 698 adapter->rx_dma_failed++; 699 break; 700 } 701 702 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 703 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 704 705 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 706 /* Force memory writes to complete before letting h/w 707 * know there are new descriptors to fetch. (Only 708 * applicable for weak-ordered memory model archs, 709 * such as IA-64). 
710 */ 711 wmb(); 712 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 713 e1000e_update_rdt_wa(rx_ring, i); 714 else 715 writel(i, rx_ring->tail); 716 } 717 i++; 718 if (i == rx_ring->count) 719 i = 0; 720 buffer_info = &rx_ring->buffer_info[i]; 721 } 722 723 rx_ring->next_to_use = i; 724 } 725 726 /** 727 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split 728 * @rx_ring: Rx descriptor ring 729 **/ 730 static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, 731 int cleaned_count, gfp_t gfp) 732 { 733 struct e1000_adapter *adapter = rx_ring->adapter; 734 struct net_device *netdev = adapter->netdev; 735 struct pci_dev *pdev = adapter->pdev; 736 union e1000_rx_desc_packet_split *rx_desc; 737 struct e1000_buffer *buffer_info; 738 struct e1000_ps_page *ps_page; 739 struct sk_buff *skb; 740 unsigned int i, j; 741 742 i = rx_ring->next_to_use; 743 buffer_info = &rx_ring->buffer_info[i]; 744 745 while (cleaned_count--) { 746 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 747 748 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 749 ps_page = &buffer_info->ps_pages[j]; 750 if (j >= adapter->rx_ps_pages) { 751 /* all unused desc entries get hw null ptr */ 752 rx_desc->read.buffer_addr[j + 1] = 753 ~cpu_to_le64(0); 754 continue; 755 } 756 if (!ps_page->page) { 757 ps_page->page = alloc_page(gfp); 758 if (!ps_page->page) { 759 adapter->alloc_rx_buff_failed++; 760 goto no_buffers; 761 } 762 ps_page->dma = dma_map_page(&pdev->dev, 763 ps_page->page, 764 0, PAGE_SIZE, 765 DMA_FROM_DEVICE); 766 if (dma_mapping_error(&pdev->dev, 767 ps_page->dma)) { 768 dev_err(&adapter->pdev->dev, 769 "Rx DMA page map failed\n"); 770 adapter->rx_dma_failed++; 771 goto no_buffers; 772 } 773 } 774 /* Refresh the desc even if buffer_addrs 775 * didn't change because each write-back 776 * erases this info. 777 */ 778 rx_desc->read.buffer_addr[j + 1] = 779 cpu_to_le64(ps_page->dma); 780 } 781 782 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, 783 gfp); 784 785 if (!skb) { 786 adapter->alloc_rx_buff_failed++; 787 break; 788 } 789 790 buffer_info->skb = skb; 791 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 792 adapter->rx_ps_bsize0, 793 DMA_FROM_DEVICE); 794 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 795 dev_err(&pdev->dev, "Rx DMA map failed\n"); 796 adapter->rx_dma_failed++; 797 /* cleanup skb */ 798 dev_kfree_skb_any(skb); 799 buffer_info->skb = NULL; 800 break; 801 } 802 803 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 804 805 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 806 /* Force memory writes to complete before letting h/w 807 * know there are new descriptors to fetch. (Only 808 * applicable for weak-ordered memory model archs, 809 * such as IA-64). 
810 */ 811 wmb(); 812 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 813 e1000e_update_rdt_wa(rx_ring, i << 1); 814 else 815 writel(i << 1, rx_ring->tail); 816 } 817 818 i++; 819 if (i == rx_ring->count) 820 i = 0; 821 buffer_info = &rx_ring->buffer_info[i]; 822 } 823 824 no_buffers: 825 rx_ring->next_to_use = i; 826 } 827 828 /** 829 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 830 * @rx_ring: Rx descriptor ring 831 * @cleaned_count: number of buffers to allocate this pass 832 **/ 833 834 static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, 835 int cleaned_count, gfp_t gfp) 836 { 837 struct e1000_adapter *adapter = rx_ring->adapter; 838 struct net_device *netdev = adapter->netdev; 839 struct pci_dev *pdev = adapter->pdev; 840 union e1000_rx_desc_extended *rx_desc; 841 struct e1000_buffer *buffer_info; 842 struct sk_buff *skb; 843 unsigned int i; 844 unsigned int bufsz = 256 - 16; /* for skb_reserve */ 845 846 i = rx_ring->next_to_use; 847 buffer_info = &rx_ring->buffer_info[i]; 848 849 while (cleaned_count--) { 850 skb = buffer_info->skb; 851 if (skb) { 852 skb_trim(skb, 0); 853 goto check_page; 854 } 855 856 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); 857 if (unlikely(!skb)) { 858 /* Better luck next round */ 859 adapter->alloc_rx_buff_failed++; 860 break; 861 } 862 863 buffer_info->skb = skb; 864 check_page: 865 /* allocate a new page if necessary */ 866 if (!buffer_info->page) { 867 buffer_info->page = alloc_page(gfp); 868 if (unlikely(!buffer_info->page)) { 869 adapter->alloc_rx_buff_failed++; 870 break; 871 } 872 } 873 874 if (!buffer_info->dma) { 875 buffer_info->dma = dma_map_page(&pdev->dev, 876 buffer_info->page, 0, 877 PAGE_SIZE, 878 DMA_FROM_DEVICE); 879 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 880 adapter->alloc_rx_buff_failed++; 881 break; 882 } 883 } 884 885 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 886 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 887 888 if (unlikely(++i == rx_ring->count)) 889 i = 0; 890 buffer_info = &rx_ring->buffer_info[i]; 891 } 892 893 if (likely(rx_ring->next_to_use != i)) { 894 rx_ring->next_to_use = i; 895 if (unlikely(i-- == 0)) 896 i = (rx_ring->count - 1); 897 898 /* Force memory writes to complete before letting h/w 899 * know there are new descriptors to fetch. (Only 900 * applicable for weak-ordered memory model archs, 901 * such as IA-64). 
902 */ 903 wmb(); 904 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 905 e1000e_update_rdt_wa(rx_ring, i); 906 else 907 writel(i, rx_ring->tail); 908 } 909 } 910 911 static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, 912 struct sk_buff *skb) 913 { 914 if (netdev->features & NETIF_F_RXHASH) 915 skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3); 916 } 917 918 /** 919 * e1000_clean_rx_irq - Send received data up the network stack 920 * @rx_ring: Rx descriptor ring 921 * 922 * the return value indicates whether actual cleaning was done, there 923 * is no guarantee that everything was cleaned 924 **/ 925 static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, 926 int work_to_do) 927 { 928 struct e1000_adapter *adapter = rx_ring->adapter; 929 struct net_device *netdev = adapter->netdev; 930 struct pci_dev *pdev = adapter->pdev; 931 struct e1000_hw *hw = &adapter->hw; 932 union e1000_rx_desc_extended *rx_desc, *next_rxd; 933 struct e1000_buffer *buffer_info, *next_buffer; 934 u32 length, staterr; 935 unsigned int i; 936 int cleaned_count = 0; 937 bool cleaned = false; 938 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 939 940 i = rx_ring->next_to_clean; 941 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 942 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 943 buffer_info = &rx_ring->buffer_info[i]; 944 945 while (staterr & E1000_RXD_STAT_DD) { 946 struct sk_buff *skb; 947 948 if (*work_done >= work_to_do) 949 break; 950 (*work_done)++; 951 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 952 953 skb = buffer_info->skb; 954 buffer_info->skb = NULL; 955 956 prefetch(skb->data - NET_IP_ALIGN); 957 958 i++; 959 if (i == rx_ring->count) 960 i = 0; 961 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); 962 prefetch(next_rxd); 963 964 next_buffer = &rx_ring->buffer_info[i]; 965 966 cleaned = true; 967 cleaned_count++; 968 dma_unmap_single(&pdev->dev, buffer_info->dma, 969 adapter->rx_buffer_len, DMA_FROM_DEVICE); 970 buffer_info->dma = 0; 971 972 length = le16_to_cpu(rx_desc->wb.upper.length); 973 974 /* !EOP means multiple descriptors were used to store a single 975 * packet, if that's the case we need to toss it. 
In fact, we 976 * need to toss every packet with the EOP bit clear and the 977 * next frame that _does_ have the EOP bit set, as it is by 978 * definition only a frame fragment 979 */ 980 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) 981 adapter->flags2 |= FLAG2_IS_DISCARDING; 982 983 if (adapter->flags2 & FLAG2_IS_DISCARDING) { 984 /* All receives must fit into a single buffer */ 985 e_dbg("Receive packet consumed multiple buffers\n"); 986 /* recycle */ 987 buffer_info->skb = skb; 988 if (staterr & E1000_RXD_STAT_EOP) 989 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 990 goto next_desc; 991 } 992 993 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && 994 !(netdev->features & NETIF_F_RXALL))) { 995 /* recycle */ 996 buffer_info->skb = skb; 997 goto next_desc; 998 } 999 1000 /* adjust length to remove Ethernet CRC */ 1001 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { 1002 /* If configured to store CRC, don't subtract FCS, 1003 * but keep the FCS bytes out of the total_rx_bytes 1004 * counter 1005 */ 1006 if (netdev->features & NETIF_F_RXFCS) 1007 total_rx_bytes -= 4; 1008 else 1009 length -= 4; 1010 } 1011 1012 total_rx_bytes += length; 1013 total_rx_packets++; 1014 1015 /* code added for copybreak, this should improve 1016 * performance for small packets with large amounts 1017 * of reassembly being done in the stack 1018 */ 1019 if (length < copybreak) { 1020 struct sk_buff *new_skb = 1021 napi_alloc_skb(&adapter->napi, length); 1022 if (new_skb) { 1023 skb_copy_to_linear_data_offset(new_skb, 1024 -NET_IP_ALIGN, 1025 (skb->data - 1026 NET_IP_ALIGN), 1027 (length + 1028 NET_IP_ALIGN)); 1029 /* save the skb in buffer_info as good */ 1030 buffer_info->skb = skb; 1031 skb = new_skb; 1032 } 1033 /* else just continue with the old one */ 1034 } 1035 /* end copybreak code */ 1036 skb_put(skb, length); 1037 1038 /* Receive Checksum Offload */ 1039 e1000_rx_checksum(adapter, staterr, skb); 1040 1041 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 1042 1043 e1000_receive_skb(adapter, netdev, skb, staterr, 1044 rx_desc->wb.upper.vlan); 1045 1046 next_desc: 1047 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); 1048 1049 /* return some buffers to hardware, one at a time is too slow */ 1050 if (cleaned_count >= E1000_RX_BUFFER_WRITE) { 1051 adapter->alloc_rx_buf(rx_ring, cleaned_count, 1052 GFP_ATOMIC); 1053 cleaned_count = 0; 1054 } 1055 1056 /* use prefetched values */ 1057 rx_desc = next_rxd; 1058 buffer_info = next_buffer; 1059 1060 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1061 } 1062 rx_ring->next_to_clean = i; 1063 1064 cleaned_count = e1000_desc_unused(rx_ring); 1065 if (cleaned_count) 1066 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); 1067 1068 adapter->total_rx_bytes += total_rx_bytes; 1069 adapter->total_rx_packets += total_rx_packets; 1070 return cleaned; 1071 } 1072 1073 static void e1000_put_txbuf(struct e1000_ring *tx_ring, 1074 struct e1000_buffer *buffer_info) 1075 { 1076 struct e1000_adapter *adapter = tx_ring->adapter; 1077 1078 if (buffer_info->dma) { 1079 if (buffer_info->mapped_as_page) 1080 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1081 buffer_info->length, DMA_TO_DEVICE); 1082 else 1083 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1084 buffer_info->length, DMA_TO_DEVICE); 1085 buffer_info->dma = 0; 1086 } 1087 if (buffer_info->skb) { 1088 dev_kfree_skb_any(buffer_info->skb); 1089 buffer_info->skb = NULL; 1090 } 1091 buffer_info->time_stamp = 0; 1092 } 1093 1094 static void e1000_print_hw_hang(struct 
work_struct *work) 1095 { 1096 struct e1000_adapter *adapter = container_of(work, 1097 struct e1000_adapter, 1098 print_hang_task); 1099 struct net_device *netdev = adapter->netdev; 1100 struct e1000_ring *tx_ring = adapter->tx_ring; 1101 unsigned int i = tx_ring->next_to_clean; 1102 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 1103 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 1104 struct e1000_hw *hw = &adapter->hw; 1105 u16 phy_status, phy_1000t_status, phy_ext_status; 1106 u16 pci_status; 1107 1108 if (test_bit(__E1000_DOWN, &adapter->state)) 1109 return; 1110 1111 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { 1112 /* May be block on write-back, flush and detect again 1113 * flush pending descriptor writebacks to memory 1114 */ 1115 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1116 /* execute the writes immediately */ 1117 e1e_flush(); 1118 /* Due to rare timing issues, write to TIDV again to ensure 1119 * the write is successful 1120 */ 1121 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1122 /* execute the writes immediately */ 1123 e1e_flush(); 1124 adapter->tx_hang_recheck = true; 1125 return; 1126 } 1127 adapter->tx_hang_recheck = false; 1128 1129 if (er32(TDH(0)) == er32(TDT(0))) { 1130 e_dbg("false hang detected, ignoring\n"); 1131 return; 1132 } 1133 1134 /* Real hang detected */ 1135 netif_stop_queue(netdev); 1136 1137 e1e_rphy(hw, MII_BMSR, &phy_status); 1138 e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); 1139 e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); 1140 1141 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); 1142 1143 /* detected Hardware unit hang */ 1144 e_err("Detected Hardware Unit Hang:\n" 1145 " TDH <%x>\n" 1146 " TDT <%x>\n" 1147 " next_to_use <%x>\n" 1148 " next_to_clean <%x>\n" 1149 "buffer_info[next_to_clean]:\n" 1150 " time_stamp <%lx>\n" 1151 " next_to_watch <%x>\n" 1152 " jiffies <%lx>\n" 1153 " next_to_watch.status <%x>\n" 1154 "MAC Status <%x>\n" 1155 "PHY Status <%x>\n" 1156 "PHY 1000BASE-T Status <%x>\n" 1157 "PHY Extended Status <%x>\n" 1158 "PCI Status <%x>\n", 1159 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, 1160 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, 1161 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), 1162 phy_status, phy_1000t_status, phy_ext_status, pci_status); 1163 1164 e1000e_dump(adapter); 1165 1166 /* Suggest workaround for known h/w issue */ 1167 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) 1168 e_err("Try turning off Tx pause (flow control) via ethtool\n"); 1169 } 1170 1171 /** 1172 * e1000e_tx_hwtstamp_work - check for Tx time stamp 1173 * @work: pointer to work struct 1174 * 1175 * This work function polls the TSYNCTXCTL valid bit to determine when a 1176 * timestamp has been taken for the current stored skb. The timestamp must 1177 * be for this skb because only one such packet is allowed in the queue. 
1178 */ 1179 static void e1000e_tx_hwtstamp_work(struct work_struct *work) 1180 { 1181 struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, 1182 tx_hwtstamp_work); 1183 struct e1000_hw *hw = &adapter->hw; 1184 1185 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { 1186 struct sk_buff *skb = adapter->tx_hwtstamp_skb; 1187 struct skb_shared_hwtstamps shhwtstamps; 1188 u64 txstmp; 1189 1190 txstmp = er32(TXSTMPL); 1191 txstmp |= (u64)er32(TXSTMPH) << 32; 1192 1193 e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); 1194 1195 /* Clear the global tx_hwtstamp_skb pointer and force writes 1196 * prior to notifying the stack of a Tx timestamp. 1197 */ 1198 adapter->tx_hwtstamp_skb = NULL; 1199 wmb(); /* force write prior to skb_tstamp_tx */ 1200 1201 skb_tstamp_tx(skb, &shhwtstamps); 1202 dev_kfree_skb_any(skb); 1203 } else if (time_after(jiffies, adapter->tx_hwtstamp_start 1204 + adapter->tx_timeout_factor * HZ)) { 1205 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1206 adapter->tx_hwtstamp_skb = NULL; 1207 adapter->tx_hwtstamp_timeouts++; 1208 e_warn("clearing Tx timestamp hang\n"); 1209 } else { 1210 /* reschedule to check later */ 1211 schedule_work(&adapter->tx_hwtstamp_work); 1212 } 1213 } 1214 1215 /** 1216 * e1000_clean_tx_irq - Reclaim resources after transmit completes 1217 * @tx_ring: Tx descriptor ring 1218 * 1219 * the return value indicates whether actual cleaning was done, there 1220 * is no guarantee that everything was cleaned 1221 **/ 1222 static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) 1223 { 1224 struct e1000_adapter *adapter = tx_ring->adapter; 1225 struct net_device *netdev = adapter->netdev; 1226 struct e1000_hw *hw = &adapter->hw; 1227 struct e1000_tx_desc *tx_desc, *eop_desc; 1228 struct e1000_buffer *buffer_info; 1229 unsigned int i, eop; 1230 unsigned int count = 0; 1231 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 1232 unsigned int bytes_compl = 0, pkts_compl = 0; 1233 1234 i = tx_ring->next_to_clean; 1235 eop = tx_ring->buffer_info[i].next_to_watch; 1236 eop_desc = E1000_TX_DESC(*tx_ring, eop); 1237 1238 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 1239 (count < tx_ring->count)) { 1240 bool cleaned = false; 1241 1242 dma_rmb(); /* read buffer_info after eop_desc */ 1243 for (; !cleaned; count++) { 1244 tx_desc = E1000_TX_DESC(*tx_ring, i); 1245 buffer_info = &tx_ring->buffer_info[i]; 1246 cleaned = (i == eop); 1247 1248 if (cleaned) { 1249 total_tx_packets += buffer_info->segs; 1250 total_tx_bytes += buffer_info->bytecount; 1251 if (buffer_info->skb) { 1252 bytes_compl += buffer_info->skb->len; 1253 pkts_compl++; 1254 } 1255 } 1256 1257 e1000_put_txbuf(tx_ring, buffer_info); 1258 tx_desc->upper.data = 0; 1259 1260 i++; 1261 if (i == tx_ring->count) 1262 i = 0; 1263 } 1264 1265 if (i == tx_ring->next_to_use) 1266 break; 1267 eop = tx_ring->buffer_info[i].next_to_watch; 1268 eop_desc = E1000_TX_DESC(*tx_ring, eop); 1269 } 1270 1271 tx_ring->next_to_clean = i; 1272 1273 netdev_completed_queue(netdev, pkts_compl, bytes_compl); 1274 1275 #define TX_WAKE_THRESHOLD 32 1276 if (count && netif_carrier_ok(netdev) && 1277 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { 1278 /* Make sure that anybody stopping the queue after this 1279 * sees the new next_to_clean. 
1280 */ 1281 smp_mb(); 1282 1283 if (netif_queue_stopped(netdev) && 1284 !(test_bit(__E1000_DOWN, &adapter->state))) { 1285 netif_wake_queue(netdev); 1286 ++adapter->restart_queue; 1287 } 1288 } 1289 1290 if (adapter->detect_tx_hung) { 1291 /* Detect a transmit hang in hardware, this serializes the 1292 * check with the clearing of time_stamp and movement of i 1293 */ 1294 adapter->detect_tx_hung = false; 1295 if (tx_ring->buffer_info[i].time_stamp && 1296 time_after(jiffies, tx_ring->buffer_info[i].time_stamp 1297 + (adapter->tx_timeout_factor * HZ)) && 1298 !(er32(STATUS) & E1000_STATUS_TXOFF)) 1299 schedule_work(&adapter->print_hang_task); 1300 else 1301 adapter->tx_hang_recheck = false; 1302 } 1303 adapter->total_tx_bytes += total_tx_bytes; 1304 adapter->total_tx_packets += total_tx_packets; 1305 return count < tx_ring->count; 1306 } 1307 1308 /** 1309 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split 1310 * @rx_ring: Rx descriptor ring 1311 * 1312 * the return value indicates whether actual cleaning was done, there 1313 * is no guarantee that everything was cleaned 1314 **/ 1315 static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, 1316 int work_to_do) 1317 { 1318 struct e1000_adapter *adapter = rx_ring->adapter; 1319 struct e1000_hw *hw = &adapter->hw; 1320 union e1000_rx_desc_packet_split *rx_desc, *next_rxd; 1321 struct net_device *netdev = adapter->netdev; 1322 struct pci_dev *pdev = adapter->pdev; 1323 struct e1000_buffer *buffer_info, *next_buffer; 1324 struct e1000_ps_page *ps_page; 1325 struct sk_buff *skb; 1326 unsigned int i, j; 1327 u32 length, staterr; 1328 int cleaned_count = 0; 1329 bool cleaned = false; 1330 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1331 1332 i = rx_ring->next_to_clean; 1333 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 1334 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 1335 buffer_info = &rx_ring->buffer_info[i]; 1336 1337 while (staterr & E1000_RXD_STAT_DD) { 1338 if (*work_done >= work_to_do) 1339 break; 1340 (*work_done)++; 1341 skb = buffer_info->skb; 1342 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 1343 1344 /* in the packet split case this is header only */ 1345 prefetch(skb->data - NET_IP_ALIGN); 1346 1347 i++; 1348 if (i == rx_ring->count) 1349 i = 0; 1350 next_rxd = E1000_RX_DESC_PS(*rx_ring, i); 1351 prefetch(next_rxd); 1352 1353 next_buffer = &rx_ring->buffer_info[i]; 1354 1355 cleaned = true; 1356 cleaned_count++; 1357 dma_unmap_single(&pdev->dev, buffer_info->dma, 1358 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); 1359 buffer_info->dma = 0; 1360 1361 /* see !EOP comment in other Rx routine */ 1362 if (!(staterr & E1000_RXD_STAT_EOP)) 1363 adapter->flags2 |= FLAG2_IS_DISCARDING; 1364 1365 if (adapter->flags2 & FLAG2_IS_DISCARDING) { 1366 e_dbg("Packet Split buffers didn't pick up the full packet\n"); 1367 dev_kfree_skb_irq(skb); 1368 if (staterr & E1000_RXD_STAT_EOP) 1369 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1370 goto next_desc; 1371 } 1372 1373 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && 1374 !(netdev->features & NETIF_F_RXALL))) { 1375 dev_kfree_skb_irq(skb); 1376 goto next_desc; 1377 } 1378 1379 length = le16_to_cpu(rx_desc->wb.middle.length0); 1380 1381 if (!length) { 1382 e_dbg("Last part of the packet spanning multiple descriptors\n"); 1383 dev_kfree_skb_irq(skb); 1384 goto next_desc; 1385 } 1386 1387 /* Good Receive */ 1388 skb_put(skb, length); 1389 1390 { 1391 /* this looks ugly, but it seems compiler issues make 1392 * it 
more efficient than reusing j 1393 */ 1394 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); 1395 1396 /* page alloc/put takes too long and effects small 1397 * packet throughput, so unsplit small packets and 1398 * save the alloc/put only valid in softirq (napi) 1399 * context to call kmap_* 1400 */ 1401 if (l1 && (l1 <= copybreak) && 1402 ((length + l1) <= adapter->rx_ps_bsize0)) { 1403 u8 *vaddr; 1404 1405 ps_page = &buffer_info->ps_pages[0]; 1406 1407 /* there is no documentation about how to call 1408 * kmap_atomic, so we can't hold the mapping 1409 * very long 1410 */ 1411 dma_sync_single_for_cpu(&pdev->dev, 1412 ps_page->dma, 1413 PAGE_SIZE, 1414 DMA_FROM_DEVICE); 1415 vaddr = kmap_atomic(ps_page->page); 1416 memcpy(skb_tail_pointer(skb), vaddr, l1); 1417 kunmap_atomic(vaddr); 1418 dma_sync_single_for_device(&pdev->dev, 1419 ps_page->dma, 1420 PAGE_SIZE, 1421 DMA_FROM_DEVICE); 1422 1423 /* remove the CRC */ 1424 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { 1425 if (!(netdev->features & NETIF_F_RXFCS)) 1426 l1 -= 4; 1427 } 1428 1429 skb_put(skb, l1); 1430 goto copydone; 1431 } /* if */ 1432 } 1433 1434 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 1435 length = le16_to_cpu(rx_desc->wb.upper.length[j]); 1436 if (!length) 1437 break; 1438 1439 ps_page = &buffer_info->ps_pages[j]; 1440 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, 1441 DMA_FROM_DEVICE); 1442 ps_page->dma = 0; 1443 skb_fill_page_desc(skb, j, ps_page->page, 0, length); 1444 ps_page->page = NULL; 1445 skb->len += length; 1446 skb->data_len += length; 1447 skb->truesize += PAGE_SIZE; 1448 } 1449 1450 /* strip the ethernet crc, problem is we're using pages now so 1451 * this whole operation can get a little cpu intensive 1452 */ 1453 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { 1454 if (!(netdev->features & NETIF_F_RXFCS)) 1455 pskb_trim(skb, skb->len - 4); 1456 } 1457 1458 copydone: 1459 total_rx_bytes += skb->len; 1460 total_rx_packets++; 1461 1462 e1000_rx_checksum(adapter, staterr, skb); 1463 1464 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 1465 1466 if (rx_desc->wb.upper.header_status & 1467 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) 1468 adapter->rx_hdr_split++; 1469 1470 e1000_receive_skb(adapter, netdev, skb, staterr, 1471 rx_desc->wb.middle.vlan); 1472 1473 next_desc: 1474 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); 1475 buffer_info->skb = NULL; 1476 1477 /* return some buffers to hardware, one at a time is too slow */ 1478 if (cleaned_count >= E1000_RX_BUFFER_WRITE) { 1479 adapter->alloc_rx_buf(rx_ring, cleaned_count, 1480 GFP_ATOMIC); 1481 cleaned_count = 0; 1482 } 1483 1484 /* use prefetched values */ 1485 rx_desc = next_rxd; 1486 buffer_info = next_buffer; 1487 1488 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 1489 } 1490 rx_ring->next_to_clean = i; 1491 1492 cleaned_count = e1000_desc_unused(rx_ring); 1493 if (cleaned_count) 1494 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); 1495 1496 adapter->total_rx_bytes += total_rx_bytes; 1497 adapter->total_rx_packets += total_rx_packets; 1498 return cleaned; 1499 } 1500 1501 /** 1502 * e1000_consume_page - helper function 1503 **/ 1504 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 1505 u16 length) 1506 { 1507 bi->page = NULL; 1508 skb->len += length; 1509 skb->data_len += length; 1510 skb->truesize += PAGE_SIZE; 1511 } 1512 1513 /** 1514 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 1515 * @adapter: board private structure 1516 * 1517 * the return value 
indicates whether actual cleaning was done, there 1518 * is no guarantee that everything was cleaned 1519 **/ 1520 static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, 1521 int work_to_do) 1522 { 1523 struct e1000_adapter *adapter = rx_ring->adapter; 1524 struct net_device *netdev = adapter->netdev; 1525 struct pci_dev *pdev = adapter->pdev; 1526 union e1000_rx_desc_extended *rx_desc, *next_rxd; 1527 struct e1000_buffer *buffer_info, *next_buffer; 1528 u32 length, staterr; 1529 unsigned int i; 1530 int cleaned_count = 0; 1531 bool cleaned = false; 1532 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1533 struct skb_shared_info *shinfo; 1534 1535 i = rx_ring->next_to_clean; 1536 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 1537 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1538 buffer_info = &rx_ring->buffer_info[i]; 1539 1540 while (staterr & E1000_RXD_STAT_DD) { 1541 struct sk_buff *skb; 1542 1543 if (*work_done >= work_to_do) 1544 break; 1545 (*work_done)++; 1546 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 1547 1548 skb = buffer_info->skb; 1549 buffer_info->skb = NULL; 1550 1551 ++i; 1552 if (i == rx_ring->count) 1553 i = 0; 1554 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); 1555 prefetch(next_rxd); 1556 1557 next_buffer = &rx_ring->buffer_info[i]; 1558 1559 cleaned = true; 1560 cleaned_count++; 1561 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, 1562 DMA_FROM_DEVICE); 1563 buffer_info->dma = 0; 1564 1565 length = le16_to_cpu(rx_desc->wb.upper.length); 1566 1567 /* errors is only valid for DD + EOP descriptors */ 1568 if (unlikely((staterr & E1000_RXD_STAT_EOP) && 1569 ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && 1570 !(netdev->features & NETIF_F_RXALL)))) { 1571 /* recycle both page and skb */ 1572 buffer_info->skb = skb; 1573 /* an error means any chain goes out the window too */ 1574 if (rx_ring->rx_skb_top) 1575 dev_kfree_skb_irq(rx_ring->rx_skb_top); 1576 rx_ring->rx_skb_top = NULL; 1577 goto next_desc; 1578 } 1579 #define rxtop (rx_ring->rx_skb_top) 1580 if (!(staterr & E1000_RXD_STAT_EOP)) { 1581 /* this descriptor is only the beginning (or middle) */ 1582 if (!rxtop) { 1583 /* this is the beginning of a chain */ 1584 rxtop = skb; 1585 skb_fill_page_desc(rxtop, 0, buffer_info->page, 1586 0, length); 1587 } else { 1588 /* this is the middle of a chain */ 1589 shinfo = skb_shinfo(rxtop); 1590 skb_fill_page_desc(rxtop, shinfo->nr_frags, 1591 buffer_info->page, 0, 1592 length); 1593 /* re-use the skb, only consumed the page */ 1594 buffer_info->skb = skb; 1595 } 1596 e1000_consume_page(buffer_info, rxtop, length); 1597 goto next_desc; 1598 } else { 1599 if (rxtop) { 1600 /* end of the chain */ 1601 shinfo = skb_shinfo(rxtop); 1602 skb_fill_page_desc(rxtop, shinfo->nr_frags, 1603 buffer_info->page, 0, 1604 length); 1605 /* re-use the current skb, we only consumed the 1606 * page 1607 */ 1608 buffer_info->skb = skb; 1609 skb = rxtop; 1610 rxtop = NULL; 1611 e1000_consume_page(buffer_info, skb, length); 1612 } else { 1613 /* no chain, got EOP, this buf is the packet 1614 * copybreak to save the put_page/alloc_page 1615 */ 1616 if (length <= copybreak && 1617 skb_tailroom(skb) >= length) { 1618 u8 *vaddr; 1619 vaddr = kmap_atomic(buffer_info->page); 1620 memcpy(skb_tail_pointer(skb), vaddr, 1621 length); 1622 kunmap_atomic(vaddr); 1623 /* re-use the page, so don't erase 1624 * buffer_info->page 1625 */ 1626 skb_put(skb, length); 1627 } else { 1628 skb_fill_page_desc(skb, 0, 1629 buffer_info->page, 0, 1630 length); 
1631 e1000_consume_page(buffer_info, skb, 1632 length); 1633 } 1634 } 1635 } 1636 1637 /* Receive Checksum Offload */ 1638 e1000_rx_checksum(adapter, staterr, skb); 1639 1640 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 1641 1642 /* probably a little skewed due to removing CRC */ 1643 total_rx_bytes += skb->len; 1644 total_rx_packets++; 1645 1646 /* eth type trans needs skb->data to point to something */ 1647 if (!pskb_may_pull(skb, ETH_HLEN)) { 1648 e_err("pskb_may_pull failed.\n"); 1649 dev_kfree_skb_irq(skb); 1650 goto next_desc; 1651 } 1652 1653 e1000_receive_skb(adapter, netdev, skb, staterr, 1654 rx_desc->wb.upper.vlan); 1655 1656 next_desc: 1657 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); 1658 1659 /* return some buffers to hardware, one at a time is too slow */ 1660 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 1661 adapter->alloc_rx_buf(rx_ring, cleaned_count, 1662 GFP_ATOMIC); 1663 cleaned_count = 0; 1664 } 1665 1666 /* use prefetched values */ 1667 rx_desc = next_rxd; 1668 buffer_info = next_buffer; 1669 1670 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1671 } 1672 rx_ring->next_to_clean = i; 1673 1674 cleaned_count = e1000_desc_unused(rx_ring); 1675 if (cleaned_count) 1676 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); 1677 1678 adapter->total_rx_bytes += total_rx_bytes; 1679 adapter->total_rx_packets += total_rx_packets; 1680 return cleaned; 1681 } 1682 1683 /** 1684 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1685 * @rx_ring: Rx descriptor ring 1686 **/ 1687 static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) 1688 { 1689 struct e1000_adapter *adapter = rx_ring->adapter; 1690 struct e1000_buffer *buffer_info; 1691 struct e1000_ps_page *ps_page; 1692 struct pci_dev *pdev = adapter->pdev; 1693 unsigned int i, j; 1694 1695 /* Free all the Rx ring sk_buffs */ 1696 for (i = 0; i < rx_ring->count; i++) { 1697 buffer_info = &rx_ring->buffer_info[i]; 1698 if (buffer_info->dma) { 1699 if (adapter->clean_rx == e1000_clean_rx_irq) 1700 dma_unmap_single(&pdev->dev, buffer_info->dma, 1701 adapter->rx_buffer_len, 1702 DMA_FROM_DEVICE); 1703 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 1704 dma_unmap_page(&pdev->dev, buffer_info->dma, 1705 PAGE_SIZE, DMA_FROM_DEVICE); 1706 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1707 dma_unmap_single(&pdev->dev, buffer_info->dma, 1708 adapter->rx_ps_bsize0, 1709 DMA_FROM_DEVICE); 1710 buffer_info->dma = 0; 1711 } 1712 1713 if (buffer_info->page) { 1714 put_page(buffer_info->page); 1715 buffer_info->page = NULL; 1716 } 1717 1718 if (buffer_info->skb) { 1719 dev_kfree_skb(buffer_info->skb); 1720 buffer_info->skb = NULL; 1721 } 1722 1723 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 1724 ps_page = &buffer_info->ps_pages[j]; 1725 if (!ps_page->page) 1726 break; 1727 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, 1728 DMA_FROM_DEVICE); 1729 ps_page->dma = 0; 1730 put_page(ps_page->page); 1731 ps_page->page = NULL; 1732 } 1733 } 1734 1735 /* there also may be some cached data from a chained receive */ 1736 if (rx_ring->rx_skb_top) { 1737 dev_kfree_skb(rx_ring->rx_skb_top); 1738 rx_ring->rx_skb_top = NULL; 1739 } 1740 1741 /* Zero out the descriptor ring */ 1742 memset(rx_ring->desc, 0, rx_ring->size); 1743 1744 rx_ring->next_to_clean = 0; 1745 rx_ring->next_to_use = 0; 1746 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1747 } 1748 1749 static void e1000e_downshift_workaround(struct work_struct *work) 1750 { 1751 struct e1000_adapter *adapter = container_of(work, 1752 struct 
e1000_adapter, 1753 downshift_task); 1754 1755 if (test_bit(__E1000_DOWN, &adapter->state)) 1756 return; 1757 1758 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); 1759 } 1760 1761 /** 1762 * e1000_intr_msi - Interrupt Handler 1763 * @irq: interrupt number 1764 * @data: pointer to a network interface device structure 1765 **/ 1766 static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) 1767 { 1768 struct net_device *netdev = data; 1769 struct e1000_adapter *adapter = netdev_priv(netdev); 1770 struct e1000_hw *hw = &adapter->hw; 1771 u32 icr = er32(ICR); 1772 1773 /* read ICR disables interrupts using IAM */ 1774 if (icr & E1000_ICR_LSC) { 1775 hw->mac.get_link_status = true; 1776 /* ICH8 workaround-- Call gig speed drop workaround on cable 1777 * disconnect (LSC) before accessing any PHY registers 1778 */ 1779 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1780 (!(er32(STATUS) & E1000_STATUS_LU))) 1781 schedule_work(&adapter->downshift_task); 1782 1783 /* 80003ES2LAN workaround-- For packet buffer work-around on 1784 * link down event; disable receives here in the ISR and reset 1785 * adapter in watchdog 1786 */ 1787 if (netif_carrier_ok(netdev) && 1788 adapter->flags & FLAG_RX_NEEDS_RESTART) { 1789 /* disable receives */ 1790 u32 rctl = er32(RCTL); 1791 1792 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1793 adapter->flags |= FLAG_RESTART_NOW; 1794 } 1795 /* guard against interrupt when we're going down */ 1796 if (!test_bit(__E1000_DOWN, &adapter->state)) 1797 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1798 } 1799 1800 /* Reset on uncorrectable ECC error */ 1801 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { 1802 u32 pbeccsts = er32(PBECCSTS); 1803 1804 adapter->corr_errors += 1805 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 1806 adapter->uncorr_errors += 1807 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 1808 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 1809 1810 /* Do the reset outside of interrupt context */ 1811 schedule_work(&adapter->reset_task); 1812 1813 /* return immediately since reset is imminent */ 1814 return IRQ_HANDLED; 1815 } 1816 1817 if (napi_schedule_prep(&adapter->napi)) { 1818 adapter->total_tx_bytes = 0; 1819 adapter->total_tx_packets = 0; 1820 adapter->total_rx_bytes = 0; 1821 adapter->total_rx_packets = 0; 1822 __napi_schedule(&adapter->napi); 1823 } 1824 1825 return IRQ_HANDLED; 1826 } 1827 1828 /** 1829 * e1000_intr - Interrupt Handler 1830 * @irq: interrupt number 1831 * @data: pointer to a network interface device structure 1832 **/ 1833 static irqreturn_t e1000_intr(int __always_unused irq, void *data) 1834 { 1835 struct net_device *netdev = data; 1836 struct e1000_adapter *adapter = netdev_priv(netdev); 1837 struct e1000_hw *hw = &adapter->hw; 1838 u32 rctl, icr = er32(ICR); 1839 1840 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) 1841 return IRQ_NONE; /* Not our interrupt */ 1842 1843 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 1844 * not set, then the adapter didn't send an interrupt 1845 */ 1846 if (!(icr & E1000_ICR_INT_ASSERTED)) 1847 return IRQ_NONE; 1848 1849 /* Interrupt Auto-Mask...upon reading ICR, 1850 * interrupts are masked. 
No need for the 1851 * IMC write 1852 */ 1853 1854 if (icr & E1000_ICR_LSC) { 1855 hw->mac.get_link_status = true; 1856 /* ICH8 workaround-- Call gig speed drop workaround on cable 1857 * disconnect (LSC) before accessing any PHY registers 1858 */ 1859 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1860 (!(er32(STATUS) & E1000_STATUS_LU))) 1861 schedule_work(&adapter->downshift_task); 1862 1863 /* 80003ES2LAN workaround-- 1864 * For packet buffer work-around on link down event; 1865 * disable receives here in the ISR and 1866 * reset adapter in watchdog 1867 */ 1868 if (netif_carrier_ok(netdev) && 1869 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { 1870 /* disable receives */ 1871 rctl = er32(RCTL); 1872 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1873 adapter->flags |= FLAG_RESTART_NOW; 1874 } 1875 /* guard against interrupt when we're going down */ 1876 if (!test_bit(__E1000_DOWN, &adapter->state)) 1877 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1878 } 1879 1880 /* Reset on uncorrectable ECC error */ 1881 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { 1882 u32 pbeccsts = er32(PBECCSTS); 1883 1884 adapter->corr_errors += 1885 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 1886 adapter->uncorr_errors += 1887 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 1888 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 1889 1890 /* Do the reset outside of interrupt context */ 1891 schedule_work(&adapter->reset_task); 1892 1893 /* return immediately since reset is imminent */ 1894 return IRQ_HANDLED; 1895 } 1896 1897 if (napi_schedule_prep(&adapter->napi)) { 1898 adapter->total_tx_bytes = 0; 1899 adapter->total_tx_packets = 0; 1900 adapter->total_rx_bytes = 0; 1901 adapter->total_rx_packets = 0; 1902 __napi_schedule(&adapter->napi); 1903 } 1904 1905 return IRQ_HANDLED; 1906 } 1907 1908 static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) 1909 { 1910 struct net_device *netdev = data; 1911 struct e1000_adapter *adapter = netdev_priv(netdev); 1912 struct e1000_hw *hw = &adapter->hw; 1913 1914 hw->mac.get_link_status = true; 1915 1916 /* guard against interrupt when we're going down */ 1917 if (!test_bit(__E1000_DOWN, &adapter->state)) { 1918 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1919 ew32(IMS, E1000_IMS_OTHER); 1920 } 1921 1922 return IRQ_HANDLED; 1923 } 1924 1925 static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) 1926 { 1927 struct net_device *netdev = data; 1928 struct e1000_adapter *adapter = netdev_priv(netdev); 1929 struct e1000_hw *hw = &adapter->hw; 1930 struct e1000_ring *tx_ring = adapter->tx_ring; 1931 1932 adapter->total_tx_bytes = 0; 1933 adapter->total_tx_packets = 0; 1934 1935 if (!e1000_clean_tx_irq(tx_ring)) 1936 /* Ring was not completely cleaned, so fire another interrupt */ 1937 ew32(ICS, tx_ring->ims_val); 1938 1939 if (!test_bit(__E1000_DOWN, &adapter->state)) 1940 ew32(IMS, adapter->tx_ring->ims_val); 1941 1942 return IRQ_HANDLED; 1943 } 1944 1945 static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) 1946 { 1947 struct net_device *netdev = data; 1948 struct e1000_adapter *adapter = netdev_priv(netdev); 1949 struct e1000_ring *rx_ring = adapter->rx_ring; 1950 1951 /* Write the ITR value calculated at the end of the 1952 * previous interrupt. 1953 */ 1954 if (rx_ring->set_itr) { 1955 u32 itr = rx_ring->itr_val ? 
1956 1000000000 / (rx_ring->itr_val * 256) : 0; 1957 1958 writel(itr, rx_ring->itr_register); 1959 rx_ring->set_itr = 0; 1960 } 1961 1962 if (napi_schedule_prep(&adapter->napi)) { 1963 adapter->total_rx_bytes = 0; 1964 adapter->total_rx_packets = 0; 1965 __napi_schedule(&adapter->napi); 1966 } 1967 return IRQ_HANDLED; 1968 } 1969 1970 /** 1971 * e1000_configure_msix - Configure MSI-X hardware 1972 * 1973 * e1000_configure_msix sets up the hardware to properly 1974 * generate MSI-X interrupts. 1975 **/ 1976 static void e1000_configure_msix(struct e1000_adapter *adapter) 1977 { 1978 struct e1000_hw *hw = &adapter->hw; 1979 struct e1000_ring *rx_ring = adapter->rx_ring; 1980 struct e1000_ring *tx_ring = adapter->tx_ring; 1981 int vector = 0; 1982 u32 ctrl_ext, ivar = 0; 1983 1984 adapter->eiac_mask = 0; 1985 1986 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ 1987 if (hw->mac.type == e1000_82574) { 1988 u32 rfctl = er32(RFCTL); 1989 1990 rfctl |= E1000_RFCTL_ACK_DIS; 1991 ew32(RFCTL, rfctl); 1992 } 1993 1994 /* Configure Rx vector */ 1995 rx_ring->ims_val = E1000_IMS_RXQ0; 1996 adapter->eiac_mask |= rx_ring->ims_val; 1997 if (rx_ring->itr_val) 1998 writel(1000000000 / (rx_ring->itr_val * 256), 1999 rx_ring->itr_register); 2000 else 2001 writel(1, rx_ring->itr_register); 2002 ivar = E1000_IVAR_INT_ALLOC_VALID | vector; 2003 2004 /* Configure Tx vector */ 2005 tx_ring->ims_val = E1000_IMS_TXQ0; 2006 vector++; 2007 if (tx_ring->itr_val) 2008 writel(1000000000 / (tx_ring->itr_val * 256), 2009 tx_ring->itr_register); 2010 else 2011 writel(1, tx_ring->itr_register); 2012 adapter->eiac_mask |= tx_ring->ims_val; 2013 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); 2014 2015 /* set vector for Other Causes, e.g. link changes */ 2016 vector++; 2017 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); 2018 if (rx_ring->itr_val) 2019 writel(1000000000 / (rx_ring->itr_val * 256), 2020 hw->hw_addr + E1000_EITR_82574(vector)); 2021 else 2022 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); 2023 adapter->eiac_mask |= E1000_IMS_OTHER; 2024 2025 /* Cause Tx interrupts on every write back */ 2026 ivar |= BIT(31); 2027 2028 ew32(IVAR, ivar); 2029 2030 /* enable MSI-X PBA support */ 2031 ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME; 2032 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME; 2033 ew32(CTRL_EXT, ctrl_ext); 2034 e1e_flush(); 2035 } 2036 2037 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) 2038 { 2039 if (adapter->msix_entries) { 2040 pci_disable_msix(adapter->pdev); 2041 kfree(adapter->msix_entries); 2042 adapter->msix_entries = NULL; 2043 } else if (adapter->flags & FLAG_MSI_ENABLED) { 2044 pci_disable_msi(adapter->pdev); 2045 adapter->flags &= ~FLAG_MSI_ENABLED; 2046 } 2047 } 2048 2049 /** 2050 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported 2051 * 2052 * Attempt to configure interrupts using the best available 2053 * capabilities of the hardware and kernel. 
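 * MSI-X (three vectors: RxQ0, TxQ0 and "other") is tried first on parts that
 * support it, then MSI, and finally legacy interrupts as the system default.
 * adapter->num_vectors remains 3 only when MSI-X was enabled successfully;
 * in every other mode it is set to 1.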
2054 **/ 2055 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) 2056 { 2057 int err; 2058 int i; 2059 2060 switch (adapter->int_mode) { 2061 case E1000E_INT_MODE_MSIX: 2062 if (adapter->flags & FLAG_HAS_MSIX) { 2063 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 2064 adapter->msix_entries = kcalloc(adapter->num_vectors, 2065 sizeof(struct 2066 msix_entry), 2067 GFP_KERNEL); 2068 if (adapter->msix_entries) { 2069 struct e1000_adapter *a = adapter; 2070 2071 for (i = 0; i < adapter->num_vectors; i++) 2072 adapter->msix_entries[i].entry = i; 2073 2074 err = pci_enable_msix_range(a->pdev, 2075 a->msix_entries, 2076 a->num_vectors, 2077 a->num_vectors); 2078 if (err > 0) 2079 return; 2080 } 2081 /* MSI-X failed, so fall through and try MSI */ 2082 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); 2083 e1000e_reset_interrupt_capability(adapter); 2084 } 2085 adapter->int_mode = E1000E_INT_MODE_MSI; 2086 /* Fall through */ 2087 case E1000E_INT_MODE_MSI: 2088 if (!pci_enable_msi(adapter->pdev)) { 2089 adapter->flags |= FLAG_MSI_ENABLED; 2090 } else { 2091 adapter->int_mode = E1000E_INT_MODE_LEGACY; 2092 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); 2093 } 2094 /* Fall through */ 2095 case E1000E_INT_MODE_LEGACY: 2096 /* Don't do anything; this is the system default */ 2097 break; 2098 } 2099 2100 /* store the number of vectors being used */ 2101 adapter->num_vectors = 1; 2102 } 2103 2104 /** 2105 * e1000_request_msix - Initialize MSI-X interrupts 2106 * 2107 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the 2108 * kernel. 2109 **/ 2110 static int e1000_request_msix(struct e1000_adapter *adapter) 2111 { 2112 struct net_device *netdev = adapter->netdev; 2113 int err = 0, vector = 0; 2114 2115 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 2116 snprintf(adapter->rx_ring->name, 2117 sizeof(adapter->rx_ring->name) - 1, 2118 "%s-rx-0", netdev->name); 2119 else 2120 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 2121 err = request_irq(adapter->msix_entries[vector].vector, 2122 e1000_intr_msix_rx, 0, adapter->rx_ring->name, 2123 netdev); 2124 if (err) 2125 return err; 2126 adapter->rx_ring->itr_register = adapter->hw.hw_addr + 2127 E1000_EITR_82574(vector); 2128 adapter->rx_ring->itr_val = adapter->itr; 2129 vector++; 2130 2131 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 2132 snprintf(adapter->tx_ring->name, 2133 sizeof(adapter->tx_ring->name) - 1, 2134 "%s-tx-0", netdev->name); 2135 else 2136 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 2137 err = request_irq(adapter->msix_entries[vector].vector, 2138 e1000_intr_msix_tx, 0, adapter->tx_ring->name, 2139 netdev); 2140 if (err) 2141 return err; 2142 adapter->tx_ring->itr_register = adapter->hw.hw_addr + 2143 E1000_EITR_82574(vector); 2144 adapter->tx_ring->itr_val = adapter->itr; 2145 vector++; 2146 2147 err = request_irq(adapter->msix_entries[vector].vector, 2148 e1000_msix_other, 0, netdev->name, netdev); 2149 if (err) 2150 return err; 2151 2152 e1000_configure_msix(adapter); 2153 2154 return 0; 2155 } 2156 2157 /** 2158 * e1000_request_irq - initialize interrupts 2159 * 2160 * Attempts to configure interrupts using the best available 2161 * capabilities of the hardware and kernel. 
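 * With MSI-X this attaches separate handlers for the Rx queue ("<ifname>-rx-0"),
 * the Tx queue ("<ifname>-tx-0") and the other-causes vector; if that fails the
 * driver falls back to MSI and, as a last resort, to a shared legacy IRQ.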
2162 **/ 2163 static int e1000_request_irq(struct e1000_adapter *adapter) 2164 { 2165 struct net_device *netdev = adapter->netdev; 2166 int err; 2167 2168 if (adapter->msix_entries) { 2169 err = e1000_request_msix(adapter); 2170 if (!err) 2171 return err; 2172 /* fall back to MSI */ 2173 e1000e_reset_interrupt_capability(adapter); 2174 adapter->int_mode = E1000E_INT_MODE_MSI; 2175 e1000e_set_interrupt_capability(adapter); 2176 } 2177 if (adapter->flags & FLAG_MSI_ENABLED) { 2178 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, 2179 netdev->name, netdev); 2180 if (!err) 2181 return err; 2182 2183 /* fall back to legacy interrupt */ 2184 e1000e_reset_interrupt_capability(adapter); 2185 adapter->int_mode = E1000E_INT_MODE_LEGACY; 2186 } 2187 2188 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, 2189 netdev->name, netdev); 2190 if (err) 2191 e_err("Unable to allocate interrupt, Error: %d\n", err); 2192 2193 return err; 2194 } 2195 2196 static void e1000_free_irq(struct e1000_adapter *adapter) 2197 { 2198 struct net_device *netdev = adapter->netdev; 2199 2200 if (adapter->msix_entries) { 2201 int vector = 0; 2202 2203 free_irq(adapter->msix_entries[vector].vector, netdev); 2204 vector++; 2205 2206 free_irq(adapter->msix_entries[vector].vector, netdev); 2207 vector++; 2208 2209 /* Other Causes interrupt vector */ 2210 free_irq(adapter->msix_entries[vector].vector, netdev); 2211 return; 2212 } 2213 2214 free_irq(adapter->pdev->irq, netdev); 2215 } 2216 2217 /** 2218 * e1000_irq_disable - Mask off interrupt generation on the NIC 2219 **/ 2220 static void e1000_irq_disable(struct e1000_adapter *adapter) 2221 { 2222 struct e1000_hw *hw = &adapter->hw; 2223 2224 ew32(IMC, ~0); 2225 if (adapter->msix_entries) 2226 ew32(EIAC_82574, 0); 2227 e1e_flush(); 2228 2229 if (adapter->msix_entries) { 2230 int i; 2231 2232 for (i = 0; i < adapter->num_vectors; i++) 2233 synchronize_irq(adapter->msix_entries[i].vector); 2234 } else { 2235 synchronize_irq(adapter->pdev->irq); 2236 } 2237 } 2238 2239 /** 2240 * e1000_irq_enable - Enable default interrupt generation settings 2241 **/ 2242 static void e1000_irq_enable(struct e1000_adapter *adapter) 2243 { 2244 struct e1000_hw *hw = &adapter->hw; 2245 2246 if (adapter->msix_entries) { 2247 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2248 ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); 2249 } else if (hw->mac.type >= e1000_pch_lpt) { 2250 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); 2251 } else { 2252 ew32(IMS, IMS_ENABLE_MASK); 2253 } 2254 e1e_flush(); 2255 } 2256 2257 /** 2258 * e1000e_get_hw_control - get control of the h/w from f/w 2259 * @adapter: address of board private structure 2260 * 2261 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2262 * For ASF and Pass Through versions of f/w this means that 2263 * the driver is loaded. For AMT version (only with 82573) 2264 * of the f/w this means that the network i/f is open. 
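 * e1000e_release_hw_control() is the counterpart that hands the DRV_LOAD
 * bit back to the firmware.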
2265 **/ 2266 void e1000e_get_hw_control(struct e1000_adapter *adapter) 2267 { 2268 struct e1000_hw *hw = &adapter->hw; 2269 u32 ctrl_ext; 2270 u32 swsm; 2271 2272 /* Let firmware know the driver has taken over */ 2273 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2274 swsm = er32(SWSM); 2275 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); 2276 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2277 ctrl_ext = er32(CTRL_EXT); 2278 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2279 } 2280 } 2281 2282 /** 2283 * e1000e_release_hw_control - release control of the h/w to f/w 2284 * @adapter: address of board private structure 2285 * 2286 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2287 * For ASF and Pass Through versions of f/w this means that the 2288 * driver is no longer loaded. For AMT version (only with 82573) 2289 * of the f/w this means that the network i/f is closed. 2290 * 2291 **/ 2292 void e1000e_release_hw_control(struct e1000_adapter *adapter) 2293 { 2294 struct e1000_hw *hw = &adapter->hw; 2295 u32 ctrl_ext; 2296 u32 swsm; 2297 2298 /* Let firmware take over control of h/w */ 2299 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2300 swsm = er32(SWSM); 2301 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 2302 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2303 ctrl_ext = er32(CTRL_EXT); 2304 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2305 } 2306 } 2307 2308 /** 2309 * e1000_alloc_ring_dma - allocate memory for a ring structure 2310 **/ 2311 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, 2312 struct e1000_ring *ring) 2313 { 2314 struct pci_dev *pdev = adapter->pdev; 2315 2316 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, 2317 GFP_KERNEL); 2318 if (!ring->desc) 2319 return -ENOMEM; 2320 2321 return 0; 2322 } 2323 2324 /** 2325 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) 2326 * @tx_ring: Tx descriptor ring 2327 * 2328 * Return 0 on success, negative on failure 2329 **/ 2330 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) 2331 { 2332 struct e1000_adapter *adapter = tx_ring->adapter; 2333 int err = -ENOMEM, size; 2334 2335 size = sizeof(struct e1000_buffer) * tx_ring->count; 2336 tx_ring->buffer_info = vzalloc(size); 2337 if (!tx_ring->buffer_info) 2338 goto err; 2339 2340 /* round up to nearest 4K */ 2341 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2342 tx_ring->size = ALIGN(tx_ring->size, 4096); 2343 2344 err = e1000_alloc_ring_dma(adapter, tx_ring); 2345 if (err) 2346 goto err; 2347 2348 tx_ring->next_to_use = 0; 2349 tx_ring->next_to_clean = 0; 2350 2351 return 0; 2352 err: 2353 vfree(tx_ring->buffer_info); 2354 e_err("Unable to allocate memory for the transmit descriptor ring\n"); 2355 return err; 2356 } 2357 2358 /** 2359 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) 2360 * @rx_ring: Rx descriptor ring 2361 * 2362 * Returns 0 on success, negative on failure 2363 **/ 2364 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) 2365 { 2366 struct e1000_adapter *adapter = rx_ring->adapter; 2367 struct e1000_buffer *buffer_info; 2368 int i, size, desc_len, err = -ENOMEM; 2369 2370 size = sizeof(struct e1000_buffer) * rx_ring->count; 2371 rx_ring->buffer_info = vzalloc(size); 2372 if (!rx_ring->buffer_info) 2373 goto err; 2374 2375 for (i = 0; i < rx_ring->count; i++) { 2376 buffer_info = &rx_ring->buffer_info[i]; 2377 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, 2378 sizeof(struct e1000_ps_page), 2379 GFP_KERNEL); 2380 if 
(!buffer_info->ps_pages) 2381 goto err_pages; 2382 } 2383 2384 desc_len = sizeof(union e1000_rx_desc_packet_split); 2385 2386 /* Round up to nearest 4K */ 2387 rx_ring->size = rx_ring->count * desc_len; 2388 rx_ring->size = ALIGN(rx_ring->size, 4096); 2389 2390 err = e1000_alloc_ring_dma(adapter, rx_ring); 2391 if (err) 2392 goto err_pages; 2393 2394 rx_ring->next_to_clean = 0; 2395 rx_ring->next_to_use = 0; 2396 rx_ring->rx_skb_top = NULL; 2397 2398 return 0; 2399 2400 err_pages: 2401 for (i = 0; i < rx_ring->count; i++) { 2402 buffer_info = &rx_ring->buffer_info[i]; 2403 kfree(buffer_info->ps_pages); 2404 } 2405 err: 2406 vfree(rx_ring->buffer_info); 2407 e_err("Unable to allocate memory for the receive descriptor ring\n"); 2408 return err; 2409 } 2410 2411 /** 2412 * e1000_clean_tx_ring - Free Tx Buffers 2413 * @tx_ring: Tx descriptor ring 2414 **/ 2415 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) 2416 { 2417 struct e1000_adapter *adapter = tx_ring->adapter; 2418 struct e1000_buffer *buffer_info; 2419 unsigned long size; 2420 unsigned int i; 2421 2422 for (i = 0; i < tx_ring->count; i++) { 2423 buffer_info = &tx_ring->buffer_info[i]; 2424 e1000_put_txbuf(tx_ring, buffer_info); 2425 } 2426 2427 netdev_reset_queue(adapter->netdev); 2428 size = sizeof(struct e1000_buffer) * tx_ring->count; 2429 memset(tx_ring->buffer_info, 0, size); 2430 2431 memset(tx_ring->desc, 0, tx_ring->size); 2432 2433 tx_ring->next_to_use = 0; 2434 tx_ring->next_to_clean = 0; 2435 } 2436 2437 /** 2438 * e1000e_free_tx_resources - Free Tx Resources per Queue 2439 * @tx_ring: Tx descriptor ring 2440 * 2441 * Free all transmit software resources 2442 **/ 2443 void e1000e_free_tx_resources(struct e1000_ring *tx_ring) 2444 { 2445 struct e1000_adapter *adapter = tx_ring->adapter; 2446 struct pci_dev *pdev = adapter->pdev; 2447 2448 e1000_clean_tx_ring(tx_ring); 2449 2450 vfree(tx_ring->buffer_info); 2451 tx_ring->buffer_info = NULL; 2452 2453 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2454 tx_ring->dma); 2455 tx_ring->desc = NULL; 2456 } 2457 2458 /** 2459 * e1000e_free_rx_resources - Free Rx Resources 2460 * @rx_ring: Rx descriptor ring 2461 * 2462 * Free all receive software resources 2463 **/ 2464 void e1000e_free_rx_resources(struct e1000_ring *rx_ring) 2465 { 2466 struct e1000_adapter *adapter = rx_ring->adapter; 2467 struct pci_dev *pdev = adapter->pdev; 2468 int i; 2469 2470 e1000_clean_rx_ring(rx_ring); 2471 2472 for (i = 0; i < rx_ring->count; i++) 2473 kfree(rx_ring->buffer_info[i].ps_pages); 2474 2475 vfree(rx_ring->buffer_info); 2476 rx_ring->buffer_info = NULL; 2477 2478 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2479 rx_ring->dma); 2480 rx_ring->desc = NULL; 2481 } 2482 2483 /** 2484 * e1000_update_itr - update the dynamic ITR value based on statistics 2485 * @adapter: pointer to adapter 2486 * @itr_setting: current adapter->itr 2487 * @packets: the number of packets during this measurement interval 2488 * @bytes: the number of bytes during this measurement interval 2489 * 2490 * Stores a new ITR value based on packets and byte 2491 * counts during the last interrupt. The advantage of per interrupt 2492 * computation is faster updates and more accurate ITR for the current 2493 * traffic pattern. Constants in this function were computed 2494 * based on theoretical maximum wire speed and thresholds were set based 2495 * on testing data as well as attempting to minimize response time 2496 * while increasing bulk throughput. 
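 * For example, flows averaging more than 8000 bytes per packet (TSO or
 * jumbo traffic) are steered toward bulk_latency, while a trickle of small
 * packets is kept at lowest_latency.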
This functionality is controlled 2497 * by the InterruptThrottleRate module parameter. 2498 **/ 2499 static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) 2500 { 2501 unsigned int retval = itr_setting; 2502 2503 if (packets == 0) 2504 return itr_setting; 2505 2506 switch (itr_setting) { 2507 case lowest_latency: 2508 /* handle TSO and jumbo frames */ 2509 if (bytes / packets > 8000) 2510 retval = bulk_latency; 2511 else if ((packets < 5) && (bytes > 512)) 2512 retval = low_latency; 2513 break; 2514 case low_latency: /* 50 usec aka 20000 ints/s */ 2515 if (bytes > 10000) { 2516 /* this if handles the TSO accounting */ 2517 if (bytes / packets > 8000) 2518 retval = bulk_latency; 2519 else if ((packets < 10) || ((bytes / packets) > 1200)) 2520 retval = bulk_latency; 2521 else if ((packets > 35)) 2522 retval = lowest_latency; 2523 } else if (bytes / packets > 2000) { 2524 retval = bulk_latency; 2525 } else if (packets <= 2 && bytes < 512) { 2526 retval = lowest_latency; 2527 } 2528 break; 2529 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2530 if (bytes > 25000) { 2531 if (packets > 35) 2532 retval = low_latency; 2533 } else if (bytes < 6000) { 2534 retval = low_latency; 2535 } 2536 break; 2537 } 2538 2539 return retval; 2540 } 2541 2542 static void e1000_set_itr(struct e1000_adapter *adapter) 2543 { 2544 u16 current_itr; 2545 u32 new_itr = adapter->itr; 2546 2547 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2548 if (adapter->link_speed != SPEED_1000) { 2549 current_itr = 0; 2550 new_itr = 4000; 2551 goto set_itr_now; 2552 } 2553 2554 if (adapter->flags2 & FLAG2_DISABLE_AIM) { 2555 new_itr = 0; 2556 goto set_itr_now; 2557 } 2558 2559 adapter->tx_itr = e1000_update_itr(adapter->tx_itr, 2560 adapter->total_tx_packets, 2561 adapter->total_tx_bytes); 2562 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2563 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2564 adapter->tx_itr = low_latency; 2565 2566 adapter->rx_itr = e1000_update_itr(adapter->rx_itr, 2567 adapter->total_rx_packets, 2568 adapter->total_rx_bytes); 2569 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2570 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2571 adapter->rx_itr = low_latency; 2572 2573 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2574 2575 /* counts and packets in update_itr are dependent on these numbers */ 2576 switch (current_itr) { 2577 case lowest_latency: 2578 new_itr = 70000; 2579 break; 2580 case low_latency: 2581 new_itr = 20000; /* aka hwitr = ~200 */ 2582 break; 2583 case bulk_latency: 2584 new_itr = 4000; 2585 break; 2586 default: 2587 break; 2588 } 2589 2590 set_itr_now: 2591 if (new_itr != adapter->itr) { 2592 /* this attempts to bias the interrupt rate towards Bulk 2593 * by adding intermediate steps when interrupt rate is 2594 * increasing 2595 */ 2596 new_itr = new_itr > adapter->itr ? 2597 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; 2598 adapter->itr = new_itr; 2599 adapter->rx_ring->itr_val = new_itr; 2600 if (adapter->msix_entries) 2601 adapter->rx_ring->set_itr = 1; 2602 else 2603 e1000e_write_itr(adapter, new_itr); 2604 } 2605 } 2606 2607 /** 2608 * e1000e_write_itr - write the ITR value to the appropriate registers 2609 * @adapter: address of board private structure 2610 * @itr: new ITR value to program 2611 * 2612 * e1000e_write_itr determines if the adapter is in MSI-X mode 2613 * and, if so, writes the EITR registers with the ITR value. 
2614 * Otherwise, it writes the ITR value into the ITR register. 2615 **/ 2616 void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) 2617 { 2618 struct e1000_hw *hw = &adapter->hw; 2619 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0; 2620 2621 if (adapter->msix_entries) { 2622 int vector; 2623 2624 for (vector = 0; vector < adapter->num_vectors; vector++) 2625 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); 2626 } else { 2627 ew32(ITR, new_itr); 2628 } 2629 } 2630 2631 /** 2632 * e1000_alloc_queues - Allocate memory for all rings 2633 * @adapter: board private structure to initialize 2634 **/ 2635 static int e1000_alloc_queues(struct e1000_adapter *adapter) 2636 { 2637 int size = sizeof(struct e1000_ring); 2638 2639 adapter->tx_ring = kzalloc(size, GFP_KERNEL); 2640 if (!adapter->tx_ring) 2641 goto err; 2642 adapter->tx_ring->count = adapter->tx_ring_count; 2643 adapter->tx_ring->adapter = adapter; 2644 2645 adapter->rx_ring = kzalloc(size, GFP_KERNEL); 2646 if (!adapter->rx_ring) 2647 goto err; 2648 adapter->rx_ring->count = adapter->rx_ring_count; 2649 adapter->rx_ring->adapter = adapter; 2650 2651 return 0; 2652 err: 2653 e_err("Unable to allocate memory for queues\n"); 2654 kfree(adapter->rx_ring); 2655 kfree(adapter->tx_ring); 2656 return -ENOMEM; 2657 } 2658 2659 /** 2660 * e1000e_poll - NAPI Rx polling callback 2661 * @napi: struct associated with this polling callback 2662 * @weight: number of packets driver is allowed to process this poll 2663 **/ 2664 static int e1000e_poll(struct napi_struct *napi, int weight) 2665 { 2666 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, 2667 napi); 2668 struct e1000_hw *hw = &adapter->hw; 2669 struct net_device *poll_dev = adapter->netdev; 2670 int tx_cleaned = 1, work_done = 0; 2671 2672 adapter = netdev_priv(poll_dev); 2673 2674 if (!adapter->msix_entries || 2675 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2676 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); 2677 2678 adapter->clean_rx(adapter->rx_ring, &work_done, weight); 2679 2680 if (!tx_cleaned) 2681 work_done = weight; 2682 2683 /* If weight not fully consumed, exit the polling mode */ 2684 if (work_done < weight) { 2685 if (adapter->itr_setting & 3) 2686 e1000_set_itr(adapter); 2687 napi_complete_done(napi, work_done); 2688 if (!test_bit(__E1000_DOWN, &adapter->state)) { 2689 if (adapter->msix_entries) 2690 ew32(IMS, adapter->rx_ring->ims_val); 2691 else 2692 e1000_irq_enable(adapter); 2693 } 2694 } 2695 2696 return work_done; 2697 } 2698 2699 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 2700 __always_unused __be16 proto, u16 vid) 2701 { 2702 struct e1000_adapter *adapter = netdev_priv(netdev); 2703 struct e1000_hw *hw = &adapter->hw; 2704 u32 vfta, index; 2705 2706 /* don't update vlan cookie if already programmed */ 2707 if ((adapter->hw.mng_cookie.status & 2708 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2709 (vid == adapter->mng_vlan_id)) 2710 return 0; 2711 2712 /* add VID to filter table */ 2713 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2714 index = (vid >> 5) & 0x7F; 2715 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2716 vfta |= BIT((vid & 0x1F)); 2717 hw->mac.ops.write_vfta(hw, index, vfta); 2718 } 2719 2720 set_bit(vid, adapter->active_vlans); 2721 2722 return 0; 2723 } 2724 2725 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 2726 __always_unused __be16 proto, u16 vid) 2727 { 2728 struct e1000_adapter *adapter = netdev_priv(netdev); 2729 struct e1000_hw *hw = &adapter->hw; 2730 u32 
vfta, index; 2731 2732 if ((adapter->hw.mng_cookie.status & 2733 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2734 (vid == adapter->mng_vlan_id)) { 2735 /* release control to f/w */ 2736 e1000e_release_hw_control(adapter); 2737 return 0; 2738 } 2739 2740 /* remove VID from filter table */ 2741 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2742 index = (vid >> 5) & 0x7F; 2743 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2744 vfta &= ~BIT((vid & 0x1F)); 2745 hw->mac.ops.write_vfta(hw, index, vfta); 2746 } 2747 2748 clear_bit(vid, adapter->active_vlans); 2749 2750 return 0; 2751 } 2752 2753 /** 2754 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering 2755 * @adapter: board private structure to initialize 2756 **/ 2757 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) 2758 { 2759 struct net_device *netdev = adapter->netdev; 2760 struct e1000_hw *hw = &adapter->hw; 2761 u32 rctl; 2762 2763 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2764 /* disable VLAN receive filtering */ 2765 rctl = er32(RCTL); 2766 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); 2767 ew32(RCTL, rctl); 2768 2769 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { 2770 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 2771 adapter->mng_vlan_id); 2772 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2773 } 2774 } 2775 } 2776 2777 /** 2778 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering 2779 * @adapter: board private structure to initialize 2780 **/ 2781 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) 2782 { 2783 struct e1000_hw *hw = &adapter->hw; 2784 u32 rctl; 2785 2786 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2787 /* enable VLAN receive filtering */ 2788 rctl = er32(RCTL); 2789 rctl |= E1000_RCTL_VFE; 2790 rctl &= ~E1000_RCTL_CFIEN; 2791 ew32(RCTL, rctl); 2792 } 2793 } 2794 2795 /** 2796 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping 2797 * @adapter: board private structure to initialize 2798 **/ 2799 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) 2800 { 2801 struct e1000_hw *hw = &adapter->hw; 2802 u32 ctrl; 2803 2804 /* disable VLAN tag insert/strip */ 2805 ctrl = er32(CTRL); 2806 ctrl &= ~E1000_CTRL_VME; 2807 ew32(CTRL, ctrl); 2808 } 2809 2810 /** 2811 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping 2812 * @adapter: board private structure to initialize 2813 **/ 2814 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) 2815 { 2816 struct e1000_hw *hw = &adapter->hw; 2817 u32 ctrl; 2818 2819 /* enable VLAN tag insert/strip */ 2820 ctrl = er32(CTRL); 2821 ctrl |= E1000_CTRL_VME; 2822 ew32(CTRL, ctrl); 2823 } 2824 2825 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 2826 { 2827 struct net_device *netdev = adapter->netdev; 2828 u16 vid = adapter->hw.mng_cookie.vlan_id; 2829 u16 old_vid = adapter->mng_vlan_id; 2830 2831 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 2832 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 2833 adapter->mng_vlan_id = vid; 2834 } 2835 2836 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) 2837 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); 2838 } 2839 2840 static void e1000_restore_vlan(struct e1000_adapter *adapter) 2841 { 2842 u16 vid; 2843 2844 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); 2845 2846 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2847 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 2848 } 2849 2850 static 
void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2851 { 2852 struct e1000_hw *hw = &adapter->hw; 2853 u32 manc, manc2h, mdef, i, j; 2854 2855 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) 2856 return; 2857 2858 manc = er32(MANC); 2859 2860 /* enable receiving management packets to the host. this will probably 2861 * generate destination unreachable messages from the host OS, but 2862 * the packets will be handled on SMBUS 2863 */ 2864 manc |= E1000_MANC_EN_MNG2HOST; 2865 manc2h = er32(MANC2H); 2866 2867 switch (hw->mac.type) { 2868 default: 2869 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); 2870 break; 2871 case e1000_82574: 2872 case e1000_82583: 2873 /* Check if IPMI pass-through decision filter already exists; 2874 * if so, enable it. 2875 */ 2876 for (i = 0, j = 0; i < 8; i++) { 2877 mdef = er32(MDEF(i)); 2878 2879 /* Ignore filters with anything other than IPMI ports */ 2880 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2881 continue; 2882 2883 /* Enable this decision filter in MANC2H */ 2884 if (mdef) 2885 manc2h |= BIT(i); 2886 2887 j |= mdef; 2888 } 2889 2890 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2891 break; 2892 2893 /* Create new decision filter in an empty filter */ 2894 for (i = 0, j = 0; i < 8; i++) 2895 if (er32(MDEF(i)) == 0) { 2896 ew32(MDEF(i), (E1000_MDEF_PORT_623 | 2897 E1000_MDEF_PORT_664)); 2898 manc2h |= BIT(1); 2899 j++; 2900 break; 2901 } 2902 2903 if (!j) 2904 e_warn("Unable to create IPMI pass-through filter\n"); 2905 break; 2906 } 2907 2908 ew32(MANC2H, manc2h); 2909 ew32(MANC, manc); 2910 } 2911 2912 /** 2913 * e1000_configure_tx - Configure Transmit Unit after Reset 2914 * @adapter: board private structure 2915 * 2916 * Configure the Tx unit of the MAC after a reset. 
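 * This programs the descriptor ring base/length/head/tail registers, the Tx
 * interrupt delay timers (TIDV/TADV), the TXDCTL/TARC erratum workarounds and
 * the default per-descriptor command bits in adapter->txd_cmd.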
2917 **/ 2918 static void e1000_configure_tx(struct e1000_adapter *adapter) 2919 { 2920 struct e1000_hw *hw = &adapter->hw; 2921 struct e1000_ring *tx_ring = adapter->tx_ring; 2922 u64 tdba; 2923 u32 tdlen, tctl, tarc; 2924 2925 /* Setup the HW Tx Head and Tail descriptor pointers */ 2926 tdba = tx_ring->dma; 2927 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2928 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); 2929 ew32(TDBAH(0), (tdba >> 32)); 2930 ew32(TDLEN(0), tdlen); 2931 ew32(TDH(0), 0); 2932 ew32(TDT(0), 0); 2933 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); 2934 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); 2935 2936 writel(0, tx_ring->head); 2937 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 2938 e1000e_update_tdt_wa(tx_ring, 0); 2939 else 2940 writel(0, tx_ring->tail); 2941 2942 /* Set the Tx Interrupt Delay register */ 2943 ew32(TIDV, adapter->tx_int_delay); 2944 /* Tx irq moderation */ 2945 ew32(TADV, adapter->tx_abs_int_delay); 2946 2947 if (adapter->flags2 & FLAG2_DMA_BURST) { 2948 u32 txdctl = er32(TXDCTL(0)); 2949 2950 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2951 E1000_TXDCTL_WTHRESH); 2952 /* set up some performance related parameters to encourage the 2953 * hardware to use the bus more efficiently in bursts, depends 2954 * on the tx_int_delay to be enabled, 2955 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls 2956 * hthresh = 1 ==> prefetch when one or more available 2957 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2958 * BEWARE: this seems to work but should be considered first if 2959 * there are Tx hangs or other Tx related bugs 2960 */ 2961 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2962 ew32(TXDCTL(0), txdctl); 2963 } 2964 /* erratum work around: set txdctl the same for both queues */ 2965 ew32(TXDCTL(1), er32(TXDCTL(0))); 2966 2967 /* Program the Transmit Control Register */ 2968 tctl = er32(TCTL); 2969 tctl &= ~E1000_TCTL_CT; 2970 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 2971 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2972 2973 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2974 tarc = er32(TARC(0)); 2975 /* set the speed mode bit, we'll clear it if we're not at 2976 * gigabit link later 2977 */ 2978 #define SPEED_MODE_BIT BIT(21) 2979 tarc |= SPEED_MODE_BIT; 2980 ew32(TARC(0), tarc); 2981 } 2982 2983 /* errata: program both queues to unweighted RR */ 2984 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { 2985 tarc = er32(TARC(0)); 2986 tarc |= 1; 2987 ew32(TARC(0), tarc); 2988 tarc = er32(TARC(1)); 2989 tarc |= 1; 2990 ew32(TARC(1), tarc); 2991 } 2992 2993 /* Setup Transmit Descriptor Settings for eop descriptor */ 2994 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 2995 2996 /* only set IDE if we are delaying interrupts using the timers */ 2997 if (adapter->tx_int_delay) 2998 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 2999 3000 /* enable Report Status bit */ 3001 adapter->txd_cmd |= E1000_TXD_CMD_RS; 3002 3003 ew32(TCTL, tctl); 3004 3005 hw->mac.ops.config_collision_dist(hw); 3006 3007 /* SPT and CNP Si errata workaround to avoid data corruption */ 3008 if (hw->mac.type >= e1000_pch_spt) { 3009 u32 reg_val; 3010 3011 reg_val = er32(IOSFPC); 3012 reg_val |= E1000_RCTL_RDMTS_HEX; 3013 ew32(IOSFPC, reg_val); 3014 3015 reg_val = er32(TARC(0)); 3016 reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; 3017 ew32(TARC(0), reg_val); 3018 } 3019 } 3020 3021 /** 3022 * e1000_setup_rctl - configure the receive control registers 3023 * @adapter: Board private structure 3024 **/ 3025 #define PAGE_USE_COUNT(S) 
(((S) >> PAGE_SHIFT) + \ 3026 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 3027 static void e1000_setup_rctl(struct e1000_adapter *adapter) 3028 { 3029 struct e1000_hw *hw = &adapter->hw; 3030 u32 rctl, rfctl; 3031 u32 pages = 0; 3032 3033 /* Workaround Si errata on PCHx - configure jumbo frame flow. 3034 * If jumbo frames not set, program related MAC/PHY registers 3035 * to h/w defaults 3036 */ 3037 if (hw->mac.type >= e1000_pch2lan) { 3038 s32 ret_val; 3039 3040 if (adapter->netdev->mtu > ETH_DATA_LEN) 3041 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 3042 else 3043 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 3044 3045 if (ret_val) 3046 e_dbg("failed to enable|disable jumbo frame workaround mode\n"); 3047 } 3048 3049 /* Program MC offset vector base */ 3050 rctl = er32(RCTL); 3051 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3052 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 3053 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 3054 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3055 3056 /* Do not Store bad packets */ 3057 rctl &= ~E1000_RCTL_SBP; 3058 3059 /* Enable Long Packet receive */ 3060 if (adapter->netdev->mtu <= ETH_DATA_LEN) 3061 rctl &= ~E1000_RCTL_LPE; 3062 else 3063 rctl |= E1000_RCTL_LPE; 3064 3065 /* Some systems expect that the CRC is included in SMBUS traffic. The 3066 * hardware strips the CRC before sending to both SMBUS (BMC) and to 3067 * host memory when this is enabled 3068 */ 3069 if (adapter->flags2 & FLAG2_CRC_STRIPPING) 3070 rctl |= E1000_RCTL_SECRC; 3071 3072 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ 3073 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { 3074 u16 phy_data; 3075 3076 e1e_rphy(hw, PHY_REG(770, 26), &phy_data); 3077 phy_data &= 0xfff8; 3078 phy_data |= BIT(2); 3079 e1e_wphy(hw, PHY_REG(770, 26), phy_data); 3080 3081 e1e_rphy(hw, 22, &phy_data); 3082 phy_data &= 0x0fff; 3083 phy_data |= BIT(14); 3084 e1e_wphy(hw, 0x10, 0x2823); 3085 e1e_wphy(hw, 0x11, 0x0003); 3086 e1e_wphy(hw, 22, phy_data); 3087 } 3088 3089 /* Setup buffer sizes */ 3090 rctl &= ~E1000_RCTL_SZ_4096; 3091 rctl |= E1000_RCTL_BSEX; 3092 switch (adapter->rx_buffer_len) { 3093 case 2048: 3094 default: 3095 rctl |= E1000_RCTL_SZ_2048; 3096 rctl &= ~E1000_RCTL_BSEX; 3097 break; 3098 case 4096: 3099 rctl |= E1000_RCTL_SZ_4096; 3100 break; 3101 case 8192: 3102 rctl |= E1000_RCTL_SZ_8192; 3103 break; 3104 case 16384: 3105 rctl |= E1000_RCTL_SZ_16384; 3106 break; 3107 } 3108 3109 /* Enable Extended Status in all Receive Descriptors */ 3110 rfctl = er32(RFCTL); 3111 rfctl |= E1000_RFCTL_EXTEN; 3112 ew32(RFCTL, rfctl); 3113 3114 /* 82571 and greater support packet-split where the protocol 3115 * header is placed in skb->data and the packet data is 3116 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 3117 * In the case of a non-split, skb->data is linearly filled, 3118 * followed by the page buffers. Therefore, skb->data is 3119 * sized to hold the largest protocol header. 3120 * 3121 * allocations using alloc_page take too long for regular MTU 3122 * so only enable packet split for jumbo frames 3123 * 3124 * Using pages when the page size is greater than 16k wastes 3125 * a lot of memory, since we allocate 3 pages at all times 3126 * per packet. 
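 * As a worked example: with 4K pages and a 9000 byte jumbo MTU,
 * PAGE_USE_COUNT() below evaluates to 3, so packet split is enabled with
 * rx_ps_pages = 3; with the standard 1500 byte MTU, LPE is clear and packet
 * split stays disabled.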
3127 */ 3128 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 3129 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 3130 adapter->rx_ps_pages = pages; 3131 else 3132 adapter->rx_ps_pages = 0; 3133 3134 if (adapter->rx_ps_pages) { 3135 u32 psrctl = 0; 3136 3137 /* Enable Packet split descriptors */ 3138 rctl |= E1000_RCTL_DTYP_PS; 3139 3140 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; 3141 3142 switch (adapter->rx_ps_pages) { 3143 case 3: 3144 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; 3145 /* fall-through */ 3146 case 2: 3147 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; 3148 /* fall-through */ 3149 case 1: 3150 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; 3151 break; 3152 } 3153 3154 ew32(PSRCTL, psrctl); 3155 } 3156 3157 /* This is useful for sniffing bad packets. */ 3158 if (adapter->netdev->features & NETIF_F_RXALL) { 3159 /* UPE and MPE will be handled by normal PROMISC logic 3160 * in e1000e_set_rx_mode 3161 */ 3162 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3163 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3164 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3165 3166 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 3167 E1000_RCTL_DPF | /* Allow filtered pause */ 3168 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 3169 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 3170 * and that breaks VLANs. 3171 */ 3172 } 3173 3174 ew32(RCTL, rctl); 3175 /* just started the receive unit, no need to restart */ 3176 adapter->flags &= ~FLAG_RESTART_NOW; 3177 } 3178 3179 /** 3180 * e1000_configure_rx - Configure Receive Unit after Reset 3181 * @adapter: board private structure 3182 * 3183 * Configure the Rx unit of the MAC after a reset. 3184 **/ 3185 static void e1000_configure_rx(struct e1000_adapter *adapter) 3186 { 3187 struct e1000_hw *hw = &adapter->hw; 3188 struct e1000_ring *rx_ring = adapter->rx_ring; 3189 u64 rdba; 3190 u32 rdlen, rctl, rxcsum, ctrl_ext; 3191 3192 if (adapter->rx_ps_pages) { 3193 /* this is a 32 byte descriptor */ 3194 rdlen = rx_ring->count * 3195 sizeof(union e1000_rx_desc_packet_split); 3196 adapter->clean_rx = e1000_clean_rx_irq_ps; 3197 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 3198 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 3199 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3200 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 3201 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 3202 } else { 3203 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3204 adapter->clean_rx = e1000_clean_rx_irq; 3205 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 3206 } 3207 3208 /* disable receives while setting up the descriptors */ 3209 rctl = er32(RCTL); 3210 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3211 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3212 e1e_flush(); 3213 usleep_range(10000, 20000); 3214 3215 if (adapter->flags2 & FLAG2_DMA_BURST) { 3216 /* set the writeback threshold (only takes effect if the RDTR 3217 * is set). 
set GRAN=1 and write back up to 0x4 worth, and 3218 * enable prefetching of 0x20 Rx descriptors 3219 * granularity = 01 3220 * wthresh = 04, 3221 * hthresh = 04, 3222 * pthresh = 0x20 3223 */ 3224 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); 3225 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); 3226 3227 /* override the delay timers for enabling bursting, only if 3228 * the value was not set by the user via module options 3229 */ 3230 if (adapter->rx_int_delay == DEFAULT_RDTR) 3231 adapter->rx_int_delay = BURST_RDTR; 3232 if (adapter->rx_abs_int_delay == DEFAULT_RADV) 3233 adapter->rx_abs_int_delay = BURST_RADV; 3234 } 3235 3236 /* set the Receive Delay Timer Register */ 3237 ew32(RDTR, adapter->rx_int_delay); 3238 3239 /* irq moderation */ 3240 ew32(RADV, adapter->rx_abs_int_delay); 3241 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) 3242 e1000e_write_itr(adapter, adapter->itr); 3243 3244 ctrl_ext = er32(CTRL_EXT); 3245 /* Auto-Mask interrupts upon ICR access */ 3246 ctrl_ext |= E1000_CTRL_EXT_IAME; 3247 ew32(IAM, 0xffffffff); 3248 ew32(CTRL_EXT, ctrl_ext); 3249 e1e_flush(); 3250 3251 /* Setup the HW Rx Head and Tail Descriptor Pointers and 3252 * the Base and Length of the Rx Descriptor Ring 3253 */ 3254 rdba = rx_ring->dma; 3255 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); 3256 ew32(RDBAH(0), (rdba >> 32)); 3257 ew32(RDLEN(0), rdlen); 3258 ew32(RDH(0), 0); 3259 ew32(RDT(0), 0); 3260 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); 3261 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); 3262 3263 writel(0, rx_ring->head); 3264 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 3265 e1000e_update_rdt_wa(rx_ring, 0); 3266 else 3267 writel(0, rx_ring->tail); 3268 3269 /* Enable Receive Checksum Offload for TCP and UDP */ 3270 rxcsum = er32(RXCSUM); 3271 if (adapter->netdev->features & NETIF_F_RXCSUM) 3272 rxcsum |= E1000_RXCSUM_TUOFL; 3273 else 3274 rxcsum &= ~E1000_RXCSUM_TUOFL; 3275 ew32(RXCSUM, rxcsum); 3276 3277 /* With jumbo frames, excessive C-state transition latencies result 3278 * in dropped transactions. 3279 */ 3280 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3281 u32 lat = 3282 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - 3283 adapter->max_frame_size) * 8 / 1000; 3284 3285 if (adapter->flags & FLAG_IS_ICH) { 3286 u32 rxdctl = er32(RXDCTL(0)); 3287 3288 ew32(RXDCTL(0), rxdctl | 0x3); 3289 } 3290 3291 pm_qos_update_request(&adapter->pm_qos_req, lat); 3292 } else { 3293 pm_qos_update_request(&adapter->pm_qos_req, 3294 PM_QOS_DEFAULT_VALUE); 3295 } 3296 3297 /* Enable Receives */ 3298 ew32(RCTL, rctl); 3299 } 3300 3301 /** 3302 * e1000e_write_mc_addr_list - write multicast addresses to MTA 3303 * @netdev: network interface device structure 3304 * 3305 * Writes multicast address list to the MTA hash table. 3306 * Returns: -ENOMEM on failure 3307 * 0 on no addresses written 3308 * X on writing X addresses to MTA 3309 */ 3310 static int e1000e_write_mc_addr_list(struct net_device *netdev) 3311 { 3312 struct e1000_adapter *adapter = netdev_priv(netdev); 3313 struct e1000_hw *hw = &adapter->hw; 3314 struct netdev_hw_addr *ha; 3315 u8 *mta_list; 3316 int i; 3317 3318 if (netdev_mc_empty(netdev)) { 3319 /* nothing to program, so clear mc list */ 3320 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); 3321 return 0; 3322 } 3323 3324 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); 3325 if (!mta_list) 3326 return -ENOMEM; 3327 3328 /* update_mc_addr_list expects a packed array of only addresses. 
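 * Each entry is ETH_ALEN bytes with no padding, which is what the
 * netdev_for_each_mc_addr() copy below builds in mta_list.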
*/ 3329 i = 0; 3330 netdev_for_each_mc_addr(ha, netdev) 3331 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3332 3333 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); 3334 kfree(mta_list); 3335 3336 return netdev_mc_count(netdev); 3337 } 3338 3339 /** 3340 * e1000e_write_uc_addr_list - write unicast addresses to RAR table 3341 * @netdev: network interface device structure 3342 * 3343 * Writes unicast address list to the RAR table. 3344 * Returns: -ENOMEM on failure/insufficient address space 3345 * 0 on no addresses written 3346 * X on writing X addresses to the RAR table 3347 **/ 3348 static int e1000e_write_uc_addr_list(struct net_device *netdev) 3349 { 3350 struct e1000_adapter *adapter = netdev_priv(netdev); 3351 struct e1000_hw *hw = &adapter->hw; 3352 unsigned int rar_entries; 3353 int count = 0; 3354 3355 rar_entries = hw->mac.ops.rar_get_count(hw); 3356 3357 /* save a rar entry for our hardware address */ 3358 rar_entries--; 3359 3360 /* save a rar entry for the LAA workaround */ 3361 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) 3362 rar_entries--; 3363 3364 /* return ENOMEM indicating insufficient memory for addresses */ 3365 if (netdev_uc_count(netdev) > rar_entries) 3366 return -ENOMEM; 3367 3368 if (!netdev_uc_empty(netdev) && rar_entries) { 3369 struct netdev_hw_addr *ha; 3370 3371 /* write the addresses in reverse order to avoid write 3372 * combining 3373 */ 3374 netdev_for_each_uc_addr(ha, netdev) { 3375 int ret_val; 3376 3377 if (!rar_entries) 3378 break; 3379 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); 3380 if (ret_val < 0) 3381 return -ENOMEM; 3382 count++; 3383 } 3384 } 3385 3386 /* zero out the remaining RAR entries not used above */ 3387 for (; rar_entries > 0; rar_entries--) { 3388 ew32(RAH(rar_entries), 0); 3389 ew32(RAL(rar_entries), 0); 3390 } 3391 e1e_flush(); 3392 3393 return count; 3394 } 3395 3396 /** 3397 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set 3398 * @netdev: network interface device structure 3399 * 3400 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast 3401 * address list or the network interface flags are updated. This routine is 3402 * responsible for configuring the hardware for proper unicast, multicast, 3403 * promiscuous mode, and all-multi behavior. 
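 * Nothing is written while the device is runtime suspended; the filters are
 * reprogrammed when e1000_configure() runs the routine again.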
3404 **/ 3405 static void e1000e_set_rx_mode(struct net_device *netdev) 3406 { 3407 struct e1000_adapter *adapter = netdev_priv(netdev); 3408 struct e1000_hw *hw = &adapter->hw; 3409 u32 rctl; 3410 3411 if (pm_runtime_suspended(netdev->dev.parent)) 3412 return; 3413 3414 /* Check for Promiscuous and All Multicast modes */ 3415 rctl = er32(RCTL); 3416 3417 /* clear the affected bits */ 3418 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); 3419 3420 if (netdev->flags & IFF_PROMISC) { 3421 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3422 /* Do not hardware filter VLANs in promisc mode */ 3423 e1000e_vlan_filter_disable(adapter); 3424 } else { 3425 int count; 3426 3427 if (netdev->flags & IFF_ALLMULTI) { 3428 rctl |= E1000_RCTL_MPE; 3429 } else { 3430 /* Write addresses to the MTA, if the attempt fails 3431 * then we should just turn on promiscuous mode so 3432 * that we can at least receive multicast traffic 3433 */ 3434 count = e1000e_write_mc_addr_list(netdev); 3435 if (count < 0) 3436 rctl |= E1000_RCTL_MPE; 3437 } 3438 e1000e_vlan_filter_enable(adapter); 3439 /* Write addresses to available RAR registers, if there is not 3440 * sufficient space to store all the addresses then enable 3441 * unicast promiscuous mode 3442 */ 3443 count = e1000e_write_uc_addr_list(netdev); 3444 if (count < 0) 3445 rctl |= E1000_RCTL_UPE; 3446 } 3447 3448 ew32(RCTL, rctl); 3449 3450 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 3451 e1000e_vlan_strip_enable(adapter); 3452 else 3453 e1000e_vlan_strip_disable(adapter); 3454 } 3455 3456 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) 3457 { 3458 struct e1000_hw *hw = &adapter->hw; 3459 u32 mrqc, rxcsum; 3460 u32 rss_key[10]; 3461 int i; 3462 3463 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 3464 for (i = 0; i < 10; i++) 3465 ew32(RSSRK(i), rss_key[i]); 3466 3467 /* Direct all traffic to queue 0 */ 3468 for (i = 0; i < 32; i++) 3469 ew32(RETA(i), 0); 3470 3471 /* Disable raw packet checksumming so that RSS hash is placed in 3472 * descriptor on writeback. 3473 */ 3474 rxcsum = er32(RXCSUM); 3475 rxcsum |= E1000_RXCSUM_PCSD; 3476 3477 ew32(RXCSUM, rxcsum); 3478 3479 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | 3480 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3481 E1000_MRQC_RSS_FIELD_IPV6 | 3482 E1000_MRQC_RSS_FIELD_IPV6_TCP | 3483 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 3484 3485 ew32(MRQC, mrqc); 3486 } 3487 3488 /** 3489 * e1000e_get_base_timinca - get default SYSTIM time increment attributes 3490 * @adapter: board private structure 3491 * @timinca: pointer to returned time increment attributes 3492 * 3493 * Get attributes for incrementing the System Time Register SYSTIML/H at 3494 * the default base frequency, and set the cyclecounter shift value. 
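 * Returns 0 and fills @timinca on success, or -EINVAL when the MAC type has
 * no supported SYSTIM clock configuration.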
3495 **/ 3496 s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) 3497 { 3498 struct e1000_hw *hw = &adapter->hw; 3499 u32 incvalue, incperiod, shift; 3500 3501 /* Make sure clock is enabled on I217/I218/I219 before checking 3502 * the frequency 3503 */ 3504 if ((hw->mac.type >= e1000_pch_lpt) && 3505 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && 3506 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { 3507 u32 fextnvm7 = er32(FEXTNVM7); 3508 3509 if (!(fextnvm7 & BIT(0))) { 3510 ew32(FEXTNVM7, fextnvm7 | BIT(0)); 3511 e1e_flush(); 3512 } 3513 } 3514 3515 switch (hw->mac.type) { 3516 case e1000_pch2lan: 3517 /* Stable 96MHz frequency */ 3518 incperiod = INCPERIOD_96MHZ; 3519 incvalue = INCVALUE_96MHZ; 3520 shift = INCVALUE_SHIFT_96MHZ; 3521 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; 3522 break; 3523 case e1000_pch_lpt: 3524 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3525 /* Stable 96MHz frequency */ 3526 incperiod = INCPERIOD_96MHZ; 3527 incvalue = INCVALUE_96MHZ; 3528 shift = INCVALUE_SHIFT_96MHZ; 3529 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; 3530 } else { 3531 /* Stable 25MHz frequency */ 3532 incperiod = INCPERIOD_25MHZ; 3533 incvalue = INCVALUE_25MHZ; 3534 shift = INCVALUE_SHIFT_25MHZ; 3535 adapter->cc.shift = shift; 3536 } 3537 break; 3538 case e1000_pch_spt: 3539 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3540 /* Stable 24MHz frequency */ 3541 incperiod = INCPERIOD_24MHZ; 3542 incvalue = INCVALUE_24MHZ; 3543 shift = INCVALUE_SHIFT_24MHZ; 3544 adapter->cc.shift = shift; 3545 break; 3546 } 3547 return -EINVAL; 3548 case e1000_pch_cnp: 3549 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3550 /* Stable 24MHz frequency */ 3551 incperiod = INCPERIOD_24MHZ; 3552 incvalue = INCVALUE_24MHZ; 3553 shift = INCVALUE_SHIFT_24MHZ; 3554 adapter->cc.shift = shift; 3555 } else { 3556 /* Stable 38400KHz frequency */ 3557 incperiod = INCPERIOD_38400KHZ; 3558 incvalue = INCVALUE_38400KHZ; 3559 shift = INCVALUE_SHIFT_38400KHZ; 3560 adapter->cc.shift = shift; 3561 } 3562 break; 3563 case e1000_82574: 3564 case e1000_82583: 3565 /* Stable 25MHz frequency */ 3566 incperiod = INCPERIOD_25MHZ; 3567 incvalue = INCVALUE_25MHZ; 3568 shift = INCVALUE_SHIFT_25MHZ; 3569 adapter->cc.shift = shift; 3570 break; 3571 default: 3572 return -EINVAL; 3573 } 3574 3575 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | 3576 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); 3577 3578 return 0; 3579 } 3580 3581 /** 3582 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable 3583 * @adapter: board private structure 3584 * 3585 * Outgoing time stamping can be enabled and disabled. Play nice and 3586 * disable it when requested, although it shouldn't cause any overhead 3587 * when no packet needs it. At most one packet in the queue may be 3588 * marked for time stamping, otherwise it would be impossible to tell 3589 * for sure to which packet the hardware time stamp belongs. 3590 * 3591 * Incoming time stamping has to be configured via the hardware filters. 3592 * Not all combinations are supported, in particular event type has to be 3593 * specified. Matching the kind of event packet is not supported, with the 3594 * exception of "all V2 events regardless of level 2 or 4". 
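 * Returns 0 on success, -EINVAL when hardware timestamping is unsupported or
 * reserved flags are set, -ERANGE for an unknown tx_type/rx_filter, and
 * -EAGAIN when the TSYNCTXCTL/TSYNCRXCTL registers do not accept the
 * requested configuration.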
3595 **/ 3596 static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, 3597 struct hwtstamp_config *config) 3598 { 3599 struct e1000_hw *hw = &adapter->hw; 3600 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; 3601 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 3602 u32 rxmtrl = 0; 3603 u16 rxudp = 0; 3604 bool is_l4 = false; 3605 bool is_l2 = false; 3606 u32 regval; 3607 3608 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) 3609 return -EINVAL; 3610 3611 /* flags reserved for future extensions - must be zero */ 3612 if (config->flags) 3613 return -EINVAL; 3614 3615 switch (config->tx_type) { 3616 case HWTSTAMP_TX_OFF: 3617 tsync_tx_ctl = 0; 3618 break; 3619 case HWTSTAMP_TX_ON: 3620 break; 3621 default: 3622 return -ERANGE; 3623 } 3624 3625 switch (config->rx_filter) { 3626 case HWTSTAMP_FILTER_NONE: 3627 tsync_rx_ctl = 0; 3628 break; 3629 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 3630 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; 3631 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; 3632 is_l4 = true; 3633 break; 3634 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 3635 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; 3636 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; 3637 is_l4 = true; 3638 break; 3639 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 3640 /* Also time stamps V2 L2 Path Delay Request/Response */ 3641 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; 3642 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; 3643 is_l2 = true; 3644 break; 3645 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 3646 /* Also time stamps V2 L2 Path Delay Request/Response. */ 3647 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; 3648 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; 3649 is_l2 = true; 3650 break; 3651 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 3652 /* Hardware cannot filter just V2 L4 Sync messages; 3653 * fall-through to V2 (both L2 and L4) Sync. 3654 */ 3655 case HWTSTAMP_FILTER_PTP_V2_SYNC: 3656 /* Also time stamps V2 Path Delay Request/Response. */ 3657 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 3658 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; 3659 is_l2 = true; 3660 is_l4 = true; 3661 break; 3662 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 3663 /* Hardware cannot filter just V2 L4 Delay Request messages; 3664 * fall-through to V2 (both L2 and L4) Delay Request. 3665 */ 3666 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 3667 /* Also time stamps V2 Path Delay Request/Response. */ 3668 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 3669 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; 3670 is_l2 = true; 3671 is_l4 = true; 3672 break; 3673 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 3674 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 3675 /* Hardware cannot filter just V2 L4 or L2 Event messages; 3676 * fall-through to all V2 (both L2 and L4) Events. 3677 */ 3678 case HWTSTAMP_FILTER_PTP_V2_EVENT: 3679 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; 3680 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 3681 is_l2 = true; 3682 is_l4 = true; 3683 break; 3684 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 3685 /* For V1, the hardware can only filter Sync messages or 3686 * Delay Request messages but not both so fall-through to 3687 * time stamp all packets. 
3688 */ 3689 case HWTSTAMP_FILTER_NTP_ALL: 3690 case HWTSTAMP_FILTER_ALL: 3691 is_l2 = true; 3692 is_l4 = true; 3693 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; 3694 config->rx_filter = HWTSTAMP_FILTER_ALL; 3695 break; 3696 default: 3697 return -ERANGE; 3698 } 3699 3700 adapter->hwtstamp_config = *config; 3701 3702 /* enable/disable Tx h/w time stamping */ 3703 regval = er32(TSYNCTXCTL); 3704 regval &= ~E1000_TSYNCTXCTL_ENABLED; 3705 regval |= tsync_tx_ctl; 3706 ew32(TSYNCTXCTL, regval); 3707 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != 3708 (regval & E1000_TSYNCTXCTL_ENABLED)) { 3709 e_err("Timesync Tx Control register not set as expected\n"); 3710 return -EAGAIN; 3711 } 3712 3713 /* enable/disable Rx h/w time stamping */ 3714 regval = er32(TSYNCRXCTL); 3715 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); 3716 regval |= tsync_rx_ctl; 3717 ew32(TSYNCRXCTL, regval); 3718 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | 3719 E1000_TSYNCRXCTL_TYPE_MASK)) != 3720 (regval & (E1000_TSYNCRXCTL_ENABLED | 3721 E1000_TSYNCRXCTL_TYPE_MASK))) { 3722 e_err("Timesync Rx Control register not set as expected\n"); 3723 return -EAGAIN; 3724 } 3725 3726 /* L2: define ethertype filter for time stamped packets */ 3727 if (is_l2) 3728 rxmtrl |= ETH_P_1588; 3729 3730 /* define which PTP packets get time stamped */ 3731 ew32(RXMTRL, rxmtrl); 3732 3733 /* Filter by destination port */ 3734 if (is_l4) { 3735 rxudp = PTP_EV_PORT; 3736 cpu_to_be16s(&rxudp); 3737 } 3738 ew32(RXUDP, rxudp); 3739 3740 e1e_flush(); 3741 3742 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ 3743 er32(RXSTMPH); 3744 er32(TXSTMPH); 3745 3746 return 0; 3747 } 3748 3749 /** 3750 * e1000_configure - configure the hardware for Rx and Tx 3751 * @adapter: private board structure 3752 **/ 3753 static void e1000_configure(struct e1000_adapter *adapter) 3754 { 3755 struct e1000_ring *rx_ring = adapter->rx_ring; 3756 3757 e1000e_set_rx_mode(adapter->netdev); 3758 3759 e1000_restore_vlan(adapter); 3760 e1000_init_manageability_pt(adapter); 3761 3762 e1000_configure_tx(adapter); 3763 3764 if (adapter->netdev->features & NETIF_F_RXHASH) 3765 e1000e_setup_rss_hash(adapter); 3766 e1000_setup_rctl(adapter); 3767 e1000_configure_rx(adapter); 3768 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); 3769 } 3770 3771 /** 3772 * e1000e_power_up_phy - restore link in case the phy was powered down 3773 * @adapter: address of board private structure 3774 * 3775 * The phy may be powered down to save power and turn off link when the 3776 * driver is unloaded and wake on lan is not enabled (among others) 3777 * *** this routine MUST be followed by a call to e1000e_reset *** 3778 **/ 3779 void e1000e_power_up_phy(struct e1000_adapter *adapter) 3780 { 3781 if (adapter->hw.phy.ops.power_up) 3782 adapter->hw.phy.ops.power_up(&adapter->hw); 3783 3784 adapter->hw.mac.ops.setup_link(&adapter->hw); 3785 } 3786 3787 /** 3788 * e1000_power_down_phy - Power down the PHY 3789 * 3790 * Power down the PHY so no link is implied when interface is down. 3791 * The PHY cannot be powered down if management or WoL is active. 3792 */ 3793 static void e1000_power_down_phy(struct e1000_adapter *adapter) 3794 { 3795 if (adapter->hw.phy.ops.power_down) 3796 adapter->hw.phy.ops.power_down(&adapter->hw); 3797 } 3798 3799 /** 3800 * e1000_flush_tx_ring - remove all descriptors from the tx_ring 3801 * 3802 * We want to clear all pending descriptors from the TX ring. 3803 * zeroing happens when the HW reads the regs. 
We assign the ring itself as 3804 * the data of the next descriptor. We don't care about the data; we are about 3805 * to reset the HW. 3806 */ 3807 static void e1000_flush_tx_ring(struct e1000_adapter *adapter) 3808 { 3809 struct e1000_hw *hw = &adapter->hw; 3810 struct e1000_ring *tx_ring = adapter->tx_ring; 3811 struct e1000_tx_desc *tx_desc = NULL; 3812 u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; 3813 u16 size = 512; 3814 3815 tctl = er32(TCTL); 3816 ew32(TCTL, tctl | E1000_TCTL_EN); 3817 tdt = er32(TDT(0)); 3818 BUG_ON(tdt != tx_ring->next_to_use); 3819 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); 3820 tx_desc->buffer_addr = tx_ring->dma; 3821 3822 tx_desc->lower.data = cpu_to_le32(txd_lower | size); 3823 tx_desc->upper.data = 0; 3824 /* flush descriptors to memory before notifying the HW */ 3825 wmb(); 3826 tx_ring->next_to_use++; 3827 if (tx_ring->next_to_use == tx_ring->count) 3828 tx_ring->next_to_use = 0; 3829 ew32(TDT(0), tx_ring->next_to_use); 3830 mmiowb(); 3831 usleep_range(200, 250); 3832 } 3833 3834 /** 3835 * e1000_flush_rx_ring - remove all descriptors from the rx_ring 3836 * 3837 * Mark all descriptors in the RX ring as consumed and disable the rx ring 3838 */ 3839 static void e1000_flush_rx_ring(struct e1000_adapter *adapter) 3840 { 3841 u32 rctl, rxdctl; 3842 struct e1000_hw *hw = &adapter->hw; 3843 3844 rctl = er32(RCTL); 3845 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3846 e1e_flush(); 3847 usleep_range(100, 150); 3848 3849 rxdctl = er32(RXDCTL(0)); 3850 /* zero the lower 14 bits (prefetch and host thresholds) */ 3851 rxdctl &= 0xffffc000; 3852 3853 /* update thresholds: prefetch threshold to 31, host threshold to 1 3854 * and make sure the granularity is "descriptors" and not "cache lines" 3855 */ 3856 rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC); 3857 3858 ew32(RXDCTL(0), rxdctl); 3859 /* momentarily enable the RX ring for the changes to take effect */ 3860 ew32(RCTL, rctl | E1000_RCTL_EN); 3861 e1e_flush(); 3862 usleep_range(100, 150); 3863 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3864 } 3865 3866 /** 3867 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings 3868 * 3869 * In i219, the descriptor rings must be emptied before resetting the HW 3870 * or before changing the device state to D3 during runtime (runtime PM).
3871 * 3872 * Failure to do this will cause the HW to enter a unit hang state which can 3873 * only be released by PCI reset on the device 3874 * 3875 */ 3876 3877 static void e1000_flush_desc_rings(struct e1000_adapter *adapter) 3878 { 3879 u16 hang_state; 3880 u32 fext_nvm11, tdlen; 3881 struct e1000_hw *hw = &adapter->hw; 3882 3883 /* First, disable MULR fix in FEXTNVM11 */ 3884 fext_nvm11 = er32(FEXTNVM11); 3885 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; 3886 ew32(FEXTNVM11, fext_nvm11); 3887 /* do nothing if we're not in faulty state, or if the queue is empty */ 3888 tdlen = er32(TDLEN(0)); 3889 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, 3890 &hang_state); 3891 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) 3892 return; 3893 e1000_flush_tx_ring(adapter); 3894 /* recheck, maybe the fault is caused by the rx ring */ 3895 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, 3896 &hang_state); 3897 if (hang_state & FLUSH_DESC_REQUIRED) 3898 e1000_flush_rx_ring(adapter); 3899 } 3900 3901 /** 3902 * e1000e_systim_reset - reset the timesync registers after a hardware reset 3903 * @adapter: board private structure 3904 * 3905 * When the MAC is reset, all hardware bits for timesync will be reset to the 3906 * default values. This function will restore the settings last in place. 3907 * Since the clock SYSTIME registers are reset, we will simply restore the 3908 * cyclecounter to the kernel real clock time. 3909 **/ 3910 static void e1000e_systim_reset(struct e1000_adapter *adapter) 3911 { 3912 struct ptp_clock_info *info = &adapter->ptp_clock_info; 3913 struct e1000_hw *hw = &adapter->hw; 3914 unsigned long flags; 3915 u32 timinca; 3916 s32 ret_val; 3917 3918 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) 3919 return; 3920 3921 if (info->adjfreq) { 3922 /* restore the previous ptp frequency delta */ 3923 ret_val = info->adjfreq(info, adapter->ptp_delta); 3924 } else { 3925 /* set the default base frequency if no adjustment possible */ 3926 ret_val = e1000e_get_base_timinca(adapter, &timinca); 3927 if (!ret_val) 3928 ew32(TIMINCA, timinca); 3929 } 3930 3931 if (ret_val) { 3932 dev_warn(&adapter->pdev->dev, 3933 "Failed to restore TIMINCA clock rate delta: %d\n", 3934 ret_val); 3935 return; 3936 } 3937 3938 /* reset the systim ns time counter */ 3939 spin_lock_irqsave(&adapter->systim_lock, flags); 3940 timecounter_init(&adapter->tc, &adapter->cc, 3941 ktime_to_ns(ktime_get_real())); 3942 spin_unlock_irqrestore(&adapter->systim_lock, flags); 3943 3944 /* restore the previous hwtstamp configuration settings */ 3945 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); 3946 } 3947 3948 /** 3949 * e1000e_reset - bring the hardware into a known good state 3950 * 3951 * This function boots the hardware and enables some settings that 3952 * require a configuration cycle of the hardware - those cannot be 3953 * set/changed during runtime. After reset the device needs to be 3954 * properly configured for Rx, Tx etc. 
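 *
 * A purely illustrative sizing example for the packet-buffer math done
 * here (the numbers are assumptions, not values for any specific part):
 * with a 9022-byte max frame and 16-byte legacy Tx descriptors,
 * min_tx_space = ALIGN((9022 + 16 - 4) * 2, 1024) >> 10 = 18 (KB) and
 * min_rx_space = ALIGN(9022, 1024) >> 10 = 9 (KB); with, say, a 20 KB Rx
 * allocation the default high water mark would be
 * min(20480 * 9 / 10, 20480 - 9022) = 11458 bytes, i.e. the "leave one
 * full frame of headroom" rule wins over the 90% rule for jumbo frames.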
3955 */ 3956 void e1000e_reset(struct e1000_adapter *adapter) 3957 { 3958 struct e1000_mac_info *mac = &adapter->hw.mac; 3959 struct e1000_fc_info *fc = &adapter->hw.fc; 3960 struct e1000_hw *hw = &adapter->hw; 3961 u32 tx_space, min_tx_space, min_rx_space; 3962 u32 pba = adapter->pba; 3963 u16 hwm; 3964 3965 /* reset Packet Buffer Allocation to default */ 3966 ew32(PBA, pba); 3967 3968 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { 3969 /* To maintain wire speed transmits, the Tx FIFO should be 3970 * large enough to accommodate two full transmit packets, 3971 * rounded up to the next 1KB and expressed in KB. Likewise, 3972 * the Rx FIFO should be large enough to accommodate at least 3973 * one full receive packet and is similarly rounded up and 3974 * expressed in KB. 3975 */ 3976 pba = er32(PBA); 3977 /* upper 16 bits has Tx packet buffer allocation size in KB */ 3978 tx_space = pba >> 16; 3979 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3980 pba &= 0xffff; 3981 /* the Tx fifo also stores 16 bytes of information about the Tx 3982 * but don't include ethernet FCS because hardware appends it 3983 */ 3984 min_tx_space = (adapter->max_frame_size + 3985 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; 3986 min_tx_space = ALIGN(min_tx_space, 1024); 3987 min_tx_space >>= 10; 3988 /* software strips receive CRC, so leave room for it */ 3989 min_rx_space = adapter->max_frame_size; 3990 min_rx_space = ALIGN(min_rx_space, 1024); 3991 min_rx_space >>= 10; 3992 3993 /* If current Tx allocation is less than the min Tx FIFO size, 3994 * and the min Tx FIFO size is less than the current Rx FIFO 3995 * allocation, take space away from current Rx allocation 3996 */ 3997 if ((tx_space < min_tx_space) && 3998 ((min_tx_space - tx_space) < pba)) { 3999 pba -= min_tx_space - tx_space; 4000 4001 /* if short on Rx space, Rx wins and must trump Tx 4002 * adjustment 4003 */ 4004 if (pba < min_rx_space) 4005 pba = min_rx_space; 4006 } 4007 4008 ew32(PBA, pba); 4009 } 4010 4011 /* flow control settings 4012 * 4013 * The high water mark must be low enough to fit one full frame 4014 * (or the size used for early receive) above it in the Rx FIFO. 4015 * Set it to the lower of: 4016 * - 90% of the Rx FIFO size, and 4017 * - the full Rx FIFO size minus one full frame 4018 */ 4019 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 4020 fc->pause_time = 0xFFFF; 4021 else 4022 fc->pause_time = E1000_FC_PAUSE_TIME; 4023 fc->send_xon = true; 4024 fc->current_mode = fc->requested_mode; 4025 4026 switch (hw->mac.type) { 4027 case e1000_ich9lan: 4028 case e1000_ich10lan: 4029 if (adapter->netdev->mtu > ETH_DATA_LEN) { 4030 pba = 14; 4031 ew32(PBA, pba); 4032 fc->high_water = 0x2800; 4033 fc->low_water = fc->high_water - 8; 4034 break; 4035 } 4036 /* fall-through */ 4037 default: 4038 hwm = min(((pba << 10) * 9 / 10), 4039 ((pba << 10) - adapter->max_frame_size)); 4040 4041 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 4042 fc->low_water = fc->high_water - 8; 4043 break; 4044 case e1000_pchlan: 4045 /* Workaround PCH LOM adapter hangs with certain network 4046 * loads. If hangs persist, try disabling Tx flow control. 
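 *
 * (Disabling Tx flow control, should it come to that, can be done from
 *  userspace with the standard ethtool pause interface, e.g.
 *  "ethtool -A eth0 tx off"; the interface name is only an example.)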
4047 */ 4048 if (adapter->netdev->mtu > ETH_DATA_LEN) { 4049 fc->high_water = 0x3500; 4050 fc->low_water = 0x1500; 4051 } else { 4052 fc->high_water = 0x5000; 4053 fc->low_water = 0x3000; 4054 } 4055 fc->refresh_time = 0x1000; 4056 break; 4057 case e1000_pch2lan: 4058 case e1000_pch_lpt: 4059 case e1000_pch_spt: 4060 case e1000_pch_cnp: 4061 fc->refresh_time = 0x0400; 4062 4063 if (adapter->netdev->mtu <= ETH_DATA_LEN) { 4064 fc->high_water = 0x05C20; 4065 fc->low_water = 0x05048; 4066 fc->pause_time = 0x0650; 4067 break; 4068 } 4069 4070 pba = 14; 4071 ew32(PBA, pba); 4072 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; 4073 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; 4074 break; 4075 } 4076 4077 /* Alignment of Tx data is on an arbitrary byte boundary with the 4078 * maximum size per Tx descriptor limited only to the transmit 4079 * allocation of the packet buffer minus 96 bytes with an upper 4080 * limit of 24KB due to receive synchronization limitations. 4081 */ 4082 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, 4083 24 << 10); 4084 4085 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot 4086 * fit in receive buffer. 4087 */ 4088 if (adapter->itr_setting & 0x3) { 4089 if ((adapter->max_frame_size * 2) > (pba << 10)) { 4090 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 4091 dev_info(&adapter->pdev->dev, 4092 "Interrupt Throttle Rate off\n"); 4093 adapter->flags2 |= FLAG2_DISABLE_AIM; 4094 e1000e_write_itr(adapter, 0); 4095 } 4096 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 4097 dev_info(&adapter->pdev->dev, 4098 "Interrupt Throttle Rate on\n"); 4099 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 4100 adapter->itr = 20000; 4101 e1000e_write_itr(adapter, adapter->itr); 4102 } 4103 } 4104 4105 if (hw->mac.type >= e1000_pch_spt) 4106 e1000_flush_desc_rings(adapter); 4107 /* Allow time for pending master requests to run */ 4108 mac->ops.reset_hw(hw); 4109 4110 /* For parts with AMT enabled, let the firmware know 4111 * that the network interface is in control 4112 */ 4113 if (adapter->flags & FLAG_HAS_AMT) 4114 e1000e_get_hw_control(adapter); 4115 4116 ew32(WUC, 0); 4117 4118 if (mac->ops.init_hw(hw)) 4119 e_err("Hardware Error\n"); 4120 4121 e1000_update_mng_vlan(adapter); 4122 4123 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 4124 ew32(VET, ETH_P_8021Q); 4125 4126 e1000e_reset_adaptive(hw); 4127 4128 /* restore systim and hwtstamp settings */ 4129 e1000e_systim_reset(adapter); 4130 4131 /* Set EEE advertisement as appropriate */ 4132 if (adapter->flags2 & FLAG2_HAS_EEE) { 4133 s32 ret_val; 4134 u16 adv_addr; 4135 4136 switch (hw->phy.type) { 4137 case e1000_phy_82579: 4138 adv_addr = I82579_EEE_ADVERTISEMENT; 4139 break; 4140 case e1000_phy_i217: 4141 adv_addr = I217_EEE_ADVERTISEMENT; 4142 break; 4143 default: 4144 dev_err(&adapter->pdev->dev, 4145 "Invalid PHY type setting EEE advertisement\n"); 4146 return; 4147 } 4148 4149 ret_val = hw->phy.ops.acquire(hw); 4150 if (ret_val) { 4151 dev_err(&adapter->pdev->dev, 4152 "EEE advertisement - unable to acquire PHY\n"); 4153 return; 4154 } 4155 4156 e1000_write_emi_reg_locked(hw, adv_addr, 4157 hw->dev_spec.ich8lan.eee_disable ? 
4158 0 : adapter->eee_advert); 4159 4160 hw->phy.ops.release(hw); 4161 } 4162 4163 if (!netif_running(adapter->netdev) && 4164 !test_bit(__E1000_TESTING, &adapter->state)) 4165 e1000_power_down_phy(adapter); 4166 4167 e1000_get_phy_info(hw); 4168 4169 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 4170 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { 4171 u16 phy_data = 0; 4172 /* speed up time to link by disabling smart power down, ignore 4173 * the return value of this function because there is nothing 4174 * different we would do if it failed 4175 */ 4176 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 4177 phy_data &= ~IGP02E1000_PM_SPD; 4178 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 4179 } 4180 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { 4181 u32 reg; 4182 4183 /* Fextnvm7 @ 0xe4[2] = 1 */ 4184 reg = er32(FEXTNVM7); 4185 reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE; 4186 ew32(FEXTNVM7, reg); 4187 /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ 4188 reg = er32(FEXTNVM9); 4189 reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS | 4190 E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS; 4191 ew32(FEXTNVM9, reg); 4192 } 4193 4194 } 4195 4196 /** 4197 * e1000e_trigger_lsc - trigger an LSC interrupt 4198 * @adapter: 4199 * 4200 * Fire a link status change interrupt to start the watchdog. 4201 **/ 4202 static void e1000e_trigger_lsc(struct e1000_adapter *adapter) 4203 { 4204 struct e1000_hw *hw = &adapter->hw; 4205 4206 if (adapter->msix_entries) 4207 ew32(ICS, E1000_ICS_OTHER); 4208 else 4209 ew32(ICS, E1000_ICS_LSC); 4210 } 4211 4212 void e1000e_up(struct e1000_adapter *adapter) 4213 { 4214 /* hardware has been reset, we need to reload some things */ 4215 e1000_configure(adapter); 4216 4217 clear_bit(__E1000_DOWN, &adapter->state); 4218 4219 if (adapter->msix_entries) 4220 e1000_configure_msix(adapter); 4221 e1000_irq_enable(adapter); 4222 4223 netif_start_queue(adapter->netdev); 4224 4225 e1000e_trigger_lsc(adapter); 4226 } 4227 4228 static void e1000e_flush_descriptors(struct e1000_adapter *adapter) 4229 { 4230 struct e1000_hw *hw = &adapter->hw; 4231 4232 if (!(adapter->flags2 & FLAG2_DMA_BURST)) 4233 return; 4234 4235 /* flush pending descriptor writebacks to memory */ 4236 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 4237 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 4238 4239 /* execute the writes immediately */ 4240 e1e_flush(); 4241 4242 /* due to rare timing issues, write to TIDV/RDTR again to ensure the 4243 * write is successful 4244 */ 4245 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 4246 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 4247 4248 /* execute the writes immediately */ 4249 e1e_flush(); 4250 } 4251 4252 static void e1000e_update_stats(struct e1000_adapter *adapter); 4253 4254 /** 4255 * e1000e_down - quiesce the device and optionally reset the hardware 4256 * @adapter: board private structure 4257 * @reset: boolean flag to reset the hardware or not 4258 */ 4259 void e1000e_down(struct e1000_adapter *adapter, bool reset) 4260 { 4261 struct net_device *netdev = adapter->netdev; 4262 struct e1000_hw *hw = &adapter->hw; 4263 u32 tctl, rctl; 4264 4265 /* signal that we're down so the interrupt handler does not 4266 * reschedule our watchdog timer 4267 */ 4268 set_bit(__E1000_DOWN, &adapter->state); 4269 4270 netif_carrier_off(netdev); 4271 4272 /* disable receives in the hardware */ 4273 rctl = er32(RCTL); 4274 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 4275 ew32(RCTL, rctl & ~E1000_RCTL_EN); 4276 /* flush and sleep below */ 4277 4278 
netif_stop_queue(netdev); 4279 4280 /* disable transmits in the hardware */ 4281 tctl = er32(TCTL); 4282 tctl &= ~E1000_TCTL_EN; 4283 ew32(TCTL, tctl); 4284 4285 /* flush both disables and wait for them to finish */ 4286 e1e_flush(); 4287 usleep_range(10000, 20000); 4288 4289 e1000_irq_disable(adapter); 4290 4291 napi_synchronize(&adapter->napi); 4292 4293 del_timer_sync(&adapter->watchdog_timer); 4294 del_timer_sync(&adapter->phy_info_timer); 4295 4296 spin_lock(&adapter->stats64_lock); 4297 e1000e_update_stats(adapter); 4298 spin_unlock(&adapter->stats64_lock); 4299 4300 e1000e_flush_descriptors(adapter); 4301 4302 adapter->link_speed = 0; 4303 adapter->link_duplex = 0; 4304 4305 /* Disable Si errata workaround on PCHx for jumbo frame flow */ 4306 if ((hw->mac.type >= e1000_pch2lan) && 4307 (adapter->netdev->mtu > ETH_DATA_LEN) && 4308 e1000_lv_jumbo_workaround_ich8lan(hw, false)) 4309 e_dbg("failed to disable jumbo frame workaround mode\n"); 4310 4311 if (!pci_channel_offline(adapter->pdev)) { 4312 if (reset) 4313 e1000e_reset(adapter); 4314 else if (hw->mac.type >= e1000_pch_spt) 4315 e1000_flush_desc_rings(adapter); 4316 } 4317 e1000_clean_tx_ring(adapter->tx_ring); 4318 e1000_clean_rx_ring(adapter->rx_ring); 4319 } 4320 4321 void e1000e_reinit_locked(struct e1000_adapter *adapter) 4322 { 4323 might_sleep(); 4324 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4325 usleep_range(1000, 2000); 4326 e1000e_down(adapter, true); 4327 e1000e_up(adapter); 4328 clear_bit(__E1000_RESETTING, &adapter->state); 4329 } 4330 4331 /** 4332 * e1000e_sanitize_systim - sanitize raw cycle counter reads 4333 * @hw: pointer to the HW structure 4334 * @systim: time value read, sanitized and returned 4335 * 4336 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: 4337 * check to see that the time is incrementing at a reasonable 4338 * rate and is a multiple of incvalue. 4339 **/ 4340 static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) 4341 { 4342 u64 time_delta, rem, temp; 4343 u64 systim_next; 4344 u32 incvalue; 4345 int i; 4346 4347 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; 4348 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { 4349 /* latch SYSTIMH on read of SYSTIML */ 4350 systim_next = (u64)er32(SYSTIML); 4351 systim_next |= (u64)er32(SYSTIMH) << 32; 4352 4353 time_delta = systim_next - systim; 4354 temp = time_delta; 4355 /* VMWare users have seen incvalue of zero, don't div / 0 */ 4356 rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); 4357 4358 systim = systim_next; 4359 4360 if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) 4361 break; 4362 } 4363 4364 return systim; 4365 } 4366 4367 /** 4368 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) 4369 * @cc: cyclecounter structure 4370 **/ 4371 static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) 4372 { 4373 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4374 cc); 4375 struct e1000_hw *hw = &adapter->hw; 4376 u32 systimel, systimeh; 4377 u64 systim; 4378 /* SYSTIMH latching upon SYSTIML read does not work well. 4379 * This means that if SYSTIML overflows after we read it but before 4380 * we read SYSTIMH, the value of SYSTIMH has been incremented and we 4381 * will experience a huge non linear increment in the systime value 4382 * to fix that we test for overflow and if true, we re-read systime. 
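 *
 * Illustration with made-up values: if SYSTIML is read as 0xffffff00
 * and then wraps before SYSTIMH is read, SYSTIMH already reflects the
 * wrap, so combining the stale low word with the new high word jumps
 * the timestamp forward by nearly 2^32 cycles; the re-read below
 * avoids publishing such a value.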
4383 */ 4384 systimel = er32(SYSTIML); 4385 systimeh = er32(SYSTIMH); 4386 /* Is systimel so large that overflow is possible? */ 4387 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { 4388 u32 systimel_2 = er32(SYSTIML); 4389 if (systimel > systimel_2) { 4390 /* There was an overflow, read again SYSTIMH, and use 4391 * systimel_2 4392 */ 4393 systimeh = er32(SYSTIMH); 4394 systimel = systimel_2; 4395 } 4396 } 4397 systim = (u64)systimel; 4398 systim |= (u64)systimeh << 32; 4399 4400 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) 4401 systim = e1000e_sanitize_systim(hw, systim); 4402 4403 return systim; 4404 } 4405 4406 /** 4407 * e1000_sw_init - Initialize general software structures (struct e1000_adapter) 4408 * @adapter: board private structure to initialize 4409 * 4410 * e1000_sw_init initializes the Adapter private data structure. 4411 * Fields are initialized based on PCI device information and 4412 * OS network device settings (MTU size). 4413 **/ 4414 static int e1000_sw_init(struct e1000_adapter *adapter) 4415 { 4416 struct net_device *netdev = adapter->netdev; 4417 4418 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; 4419 adapter->rx_ps_bsize0 = 128; 4420 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 4421 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4422 adapter->tx_ring_count = E1000_DEFAULT_TXD; 4423 adapter->rx_ring_count = E1000_DEFAULT_RXD; 4424 4425 spin_lock_init(&adapter->stats64_lock); 4426 4427 e1000e_set_interrupt_capability(adapter); 4428 4429 if (e1000_alloc_queues(adapter)) 4430 return -ENOMEM; 4431 4432 /* Setup hardware time stamping cyclecounter */ 4433 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { 4434 adapter->cc.read = e1000e_cyclecounter_read; 4435 adapter->cc.mask = CYCLECOUNTER_MASK(64); 4436 adapter->cc.mult = 1; 4437 /* cc.shift set in e1000e_get_base_timinca() */ 4438 4439 spin_lock_init(&adapter->systim_lock); 4440 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); 4441 } 4442 4443 /* Explicitly disable IRQ since the NIC can be in any state. */ 4444 e1000_irq_disable(adapter); 4445 4446 set_bit(__E1000_DOWN, &adapter->state); 4447 return 0; 4448 } 4449 4450 /** 4451 * e1000_intr_msi_test - Interrupt Handler 4452 * @irq: interrupt number 4453 * @data: pointer to a network interface device structure 4454 **/ 4455 static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) 4456 { 4457 struct net_device *netdev = data; 4458 struct e1000_adapter *adapter = netdev_priv(netdev); 4459 struct e1000_hw *hw = &adapter->hw; 4460 u32 icr = er32(ICR); 4461 4462 e_dbg("icr is %08X\n", icr); 4463 if (icr & E1000_ICR_RXSEQ) { 4464 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 4465 /* Force memory writes to complete before acknowledging the 4466 * interrupt is handled.
4467 */ 4468 wmb(); 4469 } 4470 4471 return IRQ_HANDLED; 4472 } 4473 4474 /** 4475 * e1000_test_msi_interrupt - Returns 0 for successful test 4476 * @adapter: board private struct 4477 * 4478 * code flow taken from tg3.c 4479 **/ 4480 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) 4481 { 4482 struct net_device *netdev = adapter->netdev; 4483 struct e1000_hw *hw = &adapter->hw; 4484 int err; 4485 4486 /* poll_enable hasn't been called yet, so don't need disable */ 4487 /* clear any pending events */ 4488 er32(ICR); 4489 4490 /* free the real vector and request a test handler */ 4491 e1000_free_irq(adapter); 4492 e1000e_reset_interrupt_capability(adapter); 4493 4494 /* Assume that the test fails, if it succeeds then the test 4495 * MSI irq handler will unset this flag 4496 */ 4497 adapter->flags |= FLAG_MSI_TEST_FAILED; 4498 4499 err = pci_enable_msi(adapter->pdev); 4500 if (err) 4501 goto msi_test_failed; 4502 4503 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, 4504 netdev->name, netdev); 4505 if (err) { 4506 pci_disable_msi(adapter->pdev); 4507 goto msi_test_failed; 4508 } 4509 4510 /* Force memory writes to complete before enabling and firing an 4511 * interrupt. 4512 */ 4513 wmb(); 4514 4515 e1000_irq_enable(adapter); 4516 4517 /* fire an unusual interrupt on the test handler */ 4518 ew32(ICS, E1000_ICS_RXSEQ); 4519 e1e_flush(); 4520 msleep(100); 4521 4522 e1000_irq_disable(adapter); 4523 4524 rmb(); /* read flags after interrupt has been fired */ 4525 4526 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 4527 adapter->int_mode = E1000E_INT_MODE_LEGACY; 4528 e_info("MSI interrupt test failed, using legacy interrupt.\n"); 4529 } else { 4530 e_dbg("MSI interrupt test succeeded!\n"); 4531 } 4532 4533 free_irq(adapter->pdev->irq, netdev); 4534 pci_disable_msi(adapter->pdev); 4535 4536 msi_test_failed: 4537 e1000e_set_interrupt_capability(adapter); 4538 return e1000_request_irq(adapter); 4539 } 4540 4541 /** 4542 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored 4543 * @adapter: board private struct 4544 * 4545 * code flow taken from tg3.c, called with e1000 interrupts disabled. 4546 **/ 4547 static int e1000_test_msi(struct e1000_adapter *adapter) 4548 { 4549 int err; 4550 u16 pci_cmd; 4551 4552 if (!(adapter->flags & FLAG_MSI_ENABLED)) 4553 return 0; 4554 4555 /* disable SERR in case the MSI write causes a master abort */ 4556 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 4557 if (pci_cmd & PCI_COMMAND_SERR) 4558 pci_write_config_word(adapter->pdev, PCI_COMMAND, 4559 pci_cmd & ~PCI_COMMAND_SERR); 4560 4561 err = e1000_test_msi_interrupt(adapter); 4562 4563 /* re-enable SERR */ 4564 if (pci_cmd & PCI_COMMAND_SERR) { 4565 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 4566 pci_cmd |= PCI_COMMAND_SERR; 4567 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); 4568 } 4569 4570 return err; 4571 } 4572 4573 /** 4574 * e1000e_open - Called when a network interface is made active 4575 * @netdev: network interface device structure 4576 * 4577 * Returns 0 on success, negative value on failure 4578 * 4579 * The open entry point is called when a network interface is made 4580 * active by the system (IFF_UP). At this point all resources needed 4581 * for transmit and receive operations are allocated, the interrupt 4582 * handler is registered with the OS, the watchdog timer is started, 4583 * and the stack is notified that the interface is ready. 
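 *
 * (For orientation only: the stack calls into this function through the
 *  driver's struct net_device_ops, roughly
 *
 *	.ndo_open	= e1000e_open,
 *	.ndo_stop	= e1000e_close,
 *
 *  which is wired up elsewhere in this driver when the netdev is
 *  registered.)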
4584 **/ 4585 int e1000e_open(struct net_device *netdev) 4586 { 4587 struct e1000_adapter *adapter = netdev_priv(netdev); 4588 struct e1000_hw *hw = &adapter->hw; 4589 struct pci_dev *pdev = adapter->pdev; 4590 int err; 4591 4592 /* disallow open during test */ 4593 if (test_bit(__E1000_TESTING, &adapter->state)) 4594 return -EBUSY; 4595 4596 pm_runtime_get_sync(&pdev->dev); 4597 4598 netif_carrier_off(netdev); 4599 4600 /* allocate transmit descriptors */ 4601 err = e1000e_setup_tx_resources(adapter->tx_ring); 4602 if (err) 4603 goto err_setup_tx; 4604 4605 /* allocate receive descriptors */ 4606 err = e1000e_setup_rx_resources(adapter->rx_ring); 4607 if (err) 4608 goto err_setup_rx; 4609 4610 /* If AMT is enabled, let the firmware know that the network 4611 * interface is now open and reset the part to a known state. 4612 */ 4613 if (adapter->flags & FLAG_HAS_AMT) { 4614 e1000e_get_hw_control(adapter); 4615 e1000e_reset(adapter); 4616 } 4617 4618 e1000e_power_up_phy(adapter); 4619 4620 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4621 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 4622 e1000_update_mng_vlan(adapter); 4623 4624 /* DMA latency requirement to workaround jumbo issue */ 4625 pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 4626 PM_QOS_DEFAULT_VALUE); 4627 4628 /* before we allocate an interrupt, we must be ready to handle it. 4629 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 4630 * as soon as we call pci_request_irq, so we have to setup our 4631 * clean_rx handler before we do so. 4632 */ 4633 e1000_configure(adapter); 4634 4635 err = e1000_request_irq(adapter); 4636 if (err) 4637 goto err_req_irq; 4638 4639 /* Work around PCIe errata with MSI interrupts causing some chipsets to 4640 * ignore e1000e MSI messages, which means we need to test our MSI 4641 * interrupt now 4642 */ 4643 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { 4644 err = e1000_test_msi(adapter); 4645 if (err) { 4646 e_err("Interrupt allocation failed\n"); 4647 goto err_req_irq; 4648 } 4649 } 4650 4651 /* From here on the code is the same as e1000e_up() */ 4652 clear_bit(__E1000_DOWN, &adapter->state); 4653 4654 napi_enable(&adapter->napi); 4655 4656 e1000_irq_enable(adapter); 4657 4658 adapter->tx_hang_recheck = false; 4659 netif_start_queue(netdev); 4660 4661 hw->mac.get_link_status = true; 4662 pm_runtime_put(&pdev->dev); 4663 4664 e1000e_trigger_lsc(adapter); 4665 4666 return 0; 4667 4668 err_req_irq: 4669 pm_qos_remove_request(&adapter->pm_qos_req); 4670 e1000e_release_hw_control(adapter); 4671 e1000_power_down_phy(adapter); 4672 e1000e_free_rx_resources(adapter->rx_ring); 4673 err_setup_rx: 4674 e1000e_free_tx_resources(adapter->tx_ring); 4675 err_setup_tx: 4676 e1000e_reset(adapter); 4677 pm_runtime_put_sync(&pdev->dev); 4678 4679 return err; 4680 } 4681 4682 /** 4683 * e1000e_close - Disables a network interface 4684 * @netdev: network interface device structure 4685 * 4686 * Returns 0, this is not allowed to fail 4687 * 4688 * The close entry point is called when an interface is de-activated 4689 * by the OS. The hardware is still under the drivers control, but 4690 * needs to be disabled. A global MAC reset is issued to stop the 4691 * hardware, and all transmit and receive resources are freed. 
4692 **/ 4693 int e1000e_close(struct net_device *netdev) 4694 { 4695 struct e1000_adapter *adapter = netdev_priv(netdev); 4696 struct pci_dev *pdev = adapter->pdev; 4697 int count = E1000_CHECK_RESET_COUNT; 4698 4699 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 4700 usleep_range(10000, 20000); 4701 4702 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 4703 4704 pm_runtime_get_sync(&pdev->dev); 4705 4706 if (!test_bit(__E1000_DOWN, &adapter->state)) { 4707 e1000e_down(adapter, true); 4708 e1000_free_irq(adapter); 4709 4710 /* Link status message must follow this format */ 4711 pr_info("%s NIC Link is Down\n", adapter->netdev->name); 4712 } 4713 4714 napi_disable(&adapter->napi); 4715 4716 e1000e_free_tx_resources(adapter->tx_ring); 4717 e1000e_free_rx_resources(adapter->rx_ring); 4718 4719 /* kill manageability vlan ID if supported, but not if a vlan with 4720 * the same ID is registered on the host OS (let 8021q kill it) 4721 */ 4722 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 4723 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 4724 adapter->mng_vlan_id); 4725 4726 /* If AMT is enabled, let the firmware know that the network 4727 * interface is now closed 4728 */ 4729 if ((adapter->flags & FLAG_HAS_AMT) && 4730 !test_bit(__E1000_TESTING, &adapter->state)) 4731 e1000e_release_hw_control(adapter); 4732 4733 pm_qos_remove_request(&adapter->pm_qos_req); 4734 4735 pm_runtime_put_sync(&pdev->dev); 4736 4737 return 0; 4738 } 4739 4740 /** 4741 * e1000_set_mac - Change the Ethernet Address of the NIC 4742 * @netdev: network interface device structure 4743 * @p: pointer to an address structure 4744 * 4745 * Returns 0 on success, negative on failure 4746 **/ 4747 static int e1000_set_mac(struct net_device *netdev, void *p) 4748 { 4749 struct e1000_adapter *adapter = netdev_priv(netdev); 4750 struct e1000_hw *hw = &adapter->hw; 4751 struct sockaddr *addr = p; 4752 4753 if (!is_valid_ether_addr(addr->sa_data)) 4754 return -EADDRNOTAVAIL; 4755 4756 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4757 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4758 4759 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4760 4761 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4762 /* activate the work around */ 4763 e1000e_set_laa_state_82571(&adapter->hw, 1); 4764 4765 /* Hold a copy of the LAA in RAR[14] This is done so that 4766 * between the time RAR[0] gets clobbered and the time it 4767 * gets fixed (in e1000_watchdog), the actual LAA is in one 4768 * of the RARs and no incoming packets directed to this port 4769 * are dropped. Eventually the LAA will be in RAR[0] and 4770 * RAR[14] 4771 */ 4772 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 4773 adapter->hw.mac.rar_entry_count - 1); 4774 } 4775 4776 return 0; 4777 } 4778 4779 /** 4780 * e1000e_update_phy_task - work thread to update phy 4781 * @work: pointer to our work struct 4782 * 4783 * this worker thread exists because we must acquire a 4784 * semaphore to read the phy, which we could msleep while 4785 * waiting for it, and we can't msleep in a timer. 
4786 **/ 4787 static void e1000e_update_phy_task(struct work_struct *work) 4788 { 4789 struct e1000_adapter *adapter = container_of(work, 4790 struct e1000_adapter, 4791 update_phy_task); 4792 struct e1000_hw *hw = &adapter->hw; 4793 4794 if (test_bit(__E1000_DOWN, &adapter->state)) 4795 return; 4796 4797 e1000_get_phy_info(hw); 4798 4799 /* Enable EEE on 82579 after link up */ 4800 if (hw->phy.type >= e1000_phy_82579) 4801 e1000_set_eee_pchlan(hw); 4802 } 4803 4804 /** 4805 * e1000_update_phy_info - timer call-back to update PHY info 4806 * @data: pointer to adapter cast into an unsigned long 4807 * 4808 * Need to wait a few seconds after link up to get diagnostic information from 4809 * the phy 4810 **/ 4811 static void e1000_update_phy_info(unsigned long data) 4812 { 4813 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 4814 4815 if (test_bit(__E1000_DOWN, &adapter->state)) 4816 return; 4817 4818 schedule_work(&adapter->update_phy_task); 4819 } 4820 4821 /** 4822 * e1000e_update_phy_stats - Update the PHY statistics counters 4823 * @adapter: board private structure 4824 * 4825 * Read/clear the upper 16-bit PHY registers and read/accumulate lower 4826 **/ 4827 static void e1000e_update_phy_stats(struct e1000_adapter *adapter) 4828 { 4829 struct e1000_hw *hw = &adapter->hw; 4830 s32 ret_val; 4831 u16 phy_data; 4832 4833 ret_val = hw->phy.ops.acquire(hw); 4834 if (ret_val) 4835 return; 4836 4837 /* A page set is expensive so check if already on desired page. 4838 * If not, set to the page with the PHY status registers. 4839 */ 4840 hw->phy.addr = 1; 4841 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 4842 &phy_data); 4843 if (ret_val) 4844 goto release; 4845 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { 4846 ret_val = hw->phy.ops.set_page(hw, 4847 HV_STATS_PAGE << IGP_PAGE_SHIFT); 4848 if (ret_val) 4849 goto release; 4850 } 4851 4852 /* Single Collision Count */ 4853 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); 4854 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); 4855 if (!ret_val) 4856 adapter->stats.scc += phy_data; 4857 4858 /* Excessive Collision Count */ 4859 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); 4860 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); 4861 if (!ret_val) 4862 adapter->stats.ecol += phy_data; 4863 4864 /* Multiple Collision Count */ 4865 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); 4866 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); 4867 if (!ret_val) 4868 adapter->stats.mcc += phy_data; 4869 4870 /* Late Collision Count */ 4871 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); 4872 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); 4873 if (!ret_val) 4874 adapter->stats.latecol += phy_data; 4875 4876 /* Collision Count - also used for adaptive IFS */ 4877 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); 4878 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); 4879 if (!ret_val) 4880 hw->mac.collision_delta = phy_data; 4881 4882 /* Defer Count */ 4883 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); 4884 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); 4885 if (!ret_val) 4886 adapter->stats.dc += phy_data; 4887 4888 /* Transmit with no CRS */ 4889 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); 4890 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); 4891 if (!ret_val) 4892 adapter->stats.tncrs += phy_data; 4893 4894 release: 4895
hw->phy.ops.release(hw); 4896 } 4897 4898 /** 4899 * e1000e_update_stats - Update the board statistics counters 4900 * @adapter: board private structure 4901 **/ 4902 static void e1000e_update_stats(struct e1000_adapter *adapter) 4903 { 4904 struct net_device *netdev = adapter->netdev; 4905 struct e1000_hw *hw = &adapter->hw; 4906 struct pci_dev *pdev = adapter->pdev; 4907 4908 /* Prevent stats update while adapter is being reset, or if the pci 4909 * connection is down. 4910 */ 4911 if (adapter->link_speed == 0) 4912 return; 4913 if (pci_channel_offline(pdev)) 4914 return; 4915 4916 adapter->stats.crcerrs += er32(CRCERRS); 4917 adapter->stats.gprc += er32(GPRC); 4918 adapter->stats.gorc += er32(GORCL); 4919 er32(GORCH); /* Clear gorc */ 4920 adapter->stats.bprc += er32(BPRC); 4921 adapter->stats.mprc += er32(MPRC); 4922 adapter->stats.roc += er32(ROC); 4923 4924 adapter->stats.mpc += er32(MPC); 4925 4926 /* Half-duplex statistics */ 4927 if (adapter->link_duplex == HALF_DUPLEX) { 4928 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { 4929 e1000e_update_phy_stats(adapter); 4930 } else { 4931 adapter->stats.scc += er32(SCC); 4932 adapter->stats.ecol += er32(ECOL); 4933 adapter->stats.mcc += er32(MCC); 4934 adapter->stats.latecol += er32(LATECOL); 4935 adapter->stats.dc += er32(DC); 4936 4937 hw->mac.collision_delta = er32(COLC); 4938 4939 if ((hw->mac.type != e1000_82574) && 4940 (hw->mac.type != e1000_82583)) 4941 adapter->stats.tncrs += er32(TNCRS); 4942 } 4943 adapter->stats.colc += hw->mac.collision_delta; 4944 } 4945 4946 adapter->stats.xonrxc += er32(XONRXC); 4947 adapter->stats.xontxc += er32(XONTXC); 4948 adapter->stats.xoffrxc += er32(XOFFRXC); 4949 adapter->stats.xofftxc += er32(XOFFTXC); 4950 adapter->stats.gptc += er32(GPTC); 4951 adapter->stats.gotc += er32(GOTCL); 4952 er32(GOTCH); /* Clear gotc */ 4953 adapter->stats.rnbc += er32(RNBC); 4954 adapter->stats.ruc += er32(RUC); 4955 4956 adapter->stats.mptc += er32(MPTC); 4957 adapter->stats.bptc += er32(BPTC); 4958 4959 /* used for adaptive IFS */ 4960 4961 hw->mac.tx_packet_delta = er32(TPT); 4962 adapter->stats.tpt += hw->mac.tx_packet_delta; 4963 4964 adapter->stats.algnerrc += er32(ALGNERRC); 4965 adapter->stats.rxerrc += er32(RXERRC); 4966 adapter->stats.cexterr += er32(CEXTERR); 4967 adapter->stats.tsctc += er32(TSCTC); 4968 adapter->stats.tsctfc += er32(TSCTFC); 4969 4970 /* Fill out the OS statistics structure */ 4971 netdev->stats.multicast = adapter->stats.mprc; 4972 netdev->stats.collisions = adapter->stats.colc; 4973 4974 /* Rx Errors */ 4975 4976 /* RLEC on some newer hardware can be incorrect so build 4977 * our own version based on RUC and ROC 4978 */ 4979 netdev->stats.rx_errors = adapter->stats.rxerrc + 4980 adapter->stats.crcerrs + adapter->stats.algnerrc + 4981 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; 4982 netdev->stats.rx_length_errors = adapter->stats.ruc + 4983 adapter->stats.roc; 4984 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4985 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4986 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4987 4988 /* Tx Errors */ 4989 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; 4990 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 4991 netdev->stats.tx_window_errors = adapter->stats.latecol; 4992 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 4993 4994 /* Tx Dropped needs to be maintained elsewhere */ 4995 4996 /* Management Stats */ 4997 adapter->stats.mgptc += er32(MGTPTC); 4998 
adapter->stats.mgprc += er32(MGTPRC); 4999 adapter->stats.mgpdc += er32(MGTPDC); 5000 5001 /* Correctable ECC Errors */ 5002 if (hw->mac.type >= e1000_pch_lpt) { 5003 u32 pbeccsts = er32(PBECCSTS); 5004 5005 adapter->corr_errors += 5006 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 5007 adapter->uncorr_errors += 5008 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 5009 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 5010 } 5011 } 5012 5013 /** 5014 * e1000_phy_read_status - Update the PHY register status snapshot 5015 * @adapter: board private structure 5016 **/ 5017 static void e1000_phy_read_status(struct e1000_adapter *adapter) 5018 { 5019 struct e1000_hw *hw = &adapter->hw; 5020 struct e1000_phy_regs *phy = &adapter->phy_regs; 5021 5022 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && 5023 (er32(STATUS) & E1000_STATUS_LU) && 5024 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 5025 int ret_val; 5026 5027 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); 5028 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); 5029 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); 5030 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); 5031 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); 5032 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); 5033 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); 5034 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); 5035 if (ret_val) 5036 e_warn("Error reading PHY register\n"); 5037 } else { 5038 /* Do not read PHY registers if link is not up 5039 * Set values to typical power-on defaults 5040 */ 5041 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); 5042 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | 5043 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | 5044 BMSR_ERCAP); 5045 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | 5046 ADVERTISE_ALL | ADVERTISE_CSMA); 5047 phy->lpa = 0; 5048 phy->expansion = EXPANSION_ENABLENPAGE; 5049 phy->ctrl1000 = ADVERTISE_1000FULL; 5050 phy->stat1000 = 0; 5051 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); 5052 } 5053 } 5054 5055 static void e1000_print_link_info(struct e1000_adapter *adapter) 5056 { 5057 struct e1000_hw *hw = &adapter->hw; 5058 u32 ctrl = er32(CTRL); 5059 5060 /* Link status message must follow this format for user tools */ 5061 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5062 adapter->netdev->name, adapter->link_speed, 5063 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", 5064 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : 5065 (ctrl & E1000_CTRL_RFCE) ? "Rx" : 5066 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); 5067 } 5068 5069 static bool e1000e_has_link(struct e1000_adapter *adapter) 5070 { 5071 struct e1000_hw *hw = &adapter->hw; 5072 bool link_active = false; 5073 s32 ret_val = 0; 5074 5075 /* get_link_status is set on LSC (link status) interrupt or 5076 * Rx sequence error interrupt. 
get_link_status will stay 5077 * false until the check_for_link establishes link 5078 * for copper adapters ONLY 5079 */ 5080 switch (hw->phy.media_type) { 5081 case e1000_media_type_copper: 5082 if (hw->mac.get_link_status) { 5083 ret_val = hw->mac.ops.check_for_link(hw); 5084 link_active = !hw->mac.get_link_status; 5085 } else { 5086 link_active = true; 5087 } 5088 break; 5089 case e1000_media_type_fiber: 5090 ret_val = hw->mac.ops.check_for_link(hw); 5091 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 5092 break; 5093 case e1000_media_type_internal_serdes: 5094 ret_val = hw->mac.ops.check_for_link(hw); 5095 link_active = adapter->hw.mac.serdes_has_link; 5096 break; 5097 default: 5098 case e1000_media_type_unknown: 5099 break; 5100 } 5101 5102 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 5103 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 5104 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 5105 e_info("Gigabit has been disabled, downgrading speed\n"); 5106 } 5107 5108 return link_active; 5109 } 5110 5111 static void e1000e_enable_receives(struct e1000_adapter *adapter) 5112 { 5113 /* make sure the receive unit is started */ 5114 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && 5115 (adapter->flags & FLAG_RESTART_NOW)) { 5116 struct e1000_hw *hw = &adapter->hw; 5117 u32 rctl = er32(RCTL); 5118 5119 ew32(RCTL, rctl | E1000_RCTL_EN); 5120 adapter->flags &= ~FLAG_RESTART_NOW; 5121 } 5122 } 5123 5124 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) 5125 { 5126 struct e1000_hw *hw = &adapter->hw; 5127 5128 /* With 82574 controllers, PHY needs to be checked periodically 5129 * for hung state and reset, if two calls return true 5130 */ 5131 if (e1000_check_phy_82574(hw)) 5132 adapter->phy_hang_count++; 5133 else 5134 adapter->phy_hang_count = 0; 5135 5136 if (adapter->phy_hang_count > 1) { 5137 adapter->phy_hang_count = 0; 5138 e_dbg("PHY appears hung - resetting\n"); 5139 schedule_work(&adapter->reset_task); 5140 } 5141 } 5142 5143 /** 5144 * e1000_watchdog - Timer Call-back 5145 * @data: pointer to adapter cast into an unsigned long 5146 **/ 5147 static void e1000_watchdog(unsigned long data) 5148 { 5149 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 5150 5151 /* Do the rest outside of interrupt context */ 5152 schedule_work(&adapter->watchdog_task); 5153 5154 /* TODO: make this use queue_delayed_work() */ 5155 } 5156 5157 static void e1000_watchdog_task(struct work_struct *work) 5158 { 5159 struct e1000_adapter *adapter = container_of(work, 5160 struct e1000_adapter, 5161 watchdog_task); 5162 struct net_device *netdev = adapter->netdev; 5163 struct e1000_mac_info *mac = &adapter->hw.mac; 5164 struct e1000_phy_info *phy = &adapter->hw.phy; 5165 struct e1000_ring *tx_ring = adapter->tx_ring; 5166 struct e1000_hw *hw = &adapter->hw; 5167 u32 link, tctl; 5168 5169 if (test_bit(__E1000_DOWN, &adapter->state)) 5170 return; 5171 5172 link = e1000e_has_link(adapter); 5173 if ((netif_carrier_ok(netdev)) && link) { 5174 /* Cancel scheduled suspend requests. */ 5175 pm_runtime_resume(netdev->dev.parent); 5176 5177 e1000e_enable_receives(adapter); 5178 goto link_up; 5179 } 5180 5181 if ((e1000e_enable_tx_pkt_filtering(hw)) && 5182 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) 5183 e1000_update_mng_vlan(adapter); 5184 5185 if (link) { 5186 if (!netif_carrier_ok(netdev)) { 5187 bool txb2b = true; 5188 5189 /* Cancel scheduled suspend requests. 
*/ 5190 pm_runtime_resume(netdev->dev.parent); 5191 5192 /* update snapshot of PHY registers on LSC */ 5193 e1000_phy_read_status(adapter); 5194 mac->ops.get_link_up_info(&adapter->hw, 5195 &adapter->link_speed, 5196 &adapter->link_duplex); 5197 e1000_print_link_info(adapter); 5198 5199 /* check if SmartSpeed worked */ 5200 e1000e_check_downshift(hw); 5201 if (phy->speed_downgraded) 5202 netdev_warn(netdev, 5203 "Link Speed was downgraded by SmartSpeed\n"); 5204 5205 /* On supported PHYs, check for duplex mismatch only 5206 * if link has autonegotiated at 10/100 half 5207 */ 5208 if ((hw->phy.type == e1000_phy_igp_3 || 5209 hw->phy.type == e1000_phy_bm) && 5210 hw->mac.autoneg && 5211 (adapter->link_speed == SPEED_10 || 5212 adapter->link_speed == SPEED_100) && 5213 (adapter->link_duplex == HALF_DUPLEX)) { 5214 u16 autoneg_exp; 5215 5216 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); 5217 5218 if (!(autoneg_exp & EXPANSION_NWAY)) 5219 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); 5220 } 5221 5222 /* adjust timeout factor according to speed/duplex */ 5223 adapter->tx_timeout_factor = 1; 5224 switch (adapter->link_speed) { 5225 case SPEED_10: 5226 txb2b = false; 5227 adapter->tx_timeout_factor = 16; 5228 break; 5229 case SPEED_100: 5230 txb2b = false; 5231 adapter->tx_timeout_factor = 10; 5232 break; 5233 } 5234 5235 /* workaround: re-program speed mode bit after 5236 * link-up event 5237 */ 5238 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 5239 !txb2b) { 5240 u32 tarc0; 5241 5242 tarc0 = er32(TARC(0)); 5243 tarc0 &= ~SPEED_MODE_BIT; 5244 ew32(TARC(0), tarc0); 5245 } 5246 5247 /* disable TSO for pcie and 10/100 speeds, to avoid 5248 * some hardware issues 5249 */ 5250 if (!(adapter->flags & FLAG_TSO_FORCE)) { 5251 switch (adapter->link_speed) { 5252 case SPEED_10: 5253 case SPEED_100: 5254 e_info("10/100 speed: disabling TSO\n"); 5255 netdev->features &= ~NETIF_F_TSO; 5256 netdev->features &= ~NETIF_F_TSO6; 5257 break; 5258 case SPEED_1000: 5259 netdev->features |= NETIF_F_TSO; 5260 netdev->features |= NETIF_F_TSO6; 5261 break; 5262 default: 5263 /* oops */ 5264 break; 5265 } 5266 } 5267 5268 /* enable transmits in the hardware, need to do this 5269 * after setting TARC(0) 5270 */ 5271 tctl = er32(TCTL); 5272 tctl |= E1000_TCTL_EN; 5273 ew32(TCTL, tctl); 5274 5275 /* Perform any post-link-up configuration before 5276 * reporting link up. 5277 */ 5278 if (phy->ops.cfg_on_link_up) 5279 phy->ops.cfg_on_link_up(hw); 5280 5281 netif_carrier_on(netdev); 5282 5283 if (!test_bit(__E1000_DOWN, &adapter->state)) 5284 mod_timer(&adapter->phy_info_timer, 5285 round_jiffies(jiffies + 2 * HZ)); 5286 } 5287 } else { 5288 if (netif_carrier_ok(netdev)) { 5289 adapter->link_speed = 0; 5290 adapter->link_duplex = 0; 5291 /* Link status message must follow this format */ 5292 pr_info("%s NIC Link is Down\n", adapter->netdev->name); 5293 netif_carrier_off(netdev); 5294 if (!test_bit(__E1000_DOWN, &adapter->state)) 5295 mod_timer(&adapter->phy_info_timer, 5296 round_jiffies(jiffies + 2 * HZ)); 5297 5298 /* 8000ES2LAN requires a Rx packet buffer work-around 5299 * on link down event; reset the controller to flush 5300 * the Rx packet buffer. 
5301 */ 5302 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 5303 adapter->flags |= FLAG_RESTART_NOW; 5304 else 5305 pm_schedule_suspend(netdev->dev.parent, 5306 LINK_TIMEOUT); 5307 } 5308 } 5309 5310 link_up: 5311 spin_lock(&adapter->stats64_lock); 5312 e1000e_update_stats(adapter); 5313 5314 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 5315 adapter->tpt_old = adapter->stats.tpt; 5316 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 5317 adapter->colc_old = adapter->stats.colc; 5318 5319 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 5320 adapter->gorc_old = adapter->stats.gorc; 5321 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; 5322 adapter->gotc_old = adapter->stats.gotc; 5323 spin_unlock(&adapter->stats64_lock); 5324 5325 /* If the link is lost the controller stops DMA, but 5326 * if there is queued Tx work it cannot be done. So 5327 * reset the controller to flush the Tx packet buffers. 5328 */ 5329 if (!netif_carrier_ok(netdev) && 5330 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) 5331 adapter->flags |= FLAG_RESTART_NOW; 5332 5333 /* If reset is necessary, do it outside of interrupt context. */ 5334 if (adapter->flags & FLAG_RESTART_NOW) { 5335 schedule_work(&adapter->reset_task); 5336 /* return immediately since reset is imminent */ 5337 return; 5338 } 5339 5340 e1000e_update_adaptive(&adapter->hw); 5341 5342 /* Simple mode for Interrupt Throttle Rate (ITR) */ 5343 if (adapter->itr_setting == 4) { 5344 /* Symmetric Tx/Rx gets a reduced ITR=2000; 5345 * Total asymmetrical Tx or Rx gets ITR=8000; 5346 * everyone else is between 2000-8000. 5347 */ 5348 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 5349 u32 dif = (adapter->gotc > adapter->gorc ? 5350 adapter->gotc - adapter->gorc : 5351 adapter->gorc - adapter->gotc) / 10000; 5352 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 5353 5354 e1000e_write_itr(adapter, itr); 5355 } 5356 5357 /* Cause software interrupt to ensure Rx ring is cleaned */ 5358 if (adapter->msix_entries) 5359 ew32(ICS, adapter->rx_ring->ims_val); 5360 else 5361 ew32(ICS, E1000_ICS_RXDMT0); 5362 5363 /* flush pending descriptors to memory before detecting Tx hang */ 5364 e1000e_flush_descriptors(adapter); 5365 5366 /* Force detection of hung controller every watchdog period */ 5367 adapter->detect_tx_hung = true; 5368 5369 /* With 82571 controllers, LAA may be overwritten due to controller 5370 * reset from the other port. 
Set the appropriate LAA in RAR[0] 5371 */ 5372 if (e1000e_get_laa_state_82571(hw)) 5373 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); 5374 5375 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 5376 e1000e_check_82574_phy_workaround(adapter); 5377 5378 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ 5379 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { 5380 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && 5381 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { 5382 er32(RXSTMPH); 5383 adapter->rx_hwtstamp_cleared++; 5384 } else { 5385 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; 5386 } 5387 } 5388 5389 /* Reset the timer */ 5390 if (!test_bit(__E1000_DOWN, &adapter->state)) 5391 mod_timer(&adapter->watchdog_timer, 5392 round_jiffies(jiffies + 2 * HZ)); 5393 } 5394 5395 #define E1000_TX_FLAGS_CSUM 0x00000001 5396 #define E1000_TX_FLAGS_VLAN 0x00000002 5397 #define E1000_TX_FLAGS_TSO 0x00000004 5398 #define E1000_TX_FLAGS_IPV4 0x00000008 5399 #define E1000_TX_FLAGS_NO_FCS 0x00000010 5400 #define E1000_TX_FLAGS_HWTSTAMP 0x00000020 5401 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 5402 #define E1000_TX_FLAGS_VLAN_SHIFT 16 5403 5404 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, 5405 __be16 protocol) 5406 { 5407 struct e1000_context_desc *context_desc; 5408 struct e1000_buffer *buffer_info; 5409 unsigned int i; 5410 u32 cmd_length = 0; 5411 u16 ipcse = 0, mss; 5412 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5413 int err; 5414 5415 if (!skb_is_gso(skb)) 5416 return 0; 5417 5418 err = skb_cow_head(skb, 0); 5419 if (err < 0) 5420 return err; 5421 5422 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5423 mss = skb_shinfo(skb)->gso_size; 5424 if (protocol == htons(ETH_P_IP)) { 5425 struct iphdr *iph = ip_hdr(skb); 5426 iph->tot_len = 0; 5427 iph->check = 0; 5428 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 5429 0, IPPROTO_TCP, 0); 5430 cmd_length = E1000_TXD_CMD_IP; 5431 ipcse = skb_transport_offset(skb) - 1; 5432 } else if (skb_is_gso_v6(skb)) { 5433 ipv6_hdr(skb)->payload_len = 0; 5434 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5435 &ipv6_hdr(skb)->daddr, 5436 0, IPPROTO_TCP, 0); 5437 ipcse = 0; 5438 } 5439 ipcss = skb_network_offset(skb); 5440 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 5441 tucss = skb_transport_offset(skb); 5442 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 5443 5444 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 5445 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 5446 5447 i = tx_ring->next_to_use; 5448 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5449 buffer_info = &tx_ring->buffer_info[i]; 5450 5451 context_desc->lower_setup.ip_fields.ipcss = ipcss; 5452 context_desc->lower_setup.ip_fields.ipcso = ipcso; 5453 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 5454 context_desc->upper_setup.tcp_fields.tucss = tucss; 5455 context_desc->upper_setup.tcp_fields.tucso = tucso; 5456 context_desc->upper_setup.tcp_fields.tucse = 0; 5457 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 5458 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 5459 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 5460 5461 buffer_info->time_stamp = jiffies; 5462 buffer_info->next_to_watch = i; 5463 5464 i++; 5465 if (i == tx_ring->count) 5466 i = 0; 5467 tx_ring->next_to_use = i; 5468 5469 return 1; 5470 } 5471 5472 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, 5473 __be16 protocol) 5474 { 5475 struct 
e1000_adapter *adapter = tx_ring->adapter; 5476 struct e1000_context_desc *context_desc; 5477 struct e1000_buffer *buffer_info; 5478 unsigned int i; 5479 u8 css; 5480 u32 cmd_len = E1000_TXD_CMD_DEXT; 5481 5482 if (skb->ip_summed != CHECKSUM_PARTIAL) 5483 return false; 5484 5485 switch (protocol) { 5486 case cpu_to_be16(ETH_P_IP): 5487 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 5488 cmd_len |= E1000_TXD_CMD_TCP; 5489 break; 5490 case cpu_to_be16(ETH_P_IPV6): 5491 /* XXX not handling all IPV6 headers */ 5492 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 5493 cmd_len |= E1000_TXD_CMD_TCP; 5494 break; 5495 default: 5496 if (unlikely(net_ratelimit())) 5497 e_warn("checksum_partial proto=%x!\n", 5498 be16_to_cpu(protocol)); 5499 break; 5500 } 5501 5502 css = skb_checksum_start_offset(skb); 5503 5504 i = tx_ring->next_to_use; 5505 buffer_info = &tx_ring->buffer_info[i]; 5506 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5507 5508 context_desc->lower_setup.ip_config = 0; 5509 context_desc->upper_setup.tcp_fields.tucss = css; 5510 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; 5511 context_desc->upper_setup.tcp_fields.tucse = 0; 5512 context_desc->tcp_seg_setup.data = 0; 5513 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 5514 5515 buffer_info->time_stamp = jiffies; 5516 buffer_info->next_to_watch = i; 5517 5518 i++; 5519 if (i == tx_ring->count) 5520 i = 0; 5521 tx_ring->next_to_use = i; 5522 5523 return true; 5524 } 5525 5526 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 5527 unsigned int first, unsigned int max_per_txd, 5528 unsigned int nr_frags) 5529 { 5530 struct e1000_adapter *adapter = tx_ring->adapter; 5531 struct pci_dev *pdev = adapter->pdev; 5532 struct e1000_buffer *buffer_info; 5533 unsigned int len = skb_headlen(skb); 5534 unsigned int offset = 0, size, count = 0, i; 5535 unsigned int f, bytecount, segs; 5536 5537 i = tx_ring->next_to_use; 5538 5539 while (len) { 5540 buffer_info = &tx_ring->buffer_info[i]; 5541 size = min(len, max_per_txd); 5542 5543 buffer_info->length = size; 5544 buffer_info->time_stamp = jiffies; 5545 buffer_info->next_to_watch = i; 5546 buffer_info->dma = dma_map_single(&pdev->dev, 5547 skb->data + offset, 5548 size, DMA_TO_DEVICE); 5549 buffer_info->mapped_as_page = false; 5550 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 5551 goto dma_error; 5552 5553 len -= size; 5554 offset += size; 5555 count++; 5556 5557 if (len) { 5558 i++; 5559 if (i == tx_ring->count) 5560 i = 0; 5561 } 5562 } 5563 5564 for (f = 0; f < nr_frags; f++) { 5565 const struct skb_frag_struct *frag; 5566 5567 frag = &skb_shinfo(skb)->frags[f]; 5568 len = skb_frag_size(frag); 5569 offset = 0; 5570 5571 while (len) { 5572 i++; 5573 if (i == tx_ring->count) 5574 i = 0; 5575 5576 buffer_info = &tx_ring->buffer_info[i]; 5577 size = min(len, max_per_txd); 5578 5579 buffer_info->length = size; 5580 buffer_info->time_stamp = jiffies; 5581 buffer_info->next_to_watch = i; 5582 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 5583 offset, size, 5584 DMA_TO_DEVICE); 5585 buffer_info->mapped_as_page = true; 5586 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 5587 goto dma_error; 5588 5589 len -= size; 5590 offset += size; 5591 count++; 5592 } 5593 } 5594 5595 segs = skb_shinfo(skb)->gso_segs ? 
: 1; 5596 /* multiply data chunks by size of headers */ 5597 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 5598 5599 tx_ring->buffer_info[i].skb = skb; 5600 tx_ring->buffer_info[i].segs = segs; 5601 tx_ring->buffer_info[i].bytecount = bytecount; 5602 tx_ring->buffer_info[first].next_to_watch = i; 5603 5604 return count; 5605 5606 dma_error: 5607 dev_err(&pdev->dev, "Tx DMA map failed\n"); 5608 buffer_info->dma = 0; 5609 if (count) 5610 count--; 5611 5612 while (count--) { 5613 if (i == 0) 5614 i += tx_ring->count; 5615 i--; 5616 buffer_info = &tx_ring->buffer_info[i]; 5617 e1000_put_txbuf(tx_ring, buffer_info); 5618 } 5619 5620 return 0; 5621 } 5622 5623 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) 5624 { 5625 struct e1000_adapter *adapter = tx_ring->adapter; 5626 struct e1000_tx_desc *tx_desc = NULL; 5627 struct e1000_buffer *buffer_info; 5628 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 5629 unsigned int i; 5630 5631 if (tx_flags & E1000_TX_FLAGS_TSO) { 5632 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 5633 E1000_TXD_CMD_TSE; 5634 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 5635 5636 if (tx_flags & E1000_TX_FLAGS_IPV4) 5637 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 5638 } 5639 5640 if (tx_flags & E1000_TX_FLAGS_CSUM) { 5641 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 5642 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 5643 } 5644 5645 if (tx_flags & E1000_TX_FLAGS_VLAN) { 5646 txd_lower |= E1000_TXD_CMD_VLE; 5647 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 5648 } 5649 5650 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 5651 txd_lower &= ~(E1000_TXD_CMD_IFCS); 5652 5653 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { 5654 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 5655 txd_upper |= E1000_TXD_EXTCMD_TSTAMP; 5656 } 5657 5658 i = tx_ring->next_to_use; 5659 5660 do { 5661 buffer_info = &tx_ring->buffer_info[i]; 5662 tx_desc = E1000_TX_DESC(*tx_ring, i); 5663 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 5664 tx_desc->lower.data = cpu_to_le32(txd_lower | 5665 buffer_info->length); 5666 tx_desc->upper.data = cpu_to_le32(txd_upper); 5667 5668 i++; 5669 if (i == tx_ring->count) 5670 i = 0; 5671 } while (--count > 0); 5672 5673 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 5674 5675 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 5676 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 5677 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 5678 5679 /* Force memory writes to complete before letting h/w 5680 * know there are new descriptors to fetch. (Only 5681 * applicable for weak-ordered memory model archs, 5682 * such as IA-64). 
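 *
 * The wmb() below orders the descriptor writes done in the loop above
 * ahead of the tail register update that the caller (e1000_xmit_frame)
 * issues after this function returns; without it the hardware could
 * fetch descriptors whose memory writes are still pending.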
5683 */ 5684 wmb(); 5685 5686 tx_ring->next_to_use = i; 5687 } 5688 5689 #define MINIMUM_DHCP_PACKET_SIZE 282 5690 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 5691 struct sk_buff *skb) 5692 { 5693 struct e1000_hw *hw = &adapter->hw; 5694 u16 length, offset; 5695 5696 if (skb_vlan_tag_present(skb) && 5697 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 5698 (adapter->hw.mng_cookie.status & 5699 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 5700 return 0; 5701 5702 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 5703 return 0; 5704 5705 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) 5706 return 0; 5707 5708 { 5709 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); 5710 struct udphdr *udp; 5711 5712 if (ip->protocol != IPPROTO_UDP) 5713 return 0; 5714 5715 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); 5716 if (ntohs(udp->dest) != 67) 5717 return 0; 5718 5719 offset = (u8 *)udp + 8 - skb->data; 5720 length = skb->len - offset; 5721 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); 5722 } 5723 5724 return 0; 5725 } 5726 5727 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5728 { 5729 struct e1000_adapter *adapter = tx_ring->adapter; 5730 5731 netif_stop_queue(adapter->netdev); 5732 /* Herbert's original patch had: 5733 * smp_mb__after_netif_stop_queue(); 5734 * but since that doesn't exist yet, just open code it. 5735 */ 5736 smp_mb(); 5737 5738 /* We need to check again in a case another CPU has just 5739 * made room available. 5740 */ 5741 if (e1000_desc_unused(tx_ring) < size) 5742 return -EBUSY; 5743 5744 /* A reprieve! */ 5745 netif_start_queue(adapter->netdev); 5746 ++adapter->restart_queue; 5747 return 0; 5748 } 5749 5750 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5751 { 5752 BUG_ON(size > tx_ring->count); 5753 5754 if (e1000_desc_unused(tx_ring) >= size) 5755 return 0; 5756 return __e1000_maybe_stop_tx(tx_ring, size); 5757 } 5758 5759 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 5760 struct net_device *netdev) 5761 { 5762 struct e1000_adapter *adapter = netdev_priv(netdev); 5763 struct e1000_ring *tx_ring = adapter->tx_ring; 5764 unsigned int first; 5765 unsigned int tx_flags = 0; 5766 unsigned int len = skb_headlen(skb); 5767 unsigned int nr_frags; 5768 unsigned int mss; 5769 int count = 0; 5770 int tso; 5771 unsigned int f; 5772 __be16 protocol = vlan_get_protocol(skb); 5773 5774 if (test_bit(__E1000_DOWN, &adapter->state)) { 5775 dev_kfree_skb_any(skb); 5776 return NETDEV_TX_OK; 5777 } 5778 5779 if (skb->len <= 0) { 5780 dev_kfree_skb_any(skb); 5781 return NETDEV_TX_OK; 5782 } 5783 5784 /* The minimum packet size with TCTL.PSP set is 17 bytes so 5785 * pad skb in order to meet this minimum size requirement 5786 */ 5787 if (skb_put_padto(skb, 17)) 5788 return NETDEV_TX_OK; 5789 5790 mss = skb_shinfo(skb)->gso_size; 5791 if (mss) { 5792 u8 hdr_len; 5793 5794 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data 5795 * points to just header, pull a few bytes of payload from 5796 * frags into skb->data 5797 */ 5798 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5799 /* we do this workaround for ES2LAN, but it is un-necessary, 5800 * avoiding it could save a lot of cycles 5801 */ 5802 if (skb->data_len && (hdr_len == len)) { 5803 unsigned int pull_size; 5804 5805 pull_size = min_t(unsigned int, 4, skb->data_len); 5806 if (!__pskb_pull_tail(skb, pull_size)) { 5807 e_err("__pskb_pull_tail failed.\n"); 5808 dev_kfree_skb_any(skb); 5809 return 
NETDEV_TX_OK; 5810 } 5811 len = skb_headlen(skb); 5812 } 5813 } 5814 5815 /* reserve a descriptor for the offload context */ 5816 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 5817 count++; 5818 count++; 5819 5820 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); 5821 5822 nr_frags = skb_shinfo(skb)->nr_frags; 5823 for (f = 0; f < nr_frags; f++) 5824 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), 5825 adapter->tx_fifo_limit); 5826 5827 if (adapter->hw.mac.tx_pkt_filtering) 5828 e1000_transfer_dhcp_info(adapter, skb); 5829 5830 /* need: count + 2 desc gap to keep tail from touching 5831 * head, otherwise try next time 5832 */ 5833 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5834 return NETDEV_TX_BUSY; 5835 5836 if (skb_vlan_tag_present(skb)) { 5837 tx_flags |= E1000_TX_FLAGS_VLAN; 5838 tx_flags |= (skb_vlan_tag_get(skb) << 5839 E1000_TX_FLAGS_VLAN_SHIFT); 5840 } 5841 5842 first = tx_ring->next_to_use; 5843 5844 tso = e1000_tso(tx_ring, skb, protocol); 5845 if (tso < 0) { 5846 dev_kfree_skb_any(skb); 5847 return NETDEV_TX_OK; 5848 } 5849 5850 if (tso) 5851 tx_flags |= E1000_TX_FLAGS_TSO; 5852 else if (e1000_tx_csum(tx_ring, skb, protocol)) 5853 tx_flags |= E1000_TX_FLAGS_CSUM; 5854 5855 /* Old method was to assume IPv4 packet by default if TSO was enabled. 5856 * 82571 hardware supports TSO capabilities for IPv6 as well... 5857 * no longer assume, we must. 5858 */ 5859 if (protocol == htons(ETH_P_IP)) 5860 tx_flags |= E1000_TX_FLAGS_IPV4; 5861 5862 if (unlikely(skb->no_fcs)) 5863 tx_flags |= E1000_TX_FLAGS_NO_FCS; 5864 5865 /* if count is 0 then mapping error has occurred */ 5866 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, 5867 nr_frags); 5868 if (count) { 5869 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 5870 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { 5871 if (!adapter->tx_hwtstamp_skb) { 5872 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 5873 tx_flags |= E1000_TX_FLAGS_HWTSTAMP; 5874 adapter->tx_hwtstamp_skb = skb_get(skb); 5875 adapter->tx_hwtstamp_start = jiffies; 5876 schedule_work(&adapter->tx_hwtstamp_work); 5877 } else { 5878 adapter->tx_hwtstamp_skipped++; 5879 } 5880 } 5881 5882 skb_tx_timestamp(skb); 5883 5884 netdev_sent_queue(netdev, skb->len); 5885 e1000_tx_queue(tx_ring, tx_flags, count); 5886 /* Make sure there is space in the ring for the next send. 
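 * Worst case for one more frame: MAX_SKB_FRAGS fragments, each of which
 * the driver assumes can consume up to
 * DIV_ROUND_UP(PAGE_SIZE, adapter->tx_fifo_limit) descriptors, plus a
 * 2-descriptor gap so the tail never catches up with the head.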
*/
5887         e1000_maybe_stop_tx(tx_ring,
5888                             (MAX_SKB_FRAGS *
5889                              DIV_ROUND_UP(PAGE_SIZE,
5890                                           adapter->tx_fifo_limit) + 2));
5891
5892         if (!skb->xmit_more ||
5893             netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
5894             if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5895                 e1000e_update_tdt_wa(tx_ring,
5896                                      tx_ring->next_to_use);
5897             else
5898                 writel(tx_ring->next_to_use, tx_ring->tail);
5899
5900             /* we need this if more than one processor can write
5901              * to our tail at a time; it synchronizes IO on
5902              * IA64/Altix systems
5903              */
5904             mmiowb();
5905         }
5906     } else {
5907         dev_kfree_skb_any(skb);
5908         tx_ring->buffer_info[first].time_stamp = 0;
5909         tx_ring->next_to_use = first;
5910     }
5911
5912     return NETDEV_TX_OK;
5913 }
5914
5915 /**
5916  * e1000_tx_timeout - Respond to a Tx Hang
5917  * @netdev: network interface device structure
5918  **/
5919 static void e1000_tx_timeout(struct net_device *netdev)
5920 {
5921     struct e1000_adapter *adapter = netdev_priv(netdev);
5922
5923     /* Do the reset outside of interrupt context */
5924     adapter->tx_timeout_count++;
5925     schedule_work(&adapter->reset_task);
5926 }
5927
5928 static void e1000_reset_task(struct work_struct *work)
5929 {
5930     struct e1000_adapter *adapter;
5931     adapter = container_of(work, struct e1000_adapter, reset_task);
5932
5933     /* don't run the task if already down */
5934     if (test_bit(__E1000_DOWN, &adapter->state))
5935         return;
5936
5937     if (!(adapter->flags & FLAG_RESTART_NOW)) {
5938         e1000e_dump(adapter);
5939         e_err("Reset adapter unexpectedly\n");
5940     }
5941     e1000e_reinit_locked(adapter);
5942 }
5943
5944 /**
5945  * e1000e_get_stats64 - Get System Network Statistics
5946  * @netdev: network interface device structure
5947  * @stats: rtnl_link_stats64 pointer
5948  *
5949  * Fills @stats with the adapter's aggregated statistics.
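 * These are the counters the stack exposes through the usual rtnetlink
 * statistics interfaces (e.g. "ip -s link" or /proc/net/dev).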
5950 **/ 5951 void e1000e_get_stats64(struct net_device *netdev, 5952 struct rtnl_link_stats64 *stats) 5953 { 5954 struct e1000_adapter *adapter = netdev_priv(netdev); 5955 5956 spin_lock(&adapter->stats64_lock); 5957 e1000e_update_stats(adapter); 5958 /* Fill out the OS statistics structure */ 5959 stats->rx_bytes = adapter->stats.gorc; 5960 stats->rx_packets = adapter->stats.gprc; 5961 stats->tx_bytes = adapter->stats.gotc; 5962 stats->tx_packets = adapter->stats.gptc; 5963 stats->multicast = adapter->stats.mprc; 5964 stats->collisions = adapter->stats.colc; 5965 5966 /* Rx Errors */ 5967 5968 /* RLEC on some newer hardware can be incorrect so build 5969 * our own version based on RUC and ROC 5970 */ 5971 stats->rx_errors = adapter->stats.rxerrc + 5972 adapter->stats.crcerrs + adapter->stats.algnerrc + 5973 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; 5974 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; 5975 stats->rx_crc_errors = adapter->stats.crcerrs; 5976 stats->rx_frame_errors = adapter->stats.algnerrc; 5977 stats->rx_missed_errors = adapter->stats.mpc; 5978 5979 /* Tx Errors */ 5980 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; 5981 stats->tx_aborted_errors = adapter->stats.ecol; 5982 stats->tx_window_errors = adapter->stats.latecol; 5983 stats->tx_carrier_errors = adapter->stats.tncrs; 5984 5985 /* Tx Dropped needs to be maintained elsewhere */ 5986 5987 spin_unlock(&adapter->stats64_lock); 5988 } 5989 5990 /** 5991 * e1000_change_mtu - Change the Maximum Transfer Unit 5992 * @netdev: network interface device structure 5993 * @new_mtu: new value for maximum frame size 5994 * 5995 * Returns 0 on success, negative on failure 5996 **/ 5997 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5998 { 5999 struct e1000_adapter *adapter = netdev_priv(netdev); 6000 int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 6001 6002 /* Jumbo frame support */ 6003 if ((new_mtu > ETH_DATA_LEN) && 6004 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 6005 e_err("Jumbo Frames not supported.\n"); 6006 return -EINVAL; 6007 } 6008 6009 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ 6010 if ((adapter->hw.mac.type >= e1000_pch2lan) && 6011 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 6012 (new_mtu > ETH_DATA_LEN)) { 6013 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); 6014 return -EINVAL; 6015 } 6016 6017 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 6018 usleep_range(1000, 2000); 6019 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 6020 adapter->max_frame_size = max_frame; 6021 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 6022 netdev->mtu = new_mtu; 6023 6024 pm_runtime_get_sync(netdev->dev.parent); 6025 6026 if (netif_running(netdev)) 6027 e1000e_down(adapter, true); 6028 6029 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 6030 * means we reserve 2 more, this pushes us to allocate from the next 6031 * larger slab size. 6032 * i.e. 
RXBUFFER_2048 --> size-4096 slab 6033 * However with the new *_jumbo_rx* routines, jumbo receives will use 6034 * fragmented skbs 6035 */ 6036 6037 if (max_frame <= 2048) 6038 adapter->rx_buffer_len = 2048; 6039 else 6040 adapter->rx_buffer_len = 4096; 6041 6042 /* adjust allocation if LPE protects us, and we aren't using SBP */ 6043 if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) 6044 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; 6045 6046 if (netif_running(netdev)) 6047 e1000e_up(adapter); 6048 else 6049 e1000e_reset(adapter); 6050 6051 pm_runtime_put_sync(netdev->dev.parent); 6052 6053 clear_bit(__E1000_RESETTING, &adapter->state); 6054 6055 return 0; 6056 } 6057 6058 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 6059 int cmd) 6060 { 6061 struct e1000_adapter *adapter = netdev_priv(netdev); 6062 struct mii_ioctl_data *data = if_mii(ifr); 6063 6064 if (adapter->hw.phy.media_type != e1000_media_type_copper) 6065 return -EOPNOTSUPP; 6066 6067 switch (cmd) { 6068 case SIOCGMIIPHY: 6069 data->phy_id = adapter->hw.phy.addr; 6070 break; 6071 case SIOCGMIIREG: 6072 e1000_phy_read_status(adapter); 6073 6074 switch (data->reg_num & 0x1F) { 6075 case MII_BMCR: 6076 data->val_out = adapter->phy_regs.bmcr; 6077 break; 6078 case MII_BMSR: 6079 data->val_out = adapter->phy_regs.bmsr; 6080 break; 6081 case MII_PHYSID1: 6082 data->val_out = (adapter->hw.phy.id >> 16); 6083 break; 6084 case MII_PHYSID2: 6085 data->val_out = (adapter->hw.phy.id & 0xFFFF); 6086 break; 6087 case MII_ADVERTISE: 6088 data->val_out = adapter->phy_regs.advertise; 6089 break; 6090 case MII_LPA: 6091 data->val_out = adapter->phy_regs.lpa; 6092 break; 6093 case MII_EXPANSION: 6094 data->val_out = adapter->phy_regs.expansion; 6095 break; 6096 case MII_CTRL1000: 6097 data->val_out = adapter->phy_regs.ctrl1000; 6098 break; 6099 case MII_STAT1000: 6100 data->val_out = adapter->phy_regs.stat1000; 6101 break; 6102 case MII_ESTATUS: 6103 data->val_out = adapter->phy_regs.estatus; 6104 break; 6105 default: 6106 return -EIO; 6107 } 6108 break; 6109 case SIOCSMIIREG: 6110 default: 6111 return -EOPNOTSUPP; 6112 } 6113 return 0; 6114 } 6115 6116 /** 6117 * e1000e_hwtstamp_ioctl - control hardware time stamping 6118 * @netdev: network interface device structure 6119 * @ifreq: interface request 6120 * 6121 * Outgoing time stamping can be enabled and disabled. Play nice and 6122 * disable it when requested, although it shouldn't cause any overhead 6123 * when no packet needs it. At most one packet in the queue may be 6124 * marked for time stamping, otherwise it would be impossible to tell 6125 * for sure to which packet the hardware time stamp belongs. 6126 * 6127 * Incoming time stamping has to be configured via the hardware filters. 6128 * Not all combinations are supported, in particular event type has to be 6129 * specified. Matching the kind of event packet is not supported, with the 6130 * exception of "all V2 events regardless of level 2 or 4". 
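 *
 * For reference, a minimal user-space sketch (not part of this driver;
 * the constants and structures are the standard <linux/net_tstamp.h> and
 * <linux/sockios.h> UAPI, and "eth0" is an arbitrary example interface)
 * that requests Tx time stamping and time stamping of all Rx packets:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	(on return, cfg.rx_filter holds the filter actually applied)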
6131 **/ 6132 static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 6133 { 6134 struct e1000_adapter *adapter = netdev_priv(netdev); 6135 struct hwtstamp_config config; 6136 int ret_val; 6137 6138 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 6139 return -EFAULT; 6140 6141 ret_val = e1000e_config_hwtstamp(adapter, &config); 6142 if (ret_val) 6143 return ret_val; 6144 6145 switch (config.rx_filter) { 6146 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 6147 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 6148 case HWTSTAMP_FILTER_PTP_V2_SYNC: 6149 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 6150 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 6151 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 6152 /* With V2 type filters which specify a Sync or Delay Request, 6153 * Path Delay Request/Response messages are also time stamped 6154 * by hardware so notify the caller the requested packets plus 6155 * some others are time stamped. 6156 */ 6157 config.rx_filter = HWTSTAMP_FILTER_SOME; 6158 break; 6159 default: 6160 break; 6161 } 6162 6163 return copy_to_user(ifr->ifr_data, &config, 6164 sizeof(config)) ? -EFAULT : 0; 6165 } 6166 6167 static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) 6168 { 6169 struct e1000_adapter *adapter = netdev_priv(netdev); 6170 6171 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, 6172 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; 6173 } 6174 6175 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6176 { 6177 switch (cmd) { 6178 case SIOCGMIIPHY: 6179 case SIOCGMIIREG: 6180 case SIOCSMIIREG: 6181 return e1000_mii_ioctl(netdev, ifr, cmd); 6182 case SIOCSHWTSTAMP: 6183 return e1000e_hwtstamp_set(netdev, ifr); 6184 case SIOCGHWTSTAMP: 6185 return e1000e_hwtstamp_get(netdev, ifr); 6186 default: 6187 return -EOPNOTSUPP; 6188 } 6189 } 6190 6191 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) 6192 { 6193 struct e1000_hw *hw = &adapter->hw; 6194 u32 i, mac_reg, wuc; 6195 u16 phy_reg, wuc_enable; 6196 int retval; 6197 6198 /* copy MAC RARs to PHY RARs */ 6199 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 6200 6201 retval = hw->phy.ops.acquire(hw); 6202 if (retval) { 6203 e_err("Could not acquire PHY\n"); 6204 return retval; 6205 } 6206 6207 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ 6208 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 6209 if (retval) 6210 goto release; 6211 6212 /* copy MAC MTA to PHY MTA - only needed for pchlan */ 6213 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 6214 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 6215 hw->phy.ops.write_reg_page(hw, BM_MTA(i), 6216 (u16)(mac_reg & 0xFFFF)); 6217 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, 6218 (u16)((mac_reg >> 16) & 0xFFFF)); 6219 } 6220 6221 /* configure PHY Rx Control register */ 6222 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); 6223 mac_reg = er32(RCTL); 6224 if (mac_reg & E1000_RCTL_UPE) 6225 phy_reg |= BM_RCTL_UPE; 6226 if (mac_reg & E1000_RCTL_MPE) 6227 phy_reg |= BM_RCTL_MPE; 6228 phy_reg &= ~(BM_RCTL_MO_MASK); 6229 if (mac_reg & E1000_RCTL_MO_3) 6230 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 6231 << BM_RCTL_MO_SHIFT); 6232 if (mac_reg & E1000_RCTL_BAM) 6233 phy_reg |= BM_RCTL_BAM; 6234 if (mac_reg & E1000_RCTL_PMCF) 6235 phy_reg |= BM_RCTL_PMCF; 6236 mac_reg = er32(CTRL); 6237 if (mac_reg & E1000_CTRL_RFCE) 6238 phy_reg |= BM_RCTL_RFCE; 6239 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); 6240 6241 wuc = 
E1000_WUC_PME_EN; 6242 if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) 6243 wuc |= E1000_WUC_APME; 6244 6245 /* enable PHY wakeup in MAC register */ 6246 ew32(WUFC, wufc); 6247 ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | 6248 E1000_WUC_PME_STATUS | wuc)); 6249 6250 /* configure and enable PHY wakeup in PHY registers */ 6251 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); 6252 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); 6253 6254 /* activate PHY wakeup */ 6255 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 6256 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 6257 if (retval) 6258 e_err("Could not set PHY Host Wakeup bit\n"); 6259 release: 6260 hw->phy.ops.release(hw); 6261 6262 return retval; 6263 } 6264 6265 static void e1000e_flush_lpic(struct pci_dev *pdev) 6266 { 6267 struct net_device *netdev = pci_get_drvdata(pdev); 6268 struct e1000_adapter *adapter = netdev_priv(netdev); 6269 struct e1000_hw *hw = &adapter->hw; 6270 u32 ret_val; 6271 6272 pm_runtime_get_sync(netdev->dev.parent); 6273 6274 ret_val = hw->phy.ops.acquire(hw); 6275 if (ret_val) 6276 goto fl_out; 6277 6278 pr_info("EEE TX LPI TIMER: %08X\n", 6279 er32(LPIC) >> E1000_LPIC_LPIET_SHIFT); 6280 6281 hw->phy.ops.release(hw); 6282 6283 fl_out: 6284 pm_runtime_put_sync(netdev->dev.parent); 6285 } 6286 6287 static int e1000e_pm_freeze(struct device *dev) 6288 { 6289 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); 6290 struct e1000_adapter *adapter = netdev_priv(netdev); 6291 6292 netif_device_detach(netdev); 6293 6294 if (netif_running(netdev)) { 6295 int count = E1000_CHECK_RESET_COUNT; 6296 6297 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 6298 usleep_range(10000, 20000); 6299 6300 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 6301 6302 /* Quiesce the device without resetting the hardware */ 6303 e1000e_down(adapter, false); 6304 e1000_free_irq(adapter); 6305 } 6306 e1000e_reset_interrupt_capability(adapter); 6307 6308 /* Allow time for pending master requests to run */ 6309 e1000e_disable_pcie_master(&adapter->hw); 6310 6311 return 0; 6312 } 6313 6314 static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) 6315 { 6316 struct net_device *netdev = pci_get_drvdata(pdev); 6317 struct e1000_adapter *adapter = netdev_priv(netdev); 6318 struct e1000_hw *hw = &adapter->hw; 6319 u32 ctrl, ctrl_ext, rctl, status; 6320 /* Runtime suspend should only enable wakeup for link changes */ 6321 u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; 6322 int retval = 0; 6323 6324 status = er32(STATUS); 6325 if (status & E1000_STATUS_LU) 6326 wufc &= ~E1000_WUFC_LNKC; 6327 6328 if (wufc) { 6329 e1000_setup_rctl(adapter); 6330 e1000e_set_rx_mode(netdev); 6331 6332 /* turn on all-multi mode if wake on multicast is enabled */ 6333 if (wufc & E1000_WUFC_MC) { 6334 rctl = er32(RCTL); 6335 rctl |= E1000_RCTL_MPE; 6336 ew32(RCTL, rctl); 6337 } 6338 6339 ctrl = er32(CTRL); 6340 ctrl |= E1000_CTRL_ADVD3WUC; 6341 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 6342 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 6343 ew32(CTRL, ctrl); 6344 6345 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 6346 adapter->hw.phy.media_type == 6347 e1000_media_type_internal_serdes) { 6348 /* keep the laser running in D3 */ 6349 ctrl_ext = er32(CTRL_EXT); 6350 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 6351 ew32(CTRL_EXT, ctrl_ext); 6352 } 6353 6354 if (!runtime) 6355 e1000e_power_up_phy(adapter); 6356 6357 if (adapter->flags & FLAG_IS_ICH) 6358 e1000_suspend_workarounds_ich8lan(&adapter->hw); 6359 6360 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 6361 /* enable wakeup by the PHY */ 6362 retval = e1000_init_phy_wakeup(adapter, wufc); 6363 if (retval) 6364 return retval; 6365 } else { 6366 /* enable wakeup by the MAC */ 6367 ew32(WUFC, wufc); 6368 ew32(WUC, E1000_WUC_PME_EN); 6369 } 6370 } else { 6371 ew32(WUC, 0); 6372 ew32(WUFC, 0); 6373 6374 e1000_power_down_phy(adapter); 6375 } 6376 6377 if (adapter->hw.phy.type == e1000_phy_igp_3) { 6378 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 6379 } else if (hw->mac.type >= e1000_pch_lpt) { 6380 if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) 6381 /* ULP does not support wake from unicast, multicast 6382 * or broadcast. 6383 */ 6384 retval = e1000_enable_ulp_lpt_lp(hw, !runtime); 6385 6386 if (retval) 6387 return retval; 6388 } 6389 6390 /* Ensure that the appropriate bits are set in LPI_CTRL 6391 * for EEE in Sx 6392 */ 6393 if ((hw->phy.type >= e1000_phy_i217) && 6394 adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { 6395 u16 lpi_ctrl = 0; 6396 6397 retval = hw->phy.ops.acquire(hw); 6398 if (!retval) { 6399 retval = e1e_rphy_locked(hw, I82579_LPI_CTRL, 6400 &lpi_ctrl); 6401 if (!retval) { 6402 if (adapter->eee_advert & 6403 hw->dev_spec.ich8lan.eee_lp_ability & 6404 I82579_EEE_100_SUPPORTED) 6405 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; 6406 if (adapter->eee_advert & 6407 hw->dev_spec.ich8lan.eee_lp_ability & 6408 I82579_EEE_1000_SUPPORTED) 6409 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; 6410 6411 retval = e1e_wphy_locked(hw, I82579_LPI_CTRL, 6412 lpi_ctrl); 6413 } 6414 } 6415 hw->phy.ops.release(hw); 6416 } 6417 6418 /* Release control of h/w to f/w. If f/w is AMT enabled, this 6419 * would have already happened in close and is redundant. 6420 */ 6421 e1000e_release_hw_control(adapter); 6422 6423 pci_clear_master(pdev); 6424 6425 /* The pci-e switch on some quad port adapters will report a 6426 * correctable error when the MAC transitions from D0 to D3. To 6427 * prevent this we need to mask off the correctable errors on the 6428 * downstream port of the pci-e switch. 6429 * 6430 * We don't have the associated upstream bridge while assigning 6431 * the PCI device into guest. For example, the KVM on power is 6432 * one of the cases. 
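 * In that case pdev->bus->self is NULL and the code below simply skips
 * the workaround rather than dereferencing a missing bridge.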
6433 */ 6434 if (adapter->flags & FLAG_IS_QUAD_PORT) { 6435 struct pci_dev *us_dev = pdev->bus->self; 6436 u16 devctl; 6437 6438 if (!us_dev) 6439 return 0; 6440 6441 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); 6442 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, 6443 (devctl & ~PCI_EXP_DEVCTL_CERE)); 6444 6445 pci_save_state(pdev); 6446 pci_prepare_to_sleep(pdev); 6447 6448 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); 6449 } 6450 6451 return 0; 6452 } 6453 6454 /** 6455 * __e1000e_disable_aspm - Disable ASPM states 6456 * @pdev: pointer to PCI device struct 6457 * @state: bit-mask of ASPM states to disable 6458 * @locked: indication if this context holds pci_bus_sem locked. 6459 * 6460 * Some devices *must* have certain ASPM states disabled per hardware errata. 6461 **/ 6462 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) 6463 { 6464 struct pci_dev *parent = pdev->bus->self; 6465 u16 aspm_dis_mask = 0; 6466 u16 pdev_aspmc, parent_aspmc; 6467 6468 switch (state) { 6469 case PCIE_LINK_STATE_L0S: 6470 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: 6471 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; 6472 /* fall-through - can't have L1 without L0s */ 6473 case PCIE_LINK_STATE_L1: 6474 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; 6475 break; 6476 default: 6477 return; 6478 } 6479 6480 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); 6481 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; 6482 6483 if (parent) { 6484 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, 6485 &parent_aspmc); 6486 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; 6487 } 6488 6489 /* Nothing to do if the ASPM states to be disabled already are */ 6490 if (!(pdev_aspmc & aspm_dis_mask) && 6491 (!parent || !(parent_aspmc & aspm_dis_mask))) 6492 return; 6493 6494 dev_info(&pdev->dev, "Disabling ASPM %s %s\n", 6495 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? 6496 "L0s" : "", 6497 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? 6498 "L1" : ""); 6499 6500 #ifdef CONFIG_PCIEASPM 6501 if (locked) 6502 pci_disable_link_state_locked(pdev, state); 6503 else 6504 pci_disable_link_state(pdev, state); 6505 6506 /* Double-check ASPM control. If not disabled by the above, the 6507 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is 6508 * not enabled); override by writing PCI config space directly. 6509 */ 6510 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); 6511 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; 6512 6513 if (!(aspm_dis_mask & pdev_aspmc)) 6514 return; 6515 #endif 6516 6517 /* Both device and parent should have the same ASPM setting. 6518 * Disable ASPM in downstream component first and then upstream. 6519 */ 6520 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); 6521 6522 if (parent) 6523 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, 6524 aspm_dis_mask); 6525 } 6526 6527 /** 6528 * e1000e_disable_aspm - Disable ASPM states. 6529 * @pdev: pointer to PCI device struct 6530 * @state: bit-mask of ASPM states to disable 6531 * 6532 * This function acquires the pci_bus_sem! 6533 * Some devices *must* have certain ASPM states disabled per hardware errata. 6534 **/ 6535 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 6536 { 6537 __e1000e_disable_aspm(pdev, state, 0); 6538 } 6539 6540 /** 6541 * e1000e_disable_aspm_locked Disable ASPM states. 6542 * @pdev: pointer to PCI device struct 6543 * @state: bit-mask of ASPM states to disable 6544 * 6545 * This function must be called with pci_bus_sem acquired! 
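 * (It is used from e1000_io_slot_reset(), which is invoked in a context
 * where the PCI core is already holding pci_bus_sem.)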
6546 * Some devices *must* have certain ASPM states disabled per hardware errata. 6547 **/ 6548 static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) 6549 { 6550 __e1000e_disable_aspm(pdev, state, 1); 6551 } 6552 6553 #ifdef CONFIG_PM 6554 static int __e1000_resume(struct pci_dev *pdev) 6555 { 6556 struct net_device *netdev = pci_get_drvdata(pdev); 6557 struct e1000_adapter *adapter = netdev_priv(netdev); 6558 struct e1000_hw *hw = &adapter->hw; 6559 u16 aspm_disable_flag = 0; 6560 6561 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 6562 aspm_disable_flag = PCIE_LINK_STATE_L0S; 6563 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 6564 aspm_disable_flag |= PCIE_LINK_STATE_L1; 6565 if (aspm_disable_flag) 6566 e1000e_disable_aspm(pdev, aspm_disable_flag); 6567 6568 pci_set_master(pdev); 6569 6570 if (hw->mac.type >= e1000_pch2lan) 6571 e1000_resume_workarounds_pchlan(&adapter->hw); 6572 6573 e1000e_power_up_phy(adapter); 6574 6575 /* report the system wakeup cause from S3/S4 */ 6576 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 6577 u16 phy_data; 6578 6579 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); 6580 if (phy_data) { 6581 e_info("PHY Wakeup cause - %s\n", 6582 phy_data & E1000_WUS_EX ? "Unicast Packet" : 6583 phy_data & E1000_WUS_MC ? "Multicast Packet" : 6584 phy_data & E1000_WUS_BC ? "Broadcast Packet" : 6585 phy_data & E1000_WUS_MAG ? "Magic Packet" : 6586 phy_data & E1000_WUS_LNKC ? 6587 "Link Status Change" : "other"); 6588 } 6589 e1e_wphy(&adapter->hw, BM_WUS, ~0); 6590 } else { 6591 u32 wus = er32(WUS); 6592 6593 if (wus) { 6594 e_info("MAC Wakeup cause - %s\n", 6595 wus & E1000_WUS_EX ? "Unicast Packet" : 6596 wus & E1000_WUS_MC ? "Multicast Packet" : 6597 wus & E1000_WUS_BC ? "Broadcast Packet" : 6598 wus & E1000_WUS_MAG ? "Magic Packet" : 6599 wus & E1000_WUS_LNKC ? "Link Status Change" : 6600 "other"); 6601 } 6602 ew32(WUS, ~0); 6603 } 6604 6605 e1000e_reset(adapter); 6606 6607 e1000_init_manageability_pt(adapter); 6608 6609 /* If the controller has AMT, do not set DRV_LOAD until the interface 6610 * is up. For all other cases, let the f/w know that the h/w is now 6611 * under the control of the driver. 
6612 */ 6613 if (!(adapter->flags & FLAG_HAS_AMT)) 6614 e1000e_get_hw_control(adapter); 6615 6616 return 0; 6617 } 6618 6619 #ifdef CONFIG_PM_SLEEP 6620 static int e1000e_pm_thaw(struct device *dev) 6621 { 6622 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); 6623 struct e1000_adapter *adapter = netdev_priv(netdev); 6624 6625 e1000e_set_interrupt_capability(adapter); 6626 if (netif_running(netdev)) { 6627 u32 err = e1000_request_irq(adapter); 6628 6629 if (err) 6630 return err; 6631 6632 e1000e_up(adapter); 6633 } 6634 6635 netif_device_attach(netdev); 6636 6637 return 0; 6638 } 6639 6640 static int e1000e_pm_suspend(struct device *dev) 6641 { 6642 struct pci_dev *pdev = to_pci_dev(dev); 6643 int rc; 6644 6645 e1000e_flush_lpic(pdev); 6646 6647 e1000e_pm_freeze(dev); 6648 6649 rc = __e1000_shutdown(pdev, false); 6650 if (rc) 6651 e1000e_pm_thaw(dev); 6652 6653 return rc; 6654 } 6655 6656 static int e1000e_pm_resume(struct device *dev) 6657 { 6658 struct pci_dev *pdev = to_pci_dev(dev); 6659 int rc; 6660 6661 rc = __e1000_resume(pdev); 6662 if (rc) 6663 return rc; 6664 6665 return e1000e_pm_thaw(dev); 6666 } 6667 #endif /* CONFIG_PM_SLEEP */ 6668 6669 static int e1000e_pm_runtime_idle(struct device *dev) 6670 { 6671 struct pci_dev *pdev = to_pci_dev(dev); 6672 struct net_device *netdev = pci_get_drvdata(pdev); 6673 struct e1000_adapter *adapter = netdev_priv(netdev); 6674 u16 eee_lp; 6675 6676 eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability; 6677 6678 if (!e1000e_has_link(adapter)) { 6679 adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp; 6680 pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); 6681 } 6682 6683 return -EBUSY; 6684 } 6685 6686 static int e1000e_pm_runtime_resume(struct device *dev) 6687 { 6688 struct pci_dev *pdev = to_pci_dev(dev); 6689 struct net_device *netdev = pci_get_drvdata(pdev); 6690 struct e1000_adapter *adapter = netdev_priv(netdev); 6691 int rc; 6692 6693 rc = __e1000_resume(pdev); 6694 if (rc) 6695 return rc; 6696 6697 if (netdev->flags & IFF_UP) 6698 e1000e_up(adapter); 6699 6700 return rc; 6701 } 6702 6703 static int e1000e_pm_runtime_suspend(struct device *dev) 6704 { 6705 struct pci_dev *pdev = to_pci_dev(dev); 6706 struct net_device *netdev = pci_get_drvdata(pdev); 6707 struct e1000_adapter *adapter = netdev_priv(netdev); 6708 6709 if (netdev->flags & IFF_UP) { 6710 int count = E1000_CHECK_RESET_COUNT; 6711 6712 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 6713 usleep_range(10000, 20000); 6714 6715 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 6716 6717 /* Down the device without resetting the hardware */ 6718 e1000e_down(adapter, false); 6719 } 6720 6721 if (__e1000_shutdown(pdev, true)) { 6722 e1000e_pm_runtime_resume(dev); 6723 return -EBUSY; 6724 } 6725 6726 return 0; 6727 } 6728 #endif /* CONFIG_PM */ 6729 6730 static void e1000_shutdown(struct pci_dev *pdev) 6731 { 6732 e1000e_flush_lpic(pdev); 6733 6734 e1000e_pm_freeze(&pdev->dev); 6735 6736 __e1000_shutdown(pdev, false); 6737 } 6738 6739 #ifdef CONFIG_NET_POLL_CONTROLLER 6740 6741 static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) 6742 { 6743 struct net_device *netdev = data; 6744 struct e1000_adapter *adapter = netdev_priv(netdev); 6745 6746 if (adapter->msix_entries) { 6747 int vector, msix_irq; 6748 6749 vector = 0; 6750 msix_irq = adapter->msix_entries[vector].vector; 6751 if (disable_hardirq(msix_irq)) 6752 e1000_intr_msix_rx(msix_irq, netdev); 6753 enable_irq(msix_irq); 6754 6755 vector++; 6756 msix_irq = 
adapter->msix_entries[vector].vector;
6757         if (disable_hardirq(msix_irq))
6758             e1000_intr_msix_tx(msix_irq, netdev);
6759         enable_irq(msix_irq);
6760
6761         vector++;
6762         msix_irq = adapter->msix_entries[vector].vector;
6763         if (disable_hardirq(msix_irq))
6764             e1000_msix_other(msix_irq, netdev);
6765         enable_irq(msix_irq);
6766     }
6767
6768     return IRQ_HANDLED;
6769 }
6770
6771 /**
6772  * e1000_netpoll
6773  * @netdev: network interface device structure
6774  *
6775  * Polling 'interrupt' - used by things like netconsole to send skbs
6776  * without having to re-enable interrupts. It's not called while
6777  * the interrupt routine is executing.
6778  */
6779 static void e1000_netpoll(struct net_device *netdev)
6780 {
6781     struct e1000_adapter *adapter = netdev_priv(netdev);
6782
6783     switch (adapter->int_mode) {
6784     case E1000E_INT_MODE_MSIX:
6785         e1000_intr_msix(adapter->pdev->irq, netdev);
6786         break;
6787     case E1000E_INT_MODE_MSI:
6788         if (disable_hardirq(adapter->pdev->irq))
6789             e1000_intr_msi(adapter->pdev->irq, netdev);
6790         enable_irq(adapter->pdev->irq);
6791         break;
6792     default: /* E1000E_INT_MODE_LEGACY */
6793         if (disable_hardirq(adapter->pdev->irq))
6794             e1000_intr(adapter->pdev->irq, netdev);
6795         enable_irq(adapter->pdev->irq);
6796         break;
6797     }
6798 }
6799 #endif
6800
6801 /**
6802  * e1000_io_error_detected - called when PCI error is detected
6803  * @pdev: Pointer to PCI device
6804  * @state: The current pci connection state
6805  *
6806  * This function is called after a PCI bus error affecting
6807  * this device has been detected.
6808  */
6809 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
6810                                                 pci_channel_state_t state)
6811 {
6812     struct net_device *netdev = pci_get_drvdata(pdev);
6813     struct e1000_adapter *adapter = netdev_priv(netdev);
6814
6815     netif_device_detach(netdev);
6816
6817     if (state == pci_channel_io_perm_failure)
6818         return PCI_ERS_RESULT_DISCONNECT;
6819
6820     if (netif_running(netdev))
6821         e1000e_down(adapter, true);
6822     pci_disable_device(pdev);
6823
6824     /* Request a slot reset. */
6825     return PCI_ERS_RESULT_NEED_RESET;
6826 }
6827
6828 /**
6829  * e1000_io_slot_reset - called after the pci bus has been reset.
6830  * @pdev: Pointer to PCI device
6831  *
6832  * Restart the card from scratch, as if from a cold-boot. Implementation
6833  * resembles the first-half of the e1000e_pm_resume routine.
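 * On success the device is re-enabled, its saved config space is
 * restored, the MAC is reset and stale wakeup status is cleared, and
 * PCI_ERS_RESULT_RECOVERED is returned; if the device cannot be
 * re-enabled, PCI_ERS_RESULT_DISCONNECT is returned instead.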
6834 */ 6835 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 6836 { 6837 struct net_device *netdev = pci_get_drvdata(pdev); 6838 struct e1000_adapter *adapter = netdev_priv(netdev); 6839 struct e1000_hw *hw = &adapter->hw; 6840 u16 aspm_disable_flag = 0; 6841 int err; 6842 pci_ers_result_t result; 6843 6844 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 6845 aspm_disable_flag = PCIE_LINK_STATE_L0S; 6846 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 6847 aspm_disable_flag |= PCIE_LINK_STATE_L1; 6848 if (aspm_disable_flag) 6849 e1000e_disable_aspm_locked(pdev, aspm_disable_flag); 6850 6851 err = pci_enable_device_mem(pdev); 6852 if (err) { 6853 dev_err(&pdev->dev, 6854 "Cannot re-enable PCI device after reset.\n"); 6855 result = PCI_ERS_RESULT_DISCONNECT; 6856 } else { 6857 pdev->state_saved = true; 6858 pci_restore_state(pdev); 6859 pci_set_master(pdev); 6860 6861 pci_enable_wake(pdev, PCI_D3hot, 0); 6862 pci_enable_wake(pdev, PCI_D3cold, 0); 6863 6864 e1000e_reset(adapter); 6865 ew32(WUS, ~0); 6866 result = PCI_ERS_RESULT_RECOVERED; 6867 } 6868 6869 pci_cleanup_aer_uncorrect_error_status(pdev); 6870 6871 return result; 6872 } 6873 6874 /** 6875 * e1000_io_resume - called when traffic can start flowing again. 6876 * @pdev: Pointer to PCI device 6877 * 6878 * This callback is called when the error recovery driver tells us that 6879 * its OK to resume normal operation. Implementation resembles the 6880 * second-half of the e1000e_pm_resume routine. 6881 */ 6882 static void e1000_io_resume(struct pci_dev *pdev) 6883 { 6884 struct net_device *netdev = pci_get_drvdata(pdev); 6885 struct e1000_adapter *adapter = netdev_priv(netdev); 6886 6887 e1000_init_manageability_pt(adapter); 6888 6889 if (netif_running(netdev)) 6890 e1000e_up(adapter); 6891 6892 netif_device_attach(netdev); 6893 6894 /* If the controller has AMT, do not set DRV_LOAD until the interface 6895 * is up. For all other cases, let the f/w know that the h/w is now 6896 * under the control of the driver. 6897 */ 6898 if (!(adapter->flags & FLAG_HAS_AMT)) 6899 e1000e_get_hw_control(adapter); 6900 } 6901 6902 static void e1000_print_device_info(struct e1000_adapter *adapter) 6903 { 6904 struct e1000_hw *hw = &adapter->hw; 6905 struct net_device *netdev = adapter->netdev; 6906 u32 ret_val; 6907 u8 pba_str[E1000_PBANUM_LENGTH]; 6908 6909 /* print bus type/speed/width info */ 6910 e_info("(PCI Express:2.5GT/s:%s) %pM\n", 6911 /* bus width */ 6912 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 6913 "Width x1"), 6914 /* MAC address */ 6915 netdev->dev_addr); 6916 e_info("Intel(R) PRO/%s Network Connection\n", 6917 (hw->phy.type == e1000_phy_ife) ? 
"10/100" : "1000"); 6918 ret_val = e1000_read_pba_string_generic(hw, pba_str, 6919 E1000_PBANUM_LENGTH); 6920 if (ret_val) 6921 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); 6922 e_info("MAC: %d, PHY: %d, PBA No: %s\n", 6923 hw->mac.type, hw->phy.type, pba_str); 6924 } 6925 6926 static void e1000_eeprom_checks(struct e1000_adapter *adapter) 6927 { 6928 struct e1000_hw *hw = &adapter->hw; 6929 int ret_val; 6930 u16 buf = 0; 6931 6932 if (hw->mac.type != e1000_82573) 6933 return; 6934 6935 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); 6936 le16_to_cpus(&buf); 6937 if (!ret_val && (!(buf & BIT(0)))) { 6938 /* Deep Smart Power Down (DSPD) */ 6939 dev_warn(&adapter->pdev->dev, 6940 "Warning: detected DSPD enabled in EEPROM\n"); 6941 } 6942 } 6943 6944 static netdev_features_t e1000_fix_features(struct net_device *netdev, 6945 netdev_features_t features) 6946 { 6947 struct e1000_adapter *adapter = netdev_priv(netdev); 6948 struct e1000_hw *hw = &adapter->hw; 6949 6950 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ 6951 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) 6952 features &= ~NETIF_F_RXFCS; 6953 6954 /* Since there is no support for separate Rx/Tx vlan accel 6955 * enable/disable make sure Tx flag is always in same state as Rx. 6956 */ 6957 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6958 features |= NETIF_F_HW_VLAN_CTAG_TX; 6959 else 6960 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 6961 6962 return features; 6963 } 6964 6965 static int e1000_set_features(struct net_device *netdev, 6966 netdev_features_t features) 6967 { 6968 struct e1000_adapter *adapter = netdev_priv(netdev); 6969 netdev_features_t changed = features ^ netdev->features; 6970 6971 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) 6972 adapter->flags |= FLAG_TSO_FORCE; 6973 6974 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | 6975 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | 6976 NETIF_F_RXALL))) 6977 return 0; 6978 6979 if (changed & NETIF_F_RXFCS) { 6980 if (features & NETIF_F_RXFCS) { 6981 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; 6982 } else { 6983 /* We need to take it back to defaults, which might mean 6984 * stripping is still disabled at the adapter level. 
6985 */ 6986 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) 6987 adapter->flags2 |= FLAG2_CRC_STRIPPING; 6988 else 6989 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; 6990 } 6991 } 6992 6993 netdev->features = features; 6994 6995 if (netif_running(netdev)) 6996 e1000e_reinit_locked(adapter); 6997 else 6998 e1000e_reset(adapter); 6999 7000 return 0; 7001 } 7002 7003 static const struct net_device_ops e1000e_netdev_ops = { 7004 .ndo_open = e1000e_open, 7005 .ndo_stop = e1000e_close, 7006 .ndo_start_xmit = e1000_xmit_frame, 7007 .ndo_get_stats64 = e1000e_get_stats64, 7008 .ndo_set_rx_mode = e1000e_set_rx_mode, 7009 .ndo_set_mac_address = e1000_set_mac, 7010 .ndo_change_mtu = e1000_change_mtu, 7011 .ndo_do_ioctl = e1000_ioctl, 7012 .ndo_tx_timeout = e1000_tx_timeout, 7013 .ndo_validate_addr = eth_validate_addr, 7014 7015 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, 7016 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, 7017 #ifdef CONFIG_NET_POLL_CONTROLLER 7018 .ndo_poll_controller = e1000_netpoll, 7019 #endif 7020 .ndo_set_features = e1000_set_features, 7021 .ndo_fix_features = e1000_fix_features, 7022 .ndo_features_check = passthru_features_check, 7023 }; 7024 7025 /** 7026 * e1000_probe - Device Initialization Routine 7027 * @pdev: PCI device information struct 7028 * @ent: entry in e1000_pci_tbl 7029 * 7030 * Returns 0 on success, negative on failure 7031 * 7032 * e1000_probe initializes an adapter identified by a pci_dev structure. 7033 * The OS initialization, configuring of the adapter private structure, 7034 * and a hardware reset occur. 7035 **/ 7036 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 7037 { 7038 struct net_device *netdev; 7039 struct e1000_adapter *adapter; 7040 struct e1000_hw *hw; 7041 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 7042 resource_size_t mmio_start, mmio_len; 7043 resource_size_t flash_start, flash_len; 7044 static int cards_found; 7045 u16 aspm_disable_flag = 0; 7046 int bars, i, err, pci_using_dac; 7047 u16 eeprom_data = 0; 7048 u16 eeprom_apme_mask = E1000_EEPROM_APME; 7049 s32 ret_val = 0; 7050 7051 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) 7052 aspm_disable_flag = PCIE_LINK_STATE_L0S; 7053 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) 7054 aspm_disable_flag |= PCIE_LINK_STATE_L1; 7055 if (aspm_disable_flag) 7056 e1000e_disable_aspm(pdev, aspm_disable_flag); 7057 7058 err = pci_enable_device_mem(pdev); 7059 if (err) 7060 return err; 7061 7062 pci_using_dac = 0; 7063 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7064 if (!err) { 7065 pci_using_dac = 1; 7066 } else { 7067 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7068 if (err) { 7069 dev_err(&pdev->dev, 7070 "No usable DMA configuration, aborting\n"); 7071 goto err_dma; 7072 } 7073 } 7074 7075 bars = pci_select_bars(pdev, IORESOURCE_MEM); 7076 err = pci_request_selected_regions_exclusive(pdev, bars, 7077 e1000e_driver_name); 7078 if (err) 7079 goto err_pci_reg; 7080 7081 /* AER (Advanced Error Reporting) hooks */ 7082 pci_enable_pcie_error_reporting(pdev); 7083 7084 pci_set_master(pdev); 7085 /* PCI config space info */ 7086 err = pci_save_state(pdev); 7087 if (err) 7088 goto err_alloc_etherdev; 7089 7090 err = -ENOMEM; 7091 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 7092 if (!netdev) 7093 goto err_alloc_etherdev; 7094 7095 SET_NETDEV_DEV(netdev, &pdev->dev); 7096 7097 netdev->irq = pdev->irq; 7098 7099 pci_set_drvdata(pdev, netdev); 7100 adapter = netdev_priv(netdev); 7101 hw = &adapter->hw; 7102 adapter->netdev = 
netdev; 7103 adapter->pdev = pdev; 7104 adapter->ei = ei; 7105 adapter->pba = ei->pba; 7106 adapter->flags = ei->flags; 7107 adapter->flags2 = ei->flags2; 7108 adapter->hw.adapter = adapter; 7109 adapter->hw.mac.type = ei->mac; 7110 adapter->max_hw_frame_size = ei->max_hw_frame_size; 7111 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 7112 7113 mmio_start = pci_resource_start(pdev, 0); 7114 mmio_len = pci_resource_len(pdev, 0); 7115 7116 err = -EIO; 7117 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 7118 if (!adapter->hw.hw_addr) 7119 goto err_ioremap; 7120 7121 if ((adapter->flags & FLAG_HAS_FLASH) && 7122 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && 7123 (hw->mac.type < e1000_pch_spt)) { 7124 flash_start = pci_resource_start(pdev, 1); 7125 flash_len = pci_resource_len(pdev, 1); 7126 adapter->hw.flash_address = ioremap(flash_start, flash_len); 7127 if (!adapter->hw.flash_address) 7128 goto err_flashmap; 7129 } 7130 7131 /* Set default EEE advertisement */ 7132 if (adapter->flags2 & FLAG2_HAS_EEE) 7133 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; 7134 7135 /* construct the net_device struct */ 7136 netdev->netdev_ops = &e1000e_netdev_ops; 7137 e1000e_set_ethtool_ops(netdev); 7138 netdev->watchdog_timeo = 5 * HZ; 7139 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); 7140 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 7141 7142 netdev->mem_start = mmio_start; 7143 netdev->mem_end = mmio_start + mmio_len; 7144 7145 adapter->bd_number = cards_found++; 7146 7147 e1000e_check_options(adapter); 7148 7149 /* setup adapter struct */ 7150 err = e1000_sw_init(adapter); 7151 if (err) 7152 goto err_sw_init; 7153 7154 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 7155 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 7156 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 7157 7158 err = ei->get_variants(adapter); 7159 if (err) 7160 goto err_hw_init; 7161 7162 if ((adapter->flags & FLAG_IS_ICH) && 7163 (adapter->flags & FLAG_READ_ONLY_NVM) && 7164 (hw->mac.type < e1000_pch_spt)) 7165 e1000e_write_protect_nvm_ich8lan(&adapter->hw); 7166 7167 hw->mac.ops.get_bus_info(&adapter->hw); 7168 7169 adapter->hw.phy.autoneg_wait_to_complete = 0; 7170 7171 /* Copper options */ 7172 if (adapter->hw.phy.media_type == e1000_media_type_copper) { 7173 adapter->hw.phy.mdix = AUTO_ALL_MODES; 7174 adapter->hw.phy.disable_polarity_correction = 0; 7175 adapter->hw.phy.ms_type = e1000_ms_hw_default; 7176 } 7177 7178 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) 7179 dev_info(&pdev->dev, 7180 "PHY reset is blocked due to SOL/IDER session.\n"); 7181 7182 /* Set initial default active device features */ 7183 netdev->features = (NETIF_F_SG | 7184 NETIF_F_HW_VLAN_CTAG_RX | 7185 NETIF_F_HW_VLAN_CTAG_TX | 7186 NETIF_F_TSO | 7187 NETIF_F_TSO6 | 7188 NETIF_F_RXHASH | 7189 NETIF_F_RXCSUM | 7190 NETIF_F_HW_CSUM); 7191 7192 /* Set user-changeable features (subset of all device features) */ 7193 netdev->hw_features = netdev->features; 7194 netdev->hw_features |= NETIF_F_RXFCS; 7195 netdev->priv_flags |= IFF_SUPP_NOFCS; 7196 netdev->hw_features |= NETIF_F_RXALL; 7197 7198 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) 7199 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 7200 7201 netdev->vlan_features |= (NETIF_F_SG | 7202 NETIF_F_TSO | 7203 NETIF_F_TSO6 | 7204 NETIF_F_HW_CSUM); 7205 7206 netdev->priv_flags |= IFF_UNICAST_FLT; 7207 7208 if (pci_using_dac) { 7209 netdev->features |= NETIF_F_HIGHDMA; 7210 netdev->vlan_features |= 
NETIF_F_HIGHDMA; 7211 } 7212 7213 /* MTU range: 68 - max_hw_frame_size */ 7214 netdev->min_mtu = ETH_MIN_MTU; 7215 netdev->max_mtu = adapter->max_hw_frame_size - 7216 (VLAN_ETH_HLEN + ETH_FCS_LEN); 7217 7218 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 7219 adapter->flags |= FLAG_MNG_PT_ENABLED; 7220 7221 /* before reading the NVM, reset the controller to 7222 * put the device in a known good starting state 7223 */ 7224 adapter->hw.mac.ops.reset_hw(&adapter->hw); 7225 7226 /* systems with ASPM and others may see the checksum fail on the first 7227 * attempt. Let's give it a few tries 7228 */ 7229 for (i = 0;; i++) { 7230 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 7231 break; 7232 if (i == 2) { 7233 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 7234 err = -EIO; 7235 goto err_eeprom; 7236 } 7237 } 7238 7239 e1000_eeprom_checks(adapter); 7240 7241 /* copy the MAC address */ 7242 if (e1000e_read_mac_addr(&adapter->hw)) 7243 dev_err(&pdev->dev, 7244 "NVM Read Error while reading MAC address\n"); 7245 7246 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 7247 7248 if (!is_valid_ether_addr(netdev->dev_addr)) { 7249 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", 7250 netdev->dev_addr); 7251 err = -EIO; 7252 goto err_eeprom; 7253 } 7254 7255 init_timer(&adapter->watchdog_timer); 7256 adapter->watchdog_timer.function = e1000_watchdog; 7257 adapter->watchdog_timer.data = (unsigned long)adapter; 7258 7259 init_timer(&adapter->phy_info_timer); 7260 adapter->phy_info_timer.function = e1000_update_phy_info; 7261 adapter->phy_info_timer.data = (unsigned long)adapter; 7262 7263 INIT_WORK(&adapter->reset_task, e1000_reset_task); 7264 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 7265 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 7266 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 7267 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); 7268 7269 /* Initialize link parameters. 
User can change them with ethtool */ 7270 adapter->hw.mac.autoneg = 1; 7271 adapter->fc_autoneg = true; 7272 adapter->hw.fc.requested_mode = e1000_fc_default; 7273 adapter->hw.fc.current_mode = e1000_fc_default; 7274 adapter->hw.phy.autoneg_advertised = 0x2f; 7275 7276 /* Initial Wake on LAN setting - If APM wake is enabled in 7277 * the EEPROM, enable the ACPI Magic Packet filter 7278 */ 7279 if (adapter->flags & FLAG_APME_IN_WUC) { 7280 /* APME bit in EEPROM is mapped to WUC.APME */ 7281 eeprom_data = er32(WUC); 7282 eeprom_apme_mask = E1000_WUC_APME; 7283 if ((hw->mac.type > e1000_ich10lan) && 7284 (eeprom_data & E1000_WUC_PHY_WAKE)) 7285 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 7286 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 7287 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 7288 (adapter->hw.bus.func == 1)) 7289 ret_val = e1000_read_nvm(&adapter->hw, 7290 NVM_INIT_CONTROL3_PORT_B, 7291 1, &eeprom_data); 7292 else 7293 ret_val = e1000_read_nvm(&adapter->hw, 7294 NVM_INIT_CONTROL3_PORT_A, 7295 1, &eeprom_data); 7296 } 7297 7298 /* fetch WoL from EEPROM */ 7299 if (ret_val) 7300 e_dbg("NVM read error getting WoL initial values: %d\n", ret_val); 7301 else if (eeprom_data & eeprom_apme_mask) 7302 adapter->eeprom_wol |= E1000_WUFC_MAG; 7303 7304 /* now that we have the eeprom settings, apply the special cases 7305 * where the eeprom may be wrong or the board simply won't support 7306 * wake on lan on a particular port 7307 */ 7308 if (!(adapter->flags & FLAG_HAS_WOL)) 7309 adapter->eeprom_wol = 0; 7310 7311 /* initialize the wol settings based on the eeprom settings */ 7312 adapter->wol = adapter->eeprom_wol; 7313 7314 /* make sure adapter isn't asleep if manageability is enabled */ 7315 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || 7316 (hw->mac.ops.check_mng_mode(hw))) 7317 device_wakeup_enable(&pdev->dev); 7318 7319 /* save off EEPROM version number */ 7320 ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 7321 7322 if (ret_val) { 7323 e_dbg("NVM read error getting EEPROM version: %d\n", ret_val); 7324 adapter->eeprom_vers = 0; 7325 } 7326 7327 /* init PTP hardware clock */ 7328 e1000e_ptp_init(adapter); 7329 7330 /* reset the hardware with the new settings */ 7331 e1000e_reset(adapter); 7332 7333 /* If the controller has AMT, do not set DRV_LOAD until the interface 7334 * is up. For all other cases, let the f/w know that the h/w is now 7335 * under the control of the driver. 
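 * (e1000e_get_hw_control() sets the DRV_LOAD indication that tells the
 * manageability firmware the driver owns the hardware; on AMT parts it
 * is deferred until the interface is opened so manageability is not cut
 * off while the netdev is down.)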
7336 */ 7337 if (!(adapter->flags & FLAG_HAS_AMT)) 7338 e1000e_get_hw_control(adapter); 7339 7340 strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); 7341 err = register_netdev(netdev); 7342 if (err) 7343 goto err_register; 7344 7345 /* carrier off reporting is important to ethtool even BEFORE open */ 7346 netif_carrier_off(netdev); 7347 7348 e1000_print_device_info(adapter); 7349 7350 if (pci_dev_run_wake(pdev)) 7351 pm_runtime_put_noidle(&pdev->dev); 7352 7353 return 0; 7354 7355 err_register: 7356 if (!(adapter->flags & FLAG_HAS_AMT)) 7357 e1000e_release_hw_control(adapter); 7358 err_eeprom: 7359 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) 7360 e1000_phy_hw_reset(&adapter->hw); 7361 err_hw_init: 7362 kfree(adapter->tx_ring); 7363 kfree(adapter->rx_ring); 7364 err_sw_init: 7365 if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) 7366 iounmap(adapter->hw.flash_address); 7367 e1000e_reset_interrupt_capability(adapter); 7368 err_flashmap: 7369 iounmap(adapter->hw.hw_addr); 7370 err_ioremap: 7371 free_netdev(netdev); 7372 err_alloc_etherdev: 7373 pci_release_mem_regions(pdev); 7374 err_pci_reg: 7375 err_dma: 7376 pci_disable_device(pdev); 7377 return err; 7378 } 7379 7380 /** 7381 * e1000_remove - Device Removal Routine 7382 * @pdev: PCI device information struct 7383 * 7384 * e1000_remove is called by the PCI subsystem to alert the driver 7385 * that it should release a PCI device. The could be caused by a 7386 * Hot-Plug event, or because the driver is going to be removed from 7387 * memory. 7388 **/ 7389 static void e1000_remove(struct pci_dev *pdev) 7390 { 7391 struct net_device *netdev = pci_get_drvdata(pdev); 7392 struct e1000_adapter *adapter = netdev_priv(netdev); 7393 bool down = test_bit(__E1000_DOWN, &adapter->state); 7394 7395 e1000e_ptp_remove(adapter); 7396 7397 /* The timers may be rescheduled, so explicitly disable them 7398 * from being rescheduled. 7399 */ 7400 if (!down) 7401 set_bit(__E1000_DOWN, &adapter->state); 7402 del_timer_sync(&adapter->watchdog_timer); 7403 del_timer_sync(&adapter->phy_info_timer); 7404 7405 cancel_work_sync(&adapter->reset_task); 7406 cancel_work_sync(&adapter->watchdog_task); 7407 cancel_work_sync(&adapter->downshift_task); 7408 cancel_work_sync(&adapter->update_phy_task); 7409 cancel_work_sync(&adapter->print_hang_task); 7410 7411 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { 7412 cancel_work_sync(&adapter->tx_hwtstamp_work); 7413 if (adapter->tx_hwtstamp_skb) { 7414 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 7415 adapter->tx_hwtstamp_skb = NULL; 7416 } 7417 } 7418 7419 /* Don't lie to e1000_close() down the road. */ 7420 if (!down) 7421 clear_bit(__E1000_DOWN, &adapter->state); 7422 unregister_netdev(netdev); 7423 7424 if (pci_dev_run_wake(pdev)) 7425 pm_runtime_get_noresume(&pdev->dev); 7426 7427 /* Release control of h/w to f/w. If f/w is AMT enabled, this 7428 * would have already happened in close and is redundant. 
7429 */ 7430 e1000e_release_hw_control(adapter); 7431 7432 e1000e_reset_interrupt_capability(adapter); 7433 kfree(adapter->tx_ring); 7434 kfree(adapter->rx_ring); 7435 7436 iounmap(adapter->hw.hw_addr); 7437 if ((adapter->hw.flash_address) && 7438 (adapter->hw.mac.type < e1000_pch_spt)) 7439 iounmap(adapter->hw.flash_address); 7440 pci_release_mem_regions(pdev); 7441 7442 free_netdev(netdev); 7443 7444 /* AER disable */ 7445 pci_disable_pcie_error_reporting(pdev); 7446 7447 pci_disable_device(pdev); 7448 } 7449 7450 /* PCI Error Recovery (ERS) */ 7451 static const struct pci_error_handlers e1000_err_handler = { 7452 .error_detected = e1000_io_error_detected, 7453 .slot_reset = e1000_io_slot_reset, 7454 .resume = e1000_io_resume, 7455 }; 7456 7457 static const struct pci_device_id e1000_pci_tbl[] = { 7458 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 7459 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 7460 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 7461 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), 7462 board_82571 }, 7463 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, 7464 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, 7465 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 7466 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, 7467 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, 7468 7469 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, 7470 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, 7471 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, 7472 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, 7473 7474 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, 7475 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 7476 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 7477 7478 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, 7479 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, 7480 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, 7481 7482 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 7483 board_80003es2lan }, 7484 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), 7485 board_80003es2lan }, 7486 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), 7487 board_80003es2lan }, 7488 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), 7489 board_80003es2lan }, 7490 7491 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, 7492 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, 7493 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, 7494 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, 7495 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, 7496 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, 7497 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, 7498 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, 7499 7500 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, 7501 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, 7502 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 7503 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 7504 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 7505 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, 7506 { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, 7507 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, 7508 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, 7509 7510 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, 7511 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, 7512 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, 7513 7514 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, 7515 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, 7516 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, 7517 7518 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, 7519 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, 7520 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, 7521 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, 7522 7523 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, 7524 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, 7525 7526 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, 7527 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, 7528 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, 7529 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, 7530 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, 7531 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, 7532 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, 7533 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, 7534 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, 7535 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, 7536 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, 7537 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, 7538 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt }, 7539 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt }, 7540 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt }, 7541 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt }, 7542 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt }, 7543 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp }, 7544 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp }, 7545 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp }, 7546 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp }, 7547 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp }, 7548 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp }, 7549 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp }, 7550 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp }, 7551 7552 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 7553 }; 7554 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 7555 7556 static const struct dev_pm_ops e1000_pm_ops = { 7557 #ifdef CONFIG_PM_SLEEP 7558 .suspend = e1000e_pm_suspend, 7559 .resume = e1000e_pm_resume, 7560 .freeze = e1000e_pm_freeze, 7561 .thaw = e1000e_pm_thaw, 7562 .poweroff = e1000e_pm_suspend, 7563 .restore = e1000e_pm_resume, 7564 #endif 7565 SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, 7566 e1000e_pm_runtime_idle) 7567 }; 7568 7569 /* PCI Device API Driver */ 7570 static struct pci_driver e1000_driver = { 7571 .name = e1000e_driver_name, 7572 
.id_table = e1000_pci_tbl, 7573 .probe = e1000_probe, 7574 .remove = e1000_remove, 7575 .driver = { 7576 .pm = &e1000_pm_ops, 7577 }, 7578 .shutdown = e1000_shutdown, 7579 .err_handler = &e1000_err_handler 7580 }; 7581 7582 /** 7583 * e1000_init_module - Driver Registration Routine 7584 * 7585 * e1000_init_module is the first routine called when the driver is 7586 * loaded. All it does is register with the PCI subsystem. 7587 **/ 7588 static int __init e1000_init_module(void) 7589 { 7590 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 7591 e1000e_driver_version); 7592 pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); 7593 7594 return pci_register_driver(&e1000_driver); 7595 } 7596 module_init(e1000_init_module); 7597 7598 /** 7599 * e1000_exit_module - Driver Exit Cleanup Routine 7600 * 7601 * e1000_exit_module is called just before the driver is removed 7602 * from memory. 7603 **/ 7604 static void __exit e1000_exit_module(void) 7605 { 7606 pci_unregister_driver(&e1000_driver); 7607 } 7608 module_exit(e1000_exit_module); 7609 7610 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 7611 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); 7612 MODULE_LICENSE("GPL"); 7613 MODULE_VERSION(DRV_VERSION); 7614 7615 /* netdev.c */ 7616
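/* For reference, a minimal sketch of the registration pattern used above.
 * When an init routine has nothing to do beyond pci_register_driver()
 * (unlike e1000_init_module(), which also prints a banner), the init/exit
 * boilerplate can be generated with the module_pci_driver() helper. The
 * example_* names below are placeholders and not part of this driver:
 *
 *	static const struct pci_device_id example_pci_tbl[] = {
 *		{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, example_pci_tbl);
 *
 *	static struct pci_driver example_driver = {
 *		.name		= "example",
 *		.id_table	= example_pci_tbl,
 *		.probe		= example_probe,
 *		.remove		= example_remove,
 *	};
 *	module_pci_driver(example_driver);
 */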