/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */

static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
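/*
 * Output sketch (hypothetical register values): a single register prints on
 * one line, while the per-queue RXDCTL/TXDCTL/TARC cases above expand both
 * queue instances under one "[0-1]" name, e.g.:
 *
 *   CTRL            00181c06
 *   RXDCTL[0-1]     01010000 01010000
 */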
"STATUS"}, 101 {E1000_CTRL_EXT, "CTRL_EXT"}, 102 103 /* Interrupt Registers */ 104 {E1000_ICR, "ICR"}, 105 106 /* Rx Registers */ 107 {E1000_RCTL, "RCTL"}, 108 {E1000_RDLEN, "RDLEN"}, 109 {E1000_RDH, "RDH"}, 110 {E1000_RDT, "RDT"}, 111 {E1000_RDTR, "RDTR"}, 112 {E1000_RXDCTL(0), "RXDCTL"}, 113 {E1000_ERT, "ERT"}, 114 {E1000_RDBAL, "RDBAL"}, 115 {E1000_RDBAH, "RDBAH"}, 116 {E1000_RDFH, "RDFH"}, 117 {E1000_RDFT, "RDFT"}, 118 {E1000_RDFHS, "RDFHS"}, 119 {E1000_RDFTS, "RDFTS"}, 120 {E1000_RDFPC, "RDFPC"}, 121 122 /* Tx Registers */ 123 {E1000_TCTL, "TCTL"}, 124 {E1000_TDBAL, "TDBAL"}, 125 {E1000_TDBAH, "TDBAH"}, 126 {E1000_TDLEN, "TDLEN"}, 127 {E1000_TDH, "TDH"}, 128 {E1000_TDT, "TDT"}, 129 {E1000_TIDV, "TIDV"}, 130 {E1000_TXDCTL(0), "TXDCTL"}, 131 {E1000_TADV, "TADV"}, 132 {E1000_TARC(0), "TARC"}, 133 {E1000_TDFH, "TDFH"}, 134 {E1000_TDFT, "TDFT"}, 135 {E1000_TDFHS, "TDFHS"}, 136 {E1000_TDFTS, "TDFTS"}, 137 {E1000_TDFPC, "TDFPC"}, 138 139 /* List Terminator */ 140 {0, NULL} 141 }; 142 143 /* 144 * e1000_regdump - register printout routine 145 */ 146 static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) 147 { 148 int n = 0; 149 char rname[16]; 150 u32 regs[8]; 151 152 switch (reginfo->ofs) { 153 case E1000_RXDCTL(0): 154 for (n = 0; n < 2; n++) 155 regs[n] = __er32(hw, E1000_RXDCTL(n)); 156 break; 157 case E1000_TXDCTL(0): 158 for (n = 0; n < 2; n++) 159 regs[n] = __er32(hw, E1000_TXDCTL(n)); 160 break; 161 case E1000_TARC(0): 162 for (n = 0; n < 2; n++) 163 regs[n] = __er32(hw, E1000_TARC(n)); 164 break; 165 default: 166 pr_info("%-15s %08x\n", 167 reginfo->name, __er32(hw, reginfo->ofs)); 168 return; 169 } 170 171 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); 172 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); 173 } 174 175 /* 176 * e1000e_dump - Print registers, Tx-ring and Rx-ring 177 */ 178 static void e1000e_dump(struct e1000_adapter *adapter) 179 { 180 struct net_device *netdev = adapter->netdev; 181 struct e1000_hw *hw = &adapter->hw; 182 struct e1000_reg_info *reginfo; 183 struct e1000_ring *tx_ring = adapter->tx_ring; 184 struct e1000_tx_desc *tx_desc; 185 struct my_u0 { 186 __le64 a; 187 __le64 b; 188 } *u0; 189 struct e1000_buffer *buffer_info; 190 struct e1000_ring *rx_ring = adapter->rx_ring; 191 union e1000_rx_desc_packet_split *rx_desc_ps; 192 union e1000_rx_desc_extended *rx_desc; 193 struct my_u1 { 194 __le64 a; 195 __le64 b; 196 __le64 c; 197 __le64 d; 198 } *u1; 199 u32 staterr; 200 int i = 0; 201 202 if (!netif_msg_hw(adapter)) 203 return; 204 205 /* Print netdevice Info */ 206 if (netdev) { 207 dev_info(&adapter->pdev->dev, "Net device Info\n"); 208 pr_info("Device Name state trans_start last_rx\n"); 209 pr_info("%-15s %016lX %016lX %016lX\n", 210 netdev->name, netdev->state, netdev->trans_start, 211 netdev->last_rx); 212 } 213 214 /* Print Registers */ 215 dev_info(&adapter->pdev->dev, "Register Dump\n"); 216 pr_info(" Register Name Value\n"); 217 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; 218 reginfo->name; reginfo++) { 219 e1000_regdump(hw, reginfo); 220 } 221 222 /* Print Tx Ring Summary */ 223 if (!netdev || !netif_running(netdev)) 224 return; 225 226 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); 227 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); 228 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 229 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", 230 0, tx_ring->next_to_use, tx_ring->next_to_clean, 231 (unsigned long long)buffer_info->dma, 232 buffer_info->length, 233 
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |  PAYLEN          |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng ntw timestamp bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng ntw timestamp bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng ntw timestamp bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_ps_bsize0, true);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |     RSS Hash      |        |               |         |
		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
		 *   | Packet   | IP     |        |               |  Type   |
		 *   | Checksum | Ident  |        |               |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       phys_to_virt
						       (buffer_info->dma),
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
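/*
 * Worked example (hypothetical ring state): with count = 256,
 * next_to_clean = 10 and next_to_use = 250, next_to_clean is not greater
 * than next_to_use, so the wrapped branch applies:
 * 256 + 10 - 250 - 1 = 15 free descriptors.  The "- 1" keeps one slot
 * permanently unused so that next_to_use == next_to_clean can only ever
 * mean an empty ring, never a full one.
 */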
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);
	skb->protocol = eth_type_trans(skb, netdev);

	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      __le16 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)swab16((__force u16)csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
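/*
 * Checksum-complement walkthrough (hypothetical value): if the descriptor
 * csum field reads 0xA1B2 as a raw little-endian u16, swab16() yields
 * 0xB2A1, and the bitwise NOT in csum_unfold(~sum) leaves
 * skb->csum = 0x4D5E (0xB2A1 + 0x4D5E == 0xFFFF), i.e. the
 * un-complemented payload checksum that the stack can fold into its own
 * CHECKSUM_COMPLETE verification.
 */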
/**
 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
 * @hw: pointer to the HW structure
 * @tail: address of tail descriptor register
 * @i: value to write to tail descriptor register
 *
 * When updating the tail register, the ME could be accessing Host CSR
 * registers at the same time.  Normally, this is handled in h/w by an
 * arbiter but on some parts there is a bug that acknowledges Host accesses
 * later than it should which could result in the descriptor register
 * having an incorrect value.  Work around this by checking the FWSM
 * register, which has bit 24 set while ME is accessing Host CSR registers;
 * wait if it is set and try again a number of times.
 **/
static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
					unsigned int i)
{
	unsigned int j = 0;

	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
		udelay(50);

	writel(i, tail);

	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
		return E1000_ERR_SWFW_SYNC;

	return 0;
}
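/*
 * Polling-budget note: the loop above waits at most
 * E1000_ICH_FWSM_PCIM2PCI_COUNT iterations of 50 usec each for the ME to
 * release the Host CSR interface.  Failure is only reported when both the
 * budget was exhausted *and* the read-back disagrees with what was written;
 * a late but correct write is silently accepted.
 */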
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
		u32 tctl = er32(TCTL);
		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: allocation flags
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
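/*
 * Batching note: the allocator above only rings the tail doorbell when the
 * index crosses a multiple of E1000_RX_BUFFER_WRITE (the
 * `i & (E1000_RX_BUFFER_WRITE - 1)` test, which assumes a power-of-two
 * batch size).  With a batch of 16, refilling 64 buffers costs 4 MMIO
 * writes instead of 64; the wmb() keeps the descriptor stores ordered ahead
 * of each doorbell on weakly ordered architectures.
 */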
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: allocation flags
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev,
						  adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
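/*
 * Note the `i << 1` in the tail writes above: a packet-split descriptor is
 * twice the size of an extended descriptor, so the tail offset is scaled by
 * two, presumably to keep the hardware pointer in the same granularity as
 * the non-split rings (an inference from the code; the other allocators in
 * this file write the index unscaled).
 */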
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: allocation flags
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}
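/*
 * Worked example (hypothetical indices): if the loop above filled slots 12,
 * 13 and 14 and left i == 15, next_to_use becomes 15 but the post-decrement
 * writes 14 to the tail register.  Unlike the other allocators in this
 * file, the jumbo path points the hardware at the last descriptor actually
 * populated rather than one past it.
 */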
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rss);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
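		/*
		 * Copybreak example (hypothetical sizes): with copybreak at
		 * this driver's default of 256, a 128-byte ARP reply is
		 * memcpy'd into a fresh right-sized skb and the original
		 * full-size receive buffer is recycled in place, trading one
		 * small copy for a buffer reallocation; a 1500-byte frame
		 * skips the copy entirely.
		 */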
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck &&
	    (adapter->flags2 & FLAG2_DMA_BURST)) {
		/*
		 * The hang may only be a blocked write-back; flush pending
		 * descriptor write-backs to memory and detect again.
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head),
	      readl(tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}
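/*
 * Detection note: for parts with FLAG2_DMA_BURST, e1000_print_hw_hang()
 * runs in two passes.  The first pass only forces pending write-backs out
 * (TIDV | E1000_TIDV_FPD) and arms tx_hang_recheck; a hang is reported and
 * the queue stopped only if the work item fires a second time with the
 * ring still stuck.
 */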
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();		/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
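/*
 * Ordering note: the smp_mb() in the wake path above pairs with the
 * queue-stop check in the transmit path (outside this excerpt), so a sender
 * that observes a stopped queue is guaranteed to also observe the updated
 * next_to_clean and re-evaluate ring space; without the barrier a wakeup
 * could be lost between the two CPUs.
 */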
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; it is only valid to call
			 * kmap_* in softirq (napi) context
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}
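		/*
		 * Sync-copy-sync pattern: the block above is the standard
		 * way to peek at DMA memory without tearing down the
		 * mapping: dma_sync_single_for_cpu() hands the page to the
		 * CPU, the payload is copied out under a short-lived
		 * kmap_atomic() mapping, and dma_sync_single_for_device()
		 * returns ownership so the same page can be recycled into
		 * the ring.
		 */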
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc; the problem is we're using pages
		 * now so this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
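/*
 * Chain walkthrough (hypothetical frame, assuming 4 KiB pages): a 9018-byte
 * jumbo frame spans three descriptors in the loop above.  The first (!EOP,
 * no chain yet) becomes the chain head rxtop with its page as frag 0; the
 * second (!EOP, chain exists) appends its page as the next frag and donates
 * its skb back to the ring; the third (EOP) appends the final page, after
 * which skb = rxtop is handed up the stack as one multi-frag packet.
 */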
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, rx_ring->head);
	writel(0, rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
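/*
 * MSI note: the handler above trusts the ICR read unconditionally and
 * always returns IRQ_HANDLED, whereas the legacy handler below must check
 * E1000_ICR_INT_ASSERTED and return IRQ_NONE; an MSI vector is never shared
 * with another device, so any invocation is ours by construction.
 */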
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *rx_ring = adapter->rx_ring;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (rx_ring->set_itr) {
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
		rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
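/*
 * ITR math (worked example): itr_val is in interrupts/sec while the EITR
 * register takes an interval, apparently in 256 ns units (an inference from
 * the formula used throughout this file).  1000000000 / (itr_val * 256)
 * converts one to the other; e.g. a hypothetical itr_val of 8000 ints/sec
 * yields 488, i.e. roughly a 125 usec minimum gap between interrupts.
 */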
/**
 * e1000_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
	else
		writel(1, rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       tx_ring->itr_register);
	else
		writel(1, tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574	0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
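/*
 * IVAR layout (worked from the code above): with vectors 0, 1 and 2, the
 * register accumulates (0x8 | 0) for RxQ0 in bits 7:0, (0x8 | 1) << 8 for
 * TxQ0 in bits 15:8, (0x8 | 2) << 16 for Other Causes in bits 23:16, plus
 * bit 31 to force a Tx interrupt on every write-back - an IVAR value of
 * 0x800A0908.
 */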
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      adapter->num_vectors);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}

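/*
 * A worked example of the fallback ladder above, assuming a board with
 * FLAG_HAS_MSIX set: if pci_enable_msix() fails, the switch falls through
 * to MSI; if pci_enable_msi() also fails, int_mode ends up at
 * E1000E_INT_MODE_LEGACY.  Only a successful MSI-X setup returns early
 * with num_vectors == 3; every other path reaches the trailing assignment,
 * recording the single vector that MSI and legacy modes use.
 */
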
/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->rx_ring->name,
			 sizeof(adapter->rx_ring->name) - 1,
			 "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		return err;
	adapter->rx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->tx_ring->name,
			 sizeof(adapter->tx_ring->name) - 1,
			 "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		return err;
	adapter->tx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		return err;

	e1000_configure_msix(adapter);

	return 0;
}

/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}

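/*
 * A note on the masking scheme above: IMC is "interrupt mask clear", so
 * ew32(IMC, ~0) masks every interrupt cause in one write, while IMS is
 * "interrupt mask set" and only unmasks the bits given.  In MSI-X mode the
 * enable path first programs EIAC so the queue causes in adapter->eiac_mask
 * (E1000_IMS_RXQ0 | E1000_IMS_TXQ0, per e1000_configure_msix()) auto-clear
 * on ICR read, then unmasks those causes plus OTHER and LSC for the
 * link/other vector.
 */
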
/**
 * e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000e_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

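/*
 * A worked example of the sizing above: with the default of 256 descriptors
 * (E1000_DEFAULT_TXD) at sizeof(struct e1000_tx_desc) == 16 bytes,
 * tx_ring->size is 256 * 16 = 4096 bytes, which ALIGN() leaves untouched;
 * the 4 KiB round-up only matters for descriptor counts that are not a
 * multiple of 256.
 */
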
/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @tx_ring: Tx descriptor ring
 **/
static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(tx_ring, buffer_info);
	}

	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, tx_ring->head);
	writel(0, tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @rx_ring: Rx descriptor ring
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	e1000_clean_rx_ring(rx_ring);

	for (i = 0; i < rx_ring->count; i++)
		kfree(rx_ring->buffer_info[i].ps_pages);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.  This functionality is controlled
 * by the InterruptThrottleRate module parameter.
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets,
				     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		return itr_setting;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency: /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

	return retval;
}

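/*
 * A worked example of one e1000_update_itr() step: a ring in low_latency
 * that saw 20 packets / 30000 bytes in the last interval has bytes > 10000
 * and bytes/packets == 1500 > 1200, so it moves to bulk_latency
 * (4000 ints/s); 2 packets / 128 bytes would instead move it to
 * lowest_latency.  The function only steps between the three classes;
 * e1000_set_itr() below maps the winning class to an interrupt rate.
 */
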
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	if (adapter->flags2 & FLAG2_DISABLE_AIM) {
		new_itr = 0;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
		    min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = new_itr;
		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else
			if (new_itr)
				ew32(ITR, 1000000000 / (new_itr * 256));
			else
				ew32(ITR, 0);
	}
}

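/*
 * The arithmetic of the ramp above: moving from adapter->itr == 4000 to a
 * target of 20000 ints/s does not jump directly; successive passes set
 * min(4000 + (20000 >> 2), 20000) = 9000, then 14000, then 19000, then
 * 20000.  Decreases, by contrast, take effect in a single step.
 */
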
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	int size = sizeof(struct e1000_ring);

	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;
	adapter->tx_ring->count = adapter->tx_ring_count;
	adapter->tx_ring->adapter = adapter;

	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;
	adapter->rx_ring->count = adapter->rx_ring_count;
	adapter->rx_ring->adapter = adapter;

	return 0;
err:
	e_err("Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}

/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
						     napi);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 1, work_done = 0;

	adapter = netdev_priv(poll_dev);

	if (adapter->msix_entries &&
	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
		goto clean_rx;

	tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);

clean_rx:
	adapter->clean_rx(adapter->rx_ring, &work_done, budget);

	if (!tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->state)) {
			if (adapter->msix_entries)
				ew32(IMS, adapter->rx_ring->ims_val);
			else
				e1000_irq_enable(adapter);
		}
	}

	return work_done;
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	/* add VID to filter table */
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta |= (1 << (vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}

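/*
 * Illustrative sketch, not used by the driver: the VFTA is a 128-word
 * array forming a 4096-bit map, one bit per VLAN ID.  Bits 11:5 of the
 * VID select the 32-bit word and bits 4:0 the bit within it, so e.g.
 * vid 100 lands in word 3, bit 4.
 */
#if 0	/* hypothetical helper, for illustration only */
static void e1000e_vfta_slot(u16 vid, u32 *index, u32 *bit)
{
	*index = (vid >> 5) & 0x7F;	/* which of the 128 VFTA words */
	*bit = vid & 0x1F;		/* which bit inside that word */
}
#endif
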
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000e_release_hw_control(adapter);
		return 0;
	}

	/* remove VID from filter table */
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta &= ~(1 << (vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

/**
 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}
}

/**
 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		ew32(RCTL, rctl);
	}
}

/**
 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl &= ~E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}

/**
 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		e1000_vlan_rx_add_vid(netdev, vid);
		adapter->mng_vlan_id = vid;
	}

	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
		e1000_vlan_rx_kill_vid(netdev, old_vid);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_add_vid(adapter->netdev, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}

static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h, mdef, i, j;

	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
		return;

	manc = er32(MANC);

	/*
	 * enable receiving management packets to the host. this will probably
	 * generate destination unreachable messages from the host OS, but
	 * the packets will be handled on SMBUS
	 */
	manc |= E1000_MANC_EN_MNG2HOST;
	manc2h = er32(MANC2H);

	switch (hw->mac.type) {
	default:
		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
		break;
	case e1000_82574:
	case e1000_82583:
		/*
		 * Check if IPMI pass-through decision filter already exists;
		 * if so, enable it.
		 */
		for (i = 0, j = 0; i < 8; i++) {
			mdef = er32(MDEF(i));

			/* Ignore filters with anything other than IPMI ports */
			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
				continue;

			/* Enable this decision filter in MANC2H */
			if (mdef)
				manc2h |= (1 << i);

			j |= mdef;
		}

		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
			break;

		/* Create new decision filter in an empty filter */
		for (i = 0, j = 0; i < 8; i++)
			if (er32(MDEF(i)) == 0) {
				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
					       E1000_MDEF_PORT_664));
				manc2h |= (1 << i);
				j++;
				break;
			}

		if (!j)
			e_warn("Unable to create IPMI pass-through filter\n");
		break;
	}

	ew32(MANC2H, manc2h);
	ew32(MANC, manc);
}

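/*
 * A walk-through of the filter scan above, assuming MDEF(0) already holds
 * E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664 and the other seven MDEF
 * registers are clear: the first loop sets manc2h bit 0 and accumulates
 * j == PORT_623 | PORT_664, so both IPMI ports are covered and the switch
 * breaks out without creating a new filter.  Only when the scan leaves one
 * of the two ports uncovered does the second loop claim an empty MDEF slot.
 */
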
/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 tdlen, tarc;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = tx_ring->dma;
	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH, (tdba >> 32));
	ew32(TDLEN, tdlen);
	ew32(TDH, 0);
	ew32(TDT, 0);
	tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	/* Tx irq moderation */
	ew32(TADV, adapter->tx_abs_int_delay);

	if (adapter->flags2 & FLAG2_DMA_BURST) {
		u32 txdctl = er32(TXDCTL(0));
		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
			    E1000_TXDCTL_WTHRESH);
		/*
		 * set up some performance related parameters to encourage the
		 * hardware to use the bus more efficiently in bursts, depends
		 * on the tx_int_delay to be enabled,
		 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
		 * hthresh = 1 ==> prefetch when one or more available
		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
		 * BEWARE: this seems to work but should be considered first if
		 * there are Tx hangs or other Tx related bugs
		 */
		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
		ew32(TXDCTL(0), txdctl);
	}
	/* erratum work around: set txdctl the same for both queues */
	ew32(TXDCTL(1), er32(TXDCTL(0)));

	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
		tarc = er32(TARC(0));
		/*
		 * set the speed mode bit, we'll clear it if we're not at
		 * gigabit link later
		 */
#define SPEED_MODE_BIT (1 << 21)
		tarc |= SPEED_MODE_BIT;
		ew32(TARC(0), tarc);
	}

	/* errata: program both queues to unweighted RR */
	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
		tarc = er32(TARC(0));
		tarc |= 1;
		ew32(TARC(0), tarc);
		tarc = er32(TARC(1));
		tarc |= 1;
		ew32(TARC(1), tarc);
	}

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_TXD_CMD_RS;

	hw->mac.ops.config_collision_dist(hw);
}

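/*
 * A sketch of the threshold packing described in the comment above,
 * assuming the classic TXDCTL layout (PTHRESH in bits 5:0, HTHRESH in
 * bits 13:8, WTHRESH in bits 21:16): pthresh = 0x1f, hthresh = 1 and
 * wthresh = 5 would encode as
 *
 *	0x1f | (1 << 8) | (5 << 16) = 0x0005011f
 *
 * The actual encoding used by the driver is whatever
 * E1000_TXDCTL_DMA_BURST_ENABLE is defined to in hw.h.
 */
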
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rfctl;
	u32 pages = 0;

	/* Workaround Si errata on 82579 - configure jumbo frame flow */
	if (hw->mac.type == e1000_pch2lan) {
		s32 ret_val;

		if (adapter->netdev->mtu > ETH_DATA_LEN)
			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
		else
			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);

		if (ret_val)
			e_dbg("failed to enable jumbo frame workaround mode\n");
	}

	/* Program MC offset vector base */
	rctl = er32(RCTL);
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
	    E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
	    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Do not Store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Long Packet receive */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Some systems expect that the CRC is included in SMBUS traffic. The
	 * hardware strips the CRC before sending to both SMBUS (BMC) and to
	 * host memory when this is enabled
	 */
	if (adapter->flags2 & FLAG2_CRC_STRIPPING)
		rctl |= E1000_RCTL_SECRC;

	/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
	if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
		u16 phy_data;

		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
		phy_data &= 0xfff8;
		phy_data |= (1 << 2);
		e1e_wphy(hw, PHY_REG(770, 26), phy_data);

		e1e_rphy(hw, 22, &phy_data);
		phy_data &= 0x0fff;
		phy_data |= (1 << 14);
		e1e_wphy(hw, 0x10, 0x2823);
		e1e_wphy(hw, 0x11, 0x0003);
		e1e_wphy(hw, 22, phy_data);
	}

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case 2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* Enable Extended Status in all Receive Descriptors */
	rfctl = er32(RFCTL);
	rfctl |= E1000_RFCTL_EXTEN;

	/*
	 * 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 *
	 * allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames
	 *
	 * Using pages when the page size is greater than 16k wastes
	 * a lot of memory, since we allocate 3 pages at all times
	 * per packet.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;

	if (adapter->rx_ps_pages) {
		u32 psrctl = 0;

		/*
		 * disable packet split support for IPv6 extension headers,
		 * because some malformed IPv6 headers can hang the Rx
		 */
		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
			  E1000_RFCTL_NEW_IPV6_EXT_DIS);

		/* Enable Packet split descriptors */
		rctl |= E1000_RCTL_DTYP_PS;

		psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
			/* fall through */
		case 2:
			psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
			/* fall through */
		case 1:
			psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		ew32(PSRCTL, psrctl);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RFCTL, rfctl);
	ew32(RCTL, rctl);
	/* just started the receive unit, no need to restart */
	adapter->flags &= ~FLAG_RX_RESTART_NOW;
}

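/*
 * The packet-split arithmetic above, worked for a 4 KiB page system
 * (PAGE_SHIFT == 12): a standard 1500-byte MTU gives PAGE_USE_COUNT(1500)
 * == 1, but LPE is clear so rx_ps_pages stays 0; a 9000-byte jumbo MTU
 * gives (9000 >> 12) + 1 == 3 pages, LPE is set, and packet split is
 * enabled with a 128-byte header buffer (rx_ps_bsize0) plus up to three
 * page-sized data buffers per descriptor.
 */
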
/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rctl, rxcsum, ctrl_ext;

	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = rx_ring->count *
		    sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
	e1e_flush();
	usleep_range(10000, 20000);

	if (adapter->flags2 & FLAG2_DMA_BURST) {
		/*
		 * set the writeback threshold (only takes effect if the RDTR
		 * is set). set GRAN=1 and write back up to 0x4 worth, and
		 * enable prefetching of 0x20 Rx descriptors
		 * granularity = 01
		 * wthresh = 04,
		 * hthresh = 04,
		 * pthresh = 0x20
		 */
		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);

		/*
		 * override the delay timers for enabling bursting, only if
		 * the value was not set by the user via module options
		 */
		if (adapter->rx_int_delay == DEFAULT_RDTR)
			adapter->rx_int_delay = BURST_RDTR;
		if (adapter->rx_abs_int_delay == DEFAULT_RADV)
			adapter->rx_abs_int_delay = BURST_RADV;
	}

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* irq moderation */
	ew32(RADV, adapter->rx_abs_int_delay);
	if ((adapter->itr_setting != 0) && (adapter->itr != 0))
		ew32(ITR, 1000000000 / (adapter->itr * 256));

	ctrl_ext = er32(CTRL_EXT);
	/* Auto-Mask interrupts upon ICR access */
	ctrl_ext |= E1000_CTRL_EXT_IAME;
	ew32(IAM, 0xffffffff);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH, (rdba >> 32));
	ew32(RDLEN, rdlen);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;

	/* Enable Receive Checksum Offload for TCP and UDP */
	rxcsum = er32(RXCSUM);
	if (adapter->netdev->features & NETIF_F_RXCSUM) {
		rxcsum |= E1000_RXCSUM_TUOFL;

		/*
		 * IPv4 payload checksum for UDP fragments must be
		 * used in conjunction with packet-split.
		 */
		if (adapter->rx_ps_pages)
			rxcsum |= E1000_RXCSUM_IPPCSE;
	} else {
		rxcsum &= ~E1000_RXCSUM_TUOFL;
		/* no need to clear IPPCSE as it defaults to 0 */
	}
	ew32(RXCSUM, rxcsum);

	if (adapter->hw.mac.type == e1000_pch2lan) {
		/*
		 * With jumbo frames, excessive C-state transition
		 * latencies result in dropped transactions.
		 */
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			u32 rxdctl = er32(RXDCTL(0));
			ew32(RXDCTL(0), rxdctl | 0x3);
			pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
		} else {
			pm_qos_update_request(&adapter->netdev->pm_qos_req,
					      PM_QOS_DEFAULT_VALUE);
		}
	}

	/* Enable Receives */
	ew32(RCTL, rctl);
}

/**
 * e1000e_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 */
static int e1000e_write_mc_addr_list(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* update_mc_addr_list expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int rar_entries = hw->mac.rar_entry_count;
	int count = 0;

	/* save a rar entry for our hardware address */
	rar_entries--;

	/* save a rar entry for the LAA workaround */
	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
		rar_entries--;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		/*
		 * write the addresses in reverse order to avoid write
		 * combining
		 */
		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			e1000e_rar_set(hw, ha->addr, rar_entries--);
			count++;
		}
	}

	/* zero out the remaining RAR entries not used above */
	for (; rar_entries > 0; rar_entries--) {
		ew32(RAH(rar_entries), 0);
		ew32(RAL(rar_entries), 0);
	}
	e1e_flush();

	return count;
}

/**
 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000e_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	/* Check for Promiscuous and All Multicast modes */
	rctl = er32(RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Do not hardware filter VLANs in promisc mode */
		e1000e_vlan_filter_disable(adapter);
	} else {
		int count;

		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = e1000e_write_mc_addr_list(netdev);
			if (count < 0)
				rctl |= E1000_RCTL_MPE;
		}
		e1000e_vlan_filter_enable(adapter);
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = e1000e_write_uc_addr_list(netdev);
		if (count < 0)
			rctl |= E1000_RCTL_UPE;
	}

	ew32(RCTL, rctl);

	if (netdev->features & NETIF_F_HW_VLAN_RX)
		e1000e_vlan_strip_enable(adapter);
	else
		e1000e_vlan_strip_disable(adapter);
}

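/*
 * The decision ladder above, as a quick reference:
 *
 *	IFF_PROMISC          -> UPE | MPE, VLAN filtering off
 *	IFF_ALLMULTI         -> MPE, MTA left unprogrammed
 *	MTA write fails      -> MPE as a fallback
 *	RAR table overflows  -> UPE as a fallback
 *
 * so the device always errs toward receiving more rather than dropping
 * traffic the stack asked for.
 */
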
static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	int i;
	static const u32 rsskey[10] = {
		0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
		0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
	};

	/* Fill out hash function seed */
	for (i = 0; i < 10; i++)
		ew32(RSSRK(i), rsskey[i]);

	/* Direct all traffic to queue 0 */
	for (i = 0; i < 32; i++)
		ew32(RETA(i), 0);

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.
	 */
	rxcsum = er32(RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	ew32(RXCSUM, rxcsum);

	mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

	ew32(MRQC, mrqc);
}

/**
 * e1000_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;

	e1000e_set_rx_mode(adapter->netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability_pt(adapter);

	e1000_configure_tx(adapter);

	if (adapter->netdev->features & NETIF_F_RXHASH)
		e1000e_setup_rss_hash(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
}

/**
 * e1000e_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000e_reset ***
 **/
void e1000e_power_up_phy(struct e1000_adapter *adapter)
{
	if (adapter->hw.phy.ops.power_up)
		adapter->hw.phy.ops.power_up(&adapter->hw);

	adapter->hw.mac.ops.setup_link(&adapter->hw);
}

/**
 * e1000_power_down_phy - Power down the PHY
 *
 * Power down the PHY so no link is implied when interface is down.
 * The PHY cannot be powered down if management or WoL is active.
 */
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	/* WoL is enabled */
	if (adapter->wol)
		return;

	if (adapter->hw.phy.ops.power_down)
		adapter->hw.phy.ops.power_down(&adapter->hw);
}

/**
 * e1000e_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
void e1000e_reset(struct e1000_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_fc_info *fc = &adapter->hw.fc;
	struct e1000_hw *hw = &adapter->hw;
	u32 tx_space, min_tx_space, min_rx_space;
	u32 pba = adapter->pba;
	u16 hwm;

	/* reset Packet Buffer Allocation to default */
	ew32(PBA, pba);

	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/*
		 * To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (adapter->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/*
		 * If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if ((tx_space < min_tx_space) &&
		    ((min_tx_space - tx_space) < pba)) {
			pba -= min_tx_space - tx_space;

			/*
			 * if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}

		ew32(PBA, pba);
	}

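	/*
	 * A worked example of the FIFO math above for a 9000-byte MTU
	 * (max_frame_size = 9000 + ETH_HLEN + ETH_FCS_LEN = 9018):
	 * min_tx_space = (9018 + 16 - 4) * 2 = 18060 -> ALIGN(, 1024)
	 * = 18432 -> 18 KB; min_rx_space = ALIGN(9018, 1024) = 9216
	 * -> 9 KB.  If the current split were 16 KB Tx / 32 KB Rx, 2 KB
	 * would move from Rx to Tx, leaving Rx well above its 9 KB floor.
	 */
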
	/*
	 * flow control settings
	 *
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus one full frame
	 */
	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
		fc->pause_time = 0xFFFF;
	else
		fc->pause_time = E1000_FC_PAUSE_TIME;
	fc->send_xon = true;
	fc->current_mode = fc->requested_mode;

	switch (hw->mac.type) {
	case e1000_ich9lan:
	case e1000_ich10lan:
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			pba = 14;
			ew32(PBA, pba);
			fc->high_water = 0x2800;
			fc->low_water = fc->high_water - 8;
			break;
		}
		/* fall-through */
	default:
		hwm = min(((pba << 10) * 9 / 10),
			  ((pba << 10) - adapter->max_frame_size));

		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
		break;
	case e1000_pchlan:
		/*
		 * Workaround PCH LOM adapter hangs with certain network
		 * loads.  If hangs persist, try disabling Tx flow control.
		 */
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			fc->high_water = 0x3500;
			fc->low_water = 0x1500;
		} else {
			fc->high_water = 0x5000;
			fc->low_water = 0x3000;
		}
		fc->refresh_time = 0x1000;
		break;
	case e1000_pch2lan:
		fc->high_water = 0x05C20;
		fc->low_water = 0x05048;
		fc->pause_time = 0x0650;
		fc->refresh_time = 0x0400;
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			pba = 14;
			ew32(PBA, pba);
		}
		break;
	}

	/*
	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
	 * fit in receive buffer.
	 */
	if (adapter->itr_setting & 0x3) {
		if ((adapter->max_frame_size * 2) > (pba << 10)) {
			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
				dev_info(&adapter->pdev->dev,
					 "Interrupt Throttle Rate turned off\n");
				adapter->flags2 |= FLAG2_DISABLE_AIM;
				ew32(ITR, 0);
			}
		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
			dev_info(&adapter->pdev->dev,
				 "Interrupt Throttle Rate turned on\n");
			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
			adapter->itr = 20000;
			ew32(ITR, 1000000000 / (adapter->itr * 256));
		}
	}

	/* Allow time for pending master requests to run */
	mac->ops.reset_hw(hw);

	/*
	 * For parts with AMT enabled, let the firmware know
	 * that the network interface is in control
	 */
	if (adapter->flags & FLAG_HAS_AMT)
		e1000e_get_hw_control(adapter);

	ew32(WUC, 0);

	if (mac->ops.init_hw(hw))
		e_err("Hardware Error\n");

	e1000_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETH_P_8021Q);

	e1000e_reset_adaptive(hw);

	if (!netif_running(adapter->netdev) &&
	    !test_bit(__E1000_TESTING, &adapter->state)) {
		e1000_power_down_phy(adapter);
		return;
	}

	e1000_get_phy_info(hw);

	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
		u16 phy_data = 0;
		/*
		 * speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed
		 */
		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
	}
}

int e1000e_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->state);

	if (adapter->msix_entries)
		e1000_configure_msix(adapter);
	e1000_irq_enable(adapter);

	netif_start_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	if (adapter->msix_entries)
		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
	else
		ew32(ICS, E1000_ICS_LSC);

	return 0;
}

static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & FLAG2_DMA_BURST))
		return;

	/* flush pending descriptor writebacks to memory */
	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);

	/* execute the writes immediately */
	e1e_flush();
}

static void e1000e_update_stats(struct e1000_adapter *adapter);

void e1000e_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__E1000_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	usleep_range(10000, 20000);

	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	e1000e_flush_descriptors(adapter);
	e1000_clean_tx_ring(adapter->tx_ring);
	e1000_clean_rx_ring(adapter->rx_ring);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		e1000e_reset(adapter);

	/*
	 * TODO: for power management, we could drop the link and
	 * pci_disable_device here.
	 */
}

void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	e1000e_down(adapter);
	e1000e_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->state);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_bsize0 = 128;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
	adapter->tx_ring_count = E1000_DEFAULT_TXD;
	adapter->rx_ring_count = E1000_DEFAULT_RXD;

	spin_lock_init(&adapter->stats64_lock);

	e1000e_set_interrupt_capability(adapter);

	if (e1000_alloc_queues(adapter))
		return -ENOMEM;

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	set_bit(__E1000_DOWN, &adapter->state);
	return 0;
}

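/*
 * The frame-size defaults above, worked for a standard 1500-byte MTU:
 * rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN = 1514 + 4 + 4
 * = 1522 (one VLAN-tagged frame including CRC); max_frame_size = 1500 +
 * 14 + 4 = 1518; min_frame_size = ETH_ZLEN + ETH_FCS_LEN = 60 + 4 = 64,
 * the minimum legal Ethernet frame on the wire.
 */
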
/**
 * e1000_intr_msi_test - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi_test(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	e_dbg("icr is %08X\n", icr);
	if (icr & E1000_ICR_RXSEQ) {
		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
		wmb();
	}

	return IRQ_HANDLED;
}

/**
 * e1000_test_msi_interrupt - Returns 0 for successful test
 * @adapter: board private struct
 *
 * code flow taken from tg3.c
 **/
static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* poll_enable hasn't been called yet, so don't need disable */
	/* clear any pending events */
	er32(ICR);

	/* free the real vector and request a test handler */
	e1000_free_irq(adapter);
	e1000e_reset_interrupt_capability(adapter);

	/*
	 * Assume that the test fails; if it succeeds then the test
	 * MSI irq handler will unset this flag
	 */
	adapter->flags |= FLAG_MSI_TEST_FAILED;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		goto msi_test_failed;

	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
			  netdev->name, netdev);
	if (err) {
		pci_disable_msi(adapter->pdev);
		goto msi_test_failed;
	}

	wmb();

	e1000_irq_enable(adapter);

	/* fire an unusual interrupt on the test handler */
	ew32(ICS, E1000_ICS_RXSEQ);
	e1e_flush();
	msleep(50);

	e1000_irq_disable(adapter);

	rmb();

	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
		e_info("MSI interrupt test failed, using legacy interrupt.\n");
	} else {
		e_dbg("MSI interrupt test succeeded!\n");
	}

	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);

msi_test_failed:
	e1000e_set_interrupt_capability(adapter);
	return e1000_request_irq(adapter);
}

/**
 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
 * @adapter: board private struct
 *
 * code flow taken from tg3.c, called with e1000 interrupts disabled.
 **/
static int e1000_test_msi(struct e1000_adapter *adapter)
{
	int err;
	u16 pci_cmd;

	if (!(adapter->flags & FLAG_MSI_ENABLED))
		return 0;

	/* disable SERR in case the MSI write causes a master abort */
	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_SERR)
		pci_write_config_word(adapter->pdev, PCI_COMMAND,
				      pci_cmd & ~PCI_COMMAND_SERR);

	err = e1000_test_msi_interrupt(adapter);

	/* re-enable SERR */
	if (pci_cmd & PCI_COMMAND_SERR) {
		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd |= PCI_COMMAND_SERR;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
	}

	return err;
}

3897 */ 3898 e1000_configure(adapter); 3899 3900 err = e1000_request_irq(adapter); 3901 if (err) 3902 goto err_req_irq; 3903 3904 /* 3905 * Work around PCIe errata with MSI interrupts causing some chipsets to 3906 * ignore e1000e MSI messages, which means we need to test our MSI 3907 * interrupt now 3908 */ 3909 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { 3910 err = e1000_test_msi(adapter); 3911 if (err) { 3912 e_err("Interrupt allocation failed\n"); 3913 goto err_req_irq; 3914 } 3915 } 3916 3917 /* From here on the code is the same as e1000e_up() */ 3918 clear_bit(__E1000_DOWN, &adapter->state); 3919 3920 napi_enable(&adapter->napi); 3921 3922 e1000_irq_enable(adapter); 3923 3924 adapter->tx_hang_recheck = false; 3925 netif_start_queue(netdev); 3926 3927 adapter->idle_check = true; 3928 pm_runtime_put(&pdev->dev); 3929 3930 /* fire a link status change interrupt to start the watchdog */ 3931 if (adapter->msix_entries) 3932 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); 3933 else 3934 ew32(ICS, E1000_ICS_LSC); 3935 3936 return 0; 3937 3938 err_req_irq: 3939 e1000e_release_hw_control(adapter); 3940 e1000_power_down_phy(adapter); 3941 e1000e_free_rx_resources(adapter->rx_ring); 3942 err_setup_rx: 3943 e1000e_free_tx_resources(adapter->tx_ring); 3944 err_setup_tx: 3945 e1000e_reset(adapter); 3946 pm_runtime_put_sync(&pdev->dev); 3947 3948 return err; 3949 } 3950 3951 /** 3952 * e1000_close - Disables a network interface 3953 * @netdev: network interface device structure 3954 * 3955 * Returns 0, this is not allowed to fail 3956 * 3957 * The close entry point is called when an interface is de-activated 3958 * by the OS. The hardware is still under the drivers control, but 3959 * needs to be disabled. A global MAC reset is issued to stop the 3960 * hardware, and all transmit and receive resources are freed. 
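 *
 * The __E1000_DOWN test below avoids tearing the hardware down a second
 * time when a reset or error path has already brought the adapter down.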
3961 **/ 3962 static int e1000_close(struct net_device *netdev) 3963 { 3964 struct e1000_adapter *adapter = netdev_priv(netdev); 3965 struct pci_dev *pdev = adapter->pdev; 3966 3967 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3968 3969 pm_runtime_get_sync(&pdev->dev); 3970 3971 napi_disable(&adapter->napi); 3972 3973 if (!test_bit(__E1000_DOWN, &adapter->state)) { 3974 e1000e_down(adapter); 3975 e1000_free_irq(adapter); 3976 } 3977 e1000_power_down_phy(adapter); 3978 3979 e1000e_free_tx_resources(adapter->tx_ring); 3980 e1000e_free_rx_resources(adapter->rx_ring); 3981 3982 /* 3983 * kill manageability vlan ID if supported, but not if a vlan with 3984 * the same ID is registered on the host OS (let 8021q kill it) 3985 */ 3986 if (adapter->hw.mng_cookie.status & 3987 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 3988 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 3989 3990 /* 3991 * If AMT is enabled, let the firmware know that the network 3992 * interface is now closed 3993 */ 3994 if ((adapter->flags & FLAG_HAS_AMT) && 3995 !test_bit(__E1000_TESTING, &adapter->state)) 3996 e1000e_release_hw_control(adapter); 3997 3998 if (adapter->hw.mac.type == e1000_pch2lan) 3999 pm_qos_remove_request(&adapter->netdev->pm_qos_req); 4000 4001 pm_runtime_put_sync(&pdev->dev); 4002 4003 return 0; 4004 } 4005 /** 4006 * e1000_set_mac - Change the Ethernet Address of the NIC 4007 * @netdev: network interface device structure 4008 * @p: pointer to an address structure 4009 * 4010 * Returns 0 on success, negative on failure 4011 **/ 4012 static int e1000_set_mac(struct net_device *netdev, void *p) 4013 { 4014 struct e1000_adapter *adapter = netdev_priv(netdev); 4015 struct sockaddr *addr = p; 4016 4017 if (!is_valid_ether_addr(addr->sa_data)) 4018 return -EADDRNOTAVAIL; 4019 4020 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4021 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4022 4023 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4024 4025 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4026 /* activate the work around */ 4027 e1000e_set_laa_state_82571(&adapter->hw, 1); 4028 4029 /* 4030 * Hold a copy of the LAA in RAR[14] This is done so that 4031 * between the time RAR[0] gets clobbered and the time it 4032 * gets fixed (in e1000_watchdog), the actual LAA is in one 4033 * of the RARs and no incoming packets directed to this port 4034 * are dropped. Eventually the LAA will be in RAR[0] and 4035 * RAR[14] 4036 */ 4037 e1000e_rar_set(&adapter->hw, 4038 adapter->hw.mac.addr, 4039 adapter->hw.mac.rar_entry_count - 1); 4040 } 4041 4042 return 0; 4043 } 4044 4045 /** 4046 * e1000e_update_phy_task - work thread to update phy 4047 * @work: pointer to our work struct 4048 * 4049 * this worker thread exists because we must acquire a 4050 * semaphore to read the phy, which we could msleep while 4051 * waiting for it, and we can't msleep in a timer. 
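 * (Timer callbacks run in atomic softirq context, where sleeping is
 * forbidden; a workqueue runs in process context, where blocking on the
 * PHY semaphore is safe.)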
4052 **/ 4053 static void e1000e_update_phy_task(struct work_struct *work) 4054 { 4055 struct e1000_adapter *adapter = container_of(work, 4056 struct e1000_adapter, update_phy_task); 4057 4058 if (test_bit(__E1000_DOWN, &adapter->state)) 4059 return; 4060 4061 e1000_get_phy_info(&adapter->hw); 4062 } 4063 4064 /* 4065 * Need to wait a few seconds after link up to get diagnostic information from 4066 * the phy 4067 */ 4068 static void e1000_update_phy_info(unsigned long data) 4069 { 4070 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4071 4072 if (test_bit(__E1000_DOWN, &adapter->state)) 4073 return; 4074 4075 schedule_work(&adapter->update_phy_task); 4076 } 4077 4078 /** 4079 * e1000e_update_phy_stats - Update the PHY statistics counters 4080 * @adapter: board private structure 4081 * 4082 * Read/clear the upper 16-bit PHY registers and read/accumulate lower 4083 **/ 4084 static void e1000e_update_phy_stats(struct e1000_adapter *adapter) 4085 { 4086 struct e1000_hw *hw = &adapter->hw; 4087 s32 ret_val; 4088 u16 phy_data; 4089 4090 ret_val = hw->phy.ops.acquire(hw); 4091 if (ret_val) 4092 return; 4093 4094 /* 4095 * A page set is expensive so check if already on desired page. 4096 * If not, set to the page with the PHY status registers. 4097 */ 4098 hw->phy.addr = 1; 4099 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 4100 &phy_data); 4101 if (ret_val) 4102 goto release; 4103 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { 4104 ret_val = hw->phy.ops.set_page(hw, 4105 HV_STATS_PAGE << IGP_PAGE_SHIFT); 4106 if (ret_val) 4107 goto release; 4108 } 4109 4110 /* Single Collision Count */ 4111 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); 4112 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); 4113 if (!ret_val) 4114 adapter->stats.scc += phy_data; 4115 4116 /* Excessive Collision Count */ 4117 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); 4118 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); 4119 if (!ret_val) 4120 adapter->stats.ecol += phy_data; 4121 4122 /* Multiple Collision Count */ 4123 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); 4124 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); 4125 if (!ret_val) 4126 adapter->stats.mcc += phy_data; 4127 4128 /* Late Collision Count */ 4129 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); 4130 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); 4131 if (!ret_val) 4132 adapter->stats.latecol += phy_data; 4133 4134 /* Collision Count - also used for adaptive IFS */ 4135 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); 4136 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); 4137 if (!ret_val) 4138 hw->mac.collision_delta = phy_data; 4139 4140 /* Defer Count */ 4141 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); 4142 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); 4143 if (!ret_val) 4144 adapter->stats.dc += phy_data; 4145 4146 /* Transmit with no CRS */ 4147 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); 4148 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); 4149 if (!ret_val) 4150 adapter->stats.tncrs += phy_data; 4151 4152 release: 4153 hw->phy.ops.release(hw); 4154 } 4155 4156 /** 4157 * e1000e_update_stats - Update the board statistics counters 4158 * @adapter: board private structure 4159 **/ 4160 static void e1000e_update_stats(struct e1000_adapter *adapter) 4161 { 4162 struct net_device *netdev = adapter->netdev; 4163 
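	/*
	 * Note: the statistics registers read below are clear-on-read,
	 * which is why each value is accumulated (+=) into the software
	 * counters rather than assigned.
	 */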
struct e1000_hw *hw = &adapter->hw; 4164 struct pci_dev *pdev = adapter->pdev; 4165 4166 /* 4167 * Prevent stats update while adapter is being reset, or if the pci 4168 * connection is down. 4169 */ 4170 if (adapter->link_speed == 0) 4171 return; 4172 if (pci_channel_offline(pdev)) 4173 return; 4174 4175 adapter->stats.crcerrs += er32(CRCERRS); 4176 adapter->stats.gprc += er32(GPRC); 4177 adapter->stats.gorc += er32(GORCL); 4178 er32(GORCH); /* Clear gorc */ 4179 adapter->stats.bprc += er32(BPRC); 4180 adapter->stats.mprc += er32(MPRC); 4181 adapter->stats.roc += er32(ROC); 4182 4183 adapter->stats.mpc += er32(MPC); 4184 4185 /* Half-duplex statistics */ 4186 if (adapter->link_duplex == HALF_DUPLEX) { 4187 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { 4188 e1000e_update_phy_stats(adapter); 4189 } else { 4190 adapter->stats.scc += er32(SCC); 4191 adapter->stats.ecol += er32(ECOL); 4192 adapter->stats.mcc += er32(MCC); 4193 adapter->stats.latecol += er32(LATECOL); 4194 adapter->stats.dc += er32(DC); 4195 4196 hw->mac.collision_delta = er32(COLC); 4197 4198 if ((hw->mac.type != e1000_82574) && 4199 (hw->mac.type != e1000_82583)) 4200 adapter->stats.tncrs += er32(TNCRS); 4201 } 4202 adapter->stats.colc += hw->mac.collision_delta; 4203 } 4204 4205 adapter->stats.xonrxc += er32(XONRXC); 4206 adapter->stats.xontxc += er32(XONTXC); 4207 adapter->stats.xoffrxc += er32(XOFFRXC); 4208 adapter->stats.xofftxc += er32(XOFFTXC); 4209 adapter->stats.gptc += er32(GPTC); 4210 adapter->stats.gotc += er32(GOTCL); 4211 er32(GOTCH); /* Clear gotc */ 4212 adapter->stats.rnbc += er32(RNBC); 4213 adapter->stats.ruc += er32(RUC); 4214 4215 adapter->stats.mptc += er32(MPTC); 4216 adapter->stats.bptc += er32(BPTC); 4217 4218 /* used for adaptive IFS */ 4219 4220 hw->mac.tx_packet_delta = er32(TPT); 4221 adapter->stats.tpt += hw->mac.tx_packet_delta; 4222 4223 adapter->stats.algnerrc += er32(ALGNERRC); 4224 adapter->stats.rxerrc += er32(RXERRC); 4225 adapter->stats.cexterr += er32(CEXTERR); 4226 adapter->stats.tsctc += er32(TSCTC); 4227 adapter->stats.tsctfc += er32(TSCTFC); 4228 4229 /* Fill out the OS statistics structure */ 4230 netdev->stats.multicast = adapter->stats.mprc; 4231 netdev->stats.collisions = adapter->stats.colc; 4232 4233 /* Rx Errors */ 4234 4235 /* 4236 * RLEC on some newer hardware can be incorrect so build 4237 * our own version based on RUC and ROC 4238 */ 4239 netdev->stats.rx_errors = adapter->stats.rxerrc + 4240 adapter->stats.crcerrs + adapter->stats.algnerrc + 4241 adapter->stats.ruc + adapter->stats.roc + 4242 adapter->stats.cexterr; 4243 netdev->stats.rx_length_errors = adapter->stats.ruc + 4244 adapter->stats.roc; 4245 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4246 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4247 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4248 4249 /* Tx Errors */ 4250 netdev->stats.tx_errors = adapter->stats.ecol + 4251 adapter->stats.latecol; 4252 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 4253 netdev->stats.tx_window_errors = adapter->stats.latecol; 4254 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 4255 4256 /* Tx Dropped needs to be maintained elsewhere */ 4257 4258 /* Management Stats */ 4259 adapter->stats.mgptc += er32(MGTPTC); 4260 adapter->stats.mgprc += er32(MGTPRC); 4261 adapter->stats.mgpdc += er32(MGTPDC); 4262 } 4263 4264 /** 4265 * e1000_phy_read_status - Update the PHY register status snapshot 4266 * @adapter: board private structure 4267 **/ 4268 static void e1000_phy_read_status(struct 
e1000_adapter *adapter) 4269 { 4270 struct e1000_hw *hw = &adapter->hw; 4271 struct e1000_phy_regs *phy = &adapter->phy_regs; 4272 4273 if ((er32(STATUS) & E1000_STATUS_LU) && 4274 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4275 int ret_val; 4276 4277 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4278 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4279 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4280 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); 4281 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); 4282 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); 4283 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 4284 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 4285 if (ret_val) 4286 e_warn("Error reading PHY register\n"); 4287 } else { 4288 /* 4289 * Do not read PHY registers if link is not up 4290 * Set values to typical power-on defaults 4291 */ 4292 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); 4293 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | 4294 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | 4295 BMSR_ERCAP); 4296 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | 4297 ADVERTISE_ALL | ADVERTISE_CSMA); 4298 phy->lpa = 0; 4299 phy->expansion = EXPANSION_ENABLENPAGE; 4300 phy->ctrl1000 = ADVERTISE_1000FULL; 4301 phy->stat1000 = 0; 4302 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); 4303 } 4304 } 4305 4306 static void e1000_print_link_info(struct e1000_adapter *adapter) 4307 { 4308 struct e1000_hw *hw = &adapter->hw; 4309 u32 ctrl = er32(CTRL); 4310 4311 /* Link status message must follow this format for user tools */ 4312 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 4313 adapter->netdev->name, 4314 adapter->link_speed, 4315 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", 4316 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : 4317 (ctrl & E1000_CTRL_RFCE) ? "Rx" : 4318 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); 4319 } 4320 4321 static bool e1000e_has_link(struct e1000_adapter *adapter) 4322 { 4323 struct e1000_hw *hw = &adapter->hw; 4324 bool link_active = false; 4325 s32 ret_val = 0; 4326 4327 /* 4328 * get_link_status is set on LSC (link status) interrupt or 4329 * Rx sequence error interrupt. 
get_link_status will stay 4330 * false until the check_for_link establishes link 4331 * for copper adapters ONLY 4332 */ 4333 switch (hw->phy.media_type) { 4334 case e1000_media_type_copper: 4335 if (hw->mac.get_link_status) { 4336 ret_val = hw->mac.ops.check_for_link(hw); 4337 link_active = !hw->mac.get_link_status; 4338 } else { 4339 link_active = true; 4340 } 4341 break; 4342 case e1000_media_type_fiber: 4343 ret_val = hw->mac.ops.check_for_link(hw); 4344 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 4345 break; 4346 case e1000_media_type_internal_serdes: 4347 ret_val = hw->mac.ops.check_for_link(hw); 4348 link_active = adapter->hw.mac.serdes_has_link; 4349 break; 4350 default: 4351 case e1000_media_type_unknown: 4352 break; 4353 } 4354 4355 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 4356 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 4357 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 4358 e_info("Gigabit has been disabled, downgrading speed\n"); 4359 } 4360 4361 return link_active; 4362 } 4363 4364 static void e1000e_enable_receives(struct e1000_adapter *adapter) 4365 { 4366 /* make sure the receive unit is started */ 4367 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4368 (adapter->flags & FLAG_RX_RESTART_NOW)) { 4369 struct e1000_hw *hw = &adapter->hw; 4370 u32 rctl = er32(RCTL); 4371 ew32(RCTL, rctl | E1000_RCTL_EN); 4372 adapter->flags &= ~FLAG_RX_RESTART_NOW; 4373 } 4374 } 4375 4376 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) 4377 { 4378 struct e1000_hw *hw = &adapter->hw; 4379 4380 /* 4381 * With 82574 controllers, PHY needs to be checked periodically 4382 * for hung state and reset, if two calls return true 4383 */ 4384 if (e1000_check_phy_82574(hw)) 4385 adapter->phy_hang_count++; 4386 else 4387 adapter->phy_hang_count = 0; 4388 4389 if (adapter->phy_hang_count > 1) { 4390 adapter->phy_hang_count = 0; 4391 schedule_work(&adapter->reset_task); 4392 } 4393 } 4394 4395 /** 4396 * e1000_watchdog - Timer Call-back 4397 * @data: pointer to adapter cast into an unsigned long 4398 **/ 4399 static void e1000_watchdog(unsigned long data) 4400 { 4401 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4402 4403 /* Do the rest outside of interrupt context */ 4404 schedule_work(&adapter->watchdog_task); 4405 4406 /* TODO: make this use queue_delayed_work() */ 4407 } 4408 4409 static void e1000_watchdog_task(struct work_struct *work) 4410 { 4411 struct e1000_adapter *adapter = container_of(work, 4412 struct e1000_adapter, watchdog_task); 4413 struct net_device *netdev = adapter->netdev; 4414 struct e1000_mac_info *mac = &adapter->hw.mac; 4415 struct e1000_phy_info *phy = &adapter->hw.phy; 4416 struct e1000_ring *tx_ring = adapter->tx_ring; 4417 struct e1000_hw *hw = &adapter->hw; 4418 u32 link, tctl; 4419 4420 if (test_bit(__E1000_DOWN, &adapter->state)) 4421 return; 4422 4423 link = e1000e_has_link(adapter); 4424 if ((netif_carrier_ok(netdev)) && link) { 4425 /* Cancel scheduled suspend requests. */ 4426 pm_runtime_resume(netdev->dev.parent); 4427 4428 e1000e_enable_receives(adapter); 4429 goto link_up; 4430 } 4431 4432 if ((e1000e_enable_tx_pkt_filtering(hw)) && 4433 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) 4434 e1000_update_mng_vlan(adapter); 4435 4436 if (link) { 4437 if (!netif_carrier_ok(netdev)) { 4438 bool txb2b = true; 4439 4440 /* Cancel scheduled suspend requests. 
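	 * The link just came (back) up, so a pending runtime-suspend
	 * request must not be allowed to fire.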
*/ 4441 pm_runtime_resume(netdev->dev.parent); 4442 4443 /* update snapshot of PHY registers on LSC */ 4444 e1000_phy_read_status(adapter); 4445 mac->ops.get_link_up_info(&adapter->hw, 4446 &adapter->link_speed, 4447 &adapter->link_duplex); 4448 e1000_print_link_info(adapter); 4449 /* 4450 * On supported PHYs, check for duplex mismatch only 4451 * if link has autonegotiated at 10/100 half 4452 */ 4453 if ((hw->phy.type == e1000_phy_igp_3 || 4454 hw->phy.type == e1000_phy_bm) && 4455 (hw->mac.autoneg == true) && 4456 (adapter->link_speed == SPEED_10 || 4457 adapter->link_speed == SPEED_100) && 4458 (adapter->link_duplex == HALF_DUPLEX)) { 4459 u16 autoneg_exp; 4460 4461 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); 4462 4463 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) 4464 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); 4465 } 4466 4467 /* adjust timeout factor according to speed/duplex */ 4468 adapter->tx_timeout_factor = 1; 4469 switch (adapter->link_speed) { 4470 case SPEED_10: 4471 txb2b = false; 4472 adapter->tx_timeout_factor = 16; 4473 break; 4474 case SPEED_100: 4475 txb2b = false; 4476 adapter->tx_timeout_factor = 10; 4477 break; 4478 } 4479 4480 /* 4481 * workaround: re-program speed mode bit after 4482 * link-up event 4483 */ 4484 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 4485 !txb2b) { 4486 u32 tarc0; 4487 tarc0 = er32(TARC(0)); 4488 tarc0 &= ~SPEED_MODE_BIT; 4489 ew32(TARC(0), tarc0); 4490 } 4491 4492 /* 4493 * disable TSO for pcie and 10/100 speeds, to avoid 4494 * some hardware issues 4495 */ 4496 if (!(adapter->flags & FLAG_TSO_FORCE)) { 4497 switch (adapter->link_speed) { 4498 case SPEED_10: 4499 case SPEED_100: 4500 e_info("10/100 speed: disabling TSO\n"); 4501 netdev->features &= ~NETIF_F_TSO; 4502 netdev->features &= ~NETIF_F_TSO6; 4503 break; 4504 case SPEED_1000: 4505 netdev->features |= NETIF_F_TSO; 4506 netdev->features |= NETIF_F_TSO6; 4507 break; 4508 default: 4509 /* oops */ 4510 break; 4511 } 4512 } 4513 4514 /* 4515 * enable transmits in the hardware, need to do this 4516 * after setting TARC(0) 4517 */ 4518 tctl = er32(TCTL); 4519 tctl |= E1000_TCTL_EN; 4520 ew32(TCTL, tctl); 4521 4522 /* 4523 * Perform any post-link-up configuration before 4524 * reporting link up. 
4525 */ 4526 if (phy->ops.cfg_on_link_up) 4527 phy->ops.cfg_on_link_up(hw); 4528 4529 netif_carrier_on(netdev); 4530 4531 if (!test_bit(__E1000_DOWN, &adapter->state)) 4532 mod_timer(&adapter->phy_info_timer, 4533 round_jiffies(jiffies + 2 * HZ)); 4534 } 4535 } else { 4536 if (netif_carrier_ok(netdev)) { 4537 adapter->link_speed = 0; 4538 adapter->link_duplex = 0; 4539 /* Link status message must follow this format */ 4540 printk(KERN_INFO "e1000e: %s NIC Link is Down\n", 4541 adapter->netdev->name); 4542 netif_carrier_off(netdev); 4543 if (!test_bit(__E1000_DOWN, &adapter->state)) 4544 mod_timer(&adapter->phy_info_timer, 4545 round_jiffies(jiffies + 2 * HZ)); 4546 4547 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 4548 schedule_work(&adapter->reset_task); 4549 else 4550 pm_schedule_suspend(netdev->dev.parent, 4551 LINK_TIMEOUT); 4552 } 4553 } 4554 4555 link_up: 4556 spin_lock(&adapter->stats64_lock); 4557 e1000e_update_stats(adapter); 4558 4559 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4560 adapter->tpt_old = adapter->stats.tpt; 4561 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 4562 adapter->colc_old = adapter->stats.colc; 4563 4564 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 4565 adapter->gorc_old = adapter->stats.gorc; 4566 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; 4567 adapter->gotc_old = adapter->stats.gotc; 4568 spin_unlock(&adapter->stats64_lock); 4569 4570 e1000e_update_adaptive(&adapter->hw); 4571 4572 if (!netif_carrier_ok(netdev) && 4573 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { 4574 /* 4575 * We've lost link, so the controller stops DMA, 4576 * but we've got queued Tx work that's never going 4577 * to get done, so reset controller to flush Tx. 4578 * (Do the reset outside of interrupt context). 4579 */ 4580 schedule_work(&adapter->reset_task); 4581 /* return immediately since reset is imminent */ 4582 return; 4583 } 4584 4585 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4586 if (adapter->itr_setting == 4) { 4587 /* 4588 * Symmetric Tx/Rx gets a reduced ITR=2000; 4589 * Total asymmetrical Tx or Rx gets ITR=8000; 4590 * everyone else is between 2000-8000. 4591 */ 4592 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 4593 u32 dif = (adapter->gotc > adapter->gorc ? 4594 adapter->gotc - adapter->gorc : 4595 adapter->gorc - adapter->gotc) / 10000; 4596 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 4597 4598 ew32(ITR, 1000000000 / (itr * 256)); 4599 } 4600 4601 /* Cause software interrupt to ensure Rx ring is cleaned */ 4602 if (adapter->msix_entries) 4603 ew32(ICS, adapter->rx_ring->ims_val); 4604 else 4605 ew32(ICS, E1000_ICS_RXDMT0); 4606 4607 /* flush pending descriptors to memory before detecting Tx hang */ 4608 e1000e_flush_descriptors(adapter); 4609 4610 /* Force detection of hung controller every watchdog period */ 4611 adapter->detect_tx_hung = true; 4612 4613 /* 4614 * With 82571 controllers, LAA may be overwritten due to controller 4615 * reset from the other port. 
Set the appropriate LAA in RAR[0] 4616 */ 4617 if (e1000e_get_laa_state_82571(hw)) 4618 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4619 4620 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 4621 e1000e_check_82574_phy_workaround(adapter); 4622 4623 /* Reset the timer */ 4624 if (!test_bit(__E1000_DOWN, &adapter->state)) 4625 mod_timer(&adapter->watchdog_timer, 4626 round_jiffies(jiffies + 2 * HZ)); 4627 } 4628 4629 #define E1000_TX_FLAGS_CSUM 0x00000001 4630 #define E1000_TX_FLAGS_VLAN 0x00000002 4631 #define E1000_TX_FLAGS_TSO 0x00000004 4632 #define E1000_TX_FLAGS_IPV4 0x00000008 4633 #define E1000_TX_FLAGS_NO_FCS 0x00000010 4634 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 4635 #define E1000_TX_FLAGS_VLAN_SHIFT 16 4636 4637 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) 4638 { 4639 struct e1000_context_desc *context_desc; 4640 struct e1000_buffer *buffer_info; 4641 unsigned int i; 4642 u32 cmd_length = 0; 4643 u16 ipcse = 0, tucse, mss; 4644 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4645 4646 if (!skb_is_gso(skb)) 4647 return 0; 4648 4649 if (skb_header_cloned(skb)) { 4650 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4651 4652 if (err) 4653 return err; 4654 } 4655 4656 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4657 mss = skb_shinfo(skb)->gso_size; 4658 if (skb->protocol == htons(ETH_P_IP)) { 4659 struct iphdr *iph = ip_hdr(skb); 4660 iph->tot_len = 0; 4661 iph->check = 0; 4662 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 4663 0, IPPROTO_TCP, 0); 4664 cmd_length = E1000_TXD_CMD_IP; 4665 ipcse = skb_transport_offset(skb) - 1; 4666 } else if (skb_is_gso_v6(skb)) { 4667 ipv6_hdr(skb)->payload_len = 0; 4668 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4669 &ipv6_hdr(skb)->daddr, 4670 0, IPPROTO_TCP, 0); 4671 ipcse = 0; 4672 } 4673 ipcss = skb_network_offset(skb); 4674 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 4675 tucss = skb_transport_offset(skb); 4676 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 4677 tucse = 0; 4678 4679 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 4680 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 4681 4682 i = tx_ring->next_to_use; 4683 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4684 buffer_info = &tx_ring->buffer_info[i]; 4685 4686 context_desc->lower_setup.ip_fields.ipcss = ipcss; 4687 context_desc->lower_setup.ip_fields.ipcso = ipcso; 4688 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 4689 context_desc->upper_setup.tcp_fields.tucss = tucss; 4690 context_desc->upper_setup.tcp_fields.tucso = tucso; 4691 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 4692 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 4693 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 4694 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 4695 4696 buffer_info->time_stamp = jiffies; 4697 buffer_info->next_to_watch = i; 4698 4699 i++; 4700 if (i == tx_ring->count) 4701 i = 0; 4702 tx_ring->next_to_use = i; 4703 4704 return 1; 4705 } 4706 4707 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) 4708 { 4709 struct e1000_adapter *adapter = tx_ring->adapter; 4710 struct e1000_context_desc *context_desc; 4711 struct e1000_buffer *buffer_info; 4712 unsigned int i; 4713 u8 css; 4714 u32 cmd_len = E1000_TXD_CMD_DEXT; 4715 __be16 protocol; 4716 4717 if (skb->ip_summed != CHECKSUM_PARTIAL) 4718 return 0; 4719 4720 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 4721 protocol = 
vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 4722 else 4723 protocol = skb->protocol; 4724 4725 switch (protocol) { 4726 case cpu_to_be16(ETH_P_IP): 4727 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 4728 cmd_len |= E1000_TXD_CMD_TCP; 4729 break; 4730 case cpu_to_be16(ETH_P_IPV6): 4731 /* XXX not handling all IPV6 headers */ 4732 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 4733 cmd_len |= E1000_TXD_CMD_TCP; 4734 break; 4735 default: 4736 if (unlikely(net_ratelimit())) 4737 e_warn("checksum_partial proto=%x!\n", 4738 be16_to_cpu(protocol)); 4739 break; 4740 } 4741 4742 css = skb_checksum_start_offset(skb); 4743 4744 i = tx_ring->next_to_use; 4745 buffer_info = &tx_ring->buffer_info[i]; 4746 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4747 4748 context_desc->lower_setup.ip_config = 0; 4749 context_desc->upper_setup.tcp_fields.tucss = css; 4750 context_desc->upper_setup.tcp_fields.tucso = 4751 css + skb->csum_offset; 4752 context_desc->upper_setup.tcp_fields.tucse = 0; 4753 context_desc->tcp_seg_setup.data = 0; 4754 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 4755 4756 buffer_info->time_stamp = jiffies; 4757 buffer_info->next_to_watch = i; 4758 4759 i++; 4760 if (i == tx_ring->count) 4761 i = 0; 4762 tx_ring->next_to_use = i; 4763 4764 return 1; 4765 } 4766 4767 #define E1000_MAX_PER_TXD 8192 4768 #define E1000_MAX_TXD_PWR 12 4769 4770 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 4771 unsigned int first, unsigned int max_per_txd, 4772 unsigned int nr_frags, unsigned int mss) 4773 { 4774 struct e1000_adapter *adapter = tx_ring->adapter; 4775 struct pci_dev *pdev = adapter->pdev; 4776 struct e1000_buffer *buffer_info; 4777 unsigned int len = skb_headlen(skb); 4778 unsigned int offset = 0, size, count = 0, i; 4779 unsigned int f, bytecount, segs; 4780 4781 i = tx_ring->next_to_use; 4782 4783 while (len) { 4784 buffer_info = &tx_ring->buffer_info[i]; 4785 size = min(len, max_per_txd); 4786 4787 buffer_info->length = size; 4788 buffer_info->time_stamp = jiffies; 4789 buffer_info->next_to_watch = i; 4790 buffer_info->dma = dma_map_single(&pdev->dev, 4791 skb->data + offset, 4792 size, DMA_TO_DEVICE); 4793 buffer_info->mapped_as_page = false; 4794 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4795 goto dma_error; 4796 4797 len -= size; 4798 offset += size; 4799 count++; 4800 4801 if (len) { 4802 i++; 4803 if (i == tx_ring->count) 4804 i = 0; 4805 } 4806 } 4807 4808 for (f = 0; f < nr_frags; f++) { 4809 const struct skb_frag_struct *frag; 4810 4811 frag = &skb_shinfo(skb)->frags[f]; 4812 len = skb_frag_size(frag); 4813 offset = 0; 4814 4815 while (len) { 4816 i++; 4817 if (i == tx_ring->count) 4818 i = 0; 4819 4820 buffer_info = &tx_ring->buffer_info[i]; 4821 size = min(len, max_per_txd); 4822 4823 buffer_info->length = size; 4824 buffer_info->time_stamp = jiffies; 4825 buffer_info->next_to_watch = i; 4826 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 4827 offset, size, DMA_TO_DEVICE); 4828 buffer_info->mapped_as_page = true; 4829 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4830 goto dma_error; 4831 4832 len -= size; 4833 offset += size; 4834 count++; 4835 } 4836 } 4837 4838 segs = skb_shinfo(skb)->gso_segs ? 
: 1; 4839 /* multiply data chunks by size of headers */ 4840 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 4841 4842 tx_ring->buffer_info[i].skb = skb; 4843 tx_ring->buffer_info[i].segs = segs; 4844 tx_ring->buffer_info[i].bytecount = bytecount; 4845 tx_ring->buffer_info[first].next_to_watch = i; 4846 4847 return count; 4848 4849 dma_error: 4850 dev_err(&pdev->dev, "Tx DMA map failed\n"); 4851 buffer_info->dma = 0; 4852 if (count) 4853 count--; 4854 4855 while (count--) { 4856 if (i == 0) 4857 i += tx_ring->count; 4858 i--; 4859 buffer_info = &tx_ring->buffer_info[i]; 4860 e1000_put_txbuf(tx_ring, buffer_info); 4861 } 4862 4863 return 0; 4864 } 4865 4866 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) 4867 { 4868 struct e1000_adapter *adapter = tx_ring->adapter; 4869 struct e1000_tx_desc *tx_desc = NULL; 4870 struct e1000_buffer *buffer_info; 4871 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 4872 unsigned int i; 4873 4874 if (tx_flags & E1000_TX_FLAGS_TSO) { 4875 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 4876 E1000_TXD_CMD_TSE; 4877 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4878 4879 if (tx_flags & E1000_TX_FLAGS_IPV4) 4880 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 4881 } 4882 4883 if (tx_flags & E1000_TX_FLAGS_CSUM) { 4884 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 4885 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4886 } 4887 4888 if (tx_flags & E1000_TX_FLAGS_VLAN) { 4889 txd_lower |= E1000_TXD_CMD_VLE; 4890 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 4891 } 4892 4893 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 4894 txd_lower &= ~(E1000_TXD_CMD_IFCS); 4895 4896 i = tx_ring->next_to_use; 4897 4898 do { 4899 buffer_info = &tx_ring->buffer_info[i]; 4900 tx_desc = E1000_TX_DESC(*tx_ring, i); 4901 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4902 tx_desc->lower.data = 4903 cpu_to_le32(txd_lower | buffer_info->length); 4904 tx_desc->upper.data = cpu_to_le32(txd_upper); 4905 4906 i++; 4907 if (i == tx_ring->count) 4908 i = 0; 4909 } while (--count > 0); 4910 4911 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 4912 4913 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 4914 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 4915 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 4916 4917 /* 4918 * Force memory writes to complete before letting h/w 4919 * know there are new descriptors to fetch. (Only 4920 * applicable for weak-ordered memory model archs, 4921 * such as IA-64). 
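	 *
	 * The barrier orders the descriptor writes above against the tail
	 * update below; without it the hardware could fetch descriptors
	 * whose contents are not yet globally visible.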
4922 */ 4923 wmb(); 4924 4925 tx_ring->next_to_use = i; 4926 4927 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 4928 e1000e_update_tdt_wa(tx_ring, i); 4929 else 4930 writel(i, tx_ring->tail); 4931 4932 /* 4933 * we need this if more than one processor can write to our tail 4934 * at a time, it synchronizes IO on IA64/Altix systems 4935 */ 4936 mmiowb(); 4937 } 4938 4939 #define MINIMUM_DHCP_PACKET_SIZE 282 4940 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 4941 struct sk_buff *skb) 4942 { 4943 struct e1000_hw *hw = &adapter->hw; 4944 u16 length, offset; 4945 4946 if (vlan_tx_tag_present(skb)) { 4947 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 4948 (adapter->hw.mng_cookie.status & 4949 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 4950 return 0; 4951 } 4952 4953 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 4954 return 0; 4955 4956 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) 4957 return 0; 4958 4959 { 4960 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); 4961 struct udphdr *udp; 4962 4963 if (ip->protocol != IPPROTO_UDP) 4964 return 0; 4965 4966 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); 4967 if (ntohs(udp->dest) != 67) 4968 return 0; 4969 4970 offset = (u8 *)udp + 8 - skb->data; 4971 length = skb->len - offset; 4972 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); 4973 } 4974 4975 return 0; 4976 } 4977 4978 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 4979 { 4980 struct e1000_adapter *adapter = tx_ring->adapter; 4981 4982 netif_stop_queue(adapter->netdev); 4983 /* 4984 * Herbert's original patch had: 4985 * smp_mb__after_netif_stop_queue(); 4986 * but since that doesn't exist yet, just open code it. 4987 */ 4988 smp_mb(); 4989 4990 /* 4991 * We need to check again in a case another CPU has just 4992 * made room available. 4993 */ 4994 if (e1000_desc_unused(tx_ring) < size) 4995 return -EBUSY; 4996 4997 /* A reprieve! */ 4998 netif_start_queue(adapter->netdev); 4999 ++adapter->restart_queue; 5000 return 0; 5001 } 5002 5003 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5004 { 5005 if (e1000_desc_unused(tx_ring) >= size) 5006 return 0; 5007 return __e1000_maybe_stop_tx(tx_ring, size); 5008 } 5009 5010 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) 5011 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 5012 struct net_device *netdev) 5013 { 5014 struct e1000_adapter *adapter = netdev_priv(netdev); 5015 struct e1000_ring *tx_ring = adapter->tx_ring; 5016 unsigned int first; 5017 unsigned int max_per_txd = E1000_MAX_PER_TXD; 5018 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 5019 unsigned int tx_flags = 0; 5020 unsigned int len = skb_headlen(skb); 5021 unsigned int nr_frags; 5022 unsigned int mss; 5023 int count = 0; 5024 int tso; 5025 unsigned int f; 5026 5027 if (test_bit(__E1000_DOWN, &adapter->state)) { 5028 dev_kfree_skb_any(skb); 5029 return NETDEV_TX_OK; 5030 } 5031 5032 if (skb->len <= 0) { 5033 dev_kfree_skb_any(skb); 5034 return NETDEV_TX_OK; 5035 } 5036 5037 mss = skb_shinfo(skb)->gso_size; 5038 /* 5039 * The controller does a simple calculation to 5040 * make sure there is enough room in the FIFO before 5041 * initiating the DMA for each buffer. The calc is: 5042 * 4 = ceil(buffer len/mss). To make sure we don't 5043 * overrun the FIFO, adjust the max buffer len if mss 5044 * drops. 
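	 *
	 * Illustrative arithmetic: for a typical MSS of 1460 bytes,
	 * mss << 2 = 5840, so max_per_txd drops from 8192 to 5840 and
	 * max_txd_pwr works out to fls(5840) - 1 = 12.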
5045 */ 5046 if (mss) { 5047 u8 hdr_len; 5048 max_per_txd = min(mss << 2, max_per_txd); 5049 max_txd_pwr = fls(max_per_txd) - 1; 5050 5051 /* 5052 * TSO Workaround for 82571/2/3 Controllers -- if skb->data 5053 * points to just header, pull a few bytes of payload from 5054 * frags into skb->data 5055 */ 5056 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5057 /* 5058 * we do this workaround for ES2LAN, but it is un-necessary, 5059 * avoiding it could save a lot of cycles 5060 */ 5061 if (skb->data_len && (hdr_len == len)) { 5062 unsigned int pull_size; 5063 5064 pull_size = min_t(unsigned int, 4, skb->data_len); 5065 if (!__pskb_pull_tail(skb, pull_size)) { 5066 e_err("__pskb_pull_tail failed.\n"); 5067 dev_kfree_skb_any(skb); 5068 return NETDEV_TX_OK; 5069 } 5070 len = skb_headlen(skb); 5071 } 5072 } 5073 5074 /* reserve a descriptor for the offload context */ 5075 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 5076 count++; 5077 count++; 5078 5079 count += TXD_USE_COUNT(len, max_txd_pwr); 5080 5081 nr_frags = skb_shinfo(skb)->nr_frags; 5082 for (f = 0; f < nr_frags; f++) 5083 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 5084 max_txd_pwr); 5085 5086 if (adapter->hw.mac.tx_pkt_filtering) 5087 e1000_transfer_dhcp_info(adapter, skb); 5088 5089 /* 5090 * need: count + 2 desc gap to keep tail from touching 5091 * head, otherwise try next time 5092 */ 5093 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5094 return NETDEV_TX_BUSY; 5095 5096 if (vlan_tx_tag_present(skb)) { 5097 tx_flags |= E1000_TX_FLAGS_VLAN; 5098 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 5099 } 5100 5101 first = tx_ring->next_to_use; 5102 5103 tso = e1000_tso(tx_ring, skb); 5104 if (tso < 0) { 5105 dev_kfree_skb_any(skb); 5106 return NETDEV_TX_OK; 5107 } 5108 5109 if (tso) 5110 tx_flags |= E1000_TX_FLAGS_TSO; 5111 else if (e1000_tx_csum(tx_ring, skb)) 5112 tx_flags |= E1000_TX_FLAGS_CSUM; 5113 5114 /* 5115 * Old method was to assume IPv4 packet by default if TSO was enabled. 5116 * 82571 hardware supports TSO capabilities for IPv6 as well... 5117 * no longer assume, we must. 5118 */ 5119 if (skb->protocol == htons(ETH_P_IP)) 5120 tx_flags |= E1000_TX_FLAGS_IPV4; 5121 5122 if (unlikely(skb->no_fcs)) 5123 tx_flags |= E1000_TX_FLAGS_NO_FCS; 5124 5125 /* if count is 0 then mapping error has occurred */ 5126 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); 5127 if (count) { 5128 netdev_sent_queue(netdev, skb->len); 5129 e1000_tx_queue(tx_ring, tx_flags, count); 5130 /* Make sure there is space in the ring for the next send. 
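	 * MAX_SKB_FRAGS + 2 covers a worst-case fragmented skb plus the
	 * two-descriptor gap (used earlier in this function) that keeps
	 * tail from touching head; stopping the queue now is cheaper than
	 * failing the next transmit.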
*/ 5131 e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); 5132 5133 } else { 5134 dev_kfree_skb_any(skb); 5135 tx_ring->buffer_info[first].time_stamp = 0; 5136 tx_ring->next_to_use = first; 5137 } 5138 5139 return NETDEV_TX_OK; 5140 } 5141 5142 /** 5143 * e1000_tx_timeout - Respond to a Tx Hang 5144 * @netdev: network interface device structure 5145 **/ 5146 static void e1000_tx_timeout(struct net_device *netdev) 5147 { 5148 struct e1000_adapter *adapter = netdev_priv(netdev); 5149 5150 /* Do the reset outside of interrupt context */ 5151 adapter->tx_timeout_count++; 5152 schedule_work(&adapter->reset_task); 5153 } 5154 5155 static void e1000_reset_task(struct work_struct *work) 5156 { 5157 struct e1000_adapter *adapter; 5158 adapter = container_of(work, struct e1000_adapter, reset_task); 5159 5160 /* don't run the task if already down */ 5161 if (test_bit(__E1000_DOWN, &adapter->state)) 5162 return; 5163 5164 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 5165 (adapter->flags & FLAG_RX_RESTART_NOW))) { 5166 e1000e_dump(adapter); 5167 e_err("Reset adapter\n"); 5168 } 5169 e1000e_reinit_locked(adapter); 5170 } 5171 5172 /** 5173 * e1000_get_stats64 - Get System Network Statistics 5174 * @netdev: network interface device structure 5175 * @stats: rtnl_link_stats64 pointer 5176 * 5177 * Returns the address of the device statistics structure. 5178 **/ 5179 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 5180 struct rtnl_link_stats64 *stats) 5181 { 5182 struct e1000_adapter *adapter = netdev_priv(netdev); 5183 5184 memset(stats, 0, sizeof(struct rtnl_link_stats64)); 5185 spin_lock(&adapter->stats64_lock); 5186 e1000e_update_stats(adapter); 5187 /* Fill out the OS statistics structure */ 5188 stats->rx_bytes = adapter->stats.gorc; 5189 stats->rx_packets = adapter->stats.gprc; 5190 stats->tx_bytes = adapter->stats.gotc; 5191 stats->tx_packets = adapter->stats.gptc; 5192 stats->multicast = adapter->stats.mprc; 5193 stats->collisions = adapter->stats.colc; 5194 5195 /* Rx Errors */ 5196 5197 /* 5198 * RLEC on some newer hardware can be incorrect so build 5199 * our own version based on RUC and ROC 5200 */ 5201 stats->rx_errors = adapter->stats.rxerrc + 5202 adapter->stats.crcerrs + adapter->stats.algnerrc + 5203 adapter->stats.ruc + adapter->stats.roc + 5204 adapter->stats.cexterr; 5205 stats->rx_length_errors = adapter->stats.ruc + 5206 adapter->stats.roc; 5207 stats->rx_crc_errors = adapter->stats.crcerrs; 5208 stats->rx_frame_errors = adapter->stats.algnerrc; 5209 stats->rx_missed_errors = adapter->stats.mpc; 5210 5211 /* Tx Errors */ 5212 stats->tx_errors = adapter->stats.ecol + 5213 adapter->stats.latecol; 5214 stats->tx_aborted_errors = adapter->stats.ecol; 5215 stats->tx_window_errors = adapter->stats.latecol; 5216 stats->tx_carrier_errors = adapter->stats.tncrs; 5217 5218 /* Tx Dropped needs to be maintained elsewhere */ 5219 5220 spin_unlock(&adapter->stats64_lock); 5221 return stats; 5222 } 5223 5224 /** 5225 * e1000_change_mtu - Change the Maximum Transfer Unit 5226 * @netdev: network interface device structure 5227 * @new_mtu: new value for maximum frame size 5228 * 5229 * Returns 0 on success, negative on failure 5230 **/ 5231 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5232 { 5233 struct e1000_adapter *adapter = netdev_priv(netdev); 5234 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5235 5236 /* Jumbo frame support */ 5237 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 5238 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 5239 
e_err("Jumbo Frames not supported.\n"); 5240 return -EINVAL; 5241 } 5242 5243 /* 5244 * IP payload checksum (enabled with jumbos/packet-split when 5245 * Rx checksum is enabled) and generation of RSS hash is 5246 * mutually exclusive in the hardware. 5247 */ 5248 if ((netdev->features & NETIF_F_RXCSUM) && 5249 (netdev->features & NETIF_F_RXHASH)) { 5250 e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n"); 5251 return -EINVAL; 5252 } 5253 } 5254 5255 /* Supported frame sizes */ 5256 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 5257 (max_frame > adapter->max_hw_frame_size)) { 5258 e_err("Unsupported MTU setting\n"); 5259 return -EINVAL; 5260 } 5261 5262 /* Jumbo frame workaround on 82579 requires CRC be stripped */ 5263 if ((adapter->hw.mac.type == e1000_pch2lan) && 5264 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 5265 (new_mtu > ETH_DATA_LEN)) { 5266 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); 5267 return -EINVAL; 5268 } 5269 5270 /* 82573 Errata 17 */ 5271 if (((adapter->hw.mac.type == e1000_82573) || 5272 (adapter->hw.mac.type == e1000_82574)) && 5273 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { 5274 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; 5275 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); 5276 } 5277 5278 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5279 usleep_range(1000, 2000); 5280 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5281 adapter->max_frame_size = max_frame; 5282 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5283 netdev->mtu = new_mtu; 5284 if (netif_running(netdev)) 5285 e1000e_down(adapter); 5286 5287 /* 5288 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 5289 * means we reserve 2 more, this pushes us to allocate from the next 5290 * larger slab size. 5291 * i.e. 
RXBUFFER_2048 --> size-4096 slab 5292 * However with the new *_jumbo_rx* routines, jumbo receives will use 5293 * fragmented skbs 5294 */ 5295 5296 if (max_frame <= 2048) 5297 adapter->rx_buffer_len = 2048; 5298 else 5299 adapter->rx_buffer_len = 4096; 5300 5301 /* adjust allocation if LPE protects us, and we aren't using SBP */ 5302 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 5303 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 5304 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 5305 + ETH_FCS_LEN; 5306 5307 if (netif_running(netdev)) 5308 e1000e_up(adapter); 5309 else 5310 e1000e_reset(adapter); 5311 5312 clear_bit(__E1000_RESETTING, &adapter->state); 5313 5314 return 0; 5315 } 5316 5317 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 5318 int cmd) 5319 { 5320 struct e1000_adapter *adapter = netdev_priv(netdev); 5321 struct mii_ioctl_data *data = if_mii(ifr); 5322 5323 if (adapter->hw.phy.media_type != e1000_media_type_copper) 5324 return -EOPNOTSUPP; 5325 5326 switch (cmd) { 5327 case SIOCGMIIPHY: 5328 data->phy_id = adapter->hw.phy.addr; 5329 break; 5330 case SIOCGMIIREG: 5331 e1000_phy_read_status(adapter); 5332 5333 switch (data->reg_num & 0x1F) { 5334 case MII_BMCR: 5335 data->val_out = adapter->phy_regs.bmcr; 5336 break; 5337 case MII_BMSR: 5338 data->val_out = adapter->phy_regs.bmsr; 5339 break; 5340 case MII_PHYSID1: 5341 data->val_out = (adapter->hw.phy.id >> 16); 5342 break; 5343 case MII_PHYSID2: 5344 data->val_out = (adapter->hw.phy.id & 0xFFFF); 5345 break; 5346 case MII_ADVERTISE: 5347 data->val_out = adapter->phy_regs.advertise; 5348 break; 5349 case MII_LPA: 5350 data->val_out = adapter->phy_regs.lpa; 5351 break; 5352 case MII_EXPANSION: 5353 data->val_out = adapter->phy_regs.expansion; 5354 break; 5355 case MII_CTRL1000: 5356 data->val_out = adapter->phy_regs.ctrl1000; 5357 break; 5358 case MII_STAT1000: 5359 data->val_out = adapter->phy_regs.stat1000; 5360 break; 5361 case MII_ESTATUS: 5362 data->val_out = adapter->phy_regs.estatus; 5363 break; 5364 default: 5365 return -EIO; 5366 } 5367 break; 5368 case SIOCSMIIREG: 5369 default: 5370 return -EOPNOTSUPP; 5371 } 5372 return 0; 5373 } 5374 5375 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5376 { 5377 switch (cmd) { 5378 case SIOCGMIIPHY: 5379 case SIOCGMIIREG: 5380 case SIOCSMIIREG: 5381 return e1000_mii_ioctl(netdev, ifr, cmd); 5382 default: 5383 return -EOPNOTSUPP; 5384 } 5385 } 5386 5387 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) 5388 { 5389 struct e1000_hw *hw = &adapter->hw; 5390 u32 i, mac_reg; 5391 u16 phy_reg, wuc_enable; 5392 int retval = 0; 5393 5394 /* copy MAC RARs to PHY RARs */ 5395 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 5396 5397 retval = hw->phy.ops.acquire(hw); 5398 if (retval) { 5399 e_err("Could not acquire PHY\n"); 5400 return retval; 5401 } 5402 5403 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ 5404 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5405 if (retval) 5406 goto release; 5407 5408 /* copy MAC MTA to PHY MTA - only needed for pchlan */ 5409 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 5410 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 5411 hw->phy.ops.write_reg_page(hw, BM_MTA(i), 5412 (u16)(mac_reg & 0xFFFF)); 5413 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, 5414 (u16)((mac_reg >> 16) & 0xFFFF)); 5415 } 5416 5417 /* configure PHY Rx Control register */ 5418 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); 5419 
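	/*
	 * Mirror the MAC receive-control bits that matter for wakeup
	 * filtering (promiscuous, multicast, broadcast, pass-MAC-control,
	 * flow control) into their BM_RCTL equivalents, so the PHY
	 * presumably applies the same receive filtering while it handles
	 * wakeup.
	 */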
mac_reg = er32(RCTL); 5420 if (mac_reg & E1000_RCTL_UPE) 5421 phy_reg |= BM_RCTL_UPE; 5422 if (mac_reg & E1000_RCTL_MPE) 5423 phy_reg |= BM_RCTL_MPE; 5424 phy_reg &= ~(BM_RCTL_MO_MASK); 5425 if (mac_reg & E1000_RCTL_MO_3) 5426 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 5427 << BM_RCTL_MO_SHIFT); 5428 if (mac_reg & E1000_RCTL_BAM) 5429 phy_reg |= BM_RCTL_BAM; 5430 if (mac_reg & E1000_RCTL_PMCF) 5431 phy_reg |= BM_RCTL_PMCF; 5432 mac_reg = er32(CTRL); 5433 if (mac_reg & E1000_CTRL_RFCE) 5434 phy_reg |= BM_RCTL_RFCE; 5435 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); 5436 5437 /* enable PHY wakeup in MAC register */ 5438 ew32(WUFC, wufc); 5439 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); 5440 5441 /* configure and enable PHY wakeup in PHY registers */ 5442 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); 5443 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); 5444 5445 /* activate PHY wakeup */ 5446 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 5447 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5448 if (retval) 5449 e_err("Could not set PHY Host Wakeup bit\n"); 5450 release: 5451 hw->phy.ops.release(hw); 5452 5453 return retval; 5454 } 5455 5456 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, 5457 bool runtime) 5458 { 5459 struct net_device *netdev = pci_get_drvdata(pdev); 5460 struct e1000_adapter *adapter = netdev_priv(netdev); 5461 struct e1000_hw *hw = &adapter->hw; 5462 u32 ctrl, ctrl_ext, rctl, status; 5463 /* Runtime suspend should only enable wakeup for link changes */ 5464 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; 5465 int retval = 0; 5466 5467 netif_device_detach(netdev); 5468 5469 if (netif_running(netdev)) { 5470 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 5471 e1000e_down(adapter); 5472 e1000_free_irq(adapter); 5473 } 5474 e1000e_reset_interrupt_capability(adapter); 5475 5476 retval = pci_save_state(pdev); 5477 if (retval) 5478 return retval; 5479 5480 status = er32(STATUS); 5481 if (status & E1000_STATUS_LU) 5482 wufc &= ~E1000_WUFC_LNKC; 5483 5484 if (wufc) { 5485 e1000_setup_rctl(adapter); 5486 e1000e_set_rx_mode(netdev); 5487 5488 /* turn on all-multi mode if wake on multicast is enabled */ 5489 if (wufc & E1000_WUFC_MC) { 5490 rctl = er32(RCTL); 5491 rctl |= E1000_RCTL_MPE; 5492 ew32(RCTL, rctl); 5493 } 5494 5495 ctrl = er32(CTRL); 5496 /* advertise wake from D3Cold */ 5497 #define E1000_CTRL_ADVD3WUC 0x00100000 5498 /* phy power management enable */ 5499 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5500 ctrl |= E1000_CTRL_ADVD3WUC; 5501 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 5502 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 5503 ew32(CTRL, ctrl); 5504 5505 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 5506 adapter->hw.phy.media_type == 5507 e1000_media_type_internal_serdes) { 5508 /* keep the laser running in D3 */ 5509 ctrl_ext = er32(CTRL_EXT); 5510 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 5511 ew32(CTRL_EXT, ctrl_ext); 5512 } 5513 5514 if (adapter->flags & FLAG_IS_ICH) 5515 e1000_suspend_workarounds_ich8lan(&adapter->hw); 5516 5517 /* Allow time for pending master requests to run */ 5518 e1000e_disable_pcie_master(&adapter->hw); 5519 5520 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 5521 /* enable wakeup by the PHY */ 5522 retval = e1000_init_phy_wakeup(adapter, wufc); 5523 if (retval) 5524 return retval; 5525 } else { 5526 /* enable wakeup by the MAC */ 5527 ew32(WUFC, wufc); 5528 ew32(WUC, E1000_WUC_PME_EN); 5529 } 5530 } 
else { 5531 ew32(WUC, 0); 5532 ew32(WUFC, 0); 5533 } 5534 5535 *enable_wake = !!wufc; 5536 5537 /* make sure adapter isn't asleep if manageability is enabled */ 5538 if ((adapter->flags & FLAG_MNG_PT_ENABLED) || 5539 (hw->mac.ops.check_mng_mode(hw))) 5540 *enable_wake = true; 5541 5542 if (adapter->hw.phy.type == e1000_phy_igp_3) 5543 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 5544 5545 /* 5546 * Release control of h/w to f/w. If f/w is AMT enabled, this 5547 * would have already happened in close and is redundant. 5548 */ 5549 e1000e_release_hw_control(adapter); 5550 5551 pci_disable_device(pdev); 5552 5553 return 0; 5554 } 5555 5556 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) 5557 { 5558 if (sleep && wake) { 5559 pci_prepare_to_sleep(pdev); 5560 return; 5561 } 5562 5563 pci_wake_from_d3(pdev, wake); 5564 pci_set_power_state(pdev, PCI_D3hot); 5565 } 5566 5567 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, 5568 bool wake) 5569 { 5570 struct net_device *netdev = pci_get_drvdata(pdev); 5571 struct e1000_adapter *adapter = netdev_priv(netdev); 5572 5573 /* 5574 * The pci-e switch on some quad port adapters will report a 5575 * correctable error when the MAC transitions from D0 to D3. To 5576 * prevent this we need to mask off the correctable errors on the 5577 * downstream port of the pci-e switch. 5578 */ 5579 if (adapter->flags & FLAG_IS_QUAD_PORT) { 5580 struct pci_dev *us_dev = pdev->bus->self; 5581 int pos = pci_pcie_cap(us_dev); 5582 u16 devctl; 5583 5584 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); 5585 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, 5586 (devctl & ~PCI_EXP_DEVCTL_CERE)); 5587 5588 e1000_power_off(pdev, sleep, wake); 5589 5590 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); 5591 } else { 5592 e1000_power_off(pdev, sleep, wake); 5593 } 5594 } 5595 5596 #ifdef CONFIG_PCIEASPM 5597 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5598 { 5599 pci_disable_link_state_locked(pdev, state); 5600 } 5601 #else 5602 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5603 { 5604 int pos; 5605 u16 reg16; 5606 5607 /* 5608 * Both device and parent should have the same ASPM setting. 5609 * Disable ASPM in downstream component first and then upstream. 5610 */ 5611 pos = pci_pcie_cap(pdev); 5612 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 5613 reg16 &= ~state; 5614 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 5615 5616 if (!pdev->bus->self) 5617 return; 5618 5619 pos = pci_pcie_cap(pdev->bus->self); 5620 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16); 5621 reg16 &= ~state; 5622 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); 5623 } 5624 #endif 5625 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5626 { 5627 dev_info(&pdev->dev, "Disabling ASPM %s %s\n", 5628 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", 5629 (state & PCIE_LINK_STATE_L1) ?
"L1" : ""); 5630 5631 __e1000e_disable_aspm(pdev, state); 5632 } 5633 5634 #ifdef CONFIG_PM 5635 static bool e1000e_pm_ready(struct e1000_adapter *adapter) 5636 { 5637 return !!adapter->tx_ring->buffer_info; 5638 } 5639 5640 static int __e1000_resume(struct pci_dev *pdev) 5641 { 5642 struct net_device *netdev = pci_get_drvdata(pdev); 5643 struct e1000_adapter *adapter = netdev_priv(netdev); 5644 struct e1000_hw *hw = &adapter->hw; 5645 u16 aspm_disable_flag = 0; 5646 u32 err; 5647 5648 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 5649 aspm_disable_flag = PCIE_LINK_STATE_L0S; 5650 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 5651 aspm_disable_flag |= PCIE_LINK_STATE_L1; 5652 if (aspm_disable_flag) 5653 e1000e_disable_aspm(pdev, aspm_disable_flag); 5654 5655 pci_set_power_state(pdev, PCI_D0); 5656 pci_restore_state(pdev); 5657 pci_save_state(pdev); 5658 5659 e1000e_set_interrupt_capability(adapter); 5660 if (netif_running(netdev)) { 5661 err = e1000_request_irq(adapter); 5662 if (err) 5663 return err; 5664 } 5665 5666 if (hw->mac.type == e1000_pch2lan) 5667 e1000_resume_workarounds_pchlan(&adapter->hw); 5668 5669 e1000e_power_up_phy(adapter); 5670 5671 /* report the system wakeup cause from S3/S4 */ 5672 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 5673 u16 phy_data; 5674 5675 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); 5676 if (phy_data) { 5677 e_info("PHY Wakeup cause - %s\n", 5678 phy_data & E1000_WUS_EX ? "Unicast Packet" : 5679 phy_data & E1000_WUS_MC ? "Multicast Packet" : 5680 phy_data & E1000_WUS_BC ? "Broadcast Packet" : 5681 phy_data & E1000_WUS_MAG ? "Magic Packet" : 5682 phy_data & E1000_WUS_LNKC ? 5683 "Link Status Change" : "other"); 5684 } 5685 e1e_wphy(&adapter->hw, BM_WUS, ~0); 5686 } else { 5687 u32 wus = er32(WUS); 5688 if (wus) { 5689 e_info("MAC Wakeup cause - %s\n", 5690 wus & E1000_WUS_EX ? "Unicast Packet" : 5691 wus & E1000_WUS_MC ? "Multicast Packet" : 5692 wus & E1000_WUS_BC ? "Broadcast Packet" : 5693 wus & E1000_WUS_MAG ? "Magic Packet" : 5694 wus & E1000_WUS_LNKC ? "Link Status Change" : 5695 "other"); 5696 } 5697 ew32(WUS, ~0); 5698 } 5699 5700 e1000e_reset(adapter); 5701 5702 e1000_init_manageability_pt(adapter); 5703 5704 if (netif_running(netdev)) 5705 e1000e_up(adapter); 5706 5707 netif_device_attach(netdev); 5708 5709 /* 5710 * If the controller has AMT, do not set DRV_LOAD until the interface 5711 * is up. For all other cases, let the f/w know that the h/w is now 5712 * under the control of the driver. 
5713 */ 5714 if (!(adapter->flags & FLAG_HAS_AMT)) 5715 e1000e_get_hw_control(adapter); 5716 5717 return 0; 5718 } 5719 5720 #ifdef CONFIG_PM_SLEEP 5721 static int e1000_suspend(struct device *dev) 5722 { 5723 struct pci_dev *pdev = to_pci_dev(dev); 5724 int retval; 5725 bool wake; 5726 5727 retval = __e1000_shutdown(pdev, &wake, false); 5728 if (!retval) 5729 e1000_complete_shutdown(pdev, true, wake); 5730 5731 return retval; 5732 } 5733 5734 static int e1000_resume(struct device *dev) 5735 { 5736 struct pci_dev *pdev = to_pci_dev(dev); 5737 struct net_device *netdev = pci_get_drvdata(pdev); 5738 struct e1000_adapter *adapter = netdev_priv(netdev); 5739 5740 if (e1000e_pm_ready(adapter)) 5741 adapter->idle_check = true; 5742 5743 return __e1000_resume(pdev); 5744 } 5745 #endif /* CONFIG_PM_SLEEP */ 5746 5747 #ifdef CONFIG_PM_RUNTIME 5748 static int e1000_runtime_suspend(struct device *dev) 5749 { 5750 struct pci_dev *pdev = to_pci_dev(dev); 5751 struct net_device *netdev = pci_get_drvdata(pdev); 5752 struct e1000_adapter *adapter = netdev_priv(netdev); 5753 5754 if (e1000e_pm_ready(adapter)) { 5755 bool wake; 5756 5757 __e1000_shutdown(pdev, &wake, true); 5758 } 5759 5760 return 0; 5761 } 5762 5763 static int e1000_idle(struct device *dev) 5764 { 5765 struct pci_dev *pdev = to_pci_dev(dev); 5766 struct net_device *netdev = pci_get_drvdata(pdev); 5767 struct e1000_adapter *adapter = netdev_priv(netdev); 5768 5769 if (!e1000e_pm_ready(adapter)) 5770 return 0; 5771 5772 if (adapter->idle_check) { 5773 adapter->idle_check = false; 5774 if (!e1000e_has_link(adapter)) 5775 pm_schedule_suspend(dev, MSEC_PER_SEC); 5776 } 5777 5778 return -EBUSY; 5779 } 5780 5781 static int e1000_runtime_resume(struct device *dev) 5782 { 5783 struct pci_dev *pdev = to_pci_dev(dev); 5784 struct net_device *netdev = pci_get_drvdata(pdev); 5785 struct e1000_adapter *adapter = netdev_priv(netdev); 5786 5787 if (!e1000e_pm_ready(adapter)) 5788 return 0; 5789 5790 adapter->idle_check = !dev->power.runtime_auto; 5791 return __e1000_resume(pdev); 5792 } 5793 #endif /* CONFIG_PM_RUNTIME */ 5794 #endif /* CONFIG_PM */ 5795 5796 static void e1000_shutdown(struct pci_dev *pdev) 5797 { 5798 bool wake = false; 5799 5800 __e1000_shutdown(pdev, &wake, false); 5801 5802 if (system_state == SYSTEM_POWER_OFF) 5803 e1000_complete_shutdown(pdev, false, wake); 5804 } 5805 5806 #ifdef CONFIG_NET_POLL_CONTROLLER 5807 5808 static irqreturn_t e1000_intr_msix(int irq, void *data) 5809 { 5810 struct net_device *netdev = data; 5811 struct e1000_adapter *adapter = netdev_priv(netdev); 5812 5813 if (adapter->msix_entries) { 5814 int vector, msix_irq; 5815 5816 vector = 0; 5817 msix_irq = adapter->msix_entries[vector].vector; 5818 disable_irq(msix_irq); 5819 e1000_intr_msix_rx(msix_irq, netdev); 5820 enable_irq(msix_irq); 5821 5822 vector++; 5823 msix_irq = adapter->msix_entries[vector].vector; 5824 disable_irq(msix_irq); 5825 e1000_intr_msix_tx(msix_irq, netdev); 5826 enable_irq(msix_irq); 5827 5828 vector++; 5829 msix_irq = adapter->msix_entries[vector].vector; 5830 disable_irq(msix_irq); 5831 e1000_msix_other(msix_irq, netdev); 5832 enable_irq(msix_irq); 5833 } 5834 5835 return IRQ_HANDLED; 5836 } 5837 5838 /* 5839 * Polling 'interrupt' - used by things like netconsole to send skbs 5840 * without having to re-enable interrupts. It's not called while 5841 * the interrupt routine is executing. 
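 *
 * Each vector's handler is invoked directly with its irq disabled,
 * which is all the mutual exclusion netconsole requires here.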
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		disable_irq(adapter->pdev->irq);
		e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default: /* E1000E_INT_MODE_LEGACY */
		disable_irq(adapter->pdev->irq);
		e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pdev->state_saved = true;
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
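 * (The recovery sequence is error_detected -> slot_reset -> resume, as
 * wired up in e1000_err_handler below.)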
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
}

static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000_read_pba_string_generic(hw, pba_str,
						E1000_PBANUM_LENGTH);
	if (ret_val)
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}

static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	le16_to_cpus(&buf);
	if (!ret_val && (!(buf & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;

	if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
			 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
			 NETIF_F_RXALL)))
		return 0;

	/*
	 * IP payload checksum (enabled with jumbos/packet-split when Rx
	 * checksum is enabled) and generation of the RSS hash are mutually
	 * exclusive in the hardware.
	 */
	if (adapter->rx_ps_pages &&
	    (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
		e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
		return -EINVAL;
	}

	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* We need to take it back to defaults, which might mean
			 * stripping is still disabled at the adapter level.
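			 * (FLAG2_DFLT_CRC_STRIPPING records the stripping
			 * default the adapter started with; it is tested
			 * just below.)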
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}

	netdev->features = features;

	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	else
		e1000e_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_set_rx_mode	= e1000e_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;
	static int cards_found;
	u16 aspm_disable_flag = 0;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
					pci_select_bars(pdev, IORESOURCE_MEM),
					e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
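
	/* tie the private adapter struct to the net_device, the PCI device,
	 * and the board-specific e1000_info selected from e1000_info_tbl */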
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block(hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
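	 * (firmware may hand the device over in an arbitrary state after
	 * boot, so nothing read from the part before this reset is trusted)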
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the
	 * first attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
				       1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
				       1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);
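
	/* the link and WoL values above are only defaults; the user can
	 * change them later via ethtool */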

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (!hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	/*
	 * The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
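	 * (For parts without AMT this is where control of the hardware is
	 * handed back to the firmware, pairing with the
	 * e1000e_get_hw_control() call made in e1000_probe().)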
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
			   e1000_runtime_resume, e1000_idle)
};
#endif

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	.driver   = {
		.pm = &e1000_pm_ops,
	},
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */