/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */

static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

/*
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name, netdev->state, netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |     Buffer Address [63:0] (Reserved on Write Back)            |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS    | Status |  CMD   |  CSO   |   Length    |
	 *   +--------------------------------------------------------------+
	 *   63       48 47       36 35    32 31    24 23    16 15          0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31            16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |      IPCSE      | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19               0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS | Rsvd | Status | Command | DTYP |  DTALEN   |
	 *   +----------------------------------------------------------------+
	 *   63       48 47    40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
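		/*
		 * The type character printed below follows the diagram above:
		 * bit 29 (DEXT) clear means a legacy descriptor ('l'); for
		 * extended descriptors, bit 20 (the low bit of DTYP)
		 * distinguishes a data descriptor ('d', DTYP=0x1) from a
		 * context descriptor ('c', DTYP=0x0).
		 */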
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_ps_bsize0, true);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |              RSS Hash                 |              |
		 * 0 +-------------------+  Rsvd  |  Reserved  | MRQ RSS    |
		 *   | Packet   | IP     |        |            |   Type     |
		 *   | Checksum | Ident  |        |            |            |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
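
/*
 * Worked example (illustrative values, not from hardware): with count = 256,
 * next_to_clean = 10 and next_to_use = 250, the ring has wrapped, so
 * 256 + 10 - 250 - 1 = 15 descriptors are unused.  One slot is always left
 * unused so that next_to_use == next_to_clean unambiguously means "empty"
 * rather than "full".
 */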

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the network interface device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);
	skb->protocol = eth_type_trans(skb, netdev);

	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      __le16 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)swab16((__force u16)csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
 * @hw: pointer to the HW structure
 * @tail: address of tail descriptor register
 * @i: value to write to tail descriptor register
 *
 * When updating the tail register, the ME could be accessing Host CSR
 * registers at the same time.  Normally, this is handled in h/w by an
 * arbiter but on some parts there is a bug that acknowledges Host accesses
 * later than it should, which could result in the descriptor register
 * having an incorrect value.  Work around this by checking the FWSM
 * register, which has bit 24 set while ME is accessing Host CSR registers;
 * wait if it is set and try again a number of times.
 **/
static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
					unsigned int i)
{
	unsigned int j = 0;

	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
		udelay(50);

	writel(i, tail);

	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
		return E1000_ERR_SWFW_SYNC;

	return 0;
}

static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
		u32 tctl = er32(TCTL);
		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev,
						  adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
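			/*
			 * The tail below is written as i << 1, presumably
			 * because a packet-split descriptor is twice the size
			 * of the unit the tail register counts in, so the
			 * ring index has to be doubled.
			 */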
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}

static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rss);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck &&
	    (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be blocked on write-back; flush pending descriptor
		 * writebacks to memory and detect again
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		/*
		 * Due to rare timing issues, write to TIDV again to ensure
		 * the write is successful
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head),
	      readl(tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; it is only valid to call
			 * kmap_* in softirq (napi) context
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, rx_ring->head);
	writel(0, rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *rx_ring = adapter->rx_ring;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
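	/*
	 * itr_val is a rate in interrupts/second; the write below converts
	 * it to an interval, apparently in 256 ns units:
	 * 1000000000 / (itr_val * 256).  For example, an itr_val of 8000
	 * interrupts/second works out to roughly 488.
	 */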
1842 */ 1843 if (rx_ring->set_itr) { 1844 writel(1000000000 / (rx_ring->itr_val * 256), 1845 rx_ring->itr_register); 1846 rx_ring->set_itr = 0; 1847 } 1848 1849 if (napi_schedule_prep(&adapter->napi)) { 1850 adapter->total_rx_bytes = 0; 1851 adapter->total_rx_packets = 0; 1852 __napi_schedule(&adapter->napi); 1853 } 1854 return IRQ_HANDLED; 1855 } 1856 1857 /** 1858 * e1000_configure_msix - Configure MSI-X hardware 1859 * 1860 * e1000_configure_msix sets up the hardware to properly 1861 * generate MSI-X interrupts. 1862 **/ 1863 static void e1000_configure_msix(struct e1000_adapter *adapter) 1864 { 1865 struct e1000_hw *hw = &adapter->hw; 1866 struct e1000_ring *rx_ring = adapter->rx_ring; 1867 struct e1000_ring *tx_ring = adapter->tx_ring; 1868 int vector = 0; 1869 u32 ctrl_ext, ivar = 0; 1870 1871 adapter->eiac_mask = 0; 1872 1873 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ 1874 if (hw->mac.type == e1000_82574) { 1875 u32 rfctl = er32(RFCTL); 1876 rfctl |= E1000_RFCTL_ACK_DIS; 1877 ew32(RFCTL, rfctl); 1878 } 1879 1880 #define E1000_IVAR_INT_ALLOC_VALID 0x8 1881 /* Configure Rx vector */ 1882 rx_ring->ims_val = E1000_IMS_RXQ0; 1883 adapter->eiac_mask |= rx_ring->ims_val; 1884 if (rx_ring->itr_val) 1885 writel(1000000000 / (rx_ring->itr_val * 256), 1886 rx_ring->itr_register); 1887 else 1888 writel(1, rx_ring->itr_register); 1889 ivar = E1000_IVAR_INT_ALLOC_VALID | vector; 1890 1891 /* Configure Tx vector */ 1892 tx_ring->ims_val = E1000_IMS_TXQ0; 1893 vector++; 1894 if (tx_ring->itr_val) 1895 writel(1000000000 / (tx_ring->itr_val * 256), 1896 tx_ring->itr_register); 1897 else 1898 writel(1, tx_ring->itr_register); 1899 adapter->eiac_mask |= tx_ring->ims_val; 1900 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); 1901 1902 /* set vector for Other Causes, e.g. link changes */ 1903 vector++; 1904 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); 1905 if (rx_ring->itr_val) 1906 writel(1000000000 / (rx_ring->itr_val * 256), 1907 hw->hw_addr + E1000_EITR_82574(vector)); 1908 else 1909 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); 1910 1911 /* Cause Tx interrupts on every write back */ 1912 ivar |= (1 << 31); 1913 1914 ew32(IVAR, ivar); 1915 1916 /* enable MSI-X PBA support */ 1917 ctrl_ext = er32(CTRL_EXT); 1918 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; 1919 1920 /* Auto-Mask Other interrupts upon ICR read */ 1921 #define E1000_EIAC_MASK_82574 0x01F00000 1922 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); 1923 ctrl_ext |= E1000_CTRL_EXT_EIAME; 1924 ew32(CTRL_EXT, ctrl_ext); 1925 e1e_flush(); 1926 } 1927 1928 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) 1929 { 1930 if (adapter->msix_entries) { 1931 pci_disable_msix(adapter->pdev); 1932 kfree(adapter->msix_entries); 1933 adapter->msix_entries = NULL; 1934 } else if (adapter->flags & FLAG_MSI_ENABLED) { 1935 pci_disable_msi(adapter->pdev); 1936 adapter->flags &= ~FLAG_MSI_ENABLED; 1937 } 1938 } 1939 1940 /** 1941 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported 1942 * 1943 * Attempt to configure interrupts using the best available 1944 * capabilities of the hardware and kernel. 
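 *
 * In MSI-X mode three vectors are requested (Rx queue 0, Tx queue 0 and
 * "other" causes such as link state changes); if allocation fails, the
 * driver drops back to a single MSI vector and finally to a legacy
 * shared interrupt, leaving num_vectors at 1 for the non-MSI-X paths.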
1945 **/ 1946 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) 1947 { 1948 int err; 1949 int i; 1950 1951 switch (adapter->int_mode) { 1952 case E1000E_INT_MODE_MSIX: 1953 if (adapter->flags & FLAG_HAS_MSIX) { 1954 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 1955 adapter->msix_entries = kcalloc(adapter->num_vectors, 1956 sizeof(struct msix_entry), 1957 GFP_KERNEL); 1958 if (adapter->msix_entries) { 1959 for (i = 0; i < adapter->num_vectors; i++) 1960 adapter->msix_entries[i].entry = i; 1961 1962 err = pci_enable_msix(adapter->pdev, 1963 adapter->msix_entries, 1964 adapter->num_vectors); 1965 if (err == 0) 1966 return; 1967 } 1968 /* MSI-X failed, so fall through and try MSI */ 1969 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); 1970 e1000e_reset_interrupt_capability(adapter); 1971 } 1972 adapter->int_mode = E1000E_INT_MODE_MSI; 1973 /* Fall through */ 1974 case E1000E_INT_MODE_MSI: 1975 if (!pci_enable_msi(adapter->pdev)) { 1976 adapter->flags |= FLAG_MSI_ENABLED; 1977 } else { 1978 adapter->int_mode = E1000E_INT_MODE_LEGACY; 1979 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); 1980 } 1981 /* Fall through */ 1982 case E1000E_INT_MODE_LEGACY: 1983 /* Don't do anything; this is the system default */ 1984 break; 1985 } 1986 1987 /* store the number of vectors being used */ 1988 adapter->num_vectors = 1; 1989 } 1990 1991 /** 1992 * e1000_request_msix - Initialize MSI-X interrupts 1993 * 1994 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the 1995 * kernel. 1996 **/ 1997 static int e1000_request_msix(struct e1000_adapter *adapter) 1998 { 1999 struct net_device *netdev = adapter->netdev; 2000 int err = 0, vector = 0; 2001 2002 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 2003 snprintf(adapter->rx_ring->name, 2004 sizeof(adapter->rx_ring->name) - 1, 2005 "%s-rx-0", netdev->name); 2006 else 2007 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 2008 err = request_irq(adapter->msix_entries[vector].vector, 2009 e1000_intr_msix_rx, 0, adapter->rx_ring->name, 2010 netdev); 2011 if (err) 2012 return err; 2013 adapter->rx_ring->itr_register = adapter->hw.hw_addr + 2014 E1000_EITR_82574(vector); 2015 adapter->rx_ring->itr_val = adapter->itr; 2016 vector++; 2017 2018 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 2019 snprintf(adapter->tx_ring->name, 2020 sizeof(adapter->tx_ring->name) - 1, 2021 "%s-tx-0", netdev->name); 2022 else 2023 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 2024 err = request_irq(adapter->msix_entries[vector].vector, 2025 e1000_intr_msix_tx, 0, adapter->tx_ring->name, 2026 netdev); 2027 if (err) 2028 return err; 2029 adapter->tx_ring->itr_register = adapter->hw.hw_addr + 2030 E1000_EITR_82574(vector); 2031 adapter->tx_ring->itr_val = adapter->itr; 2032 vector++; 2033 2034 err = request_irq(adapter->msix_entries[vector].vector, 2035 e1000_msix_other, 0, netdev->name, netdev); 2036 if (err) 2037 return err; 2038 2039 e1000_configure_msix(adapter); 2040 2041 return 0; 2042 } 2043 2044 /** 2045 * e1000_request_irq - initialize interrupts 2046 * 2047 * Attempts to configure interrupts using the best available 2048 * capabilities of the hardware and kernel. 
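 *
 * The MSI-X vectors are named "<ifname>-rx-0", "<ifname>-tx-0" and the
 * bare interface name for the "other" vector; on any failure the driver
 * falls back first to MSI and then to a shared legacy interrupt.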
2049 **/ 2050 static int e1000_request_irq(struct e1000_adapter *adapter) 2051 { 2052 struct net_device *netdev = adapter->netdev; 2053 int err; 2054 2055 if (adapter->msix_entries) { 2056 err = e1000_request_msix(adapter); 2057 if (!err) 2058 return err; 2059 /* fall back to MSI */ 2060 e1000e_reset_interrupt_capability(adapter); 2061 adapter->int_mode = E1000E_INT_MODE_MSI; 2062 e1000e_set_interrupt_capability(adapter); 2063 } 2064 if (adapter->flags & FLAG_MSI_ENABLED) { 2065 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, 2066 netdev->name, netdev); 2067 if (!err) 2068 return err; 2069 2070 /* fall back to legacy interrupt */ 2071 e1000e_reset_interrupt_capability(adapter); 2072 adapter->int_mode = E1000E_INT_MODE_LEGACY; 2073 } 2074 2075 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, 2076 netdev->name, netdev); 2077 if (err) 2078 e_err("Unable to allocate interrupt, Error: %d\n", err); 2079 2080 return err; 2081 } 2082 2083 static void e1000_free_irq(struct e1000_adapter *adapter) 2084 { 2085 struct net_device *netdev = adapter->netdev; 2086 2087 if (adapter->msix_entries) { 2088 int vector = 0; 2089 2090 free_irq(adapter->msix_entries[vector].vector, netdev); 2091 vector++; 2092 2093 free_irq(adapter->msix_entries[vector].vector, netdev); 2094 vector++; 2095 2096 /* Other Causes interrupt vector */ 2097 free_irq(adapter->msix_entries[vector].vector, netdev); 2098 return; 2099 } 2100 2101 free_irq(adapter->pdev->irq, netdev); 2102 } 2103 2104 /** 2105 * e1000_irq_disable - Mask off interrupt generation on the NIC 2106 **/ 2107 static void e1000_irq_disable(struct e1000_adapter *adapter) 2108 { 2109 struct e1000_hw *hw = &adapter->hw; 2110 2111 ew32(IMC, ~0); 2112 if (adapter->msix_entries) 2113 ew32(EIAC_82574, 0); 2114 e1e_flush(); 2115 2116 if (adapter->msix_entries) { 2117 int i; 2118 for (i = 0; i < adapter->num_vectors; i++) 2119 synchronize_irq(adapter->msix_entries[i].vector); 2120 } else { 2121 synchronize_irq(adapter->pdev->irq); 2122 } 2123 } 2124 2125 /** 2126 * e1000_irq_enable - Enable default interrupt generation settings 2127 **/ 2128 static void e1000_irq_enable(struct e1000_adapter *adapter) 2129 { 2130 struct e1000_hw *hw = &adapter->hw; 2131 2132 if (adapter->msix_entries) { 2133 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2134 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); 2135 } else { 2136 ew32(IMS, IMS_ENABLE_MASK); 2137 } 2138 e1e_flush(); 2139 } 2140 2141 /** 2142 * e1000e_get_hw_control - get control of the h/w from f/w 2143 * @adapter: address of board private structure 2144 * 2145 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2146 * For ASF and Pass Through versions of f/w this means that 2147 * the driver is loaded. For AMT version (only with 82573) 2148 * of the f/w this means that the network i/f is open. 
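 *
 * Depending on the part, the DRV_LOAD handshake bit lives either in
 * SWSM (FLAG_HAS_SWSM_ON_LOAD) or in CTRL_EXT (FLAG_HAS_CTRLEXT_ON_LOAD);
 * e1000e_release_hw_control below clears the same bit again.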
**/
2150 void e1000e_get_hw_control(struct e1000_adapter *adapter)
2151 {
2152 struct e1000_hw *hw = &adapter->hw;
2153 u32 ctrl_ext;
2154 u32 swsm;
2155 
2156 /* Let firmware know the driver has taken over */
2157 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2158 swsm = er32(SWSM);
2159 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2160 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2161 ctrl_ext = er32(CTRL_EXT);
2162 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2163 }
2164 }
2165 
2166 /**
2167 * e1000e_release_hw_control - release control of the h/w to f/w
2168 * @adapter: address of board private structure
2169 *
2170 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2171 * For ASF and Pass Through versions of f/w this means that the
2172 * driver is no longer loaded. For AMT version (only with 82573)
2173 * of the f/w this means that the network i/f is closed.
2174 *
2175 **/
2176 void e1000e_release_hw_control(struct e1000_adapter *adapter)
2177 {
2178 struct e1000_hw *hw = &adapter->hw;
2179 u32 ctrl_ext;
2180 u32 swsm;
2181 
2182 /* Let firmware take over control of h/w */
2183 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2184 swsm = er32(SWSM);
2185 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2186 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2187 ctrl_ext = er32(CTRL_EXT);
2188 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2189 }
2190 }
2191 
2192 /**
2193 * e1000_alloc_ring_dma - allocate memory for a ring structure
2194 **/
2195 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2196 struct e1000_ring *ring)
2197 {
2198 struct pci_dev *pdev = adapter->pdev;
2199 
2200 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2201 GFP_KERNEL);
2202 if (!ring->desc)
2203 return -ENOMEM;
2204 
2205 return 0;
2206 }
2207 
2208 /**
2209 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2210 * @tx_ring: Tx descriptor ring
2211 *
2212 * Returns 0 on success, negative on failure
2213 **/
2214 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2215 {
2216 struct e1000_adapter *adapter = tx_ring->adapter;
2217 int err = -ENOMEM, size;
2218 
2219 size = sizeof(struct e1000_buffer) * tx_ring->count;
2220 tx_ring->buffer_info = vzalloc(size);
2221 if (!tx_ring->buffer_info)
2222 goto err;
2223 
2224 /* round up to nearest 4K */
2225 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2226 tx_ring->size = ALIGN(tx_ring->size, 4096);
2227 
2228 err = e1000_alloc_ring_dma(adapter, tx_ring);
2229 if (err)
2230 goto err;
2231 
2232 tx_ring->next_to_use = 0;
2233 tx_ring->next_to_clean = 0;
2234 
2235 return 0;
2236 err:
2237 vfree(tx_ring->buffer_info);
2238 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2239 return err;
2240 }
2241 
2242 /**
2243 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2244 * @rx_ring: Rx descriptor ring
2245 *
2246 * Returns 0 on success, negative on failure
2247 **/
2248 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2249 {
2250 struct e1000_adapter *adapter = rx_ring->adapter;
2251 struct e1000_buffer *buffer_info;
2252 int i, size, desc_len, err = -ENOMEM;
2253 
2254 size = sizeof(struct e1000_buffer) * rx_ring->count;
2255 rx_ring->buffer_info = vzalloc(size);
2256 if (!rx_ring->buffer_info)
2257 goto err;
2258 
2259 for (i = 0; i < rx_ring->count; i++) {
2260 buffer_info = &rx_ring->buffer_info[i];
2261 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2262 sizeof(struct e1000_ps_page),
2263 GFP_KERNEL);
2264 if
(!buffer_info->ps_pages) 2265 goto err_pages; 2266 } 2267 2268 desc_len = sizeof(union e1000_rx_desc_packet_split); 2269 2270 /* Round up to nearest 4K */ 2271 rx_ring->size = rx_ring->count * desc_len; 2272 rx_ring->size = ALIGN(rx_ring->size, 4096); 2273 2274 err = e1000_alloc_ring_dma(adapter, rx_ring); 2275 if (err) 2276 goto err_pages; 2277 2278 rx_ring->next_to_clean = 0; 2279 rx_ring->next_to_use = 0; 2280 rx_ring->rx_skb_top = NULL; 2281 2282 return 0; 2283 2284 err_pages: 2285 for (i = 0; i < rx_ring->count; i++) { 2286 buffer_info = &rx_ring->buffer_info[i]; 2287 kfree(buffer_info->ps_pages); 2288 } 2289 err: 2290 vfree(rx_ring->buffer_info); 2291 e_err("Unable to allocate memory for the receive descriptor ring\n"); 2292 return err; 2293 } 2294 2295 /** 2296 * e1000_clean_tx_ring - Free Tx Buffers 2297 * @tx_ring: Tx descriptor ring 2298 **/ 2299 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) 2300 { 2301 struct e1000_adapter *adapter = tx_ring->adapter; 2302 struct e1000_buffer *buffer_info; 2303 unsigned long size; 2304 unsigned int i; 2305 2306 for (i = 0; i < tx_ring->count; i++) { 2307 buffer_info = &tx_ring->buffer_info[i]; 2308 e1000_put_txbuf(tx_ring, buffer_info); 2309 } 2310 2311 netdev_reset_queue(adapter->netdev); 2312 size = sizeof(struct e1000_buffer) * tx_ring->count; 2313 memset(tx_ring->buffer_info, 0, size); 2314 2315 memset(tx_ring->desc, 0, tx_ring->size); 2316 2317 tx_ring->next_to_use = 0; 2318 tx_ring->next_to_clean = 0; 2319 2320 writel(0, tx_ring->head); 2321 writel(0, tx_ring->tail); 2322 } 2323 2324 /** 2325 * e1000e_free_tx_resources - Free Tx Resources per Queue 2326 * @tx_ring: Tx descriptor ring 2327 * 2328 * Free all transmit software resources 2329 **/ 2330 void e1000e_free_tx_resources(struct e1000_ring *tx_ring) 2331 { 2332 struct e1000_adapter *adapter = tx_ring->adapter; 2333 struct pci_dev *pdev = adapter->pdev; 2334 2335 e1000_clean_tx_ring(tx_ring); 2336 2337 vfree(tx_ring->buffer_info); 2338 tx_ring->buffer_info = NULL; 2339 2340 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2341 tx_ring->dma); 2342 tx_ring->desc = NULL; 2343 } 2344 2345 /** 2346 * e1000e_free_rx_resources - Free Rx Resources 2347 * @rx_ring: Rx descriptor ring 2348 * 2349 * Free all receive software resources 2350 **/ 2351 void e1000e_free_rx_resources(struct e1000_ring *rx_ring) 2352 { 2353 struct e1000_adapter *adapter = rx_ring->adapter; 2354 struct pci_dev *pdev = adapter->pdev; 2355 int i; 2356 2357 e1000_clean_rx_ring(rx_ring); 2358 2359 for (i = 0; i < rx_ring->count; i++) 2360 kfree(rx_ring->buffer_info[i].ps_pages); 2361 2362 vfree(rx_ring->buffer_info); 2363 rx_ring->buffer_info = NULL; 2364 2365 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2366 rx_ring->dma); 2367 rx_ring->desc = NULL; 2368 } 2369 2370 /** 2371 * e1000_update_itr - update the dynamic ITR value based on statistics 2372 * @adapter: pointer to adapter 2373 * @itr_setting: current adapter->itr 2374 * @packets: the number of packets during this measurement interval 2375 * @bytes: the number of bytes during this measurement interval 2376 * 2377 * Stores a new ITR value based on packets and byte 2378 * counts during the last interrupt. The advantage of per interrupt 2379 * computation is faster updates and more accurate ITR for the current 2380 * traffic pattern. 
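 * For example, an interval that saw 4 packets totalling 36000 bytes
 * (bytes/packets = 9000, above the 8000 threshold used below) is treated
 * as TSO or jumbo traffic and classified as bulk_latency.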
Constants in this function were computed 2381 * based on theoretical maximum wire speed and thresholds were set based 2382 * on testing data as well as attempting to minimize response time 2383 * while increasing bulk throughput. This functionality is controlled 2384 * by the InterruptThrottleRate module parameter. 2385 **/ 2386 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2387 u16 itr_setting, int packets, 2388 int bytes) 2389 { 2390 unsigned int retval = itr_setting; 2391 2392 if (packets == 0) 2393 return itr_setting; 2394 2395 switch (itr_setting) { 2396 case lowest_latency: 2397 /* handle TSO and jumbo frames */ 2398 if (bytes/packets > 8000) 2399 retval = bulk_latency; 2400 else if ((packets < 5) && (bytes > 512)) 2401 retval = low_latency; 2402 break; 2403 case low_latency: /* 50 usec aka 20000 ints/s */ 2404 if (bytes > 10000) { 2405 /* this if handles the TSO accounting */ 2406 if (bytes/packets > 8000) 2407 retval = bulk_latency; 2408 else if ((packets < 10) || ((bytes/packets) > 1200)) 2409 retval = bulk_latency; 2410 else if ((packets > 35)) 2411 retval = lowest_latency; 2412 } else if (bytes/packets > 2000) { 2413 retval = bulk_latency; 2414 } else if (packets <= 2 && bytes < 512) { 2415 retval = lowest_latency; 2416 } 2417 break; 2418 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2419 if (bytes > 25000) { 2420 if (packets > 35) 2421 retval = low_latency; 2422 } else if (bytes < 6000) { 2423 retval = low_latency; 2424 } 2425 break; 2426 } 2427 2428 return retval; 2429 } 2430 2431 static void e1000_set_itr(struct e1000_adapter *adapter) 2432 { 2433 struct e1000_hw *hw = &adapter->hw; 2434 u16 current_itr; 2435 u32 new_itr = adapter->itr; 2436 2437 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2438 if (adapter->link_speed != SPEED_1000) { 2439 current_itr = 0; 2440 new_itr = 4000; 2441 goto set_itr_now; 2442 } 2443 2444 if (adapter->flags2 & FLAG2_DISABLE_AIM) { 2445 new_itr = 0; 2446 goto set_itr_now; 2447 } 2448 2449 adapter->tx_itr = e1000_update_itr(adapter, 2450 adapter->tx_itr, 2451 adapter->total_tx_packets, 2452 adapter->total_tx_bytes); 2453 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2454 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2455 adapter->tx_itr = low_latency; 2456 2457 adapter->rx_itr = e1000_update_itr(adapter, 2458 adapter->rx_itr, 2459 adapter->total_rx_packets, 2460 adapter->total_rx_bytes); 2461 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2462 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2463 adapter->rx_itr = low_latency; 2464 2465 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2466 2467 switch (current_itr) { 2468 /* counts and packets in update_itr are dependent on these numbers */ 2469 case lowest_latency: 2470 new_itr = 70000; 2471 break; 2472 case low_latency: 2473 new_itr = 20000; /* aka hwitr = ~200 */ 2474 break; 2475 case bulk_latency: 2476 new_itr = 4000; 2477 break; 2478 default: 2479 break; 2480 } 2481 2482 set_itr_now: 2483 if (new_itr != adapter->itr) { 2484 /* 2485 * this attempts to bias the interrupt rate towards Bulk 2486 * by adding intermediate steps when interrupt rate is 2487 * increasing 2488 */ 2489 new_itr = new_itr > adapter->itr ? 
2490 min(adapter->itr + (new_itr >> 2), new_itr) : 2491 new_itr; 2492 adapter->itr = new_itr; 2493 adapter->rx_ring->itr_val = new_itr; 2494 if (adapter->msix_entries) 2495 adapter->rx_ring->set_itr = 1; 2496 else 2497 if (new_itr) 2498 ew32(ITR, 1000000000 / (new_itr * 256)); 2499 else 2500 ew32(ITR, 0); 2501 } 2502 } 2503 2504 /** 2505 * e1000_alloc_queues - Allocate memory for all rings 2506 * @adapter: board private structure to initialize 2507 **/ 2508 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) 2509 { 2510 int size = sizeof(struct e1000_ring); 2511 2512 adapter->tx_ring = kzalloc(size, GFP_KERNEL); 2513 if (!adapter->tx_ring) 2514 goto err; 2515 adapter->tx_ring->count = adapter->tx_ring_count; 2516 adapter->tx_ring->adapter = adapter; 2517 2518 adapter->rx_ring = kzalloc(size, GFP_KERNEL); 2519 if (!adapter->rx_ring) 2520 goto err; 2521 adapter->rx_ring->count = adapter->rx_ring_count; 2522 adapter->rx_ring->adapter = adapter; 2523 2524 return 0; 2525 err: 2526 e_err("Unable to allocate memory for queues\n"); 2527 kfree(adapter->rx_ring); 2528 kfree(adapter->tx_ring); 2529 return -ENOMEM; 2530 } 2531 2532 /** 2533 * e1000_clean - NAPI Rx polling callback 2534 * @napi: struct associated with this polling callback 2535 * @budget: amount of packets driver is allowed to process this poll 2536 **/ 2537 static int e1000_clean(struct napi_struct *napi, int budget) 2538 { 2539 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 2540 struct e1000_hw *hw = &adapter->hw; 2541 struct net_device *poll_dev = adapter->netdev; 2542 int tx_cleaned = 1, work_done = 0; 2543 2544 adapter = netdev_priv(poll_dev); 2545 2546 if (adapter->msix_entries && 2547 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2548 goto clean_rx; 2549 2550 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); 2551 2552 clean_rx: 2553 adapter->clean_rx(adapter->rx_ring, &work_done, budget); 2554 2555 if (!tx_cleaned) 2556 work_done = budget; 2557 2558 /* If budget not fully consumed, exit the polling mode */ 2559 if (work_done < budget) { 2560 if (adapter->itr_setting & 3) 2561 e1000_set_itr(adapter); 2562 napi_complete(napi); 2563 if (!test_bit(__E1000_DOWN, &adapter->state)) { 2564 if (adapter->msix_entries) 2565 ew32(IMS, adapter->rx_ring->ims_val); 2566 else 2567 e1000_irq_enable(adapter); 2568 } 2569 } 2570 2571 return work_done; 2572 } 2573 2574 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 2575 { 2576 struct e1000_adapter *adapter = netdev_priv(netdev); 2577 struct e1000_hw *hw = &adapter->hw; 2578 u32 vfta, index; 2579 2580 /* don't update vlan cookie if already programmed */ 2581 if ((adapter->hw.mng_cookie.status & 2582 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2583 (vid == adapter->mng_vlan_id)) 2584 return 0; 2585 2586 /* add VID to filter table */ 2587 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2588 index = (vid >> 5) & 0x7F; 2589 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2590 vfta |= (1 << (vid & 0x1F)); 2591 hw->mac.ops.write_vfta(hw, index, vfta); 2592 } 2593 2594 set_bit(vid, adapter->active_vlans); 2595 2596 return 0; 2597 } 2598 2599 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2600 { 2601 struct e1000_adapter *adapter = netdev_priv(netdev); 2602 struct e1000_hw *hw = &adapter->hw; 2603 u32 vfta, index; 2604 2605 if ((adapter->hw.mng_cookie.status & 2606 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2607 (vid == adapter->mng_vlan_id)) { 2608 /* release control to f/w */ 2609 
e1000e_release_hw_control(adapter); 2610 return 0; 2611 } 2612 2613 /* remove VID from filter table */ 2614 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2615 index = (vid >> 5) & 0x7F; 2616 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2617 vfta &= ~(1 << (vid & 0x1F)); 2618 hw->mac.ops.write_vfta(hw, index, vfta); 2619 } 2620 2621 clear_bit(vid, adapter->active_vlans); 2622 2623 return 0; 2624 } 2625 2626 /** 2627 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering 2628 * @adapter: board private structure to initialize 2629 **/ 2630 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) 2631 { 2632 struct net_device *netdev = adapter->netdev; 2633 struct e1000_hw *hw = &adapter->hw; 2634 u32 rctl; 2635 2636 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2637 /* disable VLAN receive filtering */ 2638 rctl = er32(RCTL); 2639 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); 2640 ew32(RCTL, rctl); 2641 2642 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { 2643 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2644 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2645 } 2646 } 2647 } 2648 2649 /** 2650 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering 2651 * @adapter: board private structure to initialize 2652 **/ 2653 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) 2654 { 2655 struct e1000_hw *hw = &adapter->hw; 2656 u32 rctl; 2657 2658 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2659 /* enable VLAN receive filtering */ 2660 rctl = er32(RCTL); 2661 rctl |= E1000_RCTL_VFE; 2662 rctl &= ~E1000_RCTL_CFIEN; 2663 ew32(RCTL, rctl); 2664 } 2665 } 2666 2667 /** 2668 * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping 2669 * @adapter: board private structure to initialize 2670 **/ 2671 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) 2672 { 2673 struct e1000_hw *hw = &adapter->hw; 2674 u32 ctrl; 2675 2676 /* disable VLAN tag insert/strip */ 2677 ctrl = er32(CTRL); 2678 ctrl &= ~E1000_CTRL_VME; 2679 ew32(CTRL, ctrl); 2680 } 2681 2682 /** 2683 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping 2684 * @adapter: board private structure to initialize 2685 **/ 2686 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) 2687 { 2688 struct e1000_hw *hw = &adapter->hw; 2689 u32 ctrl; 2690 2691 /* enable VLAN tag insert/strip */ 2692 ctrl = er32(CTRL); 2693 ctrl |= E1000_CTRL_VME; 2694 ew32(CTRL, ctrl); 2695 } 2696 2697 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 2698 { 2699 struct net_device *netdev = adapter->netdev; 2700 u16 vid = adapter->hw.mng_cookie.vlan_id; 2701 u16 old_vid = adapter->mng_vlan_id; 2702 2703 if (adapter->hw.mng_cookie.status & 2704 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 2705 e1000_vlan_rx_add_vid(netdev, vid); 2706 adapter->mng_vlan_id = vid; 2707 } 2708 2709 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) 2710 e1000_vlan_rx_kill_vid(netdev, old_vid); 2711 } 2712 2713 static void e1000_restore_vlan(struct e1000_adapter *adapter) 2714 { 2715 u16 vid; 2716 2717 e1000_vlan_rx_add_vid(adapter->netdev, 0); 2718 2719 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2720 e1000_vlan_rx_add_vid(adapter->netdev, vid); 2721 } 2722 2723 static void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2724 { 2725 struct e1000_hw *hw = &adapter->hw; 2726 u32 manc, manc2h, mdef, i, j; 2727 2728 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) 2729 return; 2730 2731 manc = er32(MANC); 2732 2733 /* 2734 * enable receiving 
management packets to the host. this will probably 2735 * generate destination unreachable messages from the host OS, but 2736 * the packets will be handled on SMBUS 2737 */ 2738 manc |= E1000_MANC_EN_MNG2HOST; 2739 manc2h = er32(MANC2H); 2740 2741 switch (hw->mac.type) { 2742 default: 2743 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); 2744 break; 2745 case e1000_82574: 2746 case e1000_82583: 2747 /* 2748 * Check if IPMI pass-through decision filter already exists; 2749 * if so, enable it. 2750 */ 2751 for (i = 0, j = 0; i < 8; i++) { 2752 mdef = er32(MDEF(i)); 2753 2754 /* Ignore filters with anything other than IPMI ports */ 2755 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2756 continue; 2757 2758 /* Enable this decision filter in MANC2H */ 2759 if (mdef) 2760 manc2h |= (1 << i); 2761 2762 j |= mdef; 2763 } 2764 2765 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2766 break; 2767 2768 /* Create new decision filter in an empty filter */ 2769 for (i = 0, j = 0; i < 8; i++) 2770 if (er32(MDEF(i)) == 0) { 2771 ew32(MDEF(i), (E1000_MDEF_PORT_623 | 2772 E1000_MDEF_PORT_664)); 2773 manc2h |= (1 << 1); 2774 j++; 2775 break; 2776 } 2777 2778 if (!j) 2779 e_warn("Unable to create IPMI pass-through filter\n"); 2780 break; 2781 } 2782 2783 ew32(MANC2H, manc2h); 2784 ew32(MANC, manc); 2785 } 2786 2787 /** 2788 * e1000_configure_tx - Configure Transmit Unit after Reset 2789 * @adapter: board private structure 2790 * 2791 * Configure the Tx unit of the MAC after a reset. 2792 **/ 2793 static void e1000_configure_tx(struct e1000_adapter *adapter) 2794 { 2795 struct e1000_hw *hw = &adapter->hw; 2796 struct e1000_ring *tx_ring = adapter->tx_ring; 2797 u64 tdba; 2798 u32 tdlen, tarc; 2799 2800 /* Setup the HW Tx Head and Tail descriptor pointers */ 2801 tdba = tx_ring->dma; 2802 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2803 ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); 2804 ew32(TDBAH, (tdba >> 32)); 2805 ew32(TDLEN, tdlen); 2806 ew32(TDH, 0); 2807 ew32(TDT, 0); 2808 tx_ring->head = adapter->hw.hw_addr + E1000_TDH; 2809 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; 2810 2811 /* Set the Tx Interrupt Delay register */ 2812 ew32(TIDV, adapter->tx_int_delay); 2813 /* Tx irq moderation */ 2814 ew32(TADV, adapter->tx_abs_int_delay); 2815 2816 if (adapter->flags2 & FLAG2_DMA_BURST) { 2817 u32 txdctl = er32(TXDCTL(0)); 2818 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2819 E1000_TXDCTL_WTHRESH); 2820 /* 2821 * set up some performance related parameters to encourage the 2822 * hardware to use the bus more efficiently in bursts, depends 2823 * on the tx_int_delay to be enabled, 2824 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time 2825 * hthresh = 1 ==> prefetch when one or more available 2826 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2827 * BEWARE: this seems to work but should be considered first if 2828 * there are Tx hangs or other Tx related bugs 2829 */ 2830 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2831 ew32(TXDCTL(0), txdctl); 2832 } 2833 /* erratum work around: set txdctl the same for both queues */ 2834 ew32(TXDCTL(1), er32(TXDCTL(0))); 2835 2836 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2837 tarc = er32(TARC(0)); 2838 /* 2839 * set the speed mode bit, we'll clear it if we're not at 2840 * gigabit link later 2841 */ 2842 #define SPEED_MODE_BIT (1 << 21) 2843 tarc |= SPEED_MODE_BIT; 2844 ew32(TARC(0), tarc); 2845 } 2846 2847 /* errata: program both queues to unweighted RR */ 2848 if (adapter->flags & 
FLAG_TARC_SET_BIT_ZERO) { 2849 tarc = er32(TARC(0)); 2850 tarc |= 1; 2851 ew32(TARC(0), tarc); 2852 tarc = er32(TARC(1)); 2853 tarc |= 1; 2854 ew32(TARC(1), tarc); 2855 } 2856 2857 /* Setup Transmit Descriptor Settings for eop descriptor */ 2858 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 2859 2860 /* only set IDE if we are delaying interrupts using the timers */ 2861 if (adapter->tx_int_delay) 2862 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 2863 2864 /* enable Report Status bit */ 2865 adapter->txd_cmd |= E1000_TXD_CMD_RS; 2866 2867 hw->mac.ops.config_collision_dist(hw); 2868 } 2869 2870 /** 2871 * e1000_setup_rctl - configure the receive control registers 2872 * @adapter: Board private structure 2873 **/ 2874 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 2875 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 2876 static void e1000_setup_rctl(struct e1000_adapter *adapter) 2877 { 2878 struct e1000_hw *hw = &adapter->hw; 2879 u32 rctl, rfctl; 2880 u32 pages = 0; 2881 2882 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2883 if (hw->mac.type == e1000_pch2lan) { 2884 s32 ret_val; 2885 2886 if (adapter->netdev->mtu > ETH_DATA_LEN) 2887 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2888 else 2889 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2890 2891 if (ret_val) 2892 e_dbg("failed to enable jumbo frame workaround mode\n"); 2893 } 2894 2895 /* Program MC offset vector base */ 2896 rctl = er32(RCTL); 2897 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2898 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 2899 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 2900 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2901 2902 /* Do not Store bad packets */ 2903 rctl &= ~E1000_RCTL_SBP; 2904 2905 /* Enable Long Packet receive */ 2906 if (adapter->netdev->mtu <= ETH_DATA_LEN) 2907 rctl &= ~E1000_RCTL_LPE; 2908 else 2909 rctl |= E1000_RCTL_LPE; 2910 2911 /* Some systems expect that the CRC is included in SMBUS traffic. The 2912 * hardware strips the CRC before sending to both SMBUS (BMC) and to 2913 * host memory when this is enabled 2914 */ 2915 if (adapter->flags2 & FLAG2_CRC_STRIPPING) 2916 rctl |= E1000_RCTL_SECRC; 2917 2918 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ 2919 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { 2920 u16 phy_data; 2921 2922 e1e_rphy(hw, PHY_REG(770, 26), &phy_data); 2923 phy_data &= 0xfff8; 2924 phy_data |= (1 << 2); 2925 e1e_wphy(hw, PHY_REG(770, 26), phy_data); 2926 2927 e1e_rphy(hw, 22, &phy_data); 2928 phy_data &= 0x0fff; 2929 phy_data |= (1 << 14); 2930 e1e_wphy(hw, 0x10, 0x2823); 2931 e1e_wphy(hw, 0x11, 0x0003); 2932 e1e_wphy(hw, 22, phy_data); 2933 } 2934 2935 /* Setup buffer sizes */ 2936 rctl &= ~E1000_RCTL_SZ_4096; 2937 rctl |= E1000_RCTL_BSEX; 2938 switch (adapter->rx_buffer_len) { 2939 case 2048: 2940 default: 2941 rctl |= E1000_RCTL_SZ_2048; 2942 rctl &= ~E1000_RCTL_BSEX; 2943 break; 2944 case 4096: 2945 rctl |= E1000_RCTL_SZ_4096; 2946 break; 2947 case 8192: 2948 rctl |= E1000_RCTL_SZ_8192; 2949 break; 2950 case 16384: 2951 rctl |= E1000_RCTL_SZ_16384; 2952 break; 2953 } 2954 2955 /* Enable Extended Status in all Receive Descriptors */ 2956 rfctl = er32(RFCTL); 2957 rfctl |= E1000_RFCTL_EXTEN; 2958 2959 /* 2960 * 82571 and greater support packet-split where the protocol 2961 * header is placed in skb->data and the packet data is 2962 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 2963 * In the case of a non-split, skb->data is linearly filled, 2964 * followed by the page buffers. 
Therefore, skb->data is 2965 * sized to hold the largest protocol header. 2966 * 2967 * allocations using alloc_page take too long for regular MTU 2968 * so only enable packet split for jumbo frames 2969 * 2970 * Using pages when the page size is greater than 16k wastes 2971 * a lot of memory, since we allocate 3 pages at all times 2972 * per packet. 2973 */ 2974 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2975 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 2976 adapter->rx_ps_pages = pages; 2977 else 2978 adapter->rx_ps_pages = 0; 2979 2980 if (adapter->rx_ps_pages) { 2981 u32 psrctl = 0; 2982 2983 /* 2984 * disable packet split support for IPv6 extension headers, 2985 * because some malformed IPv6 headers can hang the Rx 2986 */ 2987 rfctl |= (E1000_RFCTL_IPV6_EX_DIS | 2988 E1000_RFCTL_NEW_IPV6_EXT_DIS); 2989 2990 /* Enable Packet split descriptors */ 2991 rctl |= E1000_RCTL_DTYP_PS; 2992 2993 psrctl |= adapter->rx_ps_bsize0 >> 2994 E1000_PSRCTL_BSIZE0_SHIFT; 2995 2996 switch (adapter->rx_ps_pages) { 2997 case 3: 2998 psrctl |= PAGE_SIZE << 2999 E1000_PSRCTL_BSIZE3_SHIFT; 3000 case 2: 3001 psrctl |= PAGE_SIZE << 3002 E1000_PSRCTL_BSIZE2_SHIFT; 3003 case 1: 3004 psrctl |= PAGE_SIZE >> 3005 E1000_PSRCTL_BSIZE1_SHIFT; 3006 break; 3007 } 3008 3009 ew32(PSRCTL, psrctl); 3010 } 3011 3012 /* This is useful for sniffing bad packets. */ 3013 if (adapter->netdev->features & NETIF_F_RXALL) { 3014 /* UPE and MPE will be handled by normal PROMISC logic 3015 * in e1000e_set_rx_mode */ 3016 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3017 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3018 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3019 3020 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 3021 E1000_RCTL_DPF | /* Allow filtered pause */ 3022 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 3023 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 3024 * and that breaks VLANs. 3025 */ 3026 } 3027 3028 ew32(RFCTL, rfctl); 3029 ew32(RCTL, rctl); 3030 /* just started the receive unit, no need to restart */ 3031 adapter->flags &= ~FLAG_RX_RESTART_NOW; 3032 } 3033 3034 /** 3035 * e1000_configure_rx - Configure Receive Unit after Reset 3036 * @adapter: board private structure 3037 * 3038 * Configure the Rx unit of the MAC after a reset. 
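 *
 * This is also where the receive clean/alloc callbacks are chosen:
 * the packet-split handlers when rx_ps_pages is non-zero, the jumbo
 * handlers for MTUs above the standard frame size, and the normal
 * extended-descriptor handlers otherwise.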
3039 **/ 3040 static void e1000_configure_rx(struct e1000_adapter *adapter) 3041 { 3042 struct e1000_hw *hw = &adapter->hw; 3043 struct e1000_ring *rx_ring = adapter->rx_ring; 3044 u64 rdba; 3045 u32 rdlen, rctl, rxcsum, ctrl_ext; 3046 3047 if (adapter->rx_ps_pages) { 3048 /* this is a 32 byte descriptor */ 3049 rdlen = rx_ring->count * 3050 sizeof(union e1000_rx_desc_packet_split); 3051 adapter->clean_rx = e1000_clean_rx_irq_ps; 3052 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 3053 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 3054 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3055 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 3056 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 3057 } else { 3058 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3059 adapter->clean_rx = e1000_clean_rx_irq; 3060 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 3061 } 3062 3063 /* disable receives while setting up the descriptors */ 3064 rctl = er32(RCTL); 3065 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3066 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3067 e1e_flush(); 3068 usleep_range(10000, 20000); 3069 3070 if (adapter->flags2 & FLAG2_DMA_BURST) { 3071 /* 3072 * set the writeback threshold (only takes effect if the RDTR 3073 * is set). set GRAN=1 and write back up to 0x4 worth, and 3074 * enable prefetching of 0x20 Rx descriptors 3075 * granularity = 01 3076 * wthresh = 04, 3077 * hthresh = 04, 3078 * pthresh = 0x20 3079 */ 3080 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); 3081 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); 3082 3083 /* 3084 * override the delay timers for enabling bursting, only if 3085 * the value was not set by the user via module options 3086 */ 3087 if (adapter->rx_int_delay == DEFAULT_RDTR) 3088 adapter->rx_int_delay = BURST_RDTR; 3089 if (adapter->rx_abs_int_delay == DEFAULT_RADV) 3090 adapter->rx_abs_int_delay = BURST_RADV; 3091 } 3092 3093 /* set the Receive Delay Timer Register */ 3094 ew32(RDTR, adapter->rx_int_delay); 3095 3096 /* irq moderation */ 3097 ew32(RADV, adapter->rx_abs_int_delay); 3098 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) 3099 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3100 3101 ctrl_ext = er32(CTRL_EXT); 3102 /* Auto-Mask interrupts upon ICR access */ 3103 ctrl_ext |= E1000_CTRL_EXT_IAME; 3104 ew32(IAM, 0xffffffff); 3105 ew32(CTRL_EXT, ctrl_ext); 3106 e1e_flush(); 3107 3108 /* 3109 * Setup the HW Rx Head and Tail Descriptor Pointers and 3110 * the Base and Length of the Rx Descriptor Ring 3111 */ 3112 rdba = rx_ring->dma; 3113 ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); 3114 ew32(RDBAH, (rdba >> 32)); 3115 ew32(RDLEN, rdlen); 3116 ew32(RDH, 0); 3117 ew32(RDT, 0); 3118 rx_ring->head = adapter->hw.hw_addr + E1000_RDH; 3119 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; 3120 3121 /* Enable Receive Checksum Offload for TCP and UDP */ 3122 rxcsum = er32(RXCSUM); 3123 if (adapter->netdev->features & NETIF_F_RXCSUM) { 3124 rxcsum |= E1000_RXCSUM_TUOFL; 3125 3126 /* 3127 * IPv4 payload checksum for UDP fragments must be 3128 * used in conjunction with packet-split. 3129 */ 3130 if (adapter->rx_ps_pages) 3131 rxcsum |= E1000_RXCSUM_IPPCSE; 3132 } else { 3133 rxcsum &= ~E1000_RXCSUM_TUOFL; 3134 /* no need to clear IPPCSE as it defaults to 0 */ 3135 } 3136 ew32(RXCSUM, rxcsum); 3137 3138 if (adapter->hw.mac.type == e1000_pch2lan) { 3139 /* 3140 * With jumbo frames, excessive C-state transition 3141 * latencies result in dropped transactions. 
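 * The code below bounds the allowed CPU DMA latency (55 usec here) via
 * the PM QoS request added in e1000_open, and restores
 * PM_QOS_DEFAULT_VALUE once a standard MTU is in use again.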
3142 */ 3143 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3144 u32 rxdctl = er32(RXDCTL(0)); 3145 ew32(RXDCTL(0), rxdctl | 0x3); 3146 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); 3147 } else { 3148 pm_qos_update_request(&adapter->netdev->pm_qos_req, 3149 PM_QOS_DEFAULT_VALUE); 3150 } 3151 } 3152 3153 /* Enable Receives */ 3154 ew32(RCTL, rctl); 3155 } 3156 3157 /** 3158 * e1000e_write_mc_addr_list - write multicast addresses to MTA 3159 * @netdev: network interface device structure 3160 * 3161 * Writes multicast address list to the MTA hash table. 3162 * Returns: -ENOMEM on failure 3163 * 0 on no addresses written 3164 * X on writing X addresses to MTA 3165 */ 3166 static int e1000e_write_mc_addr_list(struct net_device *netdev) 3167 { 3168 struct e1000_adapter *adapter = netdev_priv(netdev); 3169 struct e1000_hw *hw = &adapter->hw; 3170 struct netdev_hw_addr *ha; 3171 u8 *mta_list; 3172 int i; 3173 3174 if (netdev_mc_empty(netdev)) { 3175 /* nothing to program, so clear mc list */ 3176 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); 3177 return 0; 3178 } 3179 3180 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); 3181 if (!mta_list) 3182 return -ENOMEM; 3183 3184 /* update_mc_addr_list expects a packed array of only addresses. */ 3185 i = 0; 3186 netdev_for_each_mc_addr(ha, netdev) 3187 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3188 3189 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); 3190 kfree(mta_list); 3191 3192 return netdev_mc_count(netdev); 3193 } 3194 3195 /** 3196 * e1000e_write_uc_addr_list - write unicast addresses to RAR table 3197 * @netdev: network interface device structure 3198 * 3199 * Writes unicast address list to the RAR table. 3200 * Returns: -ENOMEM on failure/insufficient address space 3201 * 0 on no addresses written 3202 * X on writing X addresses to the RAR table 3203 **/ 3204 static int e1000e_write_uc_addr_list(struct net_device *netdev) 3205 { 3206 struct e1000_adapter *adapter = netdev_priv(netdev); 3207 struct e1000_hw *hw = &adapter->hw; 3208 unsigned int rar_entries = hw->mac.rar_entry_count; 3209 int count = 0; 3210 3211 /* save a rar entry for our hardware address */ 3212 rar_entries--; 3213 3214 /* save a rar entry for the LAA workaround */ 3215 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) 3216 rar_entries--; 3217 3218 /* return ENOMEM indicating insufficient memory for addresses */ 3219 if (netdev_uc_count(netdev) > rar_entries) 3220 return -ENOMEM; 3221 3222 if (!netdev_uc_empty(netdev) && rar_entries) { 3223 struct netdev_hw_addr *ha; 3224 3225 /* 3226 * write the addresses in reverse order to avoid write 3227 * combining 3228 */ 3229 netdev_for_each_uc_addr(ha, netdev) { 3230 if (!rar_entries) 3231 break; 3232 e1000e_rar_set(hw, ha->addr, rar_entries--); 3233 count++; 3234 } 3235 } 3236 3237 /* zero out the remaining RAR entries not used above */ 3238 for (; rar_entries > 0; rar_entries--) { 3239 ew32(RAH(rar_entries), 0); 3240 ew32(RAL(rar_entries), 0); 3241 } 3242 e1e_flush(); 3243 3244 return count; 3245 } 3246 3247 /** 3248 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set 3249 * @netdev: network interface device structure 3250 * 3251 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast 3252 * address list or the network interface flags are updated. This routine is 3253 * responsible for configuring the hardware for proper unicast, multicast, 3254 * promiscuous mode, and all-multi behavior. 
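 *
 * If the multicast list cannot be written to the MTA, the device falls
 * back to all-multicast (MPE); if the unicast list does not fit in the
 * available RAR entries, it falls back to unicast promiscuous (UPE).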
3255 **/ 3256 static void e1000e_set_rx_mode(struct net_device *netdev) 3257 { 3258 struct e1000_adapter *adapter = netdev_priv(netdev); 3259 struct e1000_hw *hw = &adapter->hw; 3260 u32 rctl; 3261 3262 /* Check for Promiscuous and All Multicast modes */ 3263 rctl = er32(RCTL); 3264 3265 /* clear the affected bits */ 3266 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); 3267 3268 if (netdev->flags & IFF_PROMISC) { 3269 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3270 /* Do not hardware filter VLANs in promisc mode */ 3271 e1000e_vlan_filter_disable(adapter); 3272 } else { 3273 int count; 3274 3275 if (netdev->flags & IFF_ALLMULTI) { 3276 rctl |= E1000_RCTL_MPE; 3277 } else { 3278 /* 3279 * Write addresses to the MTA, if the attempt fails 3280 * then we should just turn on promiscuous mode so 3281 * that we can at least receive multicast traffic 3282 */ 3283 count = e1000e_write_mc_addr_list(netdev); 3284 if (count < 0) 3285 rctl |= E1000_RCTL_MPE; 3286 } 3287 e1000e_vlan_filter_enable(adapter); 3288 /* 3289 * Write addresses to available RAR registers, if there is not 3290 * sufficient space to store all the addresses then enable 3291 * unicast promiscuous mode 3292 */ 3293 count = e1000e_write_uc_addr_list(netdev); 3294 if (count < 0) 3295 rctl |= E1000_RCTL_UPE; 3296 } 3297 3298 ew32(RCTL, rctl); 3299 3300 if (netdev->features & NETIF_F_HW_VLAN_RX) 3301 e1000e_vlan_strip_enable(adapter); 3302 else 3303 e1000e_vlan_strip_disable(adapter); 3304 } 3305 3306 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) 3307 { 3308 struct e1000_hw *hw = &adapter->hw; 3309 u32 mrqc, rxcsum; 3310 int i; 3311 static const u32 rsskey[10] = { 3312 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, 3313 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe 3314 }; 3315 3316 /* Fill out hash function seed */ 3317 for (i = 0; i < 10; i++) 3318 ew32(RSSRK(i), rsskey[i]); 3319 3320 /* Direct all traffic to queue 0 */ 3321 for (i = 0; i < 32; i++) 3322 ew32(RETA(i), 0); 3323 3324 /* 3325 * Disable raw packet checksumming so that RSS hash is placed in 3326 * descriptor on writeback. 
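 * RXCSUM.PCSD selects between writing the packet checksum and the RSS
 * hash into the descriptor; the 40-byte RSSRK key and the all-zero RETA
 * programmed above steer every flow to queue 0.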
3327 */ 3328 rxcsum = er32(RXCSUM); 3329 rxcsum |= E1000_RXCSUM_PCSD; 3330 3331 ew32(RXCSUM, rxcsum); 3332 3333 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | 3334 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3335 E1000_MRQC_RSS_FIELD_IPV6 | 3336 E1000_MRQC_RSS_FIELD_IPV6_TCP | 3337 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 3338 3339 ew32(MRQC, mrqc); 3340 } 3341 3342 /** 3343 * e1000_configure - configure the hardware for Rx and Tx 3344 * @adapter: private board structure 3345 **/ 3346 static void e1000_configure(struct e1000_adapter *adapter) 3347 { 3348 struct e1000_ring *rx_ring = adapter->rx_ring; 3349 3350 e1000e_set_rx_mode(adapter->netdev); 3351 3352 e1000_restore_vlan(adapter); 3353 e1000_init_manageability_pt(adapter); 3354 3355 e1000_configure_tx(adapter); 3356 3357 if (adapter->netdev->features & NETIF_F_RXHASH) 3358 e1000e_setup_rss_hash(adapter); 3359 e1000_setup_rctl(adapter); 3360 e1000_configure_rx(adapter); 3361 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); 3362 } 3363 3364 /** 3365 * e1000e_power_up_phy - restore link in case the phy was powered down 3366 * @adapter: address of board private structure 3367 * 3368 * The phy may be powered down to save power and turn off link when the 3369 * driver is unloaded and wake on lan is not enabled (among others) 3370 * *** this routine MUST be followed by a call to e1000e_reset *** 3371 **/ 3372 void e1000e_power_up_phy(struct e1000_adapter *adapter) 3373 { 3374 if (adapter->hw.phy.ops.power_up) 3375 adapter->hw.phy.ops.power_up(&adapter->hw); 3376 3377 adapter->hw.mac.ops.setup_link(&adapter->hw); 3378 } 3379 3380 /** 3381 * e1000_power_down_phy - Power down the PHY 3382 * 3383 * Power down the PHY so no link is implied when interface is down. 3384 * The PHY cannot be powered down if management or WoL is active. 3385 */ 3386 static void e1000_power_down_phy(struct e1000_adapter *adapter) 3387 { 3388 /* WoL is enabled */ 3389 if (adapter->wol) 3390 return; 3391 3392 if (adapter->hw.phy.ops.power_down) 3393 adapter->hw.phy.ops.power_down(&adapter->hw); 3394 } 3395 3396 /** 3397 * e1000e_reset - bring the hardware into a known good state 3398 * 3399 * This function boots the hardware and enables some settings that 3400 * require a configuration cycle of the hardware - those cannot be 3401 * set/changed during runtime. After reset the device needs to be 3402 * properly configured for Rx, Tx etc. 3403 */ 3404 void e1000e_reset(struct e1000_adapter *adapter) 3405 { 3406 struct e1000_mac_info *mac = &adapter->hw.mac; 3407 struct e1000_fc_info *fc = &adapter->hw.fc; 3408 struct e1000_hw *hw = &adapter->hw; 3409 u32 tx_space, min_tx_space, min_rx_space; 3410 u32 pba = adapter->pba; 3411 u16 hwm; 3412 3413 /* reset Packet Buffer Allocation to default */ 3414 ew32(PBA, pba); 3415 3416 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 3417 /* 3418 * To maintain wire speed transmits, the Tx FIFO should be 3419 * large enough to accommodate two full transmit packets, 3420 * rounded up to the next 1KB and expressed in KB. Likewise, 3421 * the Rx FIFO should be large enough to accommodate at least 3422 * one full receive packet and is similarly rounded up and 3423 * expressed in KB. 
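 *
 * For example, with a 9018-byte jumbo frame the Tx minimum works out to
 * (9018 + 16 - 4) * 2 = 18060 bytes, rounded up to 18 KB, while the Rx
 * minimum is ALIGN(9018, 1024) >> 10 = 9 KB (rough figures assuming the
 * 16-byte legacy Tx descriptor).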
3424 */ 3425 pba = er32(PBA); 3426 /* upper 16 bits has Tx packet buffer allocation size in KB */ 3427 tx_space = pba >> 16; 3428 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3429 pba &= 0xffff; 3430 /* 3431 * the Tx fifo also stores 16 bytes of information about the Tx 3432 * but don't include ethernet FCS because hardware appends it 3433 */ 3434 min_tx_space = (adapter->max_frame_size + 3435 sizeof(struct e1000_tx_desc) - 3436 ETH_FCS_LEN) * 2; 3437 min_tx_space = ALIGN(min_tx_space, 1024); 3438 min_tx_space >>= 10; 3439 /* software strips receive CRC, so leave room for it */ 3440 min_rx_space = adapter->max_frame_size; 3441 min_rx_space = ALIGN(min_rx_space, 1024); 3442 min_rx_space >>= 10; 3443 3444 /* 3445 * If current Tx allocation is less than the min Tx FIFO size, 3446 * and the min Tx FIFO size is less than the current Rx FIFO 3447 * allocation, take space away from current Rx allocation 3448 */ 3449 if ((tx_space < min_tx_space) && 3450 ((min_tx_space - tx_space) < pba)) { 3451 pba -= min_tx_space - tx_space; 3452 3453 /* 3454 * if short on Rx space, Rx wins and must trump Tx 3455 * adjustment or use Early Receive if available 3456 */ 3457 if (pba < min_rx_space) 3458 pba = min_rx_space; 3459 } 3460 3461 ew32(PBA, pba); 3462 } 3463 3464 /* 3465 * flow control settings 3466 * 3467 * The high water mark must be low enough to fit one full frame 3468 * (or the size used for early receive) above it in the Rx FIFO. 3469 * Set it to the lower of: 3470 * - 90% of the Rx FIFO size, and 3471 * - the full Rx FIFO size minus one full frame 3472 */ 3473 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 3474 fc->pause_time = 0xFFFF; 3475 else 3476 fc->pause_time = E1000_FC_PAUSE_TIME; 3477 fc->send_xon = true; 3478 fc->current_mode = fc->requested_mode; 3479 3480 switch (hw->mac.type) { 3481 case e1000_ich9lan: 3482 case e1000_ich10lan: 3483 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3484 pba = 14; 3485 ew32(PBA, pba); 3486 fc->high_water = 0x2800; 3487 fc->low_water = fc->high_water - 8; 3488 break; 3489 } 3490 /* fall-through */ 3491 default: 3492 hwm = min(((pba << 10) * 9 / 10), 3493 ((pba << 10) - adapter->max_frame_size)); 3494 3495 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 3496 fc->low_water = fc->high_water - 8; 3497 break; 3498 case e1000_pchlan: 3499 /* 3500 * Workaround PCH LOM adapter hangs with certain network 3501 * loads. If hangs persist, try disabling Tx flow control. 3502 */ 3503 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3504 fc->high_water = 0x3500; 3505 fc->low_water = 0x1500; 3506 } else { 3507 fc->high_water = 0x5000; 3508 fc->low_water = 0x3000; 3509 } 3510 fc->refresh_time = 0x1000; 3511 break; 3512 case e1000_pch2lan: 3513 fc->high_water = 0x05C20; 3514 fc->low_water = 0x05048; 3515 fc->pause_time = 0x0650; 3516 fc->refresh_time = 0x0400; 3517 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3518 pba = 14; 3519 ew32(PBA, pba); 3520 } 3521 break; 3522 } 3523 3524 /* 3525 * Disable Adaptive Interrupt Moderation if 2 full packets cannot 3526 * fit in receive buffer. 
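 * For example, with the 14 KB PBA used for jumbo frames on ICH9/ICH10
 * above, two 9018-byte frames need 18036 bytes > 14336, so moderation is
 * forced off and ITR is written as 0; otherwise it is re-enabled at
 * 20000 ints/s.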
3527 */ 3528 if (adapter->itr_setting & 0x3) { 3529 if ((adapter->max_frame_size * 2) > (pba << 10)) { 3530 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 3531 dev_info(&adapter->pdev->dev, 3532 "Interrupt Throttle Rate turned off\n"); 3533 adapter->flags2 |= FLAG2_DISABLE_AIM; 3534 ew32(ITR, 0); 3535 } 3536 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 3537 dev_info(&adapter->pdev->dev, 3538 "Interrupt Throttle Rate turned on\n"); 3539 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 3540 adapter->itr = 20000; 3541 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3542 } 3543 } 3544 3545 /* Allow time for pending master requests to run */ 3546 mac->ops.reset_hw(hw); 3547 3548 /* 3549 * For parts with AMT enabled, let the firmware know 3550 * that the network interface is in control 3551 */ 3552 if (adapter->flags & FLAG_HAS_AMT) 3553 e1000e_get_hw_control(adapter); 3554 3555 ew32(WUC, 0); 3556 3557 if (mac->ops.init_hw(hw)) 3558 e_err("Hardware Error\n"); 3559 3560 e1000_update_mng_vlan(adapter); 3561 3562 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 3563 ew32(VET, ETH_P_8021Q); 3564 3565 e1000e_reset_adaptive(hw); 3566 3567 if (!netif_running(adapter->netdev) && 3568 !test_bit(__E1000_TESTING, &adapter->state)) { 3569 e1000_power_down_phy(adapter); 3570 return; 3571 } 3572 3573 e1000_get_phy_info(hw); 3574 3575 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 3576 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { 3577 u16 phy_data = 0; 3578 /* 3579 * speed up time to link by disabling smart power down, ignore 3580 * the return value of this function because there is nothing 3581 * different we would do if it failed 3582 */ 3583 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 3584 phy_data &= ~IGP02E1000_PM_SPD; 3585 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 3586 } 3587 } 3588 3589 int e1000e_up(struct e1000_adapter *adapter) 3590 { 3591 struct e1000_hw *hw = &adapter->hw; 3592 3593 /* hardware has been reset, we need to reload some things */ 3594 e1000_configure(adapter); 3595 3596 clear_bit(__E1000_DOWN, &adapter->state); 3597 3598 if (adapter->msix_entries) 3599 e1000_configure_msix(adapter); 3600 e1000_irq_enable(adapter); 3601 3602 netif_start_queue(adapter->netdev); 3603 3604 /* fire a link change interrupt to start the watchdog */ 3605 if (adapter->msix_entries) 3606 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); 3607 else 3608 ew32(ICS, E1000_ICS_LSC); 3609 3610 return 0; 3611 } 3612 3613 static void e1000e_flush_descriptors(struct e1000_adapter *adapter) 3614 { 3615 struct e1000_hw *hw = &adapter->hw; 3616 3617 if (!(adapter->flags2 & FLAG2_DMA_BURST)) 3618 return; 3619 3620 /* flush pending descriptor writebacks to memory */ 3621 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 3622 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 3623 3624 /* execute the writes immediately */ 3625 e1e_flush(); 3626 3627 /* 3628 * due to rare timing issues, write to TIDV/RDTR again to ensure the 3629 * write is successful 3630 */ 3631 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 3632 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 3633 3634 /* execute the writes immediately */ 3635 e1e_flush(); 3636 } 3637 3638 static void e1000e_update_stats(struct e1000_adapter *adapter); 3639 3640 void e1000e_down(struct e1000_adapter *adapter) 3641 { 3642 struct net_device *netdev = adapter->netdev; 3643 struct e1000_hw *hw = &adapter->hw; 3644 u32 tctl, rctl; 3645 3646 /* 3647 * signal that we're down so the interrupt handler does not 3648 * reschedule our watchdog timer 3649 
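 * (the interrupt handlers above test __E1000_DOWN before re-arming the
 * watchdog timer)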
*/ 3650 set_bit(__E1000_DOWN, &adapter->state); 3651 3652 /* disable receives in the hardware */ 3653 rctl = er32(RCTL); 3654 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3655 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3656 /* flush and sleep below */ 3657 3658 netif_stop_queue(netdev); 3659 3660 /* disable transmits in the hardware */ 3661 tctl = er32(TCTL); 3662 tctl &= ~E1000_TCTL_EN; 3663 ew32(TCTL, tctl); 3664 3665 /* flush both disables and wait for them to finish */ 3666 e1e_flush(); 3667 usleep_range(10000, 20000); 3668 3669 e1000_irq_disable(adapter); 3670 3671 del_timer_sync(&adapter->watchdog_timer); 3672 del_timer_sync(&adapter->phy_info_timer); 3673 3674 netif_carrier_off(netdev); 3675 3676 spin_lock(&adapter->stats64_lock); 3677 e1000e_update_stats(adapter); 3678 spin_unlock(&adapter->stats64_lock); 3679 3680 e1000e_flush_descriptors(adapter); 3681 e1000_clean_tx_ring(adapter->tx_ring); 3682 e1000_clean_rx_ring(adapter->rx_ring); 3683 3684 adapter->link_speed = 0; 3685 adapter->link_duplex = 0; 3686 3687 if (!pci_channel_offline(adapter->pdev)) 3688 e1000e_reset(adapter); 3689 3690 /* 3691 * TODO: for power management, we could drop the link and 3692 * pci_disable_device here. 3693 */ 3694 } 3695 3696 void e1000e_reinit_locked(struct e1000_adapter *adapter) 3697 { 3698 might_sleep(); 3699 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 3700 usleep_range(1000, 2000); 3701 e1000e_down(adapter); 3702 e1000e_up(adapter); 3703 clear_bit(__E1000_RESETTING, &adapter->state); 3704 } 3705 3706 /** 3707 * e1000_sw_init - Initialize general software structures (struct e1000_adapter) 3708 * @adapter: board private structure to initialize 3709 * 3710 * e1000_sw_init initializes the Adapter private data structure. 3711 * Fields are initialized based on PCI device information and 3712 * OS network device settings (MTU size). 3713 **/ 3714 static int __devinit e1000_sw_init(struct e1000_adapter *adapter) 3715 { 3716 struct net_device *netdev = adapter->netdev; 3717 3718 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 3719 adapter->rx_ps_bsize0 = 128; 3720 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3721 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3722 adapter->tx_ring_count = E1000_DEFAULT_TXD; 3723 adapter->rx_ring_count = E1000_DEFAULT_RXD; 3724 3725 spin_lock_init(&adapter->stats64_lock); 3726 3727 e1000e_set_interrupt_capability(adapter); 3728 3729 if (e1000_alloc_queues(adapter)) 3730 return -ENOMEM; 3731 3732 /* Explicitly disable IRQ since the NIC can be in any state. 
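 * (e1000_irq_disable also waits, via synchronize_irq, for any vector
 * that might already be running)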
*/ 3733 e1000_irq_disable(adapter); 3734 3735 set_bit(__E1000_DOWN, &adapter->state); 3736 return 0; 3737 } 3738 3739 /** 3740 * e1000_intr_msi_test - Interrupt Handler 3741 * @irq: interrupt number 3742 * @data: pointer to a network interface device structure 3743 **/ 3744 static irqreturn_t e1000_intr_msi_test(int irq, void *data) 3745 { 3746 struct net_device *netdev = data; 3747 struct e1000_adapter *adapter = netdev_priv(netdev); 3748 struct e1000_hw *hw = &adapter->hw; 3749 u32 icr = er32(ICR); 3750 3751 e_dbg("icr is %08X\n", icr); 3752 if (icr & E1000_ICR_RXSEQ) { 3753 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 3754 wmb(); 3755 } 3756 3757 return IRQ_HANDLED; 3758 } 3759 3760 /** 3761 * e1000_test_msi_interrupt - Returns 0 for successful test 3762 * @adapter: board private struct 3763 * 3764 * code flow taken from tg3.c 3765 **/ 3766 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) 3767 { 3768 struct net_device *netdev = adapter->netdev; 3769 struct e1000_hw *hw = &adapter->hw; 3770 int err; 3771 3772 /* poll_enable hasn't been called yet, so don't need disable */ 3773 /* clear any pending events */ 3774 er32(ICR); 3775 3776 /* free the real vector and request a test handler */ 3777 e1000_free_irq(adapter); 3778 e1000e_reset_interrupt_capability(adapter); 3779 3780 /* Assume that the test fails, if it succeeds then the test 3781 * MSI irq handler will unset this flag */ 3782 adapter->flags |= FLAG_MSI_TEST_FAILED; 3783 3784 err = pci_enable_msi(adapter->pdev); 3785 if (err) 3786 goto msi_test_failed; 3787 3788 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, 3789 netdev->name, netdev); 3790 if (err) { 3791 pci_disable_msi(adapter->pdev); 3792 goto msi_test_failed; 3793 } 3794 3795 wmb(); 3796 3797 e1000_irq_enable(adapter); 3798 3799 /* fire an unusual interrupt on the test handler */ 3800 ew32(ICS, E1000_ICS_RXSEQ); 3801 e1e_flush(); 3802 msleep(100); 3803 3804 e1000_irq_disable(adapter); 3805 3806 rmb(); 3807 3808 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 3809 adapter->int_mode = E1000E_INT_MODE_LEGACY; 3810 e_info("MSI interrupt test failed, using legacy interrupt.\n"); 3811 } else { 3812 e_dbg("MSI interrupt test succeeded!\n"); 3813 } 3814 3815 free_irq(adapter->pdev->irq, netdev); 3816 pci_disable_msi(adapter->pdev); 3817 3818 msi_test_failed: 3819 e1000e_set_interrupt_capability(adapter); 3820 return e1000_request_irq(adapter); 3821 } 3822 3823 /** 3824 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored 3825 * @adapter: board private struct 3826 * 3827 * code flow taken from tg3.c, called with e1000 interrupts disabled. 
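 * SERR is temporarily cleared in PCI_COMMAND so a master abort caused by
 * the test MSI write cannot escalate into a system error; it is restored
 * once the test completes.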
3828 **/ 3829 static int e1000_test_msi(struct e1000_adapter *adapter) 3830 { 3831 int err; 3832 u16 pci_cmd; 3833 3834 if (!(adapter->flags & FLAG_MSI_ENABLED)) 3835 return 0; 3836 3837 /* disable SERR in case the MSI write causes a master abort */ 3838 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 3839 if (pci_cmd & PCI_COMMAND_SERR) 3840 pci_write_config_word(adapter->pdev, PCI_COMMAND, 3841 pci_cmd & ~PCI_COMMAND_SERR); 3842 3843 err = e1000_test_msi_interrupt(adapter); 3844 3845 /* re-enable SERR */ 3846 if (pci_cmd & PCI_COMMAND_SERR) { 3847 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 3848 pci_cmd |= PCI_COMMAND_SERR; 3849 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); 3850 } 3851 3852 return err; 3853 } 3854 3855 /** 3856 * e1000_open - Called when a network interface is made active 3857 * @netdev: network interface device structure 3858 * 3859 * Returns 0 on success, negative value on failure 3860 * 3861 * The open entry point is called when a network interface is made 3862 * active by the system (IFF_UP). At this point all resources needed 3863 * for transmit and receive operations are allocated, the interrupt 3864 * handler is registered with the OS, the watchdog timer is started, 3865 * and the stack is notified that the interface is ready. 3866 **/ 3867 static int e1000_open(struct net_device *netdev) 3868 { 3869 struct e1000_adapter *adapter = netdev_priv(netdev); 3870 struct e1000_hw *hw = &adapter->hw; 3871 struct pci_dev *pdev = adapter->pdev; 3872 int err; 3873 3874 /* disallow open during test */ 3875 if (test_bit(__E1000_TESTING, &adapter->state)) 3876 return -EBUSY; 3877 3878 pm_runtime_get_sync(&pdev->dev); 3879 3880 netif_carrier_off(netdev); 3881 3882 /* allocate transmit descriptors */ 3883 err = e1000e_setup_tx_resources(adapter->tx_ring); 3884 if (err) 3885 goto err_setup_tx; 3886 3887 /* allocate receive descriptors */ 3888 err = e1000e_setup_rx_resources(adapter->rx_ring); 3889 if (err) 3890 goto err_setup_rx; 3891 3892 /* 3893 * If AMT is enabled, let the firmware know that the network 3894 * interface is now open and reset the part to a known state. 3895 */ 3896 if (adapter->flags & FLAG_HAS_AMT) { 3897 e1000e_get_hw_control(adapter); 3898 e1000e_reset(adapter); 3899 } 3900 3901 e1000e_power_up_phy(adapter); 3902 3903 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 3904 if ((adapter->hw.mng_cookie.status & 3905 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 3906 e1000_update_mng_vlan(adapter); 3907 3908 /* DMA latency requirement to workaround jumbo issue */ 3909 if (adapter->hw.mac.type == e1000_pch2lan) 3910 pm_qos_add_request(&adapter->netdev->pm_qos_req, 3911 PM_QOS_CPU_DMA_LATENCY, 3912 PM_QOS_DEFAULT_VALUE); 3913 3914 /* 3915 * before we allocate an interrupt, we must be ready to handle it. 3916 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 3917 * as soon as we call pci_request_irq, so we have to setup our 3918 * clean_rx handler before we do so. 
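 * (hence e1000_configure() runs below, before e1000_request_irq())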
3919 */ 3920 e1000_configure(adapter); 3921 3922 err = e1000_request_irq(adapter); 3923 if (err) 3924 goto err_req_irq; 3925 3926 /* 3927 * Work around PCIe errata with MSI interrupts causing some chipsets to 3928 * ignore e1000e MSI messages, which means we need to test our MSI 3929 * interrupt now 3930 */ 3931 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { 3932 err = e1000_test_msi(adapter); 3933 if (err) { 3934 e_err("Interrupt allocation failed\n"); 3935 goto err_req_irq; 3936 } 3937 } 3938 3939 /* From here on the code is the same as e1000e_up() */ 3940 clear_bit(__E1000_DOWN, &adapter->state); 3941 3942 napi_enable(&adapter->napi); 3943 3944 e1000_irq_enable(adapter); 3945 3946 adapter->tx_hang_recheck = false; 3947 netif_start_queue(netdev); 3948 3949 adapter->idle_check = true; 3950 pm_runtime_put(&pdev->dev); 3951 3952 /* fire a link status change interrupt to start the watchdog */ 3953 if (adapter->msix_entries) 3954 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); 3955 else 3956 ew32(ICS, E1000_ICS_LSC); 3957 3958 return 0; 3959 3960 err_req_irq: 3961 e1000e_release_hw_control(adapter); 3962 e1000_power_down_phy(adapter); 3963 e1000e_free_rx_resources(adapter->rx_ring); 3964 err_setup_rx: 3965 e1000e_free_tx_resources(adapter->tx_ring); 3966 err_setup_tx: 3967 e1000e_reset(adapter); 3968 pm_runtime_put_sync(&pdev->dev); 3969 3970 return err; 3971 } 3972 3973 /** 3974 * e1000_close - Disables a network interface 3975 * @netdev: network interface device structure 3976 * 3977 * Returns 0, this is not allowed to fail 3978 * 3979 * The close entry point is called when an interface is de-activated 3980 * by the OS. The hardware is still under the drivers control, but 3981 * needs to be disabled. A global MAC reset is issued to stop the 3982 * hardware, and all transmit and receive resources are freed. 
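 * (the reset and teardown are carried out via e1000e_down() and the
 * free_*_resources helpers below)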
3983 **/ 3984 static int e1000_close(struct net_device *netdev) 3985 { 3986 struct e1000_adapter *adapter = netdev_priv(netdev); 3987 struct pci_dev *pdev = adapter->pdev; 3988 int count = E1000_CHECK_RESET_COUNT; 3989 3990 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 3991 usleep_range(10000, 20000); 3992 3993 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3994 3995 pm_runtime_get_sync(&pdev->dev); 3996 3997 napi_disable(&adapter->napi); 3998 3999 if (!test_bit(__E1000_DOWN, &adapter->state)) { 4000 e1000e_down(adapter); 4001 e1000_free_irq(adapter); 4002 } 4003 e1000_power_down_phy(adapter); 4004 4005 e1000e_free_tx_resources(adapter->tx_ring); 4006 e1000e_free_rx_resources(adapter->rx_ring); 4007 4008 /* 4009 * kill manageability vlan ID if supported, but not if a vlan with 4010 * the same ID is registered on the host OS (let 8021q kill it) 4011 */ 4012 if (adapter->hw.mng_cookie.status & 4013 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 4014 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4015 4016 /* 4017 * If AMT is enabled, let the firmware know that the network 4018 * interface is now closed 4019 */ 4020 if ((adapter->flags & FLAG_HAS_AMT) && 4021 !test_bit(__E1000_TESTING, &adapter->state)) 4022 e1000e_release_hw_control(adapter); 4023 4024 if (adapter->hw.mac.type == e1000_pch2lan) 4025 pm_qos_remove_request(&adapter->netdev->pm_qos_req); 4026 4027 pm_runtime_put_sync(&pdev->dev); 4028 4029 return 0; 4030 } 4031 /** 4032 * e1000_set_mac - Change the Ethernet Address of the NIC 4033 * @netdev: network interface device structure 4034 * @p: pointer to an address structure 4035 * 4036 * Returns 0 on success, negative on failure 4037 **/ 4038 static int e1000_set_mac(struct net_device *netdev, void *p) 4039 { 4040 struct e1000_adapter *adapter = netdev_priv(netdev); 4041 struct sockaddr *addr = p; 4042 4043 if (!is_valid_ether_addr(addr->sa_data)) 4044 return -EADDRNOTAVAIL; 4045 4046 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4047 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4048 4049 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4050 4051 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4052 /* activate the work around */ 4053 e1000e_set_laa_state_82571(&adapter->hw, 1); 4054 4055 /* 4056 * Hold a copy of the LAA in RAR[14] This is done so that 4057 * between the time RAR[0] gets clobbered and the time it 4058 * gets fixed (in e1000_watchdog), the actual LAA is in one 4059 * of the RARs and no incoming packets directed to this port 4060 * are dropped. Eventually the LAA will be in RAR[0] and 4061 * RAR[14] 4062 */ 4063 e1000e_rar_set(&adapter->hw, 4064 adapter->hw.mac.addr, 4065 adapter->hw.mac.rar_entry_count - 1); 4066 } 4067 4068 return 0; 4069 } 4070 4071 /** 4072 * e1000e_update_phy_task - work thread to update phy 4073 * @work: pointer to our work struct 4074 * 4075 * this worker thread exists because we must acquire a 4076 * semaphore to read the phy, which we could msleep while 4077 * waiting for it, and we can't msleep in a timer. 
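 * The timer callback e1000_update_phy_info() therefore only schedules this
 * work item instead of touching the PHY directly.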
4078 **/ 4079 static void e1000e_update_phy_task(struct work_struct *work) 4080 { 4081 struct e1000_adapter *adapter = container_of(work, 4082 struct e1000_adapter, update_phy_task); 4083 4084 if (test_bit(__E1000_DOWN, &adapter->state)) 4085 return; 4086 4087 e1000_get_phy_info(&adapter->hw); 4088 } 4089 4090 /* 4091 * Need to wait a few seconds after link up to get diagnostic information from 4092 * the phy 4093 */ 4094 static void e1000_update_phy_info(unsigned long data) 4095 { 4096 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4097 4098 if (test_bit(__E1000_DOWN, &adapter->state)) 4099 return; 4100 4101 schedule_work(&adapter->update_phy_task); 4102 } 4103 4104 /** 4105 * e1000e_update_phy_stats - Update the PHY statistics counters 4106 * @adapter: board private structure 4107 * 4108 * Read/clear the upper 16-bit PHY registers and read/accumulate lower 4109 **/ 4110 static void e1000e_update_phy_stats(struct e1000_adapter *adapter) 4111 { 4112 struct e1000_hw *hw = &adapter->hw; 4113 s32 ret_val; 4114 u16 phy_data; 4115 4116 ret_val = hw->phy.ops.acquire(hw); 4117 if (ret_val) 4118 return; 4119 4120 /* 4121 * A page set is expensive so check if already on desired page. 4122 * If not, set to the page with the PHY status registers. 4123 */ 4124 hw->phy.addr = 1; 4125 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 4126 &phy_data); 4127 if (ret_val) 4128 goto release; 4129 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { 4130 ret_val = hw->phy.ops.set_page(hw, 4131 HV_STATS_PAGE << IGP_PAGE_SHIFT); 4132 if (ret_val) 4133 goto release; 4134 } 4135 4136 /* Single Collision Count */ 4137 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); 4138 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); 4139 if (!ret_val) 4140 adapter->stats.scc += phy_data; 4141 4142 /* Excessive Collision Count */ 4143 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); 4144 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); 4145 if (!ret_val) 4146 adapter->stats.ecol += phy_data; 4147 4148 /* Multiple Collision Count */ 4149 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); 4150 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); 4151 if (!ret_val) 4152 adapter->stats.mcc += phy_data; 4153 4154 /* Late Collision Count */ 4155 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); 4156 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); 4157 if (!ret_val) 4158 adapter->stats.latecol += phy_data; 4159 4160 /* Collision Count - also used for adaptive IFS */ 4161 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); 4162 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); 4163 if (!ret_val) 4164 hw->mac.collision_delta = phy_data; 4165 4166 /* Defer Count */ 4167 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); 4168 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); 4169 if (!ret_val) 4170 adapter->stats.dc += phy_data; 4171 4172 /* Transmit with no CRS */ 4173 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); 4174 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); 4175 if (!ret_val) 4176 adapter->stats.tncrs += phy_data; 4177 4178 release: 4179 hw->phy.ops.release(hw); 4180 } 4181 4182 /** 4183 * e1000e_update_stats - Update the board statistics counters 4184 * @adapter: board private structure 4185 **/ 4186 static void e1000e_update_stats(struct e1000_adapter *adapter) 4187 { 4188 struct net_device *netdev = adapter->netdev; 4189 
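/*
 * Callers serialize this update with adapter->stats64_lock; see
 * e1000e_down(), e1000_watchdog_task() and e1000e_get_stats64().
 */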
struct e1000_hw *hw = &adapter->hw; 4190 struct pci_dev *pdev = adapter->pdev; 4191 4192 /* 4193 * Prevent stats update while adapter is being reset, or if the pci 4194 * connection is down. 4195 */ 4196 if (adapter->link_speed == 0) 4197 return; 4198 if (pci_channel_offline(pdev)) 4199 return; 4200 4201 adapter->stats.crcerrs += er32(CRCERRS); 4202 adapter->stats.gprc += er32(GPRC); 4203 adapter->stats.gorc += er32(GORCL); 4204 er32(GORCH); /* Clear gorc */ 4205 adapter->stats.bprc += er32(BPRC); 4206 adapter->stats.mprc += er32(MPRC); 4207 adapter->stats.roc += er32(ROC); 4208 4209 adapter->stats.mpc += er32(MPC); 4210 4211 /* Half-duplex statistics */ 4212 if (adapter->link_duplex == HALF_DUPLEX) { 4213 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { 4214 e1000e_update_phy_stats(adapter); 4215 } else { 4216 adapter->stats.scc += er32(SCC); 4217 adapter->stats.ecol += er32(ECOL); 4218 adapter->stats.mcc += er32(MCC); 4219 adapter->stats.latecol += er32(LATECOL); 4220 adapter->stats.dc += er32(DC); 4221 4222 hw->mac.collision_delta = er32(COLC); 4223 4224 if ((hw->mac.type != e1000_82574) && 4225 (hw->mac.type != e1000_82583)) 4226 adapter->stats.tncrs += er32(TNCRS); 4227 } 4228 adapter->stats.colc += hw->mac.collision_delta; 4229 } 4230 4231 adapter->stats.xonrxc += er32(XONRXC); 4232 adapter->stats.xontxc += er32(XONTXC); 4233 adapter->stats.xoffrxc += er32(XOFFRXC); 4234 adapter->stats.xofftxc += er32(XOFFTXC); 4235 adapter->stats.gptc += er32(GPTC); 4236 adapter->stats.gotc += er32(GOTCL); 4237 er32(GOTCH); /* Clear gotc */ 4238 adapter->stats.rnbc += er32(RNBC); 4239 adapter->stats.ruc += er32(RUC); 4240 4241 adapter->stats.mptc += er32(MPTC); 4242 adapter->stats.bptc += er32(BPTC); 4243 4244 /* used for adaptive IFS */ 4245 4246 hw->mac.tx_packet_delta = er32(TPT); 4247 adapter->stats.tpt += hw->mac.tx_packet_delta; 4248 4249 adapter->stats.algnerrc += er32(ALGNERRC); 4250 adapter->stats.rxerrc += er32(RXERRC); 4251 adapter->stats.cexterr += er32(CEXTERR); 4252 adapter->stats.tsctc += er32(TSCTC); 4253 adapter->stats.tsctfc += er32(TSCTFC); 4254 4255 /* Fill out the OS statistics structure */ 4256 netdev->stats.multicast = adapter->stats.mprc; 4257 netdev->stats.collisions = adapter->stats.colc; 4258 4259 /* Rx Errors */ 4260 4261 /* 4262 * RLEC on some newer hardware can be incorrect so build 4263 * our own version based on RUC and ROC 4264 */ 4265 netdev->stats.rx_errors = adapter->stats.rxerrc + 4266 adapter->stats.crcerrs + adapter->stats.algnerrc + 4267 adapter->stats.ruc + adapter->stats.roc + 4268 adapter->stats.cexterr; 4269 netdev->stats.rx_length_errors = adapter->stats.ruc + 4270 adapter->stats.roc; 4271 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4272 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4273 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4274 4275 /* Tx Errors */ 4276 netdev->stats.tx_errors = adapter->stats.ecol + 4277 adapter->stats.latecol; 4278 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 4279 netdev->stats.tx_window_errors = adapter->stats.latecol; 4280 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 4281 4282 /* Tx Dropped needs to be maintained elsewhere */ 4283 4284 /* Management Stats */ 4285 adapter->stats.mgptc += er32(MGTPTC); 4286 adapter->stats.mgprc += er32(MGTPRC); 4287 adapter->stats.mgpdc += er32(MGTPDC); 4288 } 4289 4290 /** 4291 * e1000_phy_read_status - Update the PHY register status snapshot 4292 * @adapter: board private structure 4293 **/ 4294 static void e1000_phy_read_status(struct 
e1000_adapter *adapter) 4295 { 4296 struct e1000_hw *hw = &adapter->hw; 4297 struct e1000_phy_regs *phy = &adapter->phy_regs; 4298 4299 if ((er32(STATUS) & E1000_STATUS_LU) && 4300 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4301 int ret_val; 4302 4303 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4304 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4305 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4306 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); 4307 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); 4308 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); 4309 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 4310 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 4311 if (ret_val) 4312 e_warn("Error reading PHY register\n"); 4313 } else { 4314 /* 4315 * Do not read PHY registers if link is not up 4316 * Set values to typical power-on defaults 4317 */ 4318 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); 4319 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | 4320 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | 4321 BMSR_ERCAP); 4322 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | 4323 ADVERTISE_ALL | ADVERTISE_CSMA); 4324 phy->lpa = 0; 4325 phy->expansion = EXPANSION_ENABLENPAGE; 4326 phy->ctrl1000 = ADVERTISE_1000FULL; 4327 phy->stat1000 = 0; 4328 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); 4329 } 4330 } 4331 4332 static void e1000_print_link_info(struct e1000_adapter *adapter) 4333 { 4334 struct e1000_hw *hw = &adapter->hw; 4335 u32 ctrl = er32(CTRL); 4336 4337 /* Link status message must follow this format for user tools */ 4338 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 4339 adapter->netdev->name, 4340 adapter->link_speed, 4341 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", 4342 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : 4343 (ctrl & E1000_CTRL_RFCE) ? "Rx" : 4344 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); 4345 } 4346 4347 static bool e1000e_has_link(struct e1000_adapter *adapter) 4348 { 4349 struct e1000_hw *hw = &adapter->hw; 4350 bool link_active = false; 4351 s32 ret_val = 0; 4352 4353 /* 4354 * get_link_status is set on LSC (link status) interrupt or 4355 * Rx sequence error interrupt. 
get_link_status will stay 4356 * false until the check_for_link establishes link 4357 * for copper adapters ONLY 4358 */ 4359 switch (hw->phy.media_type) { 4360 case e1000_media_type_copper: 4361 if (hw->mac.get_link_status) { 4362 ret_val = hw->mac.ops.check_for_link(hw); 4363 link_active = !hw->mac.get_link_status; 4364 } else { 4365 link_active = true; 4366 } 4367 break; 4368 case e1000_media_type_fiber: 4369 ret_val = hw->mac.ops.check_for_link(hw); 4370 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 4371 break; 4372 case e1000_media_type_internal_serdes: 4373 ret_val = hw->mac.ops.check_for_link(hw); 4374 link_active = adapter->hw.mac.serdes_has_link; 4375 break; 4376 default: 4377 case e1000_media_type_unknown: 4378 break; 4379 } 4380 4381 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 4382 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 4383 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 4384 e_info("Gigabit has been disabled, downgrading speed\n"); 4385 } 4386 4387 return link_active; 4388 } 4389 4390 static void e1000e_enable_receives(struct e1000_adapter *adapter) 4391 { 4392 /* make sure the receive unit is started */ 4393 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4394 (adapter->flags & FLAG_RX_RESTART_NOW)) { 4395 struct e1000_hw *hw = &adapter->hw; 4396 u32 rctl = er32(RCTL); 4397 ew32(RCTL, rctl | E1000_RCTL_EN); 4398 adapter->flags &= ~FLAG_RX_RESTART_NOW; 4399 } 4400 } 4401 4402 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) 4403 { 4404 struct e1000_hw *hw = &adapter->hw; 4405 4406 /* 4407 * With 82574 controllers, PHY needs to be checked periodically 4408 * for hung state and reset, if two calls return true 4409 */ 4410 if (e1000_check_phy_82574(hw)) 4411 adapter->phy_hang_count++; 4412 else 4413 adapter->phy_hang_count = 0; 4414 4415 if (adapter->phy_hang_count > 1) { 4416 adapter->phy_hang_count = 0; 4417 schedule_work(&adapter->reset_task); 4418 } 4419 } 4420 4421 /** 4422 * e1000_watchdog - Timer Call-back 4423 * @data: pointer to adapter cast into an unsigned long 4424 **/ 4425 static void e1000_watchdog(unsigned long data) 4426 { 4427 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4428 4429 /* Do the rest outside of interrupt context */ 4430 schedule_work(&adapter->watchdog_task); 4431 4432 /* TODO: make this use queue_delayed_work() */ 4433 } 4434 4435 static void e1000_watchdog_task(struct work_struct *work) 4436 { 4437 struct e1000_adapter *adapter = container_of(work, 4438 struct e1000_adapter, watchdog_task); 4439 struct net_device *netdev = adapter->netdev; 4440 struct e1000_mac_info *mac = &adapter->hw.mac; 4441 struct e1000_phy_info *phy = &adapter->hw.phy; 4442 struct e1000_ring *tx_ring = adapter->tx_ring; 4443 struct e1000_hw *hw = &adapter->hw; 4444 u32 link, tctl; 4445 4446 if (test_bit(__E1000_DOWN, &adapter->state)) 4447 return; 4448 4449 link = e1000e_has_link(adapter); 4450 if ((netif_carrier_ok(netdev)) && link) { 4451 /* Cancel scheduled suspend requests. */ 4452 pm_runtime_resume(netdev->dev.parent); 4453 4454 e1000e_enable_receives(adapter); 4455 goto link_up; 4456 } 4457 4458 if ((e1000e_enable_tx_pkt_filtering(hw)) && 4459 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) 4460 e1000_update_mng_vlan(adapter); 4461 4462 if (link) { 4463 if (!netif_carrier_ok(netdev)) { 4464 bool txb2b = true; 4465 4466 /* Cancel scheduled suspend requests. 
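 * (the link-down path below may have scheduled a runtime suspend via
 * pm_schedule_suspend())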
*/ 4467 pm_runtime_resume(netdev->dev.parent); 4468 4469 /* update snapshot of PHY registers on LSC */ 4470 e1000_phy_read_status(adapter); 4471 mac->ops.get_link_up_info(&adapter->hw, 4472 &adapter->link_speed, 4473 &adapter->link_duplex); 4474 e1000_print_link_info(adapter); 4475 /* 4476 * On supported PHYs, check for duplex mismatch only 4477 * if link has autonegotiated at 10/100 half 4478 */ 4479 if ((hw->phy.type == e1000_phy_igp_3 || 4480 hw->phy.type == e1000_phy_bm) && 4481 (hw->mac.autoneg == true) && 4482 (adapter->link_speed == SPEED_10 || 4483 adapter->link_speed == SPEED_100) && 4484 (adapter->link_duplex == HALF_DUPLEX)) { 4485 u16 autoneg_exp; 4486 4487 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); 4488 4489 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) 4490 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); 4491 } 4492 4493 /* adjust timeout factor according to speed/duplex */ 4494 adapter->tx_timeout_factor = 1; 4495 switch (adapter->link_speed) { 4496 case SPEED_10: 4497 txb2b = false; 4498 adapter->tx_timeout_factor = 16; 4499 break; 4500 case SPEED_100: 4501 txb2b = false; 4502 adapter->tx_timeout_factor = 10; 4503 break; 4504 } 4505 4506 /* 4507 * workaround: re-program speed mode bit after 4508 * link-up event 4509 */ 4510 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 4511 !txb2b) { 4512 u32 tarc0; 4513 tarc0 = er32(TARC(0)); 4514 tarc0 &= ~SPEED_MODE_BIT; 4515 ew32(TARC(0), tarc0); 4516 } 4517 4518 /* 4519 * disable TSO for pcie and 10/100 speeds, to avoid 4520 * some hardware issues 4521 */ 4522 if (!(adapter->flags & FLAG_TSO_FORCE)) { 4523 switch (adapter->link_speed) { 4524 case SPEED_10: 4525 case SPEED_100: 4526 e_info("10/100 speed: disabling TSO\n"); 4527 netdev->features &= ~NETIF_F_TSO; 4528 netdev->features &= ~NETIF_F_TSO6; 4529 break; 4530 case SPEED_1000: 4531 netdev->features |= NETIF_F_TSO; 4532 netdev->features |= NETIF_F_TSO6; 4533 break; 4534 default: 4535 /* oops */ 4536 break; 4537 } 4538 } 4539 4540 /* 4541 * enable transmits in the hardware, need to do this 4542 * after setting TARC(0) 4543 */ 4544 tctl = er32(TCTL); 4545 tctl |= E1000_TCTL_EN; 4546 ew32(TCTL, tctl); 4547 4548 /* 4549 * Perform any post-link-up configuration before 4550 * reporting link up. 
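 * (some PHYs provide a cfg_on_link_up hook for this, invoked just below)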
4551 */ 4552 if (phy->ops.cfg_on_link_up) 4553 phy->ops.cfg_on_link_up(hw); 4554 4555 netif_carrier_on(netdev); 4556 4557 if (!test_bit(__E1000_DOWN, &adapter->state)) 4558 mod_timer(&adapter->phy_info_timer, 4559 round_jiffies(jiffies + 2 * HZ)); 4560 } 4561 } else { 4562 if (netif_carrier_ok(netdev)) { 4563 adapter->link_speed = 0; 4564 adapter->link_duplex = 0; 4565 /* Link status message must follow this format */ 4566 printk(KERN_INFO "e1000e: %s NIC Link is Down\n", 4567 adapter->netdev->name); 4568 netif_carrier_off(netdev); 4569 if (!test_bit(__E1000_DOWN, &adapter->state)) 4570 mod_timer(&adapter->phy_info_timer, 4571 round_jiffies(jiffies + 2 * HZ)); 4572 4573 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 4574 schedule_work(&adapter->reset_task); 4575 else 4576 pm_schedule_suspend(netdev->dev.parent, 4577 LINK_TIMEOUT); 4578 } 4579 } 4580 4581 link_up: 4582 spin_lock(&adapter->stats64_lock); 4583 e1000e_update_stats(adapter); 4584 4585 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4586 adapter->tpt_old = adapter->stats.tpt; 4587 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 4588 adapter->colc_old = adapter->stats.colc; 4589 4590 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 4591 adapter->gorc_old = adapter->stats.gorc; 4592 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; 4593 adapter->gotc_old = adapter->stats.gotc; 4594 spin_unlock(&adapter->stats64_lock); 4595 4596 e1000e_update_adaptive(&adapter->hw); 4597 4598 if (!netif_carrier_ok(netdev) && 4599 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { 4600 /* 4601 * We've lost link, so the controller stops DMA, 4602 * but we've got queued Tx work that's never going 4603 * to get done, so reset controller to flush Tx. 4604 * (Do the reset outside of interrupt context). 4605 */ 4606 schedule_work(&adapter->reset_task); 4607 /* return immediately since reset is imminent */ 4608 return; 4609 } 4610 4611 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4612 if (adapter->itr_setting == 4) { 4613 /* 4614 * Symmetric Tx/Rx gets a reduced ITR=2000; 4615 * Total asymmetrical Tx or Rx gets ITR=8000; 4616 * everyone else is between 2000-8000. 4617 */ 4618 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 4619 u32 dif = (adapter->gotc > adapter->gorc ? 4620 adapter->gotc - adapter->gorc : 4621 adapter->gorc - adapter->gotc) / 10000; 4622 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 4623 4624 ew32(ITR, 1000000000 / (itr * 256)); 4625 } 4626 4627 /* Cause software interrupt to ensure Rx ring is cleaned */ 4628 if (adapter->msix_entries) 4629 ew32(ICS, adapter->rx_ring->ims_val); 4630 else 4631 ew32(ICS, E1000_ICS_RXDMT0); 4632 4633 /* flush pending descriptors to memory before detecting Tx hang */ 4634 e1000e_flush_descriptors(adapter); 4635 4636 /* Force detection of hung controller every watchdog period */ 4637 adapter->detect_tx_hung = true; 4638 4639 /* 4640 * With 82571 controllers, LAA may be overwritten due to controller 4641 * reset from the other port. 
Set the appropriate LAA in RAR[0] 4642 */ 4643 if (e1000e_get_laa_state_82571(hw)) 4644 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4645 4646 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 4647 e1000e_check_82574_phy_workaround(adapter); 4648 4649 /* Reset the timer */ 4650 if (!test_bit(__E1000_DOWN, &adapter->state)) 4651 mod_timer(&adapter->watchdog_timer, 4652 round_jiffies(jiffies + 2 * HZ)); 4653 } 4654 4655 #define E1000_TX_FLAGS_CSUM 0x00000001 4656 #define E1000_TX_FLAGS_VLAN 0x00000002 4657 #define E1000_TX_FLAGS_TSO 0x00000004 4658 #define E1000_TX_FLAGS_IPV4 0x00000008 4659 #define E1000_TX_FLAGS_NO_FCS 0x00000010 4660 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 4661 #define E1000_TX_FLAGS_VLAN_SHIFT 16 4662 4663 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) 4664 { 4665 struct e1000_context_desc *context_desc; 4666 struct e1000_buffer *buffer_info; 4667 unsigned int i; 4668 u32 cmd_length = 0; 4669 u16 ipcse = 0, tucse, mss; 4670 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4671 4672 if (!skb_is_gso(skb)) 4673 return 0; 4674 4675 if (skb_header_cloned(skb)) { 4676 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4677 4678 if (err) 4679 return err; 4680 } 4681 4682 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4683 mss = skb_shinfo(skb)->gso_size; 4684 if (skb->protocol == htons(ETH_P_IP)) { 4685 struct iphdr *iph = ip_hdr(skb); 4686 iph->tot_len = 0; 4687 iph->check = 0; 4688 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 4689 0, IPPROTO_TCP, 0); 4690 cmd_length = E1000_TXD_CMD_IP; 4691 ipcse = skb_transport_offset(skb) - 1; 4692 } else if (skb_is_gso_v6(skb)) { 4693 ipv6_hdr(skb)->payload_len = 0; 4694 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4695 &ipv6_hdr(skb)->daddr, 4696 0, IPPROTO_TCP, 0); 4697 ipcse = 0; 4698 } 4699 ipcss = skb_network_offset(skb); 4700 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 4701 tucss = skb_transport_offset(skb); 4702 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 4703 tucse = 0; 4704 4705 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 4706 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 4707 4708 i = tx_ring->next_to_use; 4709 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4710 buffer_info = &tx_ring->buffer_info[i]; 4711 4712 context_desc->lower_setup.ip_fields.ipcss = ipcss; 4713 context_desc->lower_setup.ip_fields.ipcso = ipcso; 4714 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 4715 context_desc->upper_setup.tcp_fields.tucss = tucss; 4716 context_desc->upper_setup.tcp_fields.tucso = tucso; 4717 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 4718 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 4719 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 4720 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 4721 4722 buffer_info->time_stamp = jiffies; 4723 buffer_info->next_to_watch = i; 4724 4725 i++; 4726 if (i == tx_ring->count) 4727 i = 0; 4728 tx_ring->next_to_use = i; 4729 4730 return 1; 4731 } 4732 4733 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) 4734 { 4735 struct e1000_adapter *adapter = tx_ring->adapter; 4736 struct e1000_context_desc *context_desc; 4737 struct e1000_buffer *buffer_info; 4738 unsigned int i; 4739 u8 css; 4740 u32 cmd_len = E1000_TXD_CMD_DEXT; 4741 __be16 protocol; 4742 4743 if (skb->ip_summed != CHECKSUM_PARTIAL) 4744 return 0; 4745 4746 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 4747 protocol = 
vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 4748 else 4749 protocol = skb->protocol; 4750 4751 switch (protocol) { 4752 case cpu_to_be16(ETH_P_IP): 4753 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 4754 cmd_len |= E1000_TXD_CMD_TCP; 4755 break; 4756 case cpu_to_be16(ETH_P_IPV6): 4757 /* XXX not handling all IPV6 headers */ 4758 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 4759 cmd_len |= E1000_TXD_CMD_TCP; 4760 break; 4761 default: 4762 if (unlikely(net_ratelimit())) 4763 e_warn("checksum_partial proto=%x!\n", 4764 be16_to_cpu(protocol)); 4765 break; 4766 } 4767 4768 css = skb_checksum_start_offset(skb); 4769 4770 i = tx_ring->next_to_use; 4771 buffer_info = &tx_ring->buffer_info[i]; 4772 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4773 4774 context_desc->lower_setup.ip_config = 0; 4775 context_desc->upper_setup.tcp_fields.tucss = css; 4776 context_desc->upper_setup.tcp_fields.tucso = 4777 css + skb->csum_offset; 4778 context_desc->upper_setup.tcp_fields.tucse = 0; 4779 context_desc->tcp_seg_setup.data = 0; 4780 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 4781 4782 buffer_info->time_stamp = jiffies; 4783 buffer_info->next_to_watch = i; 4784 4785 i++; 4786 if (i == tx_ring->count) 4787 i = 0; 4788 tx_ring->next_to_use = i; 4789 4790 return 1; 4791 } 4792 4793 #define E1000_MAX_PER_TXD 8192 4794 #define E1000_MAX_TXD_PWR 12 4795 4796 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 4797 unsigned int first, unsigned int max_per_txd, 4798 unsigned int nr_frags, unsigned int mss) 4799 { 4800 struct e1000_adapter *adapter = tx_ring->adapter; 4801 struct pci_dev *pdev = adapter->pdev; 4802 struct e1000_buffer *buffer_info; 4803 unsigned int len = skb_headlen(skb); 4804 unsigned int offset = 0, size, count = 0, i; 4805 unsigned int f, bytecount, segs; 4806 4807 i = tx_ring->next_to_use; 4808 4809 while (len) { 4810 buffer_info = &tx_ring->buffer_info[i]; 4811 size = min(len, max_per_txd); 4812 4813 buffer_info->length = size; 4814 buffer_info->time_stamp = jiffies; 4815 buffer_info->next_to_watch = i; 4816 buffer_info->dma = dma_map_single(&pdev->dev, 4817 skb->data + offset, 4818 size, DMA_TO_DEVICE); 4819 buffer_info->mapped_as_page = false; 4820 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4821 goto dma_error; 4822 4823 len -= size; 4824 offset += size; 4825 count++; 4826 4827 if (len) { 4828 i++; 4829 if (i == tx_ring->count) 4830 i = 0; 4831 } 4832 } 4833 4834 for (f = 0; f < nr_frags; f++) { 4835 const struct skb_frag_struct *frag; 4836 4837 frag = &skb_shinfo(skb)->frags[f]; 4838 len = skb_frag_size(frag); 4839 offset = 0; 4840 4841 while (len) { 4842 i++; 4843 if (i == tx_ring->count) 4844 i = 0; 4845 4846 buffer_info = &tx_ring->buffer_info[i]; 4847 size = min(len, max_per_txd); 4848 4849 buffer_info->length = size; 4850 buffer_info->time_stamp = jiffies; 4851 buffer_info->next_to_watch = i; 4852 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 4853 offset, size, DMA_TO_DEVICE); 4854 buffer_info->mapped_as_page = true; 4855 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4856 goto dma_error; 4857 4858 len -= size; 4859 offset += size; 4860 count++; 4861 } 4862 } 4863 4864 segs = skb_shinfo(skb)->gso_segs ? 
: 1; 4865 /* multiply data chunks by size of headers */ 4866 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 4867 4868 tx_ring->buffer_info[i].skb = skb; 4869 tx_ring->buffer_info[i].segs = segs; 4870 tx_ring->buffer_info[i].bytecount = bytecount; 4871 tx_ring->buffer_info[first].next_to_watch = i; 4872 4873 return count; 4874 4875 dma_error: 4876 dev_err(&pdev->dev, "Tx DMA map failed\n"); 4877 buffer_info->dma = 0; 4878 if (count) 4879 count--; 4880 4881 while (count--) { 4882 if (i == 0) 4883 i += tx_ring->count; 4884 i--; 4885 buffer_info = &tx_ring->buffer_info[i]; 4886 e1000_put_txbuf(tx_ring, buffer_info); 4887 } 4888 4889 return 0; 4890 } 4891 4892 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) 4893 { 4894 struct e1000_adapter *adapter = tx_ring->adapter; 4895 struct e1000_tx_desc *tx_desc = NULL; 4896 struct e1000_buffer *buffer_info; 4897 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 4898 unsigned int i; 4899 4900 if (tx_flags & E1000_TX_FLAGS_TSO) { 4901 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 4902 E1000_TXD_CMD_TSE; 4903 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4904 4905 if (tx_flags & E1000_TX_FLAGS_IPV4) 4906 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 4907 } 4908 4909 if (tx_flags & E1000_TX_FLAGS_CSUM) { 4910 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 4911 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4912 } 4913 4914 if (tx_flags & E1000_TX_FLAGS_VLAN) { 4915 txd_lower |= E1000_TXD_CMD_VLE; 4916 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 4917 } 4918 4919 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 4920 txd_lower &= ~(E1000_TXD_CMD_IFCS); 4921 4922 i = tx_ring->next_to_use; 4923 4924 do { 4925 buffer_info = &tx_ring->buffer_info[i]; 4926 tx_desc = E1000_TX_DESC(*tx_ring, i); 4927 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4928 tx_desc->lower.data = 4929 cpu_to_le32(txd_lower | buffer_info->length); 4930 tx_desc->upper.data = cpu_to_le32(txd_upper); 4931 4932 i++; 4933 if (i == tx_ring->count) 4934 i = 0; 4935 } while (--count > 0); 4936 4937 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 4938 4939 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 4940 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 4941 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 4942 4943 /* 4944 * Force memory writes to complete before letting h/w 4945 * know there are new descriptors to fetch. (Only 4946 * applicable for weak-ordered memory model archs, 4947 * such as IA-64). 
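 * The descriptor writes must be globally visible before the tail register
 * write below tells hardware to fetch them.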
4948 */ 4949 wmb(); 4950 4951 tx_ring->next_to_use = i; 4952 4953 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 4954 e1000e_update_tdt_wa(tx_ring, i); 4955 else 4956 writel(i, tx_ring->tail); 4957 4958 /* 4959 * we need this if more than one processor can write to our tail 4960 * at a time, it synchronizes IO on IA64/Altix systems 4961 */ 4962 mmiowb(); 4963 } 4964 4965 #define MINIMUM_DHCP_PACKET_SIZE 282 4966 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 4967 struct sk_buff *skb) 4968 { 4969 struct e1000_hw *hw = &adapter->hw; 4970 u16 length, offset; 4971 4972 if (vlan_tx_tag_present(skb)) { 4973 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 4974 (adapter->hw.mng_cookie.status & 4975 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 4976 return 0; 4977 } 4978 4979 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 4980 return 0; 4981 4982 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) 4983 return 0; 4984 4985 { 4986 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); 4987 struct udphdr *udp; 4988 4989 if (ip->protocol != IPPROTO_UDP) 4990 return 0; 4991 4992 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); 4993 if (ntohs(udp->dest) != 67) 4994 return 0; 4995 4996 offset = (u8 *)udp + 8 - skb->data; 4997 length = skb->len - offset; 4998 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); 4999 } 5000 5001 return 0; 5002 } 5003 5004 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5005 { 5006 struct e1000_adapter *adapter = tx_ring->adapter; 5007 5008 netif_stop_queue(adapter->netdev); 5009 /* 5010 * Herbert's original patch had: 5011 * smp_mb__after_netif_stop_queue(); 5012 * but since that doesn't exist yet, just open code it. 5013 */ 5014 smp_mb(); 5015 5016 /* 5017 * We need to check again in a case another CPU has just 5018 * made room available. 5019 */ 5020 if (e1000_desc_unused(tx_ring) < size) 5021 return -EBUSY; 5022 5023 /* A reprieve! */ 5024 netif_start_queue(adapter->netdev); 5025 ++adapter->restart_queue; 5026 return 0; 5027 } 5028 5029 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5030 { 5031 if (e1000_desc_unused(tx_ring) >= size) 5032 return 0; 5033 return __e1000_maybe_stop_tx(tx_ring, size); 5034 } 5035 5036 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) 5037 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 5038 struct net_device *netdev) 5039 { 5040 struct e1000_adapter *adapter = netdev_priv(netdev); 5041 struct e1000_ring *tx_ring = adapter->tx_ring; 5042 unsigned int first; 5043 unsigned int max_per_txd = E1000_MAX_PER_TXD; 5044 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 5045 unsigned int tx_flags = 0; 5046 unsigned int len = skb_headlen(skb); 5047 unsigned int nr_frags; 5048 unsigned int mss; 5049 int count = 0; 5050 int tso; 5051 unsigned int f; 5052 5053 if (test_bit(__E1000_DOWN, &adapter->state)) { 5054 dev_kfree_skb_any(skb); 5055 return NETDEV_TX_OK; 5056 } 5057 5058 if (skb->len <= 0) { 5059 dev_kfree_skb_any(skb); 5060 return NETDEV_TX_OK; 5061 } 5062 5063 mss = skb_shinfo(skb)->gso_size; 5064 /* 5065 * The controller does a simple calculation to 5066 * make sure there is enough room in the FIFO before 5067 * initiating the DMA for each buffer. The calc is: 5068 * 4 = ceil(buffer len/mss). To make sure we don't 5069 * overrun the FIFO, adjust the max buffer len if mss 5070 * drops. 
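 * For example, with a standard 1460-byte MSS the cap below works out to
 * min(1460 << 2, 8192) = 5840 bytes per descriptor.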
5071 */ 5072 if (mss) { 5073 u8 hdr_len; 5074 max_per_txd = min(mss << 2, max_per_txd); 5075 max_txd_pwr = fls(max_per_txd) - 1; 5076 5077 /* 5078 * TSO Workaround for 82571/2/3 Controllers -- if skb->data 5079 * points to just header, pull a few bytes of payload from 5080 * frags into skb->data 5081 */ 5082 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5083 /* 5084 * we do this workaround for ES2LAN, but it is un-necessary, 5085 * avoiding it could save a lot of cycles 5086 */ 5087 if (skb->data_len && (hdr_len == len)) { 5088 unsigned int pull_size; 5089 5090 pull_size = min_t(unsigned int, 4, skb->data_len); 5091 if (!__pskb_pull_tail(skb, pull_size)) { 5092 e_err("__pskb_pull_tail failed.\n"); 5093 dev_kfree_skb_any(skb); 5094 return NETDEV_TX_OK; 5095 } 5096 len = skb_headlen(skb); 5097 } 5098 } 5099 5100 /* reserve a descriptor for the offload context */ 5101 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 5102 count++; 5103 count++; 5104 5105 count += TXD_USE_COUNT(len, max_txd_pwr); 5106 5107 nr_frags = skb_shinfo(skb)->nr_frags; 5108 for (f = 0; f < nr_frags; f++) 5109 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 5110 max_txd_pwr); 5111 5112 if (adapter->hw.mac.tx_pkt_filtering) 5113 e1000_transfer_dhcp_info(adapter, skb); 5114 5115 /* 5116 * need: count + 2 desc gap to keep tail from touching 5117 * head, otherwise try next time 5118 */ 5119 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5120 return NETDEV_TX_BUSY; 5121 5122 if (vlan_tx_tag_present(skb)) { 5123 tx_flags |= E1000_TX_FLAGS_VLAN; 5124 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 5125 } 5126 5127 first = tx_ring->next_to_use; 5128 5129 tso = e1000_tso(tx_ring, skb); 5130 if (tso < 0) { 5131 dev_kfree_skb_any(skb); 5132 return NETDEV_TX_OK; 5133 } 5134 5135 if (tso) 5136 tx_flags |= E1000_TX_FLAGS_TSO; 5137 else if (e1000_tx_csum(tx_ring, skb)) 5138 tx_flags |= E1000_TX_FLAGS_CSUM; 5139 5140 /* 5141 * Old method was to assume IPv4 packet by default if TSO was enabled. 5142 * 82571 hardware supports TSO capabilities for IPv6 as well... 5143 * no longer assume, we must. 5144 */ 5145 if (skb->protocol == htons(ETH_P_IP)) 5146 tx_flags |= E1000_TX_FLAGS_IPV4; 5147 5148 if (unlikely(skb->no_fcs)) 5149 tx_flags |= E1000_TX_FLAGS_NO_FCS; 5150 5151 /* if count is 0 then mapping error has occurred */ 5152 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); 5153 if (count) { 5154 netdev_sent_queue(netdev, skb->len); 5155 e1000_tx_queue(tx_ring, tx_flags, count); 5156 /* Make sure there is space in the ring for the next send. 
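 * (MAX_SKB_FRAGS + 2 covers a maximally fragmented skb plus the same
 * two-descriptor gap required above)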
*/ 5157 e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); 5158 5159 } else { 5160 dev_kfree_skb_any(skb); 5161 tx_ring->buffer_info[first].time_stamp = 0; 5162 tx_ring->next_to_use = first; 5163 } 5164 5165 return NETDEV_TX_OK; 5166 } 5167 5168 /** 5169 * e1000_tx_timeout - Respond to a Tx Hang 5170 * @netdev: network interface device structure 5171 **/ 5172 static void e1000_tx_timeout(struct net_device *netdev) 5173 { 5174 struct e1000_adapter *adapter = netdev_priv(netdev); 5175 5176 /* Do the reset outside of interrupt context */ 5177 adapter->tx_timeout_count++; 5178 schedule_work(&adapter->reset_task); 5179 } 5180 5181 static void e1000_reset_task(struct work_struct *work) 5182 { 5183 struct e1000_adapter *adapter; 5184 adapter = container_of(work, struct e1000_adapter, reset_task); 5185 5186 /* don't run the task if already down */ 5187 if (test_bit(__E1000_DOWN, &adapter->state)) 5188 return; 5189 5190 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 5191 (adapter->flags & FLAG_RX_RESTART_NOW))) { 5192 e1000e_dump(adapter); 5193 e_err("Reset adapter\n"); 5194 } 5195 e1000e_reinit_locked(adapter); 5196 } 5197 5198 /** 5199 * e1000_get_stats64 - Get System Network Statistics 5200 * @netdev: network interface device structure 5201 * @stats: rtnl_link_stats64 pointer 5202 * 5203 * Returns the address of the device statistics structure. 5204 **/ 5205 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 5206 struct rtnl_link_stats64 *stats) 5207 { 5208 struct e1000_adapter *adapter = netdev_priv(netdev); 5209 5210 memset(stats, 0, sizeof(struct rtnl_link_stats64)); 5211 spin_lock(&adapter->stats64_lock); 5212 e1000e_update_stats(adapter); 5213 /* Fill out the OS statistics structure */ 5214 stats->rx_bytes = adapter->stats.gorc; 5215 stats->rx_packets = adapter->stats.gprc; 5216 stats->tx_bytes = adapter->stats.gotc; 5217 stats->tx_packets = adapter->stats.gptc; 5218 stats->multicast = adapter->stats.mprc; 5219 stats->collisions = adapter->stats.colc; 5220 5221 /* Rx Errors */ 5222 5223 /* 5224 * RLEC on some newer hardware can be incorrect so build 5225 * our own version based on RUC and ROC 5226 */ 5227 stats->rx_errors = adapter->stats.rxerrc + 5228 adapter->stats.crcerrs + adapter->stats.algnerrc + 5229 adapter->stats.ruc + adapter->stats.roc + 5230 adapter->stats.cexterr; 5231 stats->rx_length_errors = adapter->stats.ruc + 5232 adapter->stats.roc; 5233 stats->rx_crc_errors = adapter->stats.crcerrs; 5234 stats->rx_frame_errors = adapter->stats.algnerrc; 5235 stats->rx_missed_errors = adapter->stats.mpc; 5236 5237 /* Tx Errors */ 5238 stats->tx_errors = adapter->stats.ecol + 5239 adapter->stats.latecol; 5240 stats->tx_aborted_errors = adapter->stats.ecol; 5241 stats->tx_window_errors = adapter->stats.latecol; 5242 stats->tx_carrier_errors = adapter->stats.tncrs; 5243 5244 /* Tx Dropped needs to be maintained elsewhere */ 5245 5246 spin_unlock(&adapter->stats64_lock); 5247 return stats; 5248 } 5249 5250 /** 5251 * e1000_change_mtu - Change the Maximum Transfer Unit 5252 * @netdev: network interface device structure 5253 * @new_mtu: new value for maximum frame size 5254 * 5255 * Returns 0 on success, negative on failure 5256 **/ 5257 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5258 { 5259 struct e1000_adapter *adapter = netdev_priv(netdev); 5260 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5261 5262 /* Jumbo frame support */ 5263 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 5264 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 5265 
e_err("Jumbo Frames not supported.\n"); 5266 return -EINVAL; 5267 } 5268 5269 /* 5270 * IP payload checksum (enabled with jumbos/packet-split when 5271 * Rx checksum is enabled) and generation of RSS hash is 5272 * mutually exclusive in the hardware. 5273 */ 5274 if ((netdev->features & NETIF_F_RXCSUM) && 5275 (netdev->features & NETIF_F_RXHASH)) { 5276 e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n"); 5277 return -EINVAL; 5278 } 5279 } 5280 5281 /* Supported frame sizes */ 5282 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 5283 (max_frame > adapter->max_hw_frame_size)) { 5284 e_err("Unsupported MTU setting\n"); 5285 return -EINVAL; 5286 } 5287 5288 /* Jumbo frame workaround on 82579 requires CRC be stripped */ 5289 if ((adapter->hw.mac.type == e1000_pch2lan) && 5290 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 5291 (new_mtu > ETH_DATA_LEN)) { 5292 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); 5293 return -EINVAL; 5294 } 5295 5296 /* 82573 Errata 17 */ 5297 if (((adapter->hw.mac.type == e1000_82573) || 5298 (adapter->hw.mac.type == e1000_82574)) && 5299 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { 5300 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; 5301 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); 5302 } 5303 5304 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5305 usleep_range(1000, 2000); 5306 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5307 adapter->max_frame_size = max_frame; 5308 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5309 netdev->mtu = new_mtu; 5310 if (netif_running(netdev)) 5311 e1000e_down(adapter); 5312 5313 /* 5314 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 5315 * means we reserve 2 more, this pushes us to allocate from the next 5316 * larger slab size. 5317 * i.e. 
RXBUFFER_2048 --> size-4096 slab 5318 * However with the new *_jumbo_rx* routines, jumbo receives will use 5319 * fragmented skbs 5320 */ 5321 5322 if (max_frame <= 2048) 5323 adapter->rx_buffer_len = 2048; 5324 else 5325 adapter->rx_buffer_len = 4096; 5326 5327 /* adjust allocation if LPE protects us, and we aren't using SBP */ 5328 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 5329 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 5330 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 5331 + ETH_FCS_LEN; 5332 5333 if (netif_running(netdev)) 5334 e1000e_up(adapter); 5335 else 5336 e1000e_reset(adapter); 5337 5338 clear_bit(__E1000_RESETTING, &adapter->state); 5339 5340 return 0; 5341 } 5342 5343 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 5344 int cmd) 5345 { 5346 struct e1000_adapter *adapter = netdev_priv(netdev); 5347 struct mii_ioctl_data *data = if_mii(ifr); 5348 5349 if (adapter->hw.phy.media_type != e1000_media_type_copper) 5350 return -EOPNOTSUPP; 5351 5352 switch (cmd) { 5353 case SIOCGMIIPHY: 5354 data->phy_id = adapter->hw.phy.addr; 5355 break; 5356 case SIOCGMIIREG: 5357 e1000_phy_read_status(adapter); 5358 5359 switch (data->reg_num & 0x1F) { 5360 case MII_BMCR: 5361 data->val_out = adapter->phy_regs.bmcr; 5362 break; 5363 case MII_BMSR: 5364 data->val_out = adapter->phy_regs.bmsr; 5365 break; 5366 case MII_PHYSID1: 5367 data->val_out = (adapter->hw.phy.id >> 16); 5368 break; 5369 case MII_PHYSID2: 5370 data->val_out = (adapter->hw.phy.id & 0xFFFF); 5371 break; 5372 case MII_ADVERTISE: 5373 data->val_out = adapter->phy_regs.advertise; 5374 break; 5375 case MII_LPA: 5376 data->val_out = adapter->phy_regs.lpa; 5377 break; 5378 case MII_EXPANSION: 5379 data->val_out = adapter->phy_regs.expansion; 5380 break; 5381 case MII_CTRL1000: 5382 data->val_out = adapter->phy_regs.ctrl1000; 5383 break; 5384 case MII_STAT1000: 5385 data->val_out = adapter->phy_regs.stat1000; 5386 break; 5387 case MII_ESTATUS: 5388 data->val_out = adapter->phy_regs.estatus; 5389 break; 5390 default: 5391 return -EIO; 5392 } 5393 break; 5394 case SIOCSMIIREG: 5395 default: 5396 return -EOPNOTSUPP; 5397 } 5398 return 0; 5399 } 5400 5401 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5402 { 5403 switch (cmd) { 5404 case SIOCGMIIPHY: 5405 case SIOCGMIIREG: 5406 case SIOCSMIIREG: 5407 return e1000_mii_ioctl(netdev, ifr, cmd); 5408 default: 5409 return -EOPNOTSUPP; 5410 } 5411 } 5412 5413 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) 5414 { 5415 struct e1000_hw *hw = &adapter->hw; 5416 u32 i, mac_reg; 5417 u16 phy_reg, wuc_enable; 5418 int retval = 0; 5419 5420 /* copy MAC RARs to PHY RARs */ 5421 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 5422 5423 retval = hw->phy.ops.acquire(hw); 5424 if (retval) { 5425 e_err("Could not acquire PHY\n"); 5426 return retval; 5427 } 5428 5429 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ 5430 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5431 if (retval) 5432 goto release; 5433 5434 /* copy MAC MTA to PHY MTA - only needed for pchlan */ 5435 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 5436 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 5437 hw->phy.ops.write_reg_page(hw, BM_MTA(i), 5438 (u16)(mac_reg & 0xFFFF)); 5439 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, 5440 (u16)((mac_reg >> 16) & 0xFFFF)); 5441 } 5442 5443 /* configure PHY Rx Control register */ 5444 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); 5445 
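/*
 * Mirror the relevant MAC receive-control and flow-control bits into the
 * PHY copy of RCTL so filtering behaves the same while the PHY handles
 * wake-up.
 */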
mac_reg = er32(RCTL); 5446 if (mac_reg & E1000_RCTL_UPE) 5447 phy_reg |= BM_RCTL_UPE; 5448 if (mac_reg & E1000_RCTL_MPE) 5449 phy_reg |= BM_RCTL_MPE; 5450 phy_reg &= ~(BM_RCTL_MO_MASK); 5451 if (mac_reg & E1000_RCTL_MO_3) 5452 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 5453 << BM_RCTL_MO_SHIFT); 5454 if (mac_reg & E1000_RCTL_BAM) 5455 phy_reg |= BM_RCTL_BAM; 5456 if (mac_reg & E1000_RCTL_PMCF) 5457 phy_reg |= BM_RCTL_PMCF; 5458 mac_reg = er32(CTRL); 5459 if (mac_reg & E1000_CTRL_RFCE) 5460 phy_reg |= BM_RCTL_RFCE; 5461 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); 5462 5463 /* enable PHY wakeup in MAC register */ 5464 ew32(WUFC, wufc); 5465 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); 5466 5467 /* configure and enable PHY wakeup in PHY registers */ 5468 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); 5469 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); 5470 5471 /* activate PHY wakeup */ 5472 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 5473 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5474 if (retval) 5475 e_err("Could not set PHY Host Wakeup bit\n"); 5476 release: 5477 hw->phy.ops.release(hw); 5478 5479 return retval; 5480 } 5481 5482 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, 5483 bool runtime) 5484 { 5485 struct net_device *netdev = pci_get_drvdata(pdev); 5486 struct e1000_adapter *adapter = netdev_priv(netdev); 5487 struct e1000_hw *hw = &adapter->hw; 5488 u32 ctrl, ctrl_ext, rctl, status; 5489 /* Runtime suspend should only enable wakeup for link changes */ 5490 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; 5491 int retval = 0; 5492 5493 netif_device_detach(netdev); 5494 5495 if (netif_running(netdev)) { 5496 int count = E1000_CHECK_RESET_COUNT; 5497 5498 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 5499 usleep_range(10000, 20000); 5500 5501 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 5502 e1000e_down(adapter); 5503 e1000_free_irq(adapter); 5504 } 5505 e1000e_reset_interrupt_capability(adapter); 5506 5507 retval = pci_save_state(pdev); 5508 if (retval) 5509 return retval; 5510 5511 status = er32(STATUS); 5512 if (status & E1000_STATUS_LU) 5513 wufc &= ~E1000_WUFC_LNKC; 5514 5515 if (wufc) { 5516 e1000_setup_rctl(adapter); 5517 e1000e_set_rx_mode(netdev); 5518 5519 /* turn on all-multi mode if wake on multicast is enabled */ 5520 if (wufc & E1000_WUFC_MC) { 5521 rctl = er32(RCTL); 5522 rctl |= E1000_RCTL_MPE; 5523 ew32(RCTL, rctl); 5524 } 5525 5526 ctrl = er32(CTRL); 5527 /* advertise wake from D3Cold */ 5528 #define E1000_CTRL_ADVD3WUC 0x00100000 5529 /* phy power management enable */ 5530 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5531 ctrl |= E1000_CTRL_ADVD3WUC; 5532 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 5533 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 5534 ew32(CTRL, ctrl); 5535 5536 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 5537 adapter->hw.phy.media_type == 5538 e1000_media_type_internal_serdes) { 5539 /* keep the laser running in D3 */ 5540 ctrl_ext = er32(CTRL_EXT); 5541 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 5542 ew32(CTRL_EXT, ctrl_ext); 5543 } 5544 5545 if (adapter->flags & FLAG_IS_ICH) 5546 e1000_suspend_workarounds_ich8lan(&adapter->hw); 5547 5548 /* Allow time for pending master requests to run */ 5549 e1000e_disable_pcie_master(&adapter->hw); 5550 5551 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 5552 /* enable wakeup by the PHY */ 5553 retval = e1000_init_phy_wakeup(adapter, wufc); 5554 
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
{
	if (sleep && wake) {
		pci_prepare_to_sleep(pdev);
		return;
	}

	pci_wake_from_d3(pdev, wake);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
				    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3. To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_pcie_cap(us_dev);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}
}

#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	pci_disable_link_state_locked(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	int pos;
	u16 reg16;

	/*
	 * Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
	pos = pci_pcie_cap(pdev);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);

	if (!pdev->bus->self)
		return;

	pos = pci_pcie_cap(pdev->bus->self);
	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");

	__e1000e_disable_aspm(pdev, state);
}

#ifdef CONFIG_PM
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
	return !!adapter->tx_ring->buffer_info;
}

static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	u32 err;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	if (hw->mac.type == e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
			       phy_data & E1000_WUS_EX ? "Unicast Packet" :
			       phy_data & E1000_WUS_MC ? "Multicast Packet" :
			       phy_data & E1000_WUS_BC ? "Broadcast Packet" :
			       phy_data & E1000_WUS_MAG ? "Magic Packet" :
			       phy_data & E1000_WUS_LNKC ?
			       "Link Status Change" : "other");
		}
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);
		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
			       wus & E1000_WUS_EX ? "Unicast Packet" :
			       wus & E1000_WUS_MC ? "Multicast Packet" :
			       wus & E1000_WUS_BC ? "Broadcast Packet" :
			       wus & E1000_WUS_MAG ? "Magic Packet" :
			       wus & E1000_WUS_LNKC ? "Link Status Change" :
			       "other");
		}
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int e1000_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake, false);
	if (!retval)
		e1000_complete_shutdown(pdev, true, wake);

	return retval;
}

static int e1000_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter))
		adapter->idle_check = true;

	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter)) {
		bool wake;

		__e1000_shutdown(pdev, &wake, true);
	}

	return 0;
}

static int e1000_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	if (adapter->idle_check) {
		adapter->idle_check = false;
		if (!e1000e_has_link(adapter))
			pm_schedule_suspend(dev, MSEC_PER_SEC);
	}

	return -EBUSY;
}

static int e1000_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	adapter->idle_check = !dev->power.runtime_auto;
	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
}

#ifdef CONFIG_NET_POLL_CONTROLLER

static irqreturn_t e1000_intr_msix(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->msix_entries) {
		int vector, msix_irq;

		vector = 0;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_rx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_tx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_msix_other(msix_irq, netdev);
		enable_irq(msix_irq);
	}

	return IRQ_HANDLED;
}

/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		disable_irq(adapter->pdev->irq);
		e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default: /* E1000E_INT_MODE_LEGACY */
		disable_irq(adapter->pdev->irq);
		e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pdev->state_saved = true;
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
}

static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000_read_pba_string_generic(hw, pba_str,
						E1000_PBANUM_LENGTH);
	if (ret_val)
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}

static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	le16_to_cpus(&buf);
	if (!ret_val && (!(buf & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;

	if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
			 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
			 NETIF_F_RXALL)))
		return 0;

	/*
	 * IP payload checksum (enabled with jumbos/packet-split when Rx
	 * checksum is enabled) and generation of RSS hash is mutually
	 * exclusive in the hardware.
	 */
	if (adapter->rx_ps_pages &&
	    (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
		e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
		return -EINVAL;
	}

	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* We need to take it back to defaults, which might mean
			 * stripping is still disabled at the adapter level.
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}

	netdev->features = features;

	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	else
		e1000e_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_set_rx_mode	= e1000e_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;
	static int cards_found;
	u16 aspm_disable_flag = 0;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
					  pci_select_bars(pdev, IORESOURCE_MEM),
					  e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
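	/* cache board-specific parameters from the matching e1000_info entry */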
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block(hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
				       1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
				       1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (!hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	/*
	 * The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
			   e1000_runtime_resume, e1000_idle)
};
#endif

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	.driver   = {
		.pm = &e1000_pm_ops,
	},
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);


MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */