// SPDX-License-Identifier: GPL-2.0
/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 *	Linux NICS <linux.nics@intel.com>
 *	e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 *	Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
	[board_pch_lpt]		= &e1000_pch_lpt_info,
	[board_pch_spt]		= &e1000_pch_spt_info,
	[board_pch_cnp]		= &e1000_pch_cnp_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
"TIDV"}, 116 {E1000_TXDCTL(0), "TXDCTL"}, 117 {E1000_TADV, "TADV"}, 118 {E1000_TARC(0), "TARC"}, 119 {E1000_TDFH, "TDFH"}, 120 {E1000_TDFT, "TDFT"}, 121 {E1000_TDFHS, "TDFHS"}, 122 {E1000_TDFTS, "TDFTS"}, 123 {E1000_TDFPC, "TDFPC"}, 124 125 /* List Terminator */ 126 {0, NULL} 127 }; 128 129 /** 130 * __ew32_prepare - prepare to write to MAC CSR register on certain parts 131 * @hw: pointer to the HW structure 132 * 133 * When updating the MAC CSR registers, the Manageability Engine (ME) could 134 * be accessing the registers at the same time. Normally, this is handled in 135 * h/w by an arbiter but on some parts there is a bug that acknowledges Host 136 * accesses later than it should which could result in the register to have 137 * an incorrect value. Workaround this by checking the FWSM register which 138 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set 139 * and try again a number of times. 140 **/ 141 s32 __ew32_prepare(struct e1000_hw *hw) 142 { 143 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; 144 145 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) 146 udelay(50); 147 148 return i; 149 } 150 151 void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) 152 { 153 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 154 __ew32_prepare(hw); 155 156 writel(val, hw->hw_addr + reg); 157 } 158 159 /** 160 * e1000_regdump - register printout routine 161 * @hw: pointer to the HW structure 162 * @reginfo: pointer to the register info table 163 **/ 164 static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) 165 { 166 int n = 0; 167 char rname[16]; 168 u32 regs[8]; 169 170 switch (reginfo->ofs) { 171 case E1000_RXDCTL(0): 172 for (n = 0; n < 2; n++) 173 regs[n] = __er32(hw, E1000_RXDCTL(n)); 174 break; 175 case E1000_TXDCTL(0): 176 for (n = 0; n < 2; n++) 177 regs[n] = __er32(hw, E1000_TXDCTL(n)); 178 break; 179 case E1000_TARC(0): 180 for (n = 0; n < 2; n++) 181 regs[n] = __er32(hw, E1000_TARC(n)); 182 break; 183 default: 184 pr_info("%-15s %08x\n", 185 reginfo->name, __er32(hw, reginfo->ofs)); 186 return; 187 } 188 189 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); 190 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); 191 } 192 193 static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, 194 struct e1000_buffer *bi) 195 { 196 int i; 197 struct e1000_ps_page *ps_page; 198 199 for (i = 0; i < adapter->rx_ps_pages; i++) { 200 ps_page = &bi->ps_pages[i]; 201 202 if (ps_page->page) { 203 pr_info("packet dump for ps_page %d:\n", i); 204 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 205 16, 1, page_address(ps_page->page), 206 PAGE_SIZE, true); 207 } 208 } 209 } 210 211 /** 212 * e1000e_dump - Print registers, Tx-ring and Rx-ring 213 * @adapter: board private structure 214 **/ 215 static void e1000e_dump(struct e1000_adapter *adapter) 216 { 217 struct net_device *netdev = adapter->netdev; 218 struct e1000_hw *hw = &adapter->hw; 219 struct e1000_reg_info *reginfo; 220 struct e1000_ring *tx_ring = adapter->tx_ring; 221 struct e1000_tx_desc *tx_desc; 222 struct my_u0 { 223 __le64 a; 224 __le64 b; 225 } *u0; 226 struct e1000_buffer *buffer_info; 227 struct e1000_ring *rx_ring = adapter->rx_ring; 228 union e1000_rx_desc_packet_split *rx_desc_ps; 229 union e1000_rx_desc_extended *rx_desc; 230 struct my_u1 { 231 __le64 a; 232 __le64 b; 233 __le64 c; 234 __le64 d; 235 } *u1; 236 u32 staterr; 237 int i = 0; 238 239 if (!netif_msg_hw(adapter)) 240 return; 241 242 /* Print netdevice Info */ 243 if (netdev) { 244 
dev_info(&adapter->pdev->dev, "Net device Info\n"); 245 pr_info("Device Name state trans_start\n"); 246 pr_info("%-15s %016lX %016lX\n", netdev->name, 247 netdev->state, dev_trans_start(netdev)); 248 } 249 250 /* Print Registers */ 251 dev_info(&adapter->pdev->dev, "Register Dump\n"); 252 pr_info(" Register Name Value\n"); 253 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; 254 reginfo->name; reginfo++) { 255 e1000_regdump(hw, reginfo); 256 } 257 258 /* Print Tx Ring Summary */ 259 if (!netdev || !netif_running(netdev)) 260 return; 261 262 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); 263 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); 264 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 265 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", 266 0, tx_ring->next_to_use, tx_ring->next_to_clean, 267 (unsigned long long)buffer_info->dma, 268 buffer_info->length, 269 buffer_info->next_to_watch, 270 (unsigned long long)buffer_info->time_stamp); 271 272 /* Print Tx Ring */ 273 if (!netif_msg_tx_done(adapter)) 274 goto rx_ring_summary; 275 276 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); 277 278 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 279 * 280 * Legacy Transmit Descriptor 281 * +--------------------------------------------------------------+ 282 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 283 * +--------------------------------------------------------------+ 284 * 8 | Special | CSS | Status | CMD | CSO | Length | 285 * +--------------------------------------------------------------+ 286 * 63 48 47 36 35 32 31 24 23 16 15 0 287 * 288 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 289 * 63 48 47 40 39 32 31 16 15 8 7 0 290 * +----------------------------------------------------------------+ 291 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 292 * +----------------------------------------------------------------+ 293 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 294 * +----------------------------------------------------------------+ 295 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 296 * 297 * Extended Data Descriptor (DTYP=0x1) 298 * +----------------------------------------------------------------+ 299 * 0 | Buffer Address [63:0] | 300 * +----------------------------------------------------------------+ 301 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 302 * +----------------------------------------------------------------+ 303 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 304 */ 305 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); 306 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); 307 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); 308 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 309 const char *next_desc; 310 tx_desc = E1000_TX_DESC(*tx_ring, i); 311 buffer_info = &tx_ring->buffer_info[i]; 312 u0 = (struct my_u0 *)tx_desc; 313 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 314 next_desc = " NTC/U"; 315 else if (i == tx_ring->next_to_use) 316 next_desc = " NTU"; 317 else if (i == tx_ring->next_to_clean) 318 next_desc = " NTC"; 319 else 320 next_desc = ""; 321 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", 322 (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : 323 ((le64_to_cpu(u0->b) & BIT(20)) ? 
		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->skb)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, buffer_info->skb->data,
				       buffer_info->skb->len, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc] [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					e1000e_dump_ps_pages(adapter,
							     buffer_info);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31  24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |              RSS Hash   |      |          |          |
		 * 0 +-------------------------+ Rsvd |  Reserved| MRQ RSS  |
		 *   | Packet   | IP           |      |          |   Type   |
		 *   | Checksum | Ident        |      |          |          |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc] [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       buffer_info->skb->data,
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
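
/*
 * Illustrative example of the ring arithmetic above (not from the original
 * sources): with count = 256, next_to_clean = 10 and next_to_use = 250 the
 * second branch yields 256 + 10 - 250 - 1 = 15 free slots.  The "- 1" keeps
 * one descriptor permanently unused, so next_to_use == next_to_clean
 * unambiguously means "ring empty" rather than "ring full".
 */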

/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Convert the system time value stored in the RX/TXSTMP registers into a
 * hwtstamp which can be used by the upper level time stamping functions.
 *
 * The 'systim_lock' spinlock is used to protect the consistency of the
 * system time value.  This is needed because reading the 64 bit time
 * value involves reading two 32 bit registers.  The first read latches the
 * value.
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
				      struct skb_shared_hwtstamps *hwtstamps,
				      u64 systim)
{
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
	    !(status & E1000_RXDEXT_STATERR_TST) ||
	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	/* The Rx time stamp registers contain the time stamp.  No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read.  Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * compared.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}

static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, rx_ring->tail);

	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
		u32 rctl = er32(RCTL);

		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, tx_ring->tail);

	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
		u32 tctl = er32(TCTL);

		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}
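
/*
 * Editor's note (not from the original sources): both workaround helpers
 * above follow the same pattern: wait for the ME arbiter via
 * __ew32_prepare(), write the tail, then read it back.  If the wait timed
 * out and the read-back does not match what was written, the corresponding
 * receive/transmit unit is disabled and a full reset is scheduled rather
 * than letting the hardware run with a corrupted tail pointer.
 */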

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
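
/*
 * Editor's note (not from the original sources): the allocation loop above
 * only moves the hardware tail every E1000_RX_BUFFER_WRITE descriptors (the
 * "i & (E1000_RX_BUFFER_WRITE - 1)" test), batching MMIO writes instead of
 * notifying the NIC for every single replenished buffer.  The cleanup paths
 * later in this file rely on the same batching when they return buffers to
 * hardware ("one at a time is too slow").
 */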

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/* Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
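
/*
 * Assumption worth noting (not stated in the original sources): the packet
 * split descriptor is twice the size of the legacy/extended descriptor,
 * which is presumably why the tail offset is written as "i << 1" in the
 * allocation path above, while the other allocation routines write "i"
 * directly.
 */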

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16;	/* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}

static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
				napi_alloc_skb(&adapter->napi, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
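
/*
 * Editor's note (not from the original sources): the copybreak path above
 * trades one small memcpy for keeping the large, DMA-mapped receive buffer
 * on the ring.  Frames shorter than "copybreak" are copied into a freshly
 * allocated skb and the original buffer is recycled, so the stack sees a
 * tightly sized skb and the driver avoids a buffer reallocation on the next
 * fill cycle.
 */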

static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info,
			    bool drop)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		if (drop)
			dev_kfree_skb_any(buffer_info->skb);
		else
			dev_consume_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be blocked on write-back; flush pending descriptor
		 * writebacks to memory and detect again
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		/* Due to rare timing issues, write to TIDV again to ensure
		 * the write is successful
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	adapter->tx_hang_recheck = false;

	if (er32(TDH(0)) == er32(TDT(0))) {
		e_dbg("false hang detected, ignoring\n");
		return;
	}

	/* Real hang detected */
	netif_stop_queue(netdev);

	e1e_rphy(hw, MII_BMSR, &phy_status);
	e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
	e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
	      tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
	      eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
	      phy_status, phy_1000t_status, phy_ext_status, pci_status);

	e1000e_dump(adapter);

	/* Suggest workaround for known h/w issue */
	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
		e_err("Try turning off Tx pause (flow control) via ethtool\n");
}

/**
 * e1000e_tx_hwtstamp_work - check for Tx time stamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the current stored skb.  The timestamp must
 * be for this skb because only one such packet is allowed in the queue.
 */
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
						     tx_hwtstamp_work);
	struct e1000_hw *hw = &adapter->hw;

	if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
		struct sk_buff *skb = adapter->tx_hwtstamp_skb;
		struct skb_shared_hwtstamps shhwtstamps;
		u64 txstmp;

		txstmp = er32(TXSTMPL);
		txstmp |= (u64)er32(TXSTMPH) << 32;

		e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);

		/* Clear the global tx_hwtstamp_skb pointer and force writes
		 * prior to notifying the stack of a Tx timestamp.
		 */
		adapter->tx_hwtstamp_skb = NULL;
		wmb();	/* force write prior to skb_tstamp_tx */

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_consume_skb_any(skb);
	} else if (time_after(jiffies, adapter->tx_hwtstamp_start
			      + adapter->tx_timeout_factor * HZ)) {
		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
		adapter->tx_hwtstamp_skb = NULL;
		adapter->tx_hwtstamp_timeouts++;
		e_warn("clearing Tx timestamp hang\n");
	} else {
		/* reschedule to check later */
		schedule_work(&adapter->tx_hwtstamp_work);
	}
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;

		dma_rmb();	/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info, false);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
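
/*
 * Editor's note (not from the original sources): the smp_mb() in the wake-up
 * path above is assumed to pair with a matching barrier in the transmit
 * routine's queue-stop check later in this file, so a concurrent xmit either
 * sees the queue still stopped or sees the updated next_to_clean.
 * TX_WAKE_THRESHOLD adds hysteresis so the queue is only restarted once a
 * reasonable number of descriptors is free again.
 */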

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/* this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/* page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/* there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			}	/* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct skb_shared_info *shinfo;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}
#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE, DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM */
	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);

			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Reset on uncorrectable ECC error */
	if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
		u32 pbeccsts = er32(PBECCSTS);

		adapter->corr_errors +=
		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
		adapter->uncorr_errors +=
		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

		/* Do the reset outside of interrupt context */
		schedule_work(&adapter->reset_task);

		/* return immediately since reset is imminent */
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;
No need for the 1856 * IMC write 1857 */ 1858 1859 if (icr & E1000_ICR_LSC) { 1860 hw->mac.get_link_status = true; 1861 /* ICH8 workaround-- Call gig speed drop workaround on cable 1862 * disconnect (LSC) before accessing any PHY registers 1863 */ 1864 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1865 (!(er32(STATUS) & E1000_STATUS_LU))) 1866 schedule_work(&adapter->downshift_task); 1867 1868 /* 80003ES2LAN workaround-- 1869 * For packet buffer work-around on link down event; 1870 * disable receives here in the ISR and 1871 * reset adapter in watchdog 1872 */ 1873 if (netif_carrier_ok(netdev) && 1874 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { 1875 /* disable receives */ 1876 rctl = er32(RCTL); 1877 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1878 adapter->flags |= FLAG_RESTART_NOW; 1879 } 1880 /* guard against interrupt when we're going down */ 1881 if (!test_bit(__E1000_DOWN, &adapter->state)) 1882 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1883 } 1884 1885 /* Reset on uncorrectable ECC error */ 1886 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { 1887 u32 pbeccsts = er32(PBECCSTS); 1888 1889 adapter->corr_errors += 1890 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 1891 adapter->uncorr_errors += 1892 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 1893 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 1894 1895 /* Do the reset outside of interrupt context */ 1896 schedule_work(&adapter->reset_task); 1897 1898 /* return immediately since reset is imminent */ 1899 return IRQ_HANDLED; 1900 } 1901 1902 if (napi_schedule_prep(&adapter->napi)) { 1903 adapter->total_tx_bytes = 0; 1904 adapter->total_tx_packets = 0; 1905 adapter->total_rx_bytes = 0; 1906 adapter->total_rx_packets = 0; 1907 __napi_schedule(&adapter->napi); 1908 } 1909 1910 return IRQ_HANDLED; 1911 } 1912 1913 static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) 1914 { 1915 struct net_device *netdev = data; 1916 struct e1000_adapter *adapter = netdev_priv(netdev); 1917 struct e1000_hw *hw = &adapter->hw; 1918 u32 icr = er32(ICR); 1919 1920 if (icr & adapter->eiac_mask) 1921 ew32(ICS, (icr & adapter->eiac_mask)); 1922 1923 if (icr & E1000_ICR_LSC) { 1924 hw->mac.get_link_status = true; 1925 /* guard against interrupt when we're going down */ 1926 if (!test_bit(__E1000_DOWN, &adapter->state)) 1927 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1928 } 1929 1930 if (!test_bit(__E1000_DOWN, &adapter->state)) 1931 ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK); 1932 1933 return IRQ_HANDLED; 1934 } 1935 1936 static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) 1937 { 1938 struct net_device *netdev = data; 1939 struct e1000_adapter *adapter = netdev_priv(netdev); 1940 struct e1000_hw *hw = &adapter->hw; 1941 struct e1000_ring *tx_ring = adapter->tx_ring; 1942 1943 adapter->total_tx_bytes = 0; 1944 adapter->total_tx_packets = 0; 1945 1946 if (!e1000_clean_tx_irq(tx_ring)) 1947 /* Ring was not completely cleaned, so fire another interrupt */ 1948 ew32(ICS, tx_ring->ims_val); 1949 1950 if (!test_bit(__E1000_DOWN, &adapter->state)) 1951 ew32(IMS, adapter->tx_ring->ims_val); 1952 1953 return IRQ_HANDLED; 1954 } 1955 1956 static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) 1957 { 1958 struct net_device *netdev = data; 1959 struct e1000_adapter *adapter = netdev_priv(netdev); 1960 struct e1000_ring *rx_ring = adapter->rx_ring; 1961 1962 /* Write the ITR value calculated at the end of the 1963 * previous interrupt. 
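 * rx_ring->itr_val is the requested rate in interrupts/second; below it is
 * converted into the register's 256 ns interval units as
 * 1000000000 / (itr_val * 256), with 0 leaving interrupt throttling disabled.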
1964 */ 1965 if (rx_ring->set_itr) { 1966 u32 itr = rx_ring->itr_val ? 1967 1000000000 / (rx_ring->itr_val * 256) : 0; 1968 1969 writel(itr, rx_ring->itr_register); 1970 rx_ring->set_itr = 0; 1971 } 1972 1973 if (napi_schedule_prep(&adapter->napi)) { 1974 adapter->total_rx_bytes = 0; 1975 adapter->total_rx_packets = 0; 1976 __napi_schedule(&adapter->napi); 1977 } 1978 return IRQ_HANDLED; 1979 } 1980 1981 /** 1982 * e1000_configure_msix - Configure MSI-X hardware 1983 * 1984 * e1000_configure_msix sets up the hardware to properly 1985 * generate MSI-X interrupts. 1986 **/ 1987 static void e1000_configure_msix(struct e1000_adapter *adapter) 1988 { 1989 struct e1000_hw *hw = &adapter->hw; 1990 struct e1000_ring *rx_ring = adapter->rx_ring; 1991 struct e1000_ring *tx_ring = adapter->tx_ring; 1992 int vector = 0; 1993 u32 ctrl_ext, ivar = 0; 1994 1995 adapter->eiac_mask = 0; 1996 1997 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ 1998 if (hw->mac.type == e1000_82574) { 1999 u32 rfctl = er32(RFCTL); 2000 2001 rfctl |= E1000_RFCTL_ACK_DIS; 2002 ew32(RFCTL, rfctl); 2003 } 2004 2005 /* Configure Rx vector */ 2006 rx_ring->ims_val = E1000_IMS_RXQ0; 2007 adapter->eiac_mask |= rx_ring->ims_val; 2008 if (rx_ring->itr_val) 2009 writel(1000000000 / (rx_ring->itr_val * 256), 2010 rx_ring->itr_register); 2011 else 2012 writel(1, rx_ring->itr_register); 2013 ivar = E1000_IVAR_INT_ALLOC_VALID | vector; 2014 2015 /* Configure Tx vector */ 2016 tx_ring->ims_val = E1000_IMS_TXQ0; 2017 vector++; 2018 if (tx_ring->itr_val) 2019 writel(1000000000 / (tx_ring->itr_val * 256), 2020 tx_ring->itr_register); 2021 else 2022 writel(1, tx_ring->itr_register); 2023 adapter->eiac_mask |= tx_ring->ims_val; 2024 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); 2025 2026 /* set vector for Other Causes, e.g. link changes */ 2027 vector++; 2028 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); 2029 if (rx_ring->itr_val) 2030 writel(1000000000 / (rx_ring->itr_val * 256), 2031 hw->hw_addr + E1000_EITR_82574(vector)); 2032 else 2033 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); 2034 2035 /* Cause Tx interrupts on every write back */ 2036 ivar |= BIT(31); 2037 2038 ew32(IVAR, ivar); 2039 2040 /* enable MSI-X PBA support */ 2041 ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME; 2042 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME; 2043 ew32(CTRL_EXT, ctrl_ext); 2044 e1e_flush(); 2045 } 2046 2047 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) 2048 { 2049 if (adapter->msix_entries) { 2050 pci_disable_msix(adapter->pdev); 2051 kfree(adapter->msix_entries); 2052 adapter->msix_entries = NULL; 2053 } else if (adapter->flags & FLAG_MSI_ENABLED) { 2054 pci_disable_msi(adapter->pdev); 2055 adapter->flags &= ~FLAG_MSI_ENABLED; 2056 } 2057 } 2058 2059 /** 2060 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported 2061 * 2062 * Attempt to configure interrupts using the best available 2063 * capabilities of the hardware and kernel. 
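 * MSI-X is tried first with three vectors (Rx queue 0, Tx queue 0 and one
 * for "other" causes such as link changes); if that fails the driver falls
 * back to MSI and finally to legacy interrupts.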
2064 **/ 2065 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) 2066 { 2067 int err; 2068 int i; 2069 2070 switch (adapter->int_mode) { 2071 case E1000E_INT_MODE_MSIX: 2072 if (adapter->flags & FLAG_HAS_MSIX) { 2073 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 2074 adapter->msix_entries = kcalloc(adapter->num_vectors, 2075 sizeof(struct 2076 msix_entry), 2077 GFP_KERNEL); 2078 if (adapter->msix_entries) { 2079 struct e1000_adapter *a = adapter; 2080 2081 for (i = 0; i < adapter->num_vectors; i++) 2082 adapter->msix_entries[i].entry = i; 2083 2084 err = pci_enable_msix_range(a->pdev, 2085 a->msix_entries, 2086 a->num_vectors, 2087 a->num_vectors); 2088 if (err > 0) 2089 return; 2090 } 2091 /* MSI-X failed, so fall through and try MSI */ 2092 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); 2093 e1000e_reset_interrupt_capability(adapter); 2094 } 2095 adapter->int_mode = E1000E_INT_MODE_MSI; 2096 /* Fall through */ 2097 case E1000E_INT_MODE_MSI: 2098 if (!pci_enable_msi(adapter->pdev)) { 2099 adapter->flags |= FLAG_MSI_ENABLED; 2100 } else { 2101 adapter->int_mode = E1000E_INT_MODE_LEGACY; 2102 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); 2103 } 2104 /* Fall through */ 2105 case E1000E_INT_MODE_LEGACY: 2106 /* Don't do anything; this is the system default */ 2107 break; 2108 } 2109 2110 /* store the number of vectors being used */ 2111 adapter->num_vectors = 1; 2112 } 2113 2114 /** 2115 * e1000_request_msix - Initialize MSI-X interrupts 2116 * 2117 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the 2118 * kernel. 2119 **/ 2120 static int e1000_request_msix(struct e1000_adapter *adapter) 2121 { 2122 struct net_device *netdev = adapter->netdev; 2123 int err = 0, vector = 0; 2124 2125 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 2126 snprintf(adapter->rx_ring->name, 2127 sizeof(adapter->rx_ring->name) - 1, 2128 "%s-rx-0", netdev->name); 2129 else 2130 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 2131 err = request_irq(adapter->msix_entries[vector].vector, 2132 e1000_intr_msix_rx, 0, adapter->rx_ring->name, 2133 netdev); 2134 if (err) 2135 return err; 2136 adapter->rx_ring->itr_register = adapter->hw.hw_addr + 2137 E1000_EITR_82574(vector); 2138 adapter->rx_ring->itr_val = adapter->itr; 2139 vector++; 2140 2141 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 2142 snprintf(adapter->tx_ring->name, 2143 sizeof(adapter->tx_ring->name) - 1, 2144 "%s-tx-0", netdev->name); 2145 else 2146 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 2147 err = request_irq(adapter->msix_entries[vector].vector, 2148 e1000_intr_msix_tx, 0, adapter->tx_ring->name, 2149 netdev); 2150 if (err) 2151 return err; 2152 adapter->tx_ring->itr_register = adapter->hw.hw_addr + 2153 E1000_EITR_82574(vector); 2154 adapter->tx_ring->itr_val = adapter->itr; 2155 vector++; 2156 2157 err = request_irq(adapter->msix_entries[vector].vector, 2158 e1000_msix_other, 0, netdev->name, netdev); 2159 if (err) 2160 return err; 2161 2162 e1000_configure_msix(adapter); 2163 2164 return 0; 2165 } 2166 2167 /** 2168 * e1000_request_irq - initialize interrupts 2169 * 2170 * Attempts to configure interrupts using the best available 2171 * capabilities of the hardware and kernel. 
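 * MSI-X is attempted first via e1000_request_msix(); if that fails the
 * interrupt capability is reset and MSI is tried, with a shared legacy IRQ
 * as the final fallback.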
2172 **/ 2173 static int e1000_request_irq(struct e1000_adapter *adapter) 2174 { 2175 struct net_device *netdev = adapter->netdev; 2176 int err; 2177 2178 if (adapter->msix_entries) { 2179 err = e1000_request_msix(adapter); 2180 if (!err) 2181 return err; 2182 /* fall back to MSI */ 2183 e1000e_reset_interrupt_capability(adapter); 2184 adapter->int_mode = E1000E_INT_MODE_MSI; 2185 e1000e_set_interrupt_capability(adapter); 2186 } 2187 if (adapter->flags & FLAG_MSI_ENABLED) { 2188 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, 2189 netdev->name, netdev); 2190 if (!err) 2191 return err; 2192 2193 /* fall back to legacy interrupt */ 2194 e1000e_reset_interrupt_capability(adapter); 2195 adapter->int_mode = E1000E_INT_MODE_LEGACY; 2196 } 2197 2198 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, 2199 netdev->name, netdev); 2200 if (err) 2201 e_err("Unable to allocate interrupt, Error: %d\n", err); 2202 2203 return err; 2204 } 2205 2206 static void e1000_free_irq(struct e1000_adapter *adapter) 2207 { 2208 struct net_device *netdev = adapter->netdev; 2209 2210 if (adapter->msix_entries) { 2211 int vector = 0; 2212 2213 free_irq(adapter->msix_entries[vector].vector, netdev); 2214 vector++; 2215 2216 free_irq(adapter->msix_entries[vector].vector, netdev); 2217 vector++; 2218 2219 /* Other Causes interrupt vector */ 2220 free_irq(adapter->msix_entries[vector].vector, netdev); 2221 return; 2222 } 2223 2224 free_irq(adapter->pdev->irq, netdev); 2225 } 2226 2227 /** 2228 * e1000_irq_disable - Mask off interrupt generation on the NIC 2229 **/ 2230 static void e1000_irq_disable(struct e1000_adapter *adapter) 2231 { 2232 struct e1000_hw *hw = &adapter->hw; 2233 2234 ew32(IMC, ~0); 2235 if (adapter->msix_entries) 2236 ew32(EIAC_82574, 0); 2237 e1e_flush(); 2238 2239 if (adapter->msix_entries) { 2240 int i; 2241 2242 for (i = 0; i < adapter->num_vectors; i++) 2243 synchronize_irq(adapter->msix_entries[i].vector); 2244 } else { 2245 synchronize_irq(adapter->pdev->irq); 2246 } 2247 } 2248 2249 /** 2250 * e1000_irq_enable - Enable default interrupt generation settings 2251 **/ 2252 static void e1000_irq_enable(struct e1000_adapter *adapter) 2253 { 2254 struct e1000_hw *hw = &adapter->hw; 2255 2256 if (adapter->msix_entries) { 2257 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2258 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | 2259 IMS_OTHER_MASK); 2260 } else if (hw->mac.type >= e1000_pch_lpt) { 2261 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); 2262 } else { 2263 ew32(IMS, IMS_ENABLE_MASK); 2264 } 2265 e1e_flush(); 2266 } 2267 2268 /** 2269 * e1000e_get_hw_control - get control of the h/w from f/w 2270 * @adapter: address of board private structure 2271 * 2272 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2273 * For ASF and Pass Through versions of f/w this means that 2274 * the driver is loaded. For AMT version (only with 82573) 2275 * of the f/w this means that the network i/f is open. 
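 * The DRV_LOAD bit set here is cleared again by e1000e_release_hw_control()
 * when control of the hardware is handed back to the firmware.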
2276 **/ 2277 void e1000e_get_hw_control(struct e1000_adapter *adapter) 2278 { 2279 struct e1000_hw *hw = &adapter->hw; 2280 u32 ctrl_ext; 2281 u32 swsm; 2282 2283 /* Let firmware know the driver has taken over */ 2284 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2285 swsm = er32(SWSM); 2286 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); 2287 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2288 ctrl_ext = er32(CTRL_EXT); 2289 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2290 } 2291 } 2292 2293 /** 2294 * e1000e_release_hw_control - release control of the h/w to f/w 2295 * @adapter: address of board private structure 2296 * 2297 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2298 * For ASF and Pass Through versions of f/w this means that the 2299 * driver is no longer loaded. For AMT version (only with 82573) i 2300 * of the f/w this means that the network i/f is closed. 2301 * 2302 **/ 2303 void e1000e_release_hw_control(struct e1000_adapter *adapter) 2304 { 2305 struct e1000_hw *hw = &adapter->hw; 2306 u32 ctrl_ext; 2307 u32 swsm; 2308 2309 /* Let firmware taken over control of h/w */ 2310 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2311 swsm = er32(SWSM); 2312 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 2313 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2314 ctrl_ext = er32(CTRL_EXT); 2315 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2316 } 2317 } 2318 2319 /** 2320 * e1000_alloc_ring_dma - allocate memory for a ring structure 2321 **/ 2322 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, 2323 struct e1000_ring *ring) 2324 { 2325 struct pci_dev *pdev = adapter->pdev; 2326 2327 ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, 2328 GFP_KERNEL); 2329 if (!ring->desc) 2330 return -ENOMEM; 2331 2332 return 0; 2333 } 2334 2335 /** 2336 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) 2337 * @tx_ring: Tx descriptor ring 2338 * 2339 * Return 0 on success, negative on failure 2340 **/ 2341 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) 2342 { 2343 struct e1000_adapter *adapter = tx_ring->adapter; 2344 int err = -ENOMEM, size; 2345 2346 size = sizeof(struct e1000_buffer) * tx_ring->count; 2347 tx_ring->buffer_info = vzalloc(size); 2348 if (!tx_ring->buffer_info) 2349 goto err; 2350 2351 /* round up to nearest 4K */ 2352 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2353 tx_ring->size = ALIGN(tx_ring->size, 4096); 2354 2355 err = e1000_alloc_ring_dma(adapter, tx_ring); 2356 if (err) 2357 goto err; 2358 2359 tx_ring->next_to_use = 0; 2360 tx_ring->next_to_clean = 0; 2361 2362 return 0; 2363 err: 2364 vfree(tx_ring->buffer_info); 2365 e_err("Unable to allocate memory for the transmit descriptor ring\n"); 2366 return err; 2367 } 2368 2369 /** 2370 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) 2371 * @rx_ring: Rx descriptor ring 2372 * 2373 * Returns 0 on success, negative on failure 2374 **/ 2375 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) 2376 { 2377 struct e1000_adapter *adapter = rx_ring->adapter; 2378 struct e1000_buffer *buffer_info; 2379 int i, size, desc_len, err = -ENOMEM; 2380 2381 size = sizeof(struct e1000_buffer) * rx_ring->count; 2382 rx_ring->buffer_info = vzalloc(size); 2383 if (!rx_ring->buffer_info) 2384 goto err; 2385 2386 for (i = 0; i < rx_ring->count; i++) { 2387 buffer_info = &rx_ring->buffer_info[i]; 2388 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, 2389 sizeof(struct e1000_ps_page), 2390 GFP_KERNEL); 2391 if 
(!buffer_info->ps_pages) 2392 goto err_pages; 2393 } 2394 2395 desc_len = sizeof(union e1000_rx_desc_packet_split); 2396 2397 /* Round up to nearest 4K */ 2398 rx_ring->size = rx_ring->count * desc_len; 2399 rx_ring->size = ALIGN(rx_ring->size, 4096); 2400 2401 err = e1000_alloc_ring_dma(adapter, rx_ring); 2402 if (err) 2403 goto err_pages; 2404 2405 rx_ring->next_to_clean = 0; 2406 rx_ring->next_to_use = 0; 2407 rx_ring->rx_skb_top = NULL; 2408 2409 return 0; 2410 2411 err_pages: 2412 for (i = 0; i < rx_ring->count; i++) { 2413 buffer_info = &rx_ring->buffer_info[i]; 2414 kfree(buffer_info->ps_pages); 2415 } 2416 err: 2417 vfree(rx_ring->buffer_info); 2418 e_err("Unable to allocate memory for the receive descriptor ring\n"); 2419 return err; 2420 } 2421 2422 /** 2423 * e1000_clean_tx_ring - Free Tx Buffers 2424 * @tx_ring: Tx descriptor ring 2425 **/ 2426 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) 2427 { 2428 struct e1000_adapter *adapter = tx_ring->adapter; 2429 struct e1000_buffer *buffer_info; 2430 unsigned long size; 2431 unsigned int i; 2432 2433 for (i = 0; i < tx_ring->count; i++) { 2434 buffer_info = &tx_ring->buffer_info[i]; 2435 e1000_put_txbuf(tx_ring, buffer_info, false); 2436 } 2437 2438 netdev_reset_queue(adapter->netdev); 2439 size = sizeof(struct e1000_buffer) * tx_ring->count; 2440 memset(tx_ring->buffer_info, 0, size); 2441 2442 memset(tx_ring->desc, 0, tx_ring->size); 2443 2444 tx_ring->next_to_use = 0; 2445 tx_ring->next_to_clean = 0; 2446 } 2447 2448 /** 2449 * e1000e_free_tx_resources - Free Tx Resources per Queue 2450 * @tx_ring: Tx descriptor ring 2451 * 2452 * Free all transmit software resources 2453 **/ 2454 void e1000e_free_tx_resources(struct e1000_ring *tx_ring) 2455 { 2456 struct e1000_adapter *adapter = tx_ring->adapter; 2457 struct pci_dev *pdev = adapter->pdev; 2458 2459 e1000_clean_tx_ring(tx_ring); 2460 2461 vfree(tx_ring->buffer_info); 2462 tx_ring->buffer_info = NULL; 2463 2464 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2465 tx_ring->dma); 2466 tx_ring->desc = NULL; 2467 } 2468 2469 /** 2470 * e1000e_free_rx_resources - Free Rx Resources 2471 * @rx_ring: Rx descriptor ring 2472 * 2473 * Free all receive software resources 2474 **/ 2475 void e1000e_free_rx_resources(struct e1000_ring *rx_ring) 2476 { 2477 struct e1000_adapter *adapter = rx_ring->adapter; 2478 struct pci_dev *pdev = adapter->pdev; 2479 int i; 2480 2481 e1000_clean_rx_ring(rx_ring); 2482 2483 for (i = 0; i < rx_ring->count; i++) 2484 kfree(rx_ring->buffer_info[i].ps_pages); 2485 2486 vfree(rx_ring->buffer_info); 2487 rx_ring->buffer_info = NULL; 2488 2489 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2490 rx_ring->dma); 2491 rx_ring->desc = NULL; 2492 } 2493 2494 /** 2495 * e1000_update_itr - update the dynamic ITR value based on statistics 2496 * @adapter: pointer to adapter 2497 * @itr_setting: current adapter->itr 2498 * @packets: the number of packets during this measurement interval 2499 * @bytes: the number of bytes during this measurement interval 2500 * 2501 * Stores a new ITR value based on packets and byte 2502 * counts during the last interrupt. The advantage of per interrupt 2503 * computation is faster updates and more accurate ITR for the current 2504 * traffic pattern. Constants in this function were computed 2505 * based on theoretical maximum wire speed and thresholds were set based 2506 * on testing data as well as attempting to minimize response time 2507 * while increasing bulk throughput. 
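 * For example, while in low_latency an interval carrying more than 10000
 * bytes with an average packet size above 8000 bytes (TSO or jumbo traffic)
 * escalates to bulk_latency, whereas an interval of at most two packets
 * totalling under 512 bytes falls back to lowest_latency.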
This functionality is controlled 2508 * by the InterruptThrottleRate module parameter. 2509 **/ 2510 static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) 2511 { 2512 unsigned int retval = itr_setting; 2513 2514 if (packets == 0) 2515 return itr_setting; 2516 2517 switch (itr_setting) { 2518 case lowest_latency: 2519 /* handle TSO and jumbo frames */ 2520 if (bytes / packets > 8000) 2521 retval = bulk_latency; 2522 else if ((packets < 5) && (bytes > 512)) 2523 retval = low_latency; 2524 break; 2525 case low_latency: /* 50 usec aka 20000 ints/s */ 2526 if (bytes > 10000) { 2527 /* this if handles the TSO accounting */ 2528 if (bytes / packets > 8000) 2529 retval = bulk_latency; 2530 else if ((packets < 10) || ((bytes / packets) > 1200)) 2531 retval = bulk_latency; 2532 else if ((packets > 35)) 2533 retval = lowest_latency; 2534 } else if (bytes / packets > 2000) { 2535 retval = bulk_latency; 2536 } else if (packets <= 2 && bytes < 512) { 2537 retval = lowest_latency; 2538 } 2539 break; 2540 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2541 if (bytes > 25000) { 2542 if (packets > 35) 2543 retval = low_latency; 2544 } else if (bytes < 6000) { 2545 retval = low_latency; 2546 } 2547 break; 2548 } 2549 2550 return retval; 2551 } 2552 2553 static void e1000_set_itr(struct e1000_adapter *adapter) 2554 { 2555 u16 current_itr; 2556 u32 new_itr = adapter->itr; 2557 2558 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2559 if (adapter->link_speed != SPEED_1000) { 2560 current_itr = 0; 2561 new_itr = 4000; 2562 goto set_itr_now; 2563 } 2564 2565 if (adapter->flags2 & FLAG2_DISABLE_AIM) { 2566 new_itr = 0; 2567 goto set_itr_now; 2568 } 2569 2570 adapter->tx_itr = e1000_update_itr(adapter->tx_itr, 2571 adapter->total_tx_packets, 2572 adapter->total_tx_bytes); 2573 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2574 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2575 adapter->tx_itr = low_latency; 2576 2577 adapter->rx_itr = e1000_update_itr(adapter->rx_itr, 2578 adapter->total_rx_packets, 2579 adapter->total_rx_bytes); 2580 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2581 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2582 adapter->rx_itr = low_latency; 2583 2584 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2585 2586 /* counts and packets in update_itr are dependent on these numbers */ 2587 switch (current_itr) { 2588 case lowest_latency: 2589 new_itr = 70000; 2590 break; 2591 case low_latency: 2592 new_itr = 20000; /* aka hwitr = ~200 */ 2593 break; 2594 case bulk_latency: 2595 new_itr = 4000; 2596 break; 2597 default: 2598 break; 2599 } 2600 2601 set_itr_now: 2602 if (new_itr != adapter->itr) { 2603 /* this attempts to bias the interrupt rate towards Bulk 2604 * by adding intermediate steps when interrupt rate is 2605 * increasing 2606 */ 2607 new_itr = new_itr > adapter->itr ? 2608 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; 2609 adapter->itr = new_itr; 2610 adapter->rx_ring->itr_val = new_itr; 2611 if (adapter->msix_entries) 2612 adapter->rx_ring->set_itr = 1; 2613 else 2614 e1000e_write_itr(adapter, new_itr); 2615 } 2616 } 2617 2618 /** 2619 * e1000e_write_itr - write the ITR value to the appropriate registers 2620 * @adapter: address of board private structure 2621 * @itr: new ITR value to program 2622 * 2623 * e1000e_write_itr determines if the adapter is in MSI-X mode 2624 * and, if so, writes the EITR registers with the ITR value. 
2625 * Otherwise, it writes the ITR value into the ITR register. 2626 **/ 2627 void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) 2628 { 2629 struct e1000_hw *hw = &adapter->hw; 2630 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0; 2631 2632 if (adapter->msix_entries) { 2633 int vector; 2634 2635 for (vector = 0; vector < adapter->num_vectors; vector++) 2636 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); 2637 } else { 2638 ew32(ITR, new_itr); 2639 } 2640 } 2641 2642 /** 2643 * e1000_alloc_queues - Allocate memory for all rings 2644 * @adapter: board private structure to initialize 2645 **/ 2646 static int e1000_alloc_queues(struct e1000_adapter *adapter) 2647 { 2648 int size = sizeof(struct e1000_ring); 2649 2650 adapter->tx_ring = kzalloc(size, GFP_KERNEL); 2651 if (!adapter->tx_ring) 2652 goto err; 2653 adapter->tx_ring->count = adapter->tx_ring_count; 2654 adapter->tx_ring->adapter = adapter; 2655 2656 adapter->rx_ring = kzalloc(size, GFP_KERNEL); 2657 if (!adapter->rx_ring) 2658 goto err; 2659 adapter->rx_ring->count = adapter->rx_ring_count; 2660 adapter->rx_ring->adapter = adapter; 2661 2662 return 0; 2663 err: 2664 e_err("Unable to allocate memory for queues\n"); 2665 kfree(adapter->rx_ring); 2666 kfree(adapter->tx_ring); 2667 return -ENOMEM; 2668 } 2669 2670 /** 2671 * e1000e_poll - NAPI Rx polling callback 2672 * @napi: struct associated with this polling callback 2673 * @weight: number of packets driver is allowed to process this poll 2674 **/ 2675 static int e1000e_poll(struct napi_struct *napi, int weight) 2676 { 2677 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, 2678 napi); 2679 struct e1000_hw *hw = &adapter->hw; 2680 struct net_device *poll_dev = adapter->netdev; 2681 int tx_cleaned = 1, work_done = 0; 2682 2683 adapter = netdev_priv(poll_dev); 2684 2685 if (!adapter->msix_entries || 2686 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2687 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); 2688 2689 adapter->clean_rx(adapter->rx_ring, &work_done, weight); 2690 2691 if (!tx_cleaned) 2692 work_done = weight; 2693 2694 /* If weight not fully consumed, exit the polling mode */ 2695 if (work_done < weight) { 2696 if (adapter->itr_setting & 3) 2697 e1000_set_itr(adapter); 2698 napi_complete_done(napi, work_done); 2699 if (!test_bit(__E1000_DOWN, &adapter->state)) { 2700 if (adapter->msix_entries) 2701 ew32(IMS, adapter->rx_ring->ims_val); 2702 else 2703 e1000_irq_enable(adapter); 2704 } 2705 } 2706 2707 return work_done; 2708 } 2709 2710 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 2711 __always_unused __be16 proto, u16 vid) 2712 { 2713 struct e1000_adapter *adapter = netdev_priv(netdev); 2714 struct e1000_hw *hw = &adapter->hw; 2715 u32 vfta, index; 2716 2717 /* don't update vlan cookie if already programmed */ 2718 if ((adapter->hw.mng_cookie.status & 2719 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2720 (vid == adapter->mng_vlan_id)) 2721 return 0; 2722 2723 /* add VID to filter table */ 2724 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2725 index = (vid >> 5) & 0x7F; 2726 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2727 vfta |= BIT((vid & 0x1F)); 2728 hw->mac.ops.write_vfta(hw, index, vfta); 2729 } 2730 2731 set_bit(vid, adapter->active_vlans); 2732 2733 return 0; 2734 } 2735 2736 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 2737 __always_unused __be16 proto, u16 vid) 2738 { 2739 struct e1000_adapter *adapter = netdev_priv(netdev); 2740 struct e1000_hw *hw = &adapter->hw; 2741 u32 
vfta, index; 2742 2743 if ((adapter->hw.mng_cookie.status & 2744 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2745 (vid == adapter->mng_vlan_id)) { 2746 /* release control to f/w */ 2747 e1000e_release_hw_control(adapter); 2748 return 0; 2749 } 2750 2751 /* remove VID from filter table */ 2752 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2753 index = (vid >> 5) & 0x7F; 2754 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2755 vfta &= ~BIT((vid & 0x1F)); 2756 hw->mac.ops.write_vfta(hw, index, vfta); 2757 } 2758 2759 clear_bit(vid, adapter->active_vlans); 2760 2761 return 0; 2762 } 2763 2764 /** 2765 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering 2766 * @adapter: board private structure to initialize 2767 **/ 2768 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) 2769 { 2770 struct net_device *netdev = adapter->netdev; 2771 struct e1000_hw *hw = &adapter->hw; 2772 u32 rctl; 2773 2774 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2775 /* disable VLAN receive filtering */ 2776 rctl = er32(RCTL); 2777 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); 2778 ew32(RCTL, rctl); 2779 2780 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { 2781 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 2782 adapter->mng_vlan_id); 2783 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2784 } 2785 } 2786 } 2787 2788 /** 2789 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering 2790 * @adapter: board private structure to initialize 2791 **/ 2792 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) 2793 { 2794 struct e1000_hw *hw = &adapter->hw; 2795 u32 rctl; 2796 2797 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2798 /* enable VLAN receive filtering */ 2799 rctl = er32(RCTL); 2800 rctl |= E1000_RCTL_VFE; 2801 rctl &= ~E1000_RCTL_CFIEN; 2802 ew32(RCTL, rctl); 2803 } 2804 } 2805 2806 /** 2807 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping 2808 * @adapter: board private structure to initialize 2809 **/ 2810 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) 2811 { 2812 struct e1000_hw *hw = &adapter->hw; 2813 u32 ctrl; 2814 2815 /* disable VLAN tag insert/strip */ 2816 ctrl = er32(CTRL); 2817 ctrl &= ~E1000_CTRL_VME; 2818 ew32(CTRL, ctrl); 2819 } 2820 2821 /** 2822 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping 2823 * @adapter: board private structure to initialize 2824 **/ 2825 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) 2826 { 2827 struct e1000_hw *hw = &adapter->hw; 2828 u32 ctrl; 2829 2830 /* enable VLAN tag insert/strip */ 2831 ctrl = er32(CTRL); 2832 ctrl |= E1000_CTRL_VME; 2833 ew32(CTRL, ctrl); 2834 } 2835 2836 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 2837 { 2838 struct net_device *netdev = adapter->netdev; 2839 u16 vid = adapter->hw.mng_cookie.vlan_id; 2840 u16 old_vid = adapter->mng_vlan_id; 2841 2842 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 2843 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 2844 adapter->mng_vlan_id = vid; 2845 } 2846 2847 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) 2848 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); 2849 } 2850 2851 static void e1000_restore_vlan(struct e1000_adapter *adapter) 2852 { 2853 u16 vid; 2854 2855 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); 2856 2857 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2858 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 2859 } 2860 2861 static 
void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2862 { 2863 struct e1000_hw *hw = &adapter->hw; 2864 u32 manc, manc2h, mdef, i, j; 2865 2866 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) 2867 return; 2868 2869 manc = er32(MANC); 2870 2871 /* enable receiving management packets to the host. this will probably 2872 * generate destination unreachable messages from the host OS, but 2873 * the packets will be handled on SMBUS 2874 */ 2875 manc |= E1000_MANC_EN_MNG2HOST; 2876 manc2h = er32(MANC2H); 2877 2878 switch (hw->mac.type) { 2879 default: 2880 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); 2881 break; 2882 case e1000_82574: 2883 case e1000_82583: 2884 /* Check if IPMI pass-through decision filter already exists; 2885 * if so, enable it. 2886 */ 2887 for (i = 0, j = 0; i < 8; i++) { 2888 mdef = er32(MDEF(i)); 2889 2890 /* Ignore filters with anything other than IPMI ports */ 2891 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2892 continue; 2893 2894 /* Enable this decision filter in MANC2H */ 2895 if (mdef) 2896 manc2h |= BIT(i); 2897 2898 j |= mdef; 2899 } 2900 2901 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2902 break; 2903 2904 /* Create new decision filter in an empty filter */ 2905 for (i = 0, j = 0; i < 8; i++) 2906 if (er32(MDEF(i)) == 0) { 2907 ew32(MDEF(i), (E1000_MDEF_PORT_623 | 2908 E1000_MDEF_PORT_664)); 2909 manc2h |= BIT(1); 2910 j++; 2911 break; 2912 } 2913 2914 if (!j) 2915 e_warn("Unable to create IPMI pass-through filter\n"); 2916 break; 2917 } 2918 2919 ew32(MANC2H, manc2h); 2920 ew32(MANC, manc); 2921 } 2922 2923 /** 2924 * e1000_configure_tx - Configure Transmit Unit after Reset 2925 * @adapter: board private structure 2926 * 2927 * Configure the Tx unit of the MAC after a reset. 
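 * This programs the descriptor ring base, length, head and tail registers
 * (TDBAL/TDBAH, TDLEN, TDH, TDT), the Tx interrupt delay timers (TIDV and
 * TADV), TCTL, and the TARC/TXDCTL errata workarounds needed on some MAC
 * types.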
2928 **/ 2929 static void e1000_configure_tx(struct e1000_adapter *adapter) 2930 { 2931 struct e1000_hw *hw = &adapter->hw; 2932 struct e1000_ring *tx_ring = adapter->tx_ring; 2933 u64 tdba; 2934 u32 tdlen, tctl, tarc; 2935 2936 /* Setup the HW Tx Head and Tail descriptor pointers */ 2937 tdba = tx_ring->dma; 2938 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2939 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); 2940 ew32(TDBAH(0), (tdba >> 32)); 2941 ew32(TDLEN(0), tdlen); 2942 ew32(TDH(0), 0); 2943 ew32(TDT(0), 0); 2944 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); 2945 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); 2946 2947 writel(0, tx_ring->head); 2948 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 2949 e1000e_update_tdt_wa(tx_ring, 0); 2950 else 2951 writel(0, tx_ring->tail); 2952 2953 /* Set the Tx Interrupt Delay register */ 2954 ew32(TIDV, adapter->tx_int_delay); 2955 /* Tx irq moderation */ 2956 ew32(TADV, adapter->tx_abs_int_delay); 2957 2958 if (adapter->flags2 & FLAG2_DMA_BURST) { 2959 u32 txdctl = er32(TXDCTL(0)); 2960 2961 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2962 E1000_TXDCTL_WTHRESH); 2963 /* set up some performance related parameters to encourage the 2964 * hardware to use the bus more efficiently in bursts, depends 2965 * on the tx_int_delay to be enabled, 2966 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls 2967 * hthresh = 1 ==> prefetch when one or more available 2968 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2969 * BEWARE: this seems to work but should be considered first if 2970 * there are Tx hangs or other Tx related bugs 2971 */ 2972 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2973 ew32(TXDCTL(0), txdctl); 2974 } 2975 /* erratum work around: set txdctl the same for both queues */ 2976 ew32(TXDCTL(1), er32(TXDCTL(0))); 2977 2978 /* Program the Transmit Control Register */ 2979 tctl = er32(TCTL); 2980 tctl &= ~E1000_TCTL_CT; 2981 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 2982 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2983 2984 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2985 tarc = er32(TARC(0)); 2986 /* set the speed mode bit, we'll clear it if we're not at 2987 * gigabit link later 2988 */ 2989 #define SPEED_MODE_BIT BIT(21) 2990 tarc |= SPEED_MODE_BIT; 2991 ew32(TARC(0), tarc); 2992 } 2993 2994 /* errata: program both queues to unweighted RR */ 2995 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { 2996 tarc = er32(TARC(0)); 2997 tarc |= 1; 2998 ew32(TARC(0), tarc); 2999 tarc = er32(TARC(1)); 3000 tarc |= 1; 3001 ew32(TARC(1), tarc); 3002 } 3003 3004 /* Setup Transmit Descriptor Settings for eop descriptor */ 3005 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 3006 3007 /* only set IDE if we are delaying interrupts using the timers */ 3008 if (adapter->tx_int_delay) 3009 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 3010 3011 /* enable Report Status bit */ 3012 adapter->txd_cmd |= E1000_TXD_CMD_RS; 3013 3014 ew32(TCTL, tctl); 3015 3016 hw->mac.ops.config_collision_dist(hw); 3017 3018 /* SPT and KBL Si errata workaround to avoid data corruption */ 3019 if (hw->mac.type == e1000_pch_spt) { 3020 u32 reg_val; 3021 3022 reg_val = er32(IOSFPC); 3023 reg_val |= E1000_RCTL_RDMTS_HEX; 3024 ew32(IOSFPC, reg_val); 3025 3026 reg_val = er32(TARC(0)); 3027 /* SPT and KBL Si errata workaround to avoid Tx hang. 3028 * Dropping the number of outstanding requests from 3029 * 3 to 2 in order to avoid a buffer overrun. 
3030 */ 3031 reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; 3032 reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; 3033 ew32(TARC(0), reg_val); 3034 } 3035 } 3036 3037 /** 3038 * e1000_setup_rctl - configure the receive control registers 3039 * @adapter: Board private structure 3040 **/ 3041 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 3042 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 3043 static void e1000_setup_rctl(struct e1000_adapter *adapter) 3044 { 3045 struct e1000_hw *hw = &adapter->hw; 3046 u32 rctl, rfctl; 3047 u32 pages = 0; 3048 3049 /* Workaround Si errata on PCHx - configure jumbo frame flow. 3050 * If jumbo frames not set, program related MAC/PHY registers 3051 * to h/w defaults 3052 */ 3053 if (hw->mac.type >= e1000_pch2lan) { 3054 s32 ret_val; 3055 3056 if (adapter->netdev->mtu > ETH_DATA_LEN) 3057 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 3058 else 3059 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 3060 3061 if (ret_val) 3062 e_dbg("failed to enable|disable jumbo frame workaround mode\n"); 3063 } 3064 3065 /* Program MC offset vector base */ 3066 rctl = er32(RCTL); 3067 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3068 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 3069 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 3070 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3071 3072 /* Do not Store bad packets */ 3073 rctl &= ~E1000_RCTL_SBP; 3074 3075 /* Enable Long Packet receive */ 3076 if (adapter->netdev->mtu <= ETH_DATA_LEN) 3077 rctl &= ~E1000_RCTL_LPE; 3078 else 3079 rctl |= E1000_RCTL_LPE; 3080 3081 /* Some systems expect that the CRC is included in SMBUS traffic. The 3082 * hardware strips the CRC before sending to both SMBUS (BMC) and to 3083 * host memory when this is enabled 3084 */ 3085 if (adapter->flags2 & FLAG2_CRC_STRIPPING) 3086 rctl |= E1000_RCTL_SECRC; 3087 3088 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ 3089 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { 3090 u16 phy_data; 3091 3092 e1e_rphy(hw, PHY_REG(770, 26), &phy_data); 3093 phy_data &= 0xfff8; 3094 phy_data |= BIT(2); 3095 e1e_wphy(hw, PHY_REG(770, 26), phy_data); 3096 3097 e1e_rphy(hw, 22, &phy_data); 3098 phy_data &= 0x0fff; 3099 phy_data |= BIT(14); 3100 e1e_wphy(hw, 0x10, 0x2823); 3101 e1e_wphy(hw, 0x11, 0x0003); 3102 e1e_wphy(hw, 22, phy_data); 3103 } 3104 3105 /* Setup buffer sizes */ 3106 rctl &= ~E1000_RCTL_SZ_4096; 3107 rctl |= E1000_RCTL_BSEX; 3108 switch (adapter->rx_buffer_len) { 3109 case 2048: 3110 default: 3111 rctl |= E1000_RCTL_SZ_2048; 3112 rctl &= ~E1000_RCTL_BSEX; 3113 break; 3114 case 4096: 3115 rctl |= E1000_RCTL_SZ_4096; 3116 break; 3117 case 8192: 3118 rctl |= E1000_RCTL_SZ_8192; 3119 break; 3120 case 16384: 3121 rctl |= E1000_RCTL_SZ_16384; 3122 break; 3123 } 3124 3125 /* Enable Extended Status in all Receive Descriptors */ 3126 rfctl = er32(RFCTL); 3127 rfctl |= E1000_RFCTL_EXTEN; 3128 ew32(RFCTL, rfctl); 3129 3130 /* 82571 and greater support packet-split where the protocol 3131 * header is placed in skb->data and the packet data is 3132 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 3133 * In the case of a non-split, skb->data is linearly filled, 3134 * followed by the page buffers. Therefore, skb->data is 3135 * sized to hold the largest protocol header. 
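 * With packet split enabled, the first rx_ps_bsize0 bytes (the protocol
 * header) are placed in skb->data and the payload goes into up to
 * rx_ps_pages page-sized buffers.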
3136 * 3137 * allocations using alloc_page take too long for regular MTU 3138 * so only enable packet split for jumbo frames 3139 * 3140 * Using pages when the page size is greater than 16k wastes 3141 * a lot of memory, since we allocate 3 pages at all times 3142 * per packet. 3143 */ 3144 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 3145 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 3146 adapter->rx_ps_pages = pages; 3147 else 3148 adapter->rx_ps_pages = 0; 3149 3150 if (adapter->rx_ps_pages) { 3151 u32 psrctl = 0; 3152 3153 /* Enable Packet split descriptors */ 3154 rctl |= E1000_RCTL_DTYP_PS; 3155 3156 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; 3157 3158 switch (adapter->rx_ps_pages) { 3159 case 3: 3160 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; 3161 /* fall-through */ 3162 case 2: 3163 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; 3164 /* fall-through */ 3165 case 1: 3166 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; 3167 break; 3168 } 3169 3170 ew32(PSRCTL, psrctl); 3171 } 3172 3173 /* This is useful for sniffing bad packets. */ 3174 if (adapter->netdev->features & NETIF_F_RXALL) { 3175 /* UPE and MPE will be handled by normal PROMISC logic 3176 * in e1000e_set_rx_mode 3177 */ 3178 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3179 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3180 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3181 3182 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 3183 E1000_RCTL_DPF | /* Allow filtered pause */ 3184 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 3185 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 3186 * and that breaks VLANs. 3187 */ 3188 } 3189 3190 ew32(RCTL, rctl); 3191 /* just started the receive unit, no need to restart */ 3192 adapter->flags &= ~FLAG_RESTART_NOW; 3193 } 3194 3195 /** 3196 * e1000_configure_rx - Configure Receive Unit after Reset 3197 * @adapter: board private structure 3198 * 3199 * Configure the Rx unit of the MAC after a reset. 3200 **/ 3201 static void e1000_configure_rx(struct e1000_adapter *adapter) 3202 { 3203 struct e1000_hw *hw = &adapter->hw; 3204 struct e1000_ring *rx_ring = adapter->rx_ring; 3205 u64 rdba; 3206 u32 rdlen, rctl, rxcsum, ctrl_ext; 3207 3208 if (adapter->rx_ps_pages) { 3209 /* this is a 32 byte descriptor */ 3210 rdlen = rx_ring->count * 3211 sizeof(union e1000_rx_desc_packet_split); 3212 adapter->clean_rx = e1000_clean_rx_irq_ps; 3213 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 3214 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 3215 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3216 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 3217 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 3218 } else { 3219 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3220 adapter->clean_rx = e1000_clean_rx_irq; 3221 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 3222 } 3223 3224 /* disable receives while setting up the descriptors */ 3225 rctl = er32(RCTL); 3226 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3227 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3228 e1e_flush(); 3229 usleep_range(10000, 20000); 3230 3231 if (adapter->flags2 & FLAG2_DMA_BURST) { 3232 /* set the writeback threshold (only takes effect if the RDTR 3233 * is set). 
set GRAN=1 and write back up to 0x4 worth, and 3234 * enable prefetching of 0x20 Rx descriptors 3235 * granularity = 01 3236 * wthresh = 04, 3237 * hthresh = 04, 3238 * pthresh = 0x20 3239 */ 3240 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); 3241 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); 3242 } 3243 3244 /* set the Receive Delay Timer Register */ 3245 ew32(RDTR, adapter->rx_int_delay); 3246 3247 /* irq moderation */ 3248 ew32(RADV, adapter->rx_abs_int_delay); 3249 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) 3250 e1000e_write_itr(adapter, adapter->itr); 3251 3252 ctrl_ext = er32(CTRL_EXT); 3253 /* Auto-Mask interrupts upon ICR access */ 3254 ctrl_ext |= E1000_CTRL_EXT_IAME; 3255 ew32(IAM, 0xffffffff); 3256 ew32(CTRL_EXT, ctrl_ext); 3257 e1e_flush(); 3258 3259 /* Setup the HW Rx Head and Tail Descriptor Pointers and 3260 * the Base and Length of the Rx Descriptor Ring 3261 */ 3262 rdba = rx_ring->dma; 3263 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); 3264 ew32(RDBAH(0), (rdba >> 32)); 3265 ew32(RDLEN(0), rdlen); 3266 ew32(RDH(0), 0); 3267 ew32(RDT(0), 0); 3268 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); 3269 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); 3270 3271 writel(0, rx_ring->head); 3272 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 3273 e1000e_update_rdt_wa(rx_ring, 0); 3274 else 3275 writel(0, rx_ring->tail); 3276 3277 /* Enable Receive Checksum Offload for TCP and UDP */ 3278 rxcsum = er32(RXCSUM); 3279 if (adapter->netdev->features & NETIF_F_RXCSUM) 3280 rxcsum |= E1000_RXCSUM_TUOFL; 3281 else 3282 rxcsum &= ~E1000_RXCSUM_TUOFL; 3283 ew32(RXCSUM, rxcsum); 3284 3285 /* With jumbo frames, excessive C-state transition latencies result 3286 * in dropped transactions. 3287 */ 3288 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3289 u32 lat = 3290 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - 3291 adapter->max_frame_size) * 8 / 1000; 3292 3293 if (adapter->flags & FLAG_IS_ICH) { 3294 u32 rxdctl = er32(RXDCTL(0)); 3295 3296 ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8)); 3297 } 3298 3299 dev_info(&adapter->pdev->dev, 3300 "Some CPU C-states have been disabled in order to enable jumbo frames\n"); 3301 pm_qos_update_request(&adapter->pm_qos_req, lat); 3302 } else { 3303 pm_qos_update_request(&adapter->pm_qos_req, 3304 PM_QOS_DEFAULT_VALUE); 3305 } 3306 3307 /* Enable Receives */ 3308 ew32(RCTL, rctl); 3309 } 3310 3311 /** 3312 * e1000e_write_mc_addr_list - write multicast addresses to MTA 3313 * @netdev: network interface device structure 3314 * 3315 * Writes multicast address list to the MTA hash table. 3316 * Returns: -ENOMEM on failure 3317 * 0 on no addresses written 3318 * X on writing X addresses to MTA 3319 */ 3320 static int e1000e_write_mc_addr_list(struct net_device *netdev) 3321 { 3322 struct e1000_adapter *adapter = netdev_priv(netdev); 3323 struct e1000_hw *hw = &adapter->hw; 3324 struct netdev_hw_addr *ha; 3325 u8 *mta_list; 3326 int i; 3327 3328 if (netdev_mc_empty(netdev)) { 3329 /* nothing to program, so clear mc list */ 3330 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); 3331 return 0; 3332 } 3333 3334 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); 3335 if (!mta_list) 3336 return -ENOMEM; 3337 3338 /* update_mc_addr_list expects a packed array of only addresses. 
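 * The array built here is netdev_mc_count(netdev) entries of ETH_ALEN bytes
 * each, with no gaps between consecutive addresses.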
*/ 3339 i = 0; 3340 netdev_for_each_mc_addr(ha, netdev) 3341 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3342 3343 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); 3344 kfree(mta_list); 3345 3346 return netdev_mc_count(netdev); 3347 } 3348 3349 /** 3350 * e1000e_write_uc_addr_list - write unicast addresses to RAR table 3351 * @netdev: network interface device structure 3352 * 3353 * Writes unicast address list to the RAR table. 3354 * Returns: -ENOMEM on failure/insufficient address space 3355 * 0 on no addresses written 3356 * X on writing X addresses to the RAR table 3357 **/ 3358 static int e1000e_write_uc_addr_list(struct net_device *netdev) 3359 { 3360 struct e1000_adapter *adapter = netdev_priv(netdev); 3361 struct e1000_hw *hw = &adapter->hw; 3362 unsigned int rar_entries; 3363 int count = 0; 3364 3365 rar_entries = hw->mac.ops.rar_get_count(hw); 3366 3367 /* save a rar entry for our hardware address */ 3368 rar_entries--; 3369 3370 /* save a rar entry for the LAA workaround */ 3371 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) 3372 rar_entries--; 3373 3374 /* return ENOMEM indicating insufficient memory for addresses */ 3375 if (netdev_uc_count(netdev) > rar_entries) 3376 return -ENOMEM; 3377 3378 if (!netdev_uc_empty(netdev) && rar_entries) { 3379 struct netdev_hw_addr *ha; 3380 3381 /* write the addresses in reverse order to avoid write 3382 * combining 3383 */ 3384 netdev_for_each_uc_addr(ha, netdev) { 3385 int ret_val; 3386 3387 if (!rar_entries) 3388 break; 3389 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); 3390 if (ret_val < 0) 3391 return -ENOMEM; 3392 count++; 3393 } 3394 } 3395 3396 /* zero out the remaining RAR entries not used above */ 3397 for (; rar_entries > 0; rar_entries--) { 3398 ew32(RAH(rar_entries), 0); 3399 ew32(RAL(rar_entries), 0); 3400 } 3401 e1e_flush(); 3402 3403 return count; 3404 } 3405 3406 /** 3407 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set 3408 * @netdev: network interface device structure 3409 * 3410 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast 3411 * address list or the network interface flags are updated. This routine is 3412 * responsible for configuring the hardware for proper unicast, multicast, 3413 * promiscuous mode, and all-multi behavior. 
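 * Promiscuous mode also disables hardware VLAN filtering. If the multicast
 * list cannot be written to the MTA, all-multicast is enabled instead, and
 * unicast promiscuous mode is turned on when more unicast addresses are
 * configured than there are free RAR entries.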
3414 **/ 3415 static void e1000e_set_rx_mode(struct net_device *netdev) 3416 { 3417 struct e1000_adapter *adapter = netdev_priv(netdev); 3418 struct e1000_hw *hw = &adapter->hw; 3419 u32 rctl; 3420 3421 if (pm_runtime_suspended(netdev->dev.parent)) 3422 return; 3423 3424 /* Check for Promiscuous and All Multicast modes */ 3425 rctl = er32(RCTL); 3426 3427 /* clear the affected bits */ 3428 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); 3429 3430 if (netdev->flags & IFF_PROMISC) { 3431 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3432 /* Do not hardware filter VLANs in promisc mode */ 3433 e1000e_vlan_filter_disable(adapter); 3434 } else { 3435 int count; 3436 3437 if (netdev->flags & IFF_ALLMULTI) { 3438 rctl |= E1000_RCTL_MPE; 3439 } else { 3440 /* Write addresses to the MTA, if the attempt fails 3441 * then we should just turn on promiscuous mode so 3442 * that we can at least receive multicast traffic 3443 */ 3444 count = e1000e_write_mc_addr_list(netdev); 3445 if (count < 0) 3446 rctl |= E1000_RCTL_MPE; 3447 } 3448 e1000e_vlan_filter_enable(adapter); 3449 /* Write addresses to available RAR registers, if there is not 3450 * sufficient space to store all the addresses then enable 3451 * unicast promiscuous mode 3452 */ 3453 count = e1000e_write_uc_addr_list(netdev); 3454 if (count < 0) 3455 rctl |= E1000_RCTL_UPE; 3456 } 3457 3458 ew32(RCTL, rctl); 3459 3460 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 3461 e1000e_vlan_strip_enable(adapter); 3462 else 3463 e1000e_vlan_strip_disable(adapter); 3464 } 3465 3466 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) 3467 { 3468 struct e1000_hw *hw = &adapter->hw; 3469 u32 mrqc, rxcsum; 3470 u32 rss_key[10]; 3471 int i; 3472 3473 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 3474 for (i = 0; i < 10; i++) 3475 ew32(RSSRK(i), rss_key[i]); 3476 3477 /* Direct all traffic to queue 0 */ 3478 for (i = 0; i < 32; i++) 3479 ew32(RETA(i), 0); 3480 3481 /* Disable raw packet checksumming so that RSS hash is placed in 3482 * descriptor on writeback. 3483 */ 3484 rxcsum = er32(RXCSUM); 3485 rxcsum |= E1000_RXCSUM_PCSD; 3486 3487 ew32(RXCSUM, rxcsum); 3488 3489 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | 3490 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3491 E1000_MRQC_RSS_FIELD_IPV6 | 3492 E1000_MRQC_RSS_FIELD_IPV6_TCP | 3493 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 3494 3495 ew32(MRQC, mrqc); 3496 } 3497 3498 /** 3499 * e1000e_get_base_timinca - get default SYSTIM time increment attributes 3500 * @adapter: board private structure 3501 * @timinca: pointer to returned time increment attributes 3502 * 3503 * Get attributes for incrementing the System Time Register SYSTIML/H at 3504 * the default base frequency, and set the cyclecounter shift value. 
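 * The increment period is placed in the upper bits of TIMINCA
 * (E1000_TIMINCA_INCPERIOD_SHIFT) and the shifted increment value in the
 * lower bits; -EINVAL is returned when no supported clock frequency can be
 * determined for the MAC type.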
3505 **/ 3506 s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) 3507 { 3508 struct e1000_hw *hw = &adapter->hw; 3509 u32 incvalue, incperiod, shift; 3510 3511 /* Make sure clock is enabled on I217/I218/I219 before checking 3512 * the frequency 3513 */ 3514 if ((hw->mac.type >= e1000_pch_lpt) && 3515 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && 3516 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { 3517 u32 fextnvm7 = er32(FEXTNVM7); 3518 3519 if (!(fextnvm7 & BIT(0))) { 3520 ew32(FEXTNVM7, fextnvm7 | BIT(0)); 3521 e1e_flush(); 3522 } 3523 } 3524 3525 switch (hw->mac.type) { 3526 case e1000_pch2lan: 3527 /* Stable 96MHz frequency */ 3528 incperiod = INCPERIOD_96MHZ; 3529 incvalue = INCVALUE_96MHZ; 3530 shift = INCVALUE_SHIFT_96MHZ; 3531 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; 3532 break; 3533 case e1000_pch_lpt: 3534 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3535 /* Stable 96MHz frequency */ 3536 incperiod = INCPERIOD_96MHZ; 3537 incvalue = INCVALUE_96MHZ; 3538 shift = INCVALUE_SHIFT_96MHZ; 3539 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; 3540 } else { 3541 /* Stable 25MHz frequency */ 3542 incperiod = INCPERIOD_25MHZ; 3543 incvalue = INCVALUE_25MHZ; 3544 shift = INCVALUE_SHIFT_25MHZ; 3545 adapter->cc.shift = shift; 3546 } 3547 break; 3548 case e1000_pch_spt: 3549 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3550 /* Stable 24MHz frequency */ 3551 incperiod = INCPERIOD_24MHZ; 3552 incvalue = INCVALUE_24MHZ; 3553 shift = INCVALUE_SHIFT_24MHZ; 3554 adapter->cc.shift = shift; 3555 break; 3556 } 3557 return -EINVAL; 3558 case e1000_pch_cnp: 3559 if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { 3560 /* Stable 24MHz frequency */ 3561 incperiod = INCPERIOD_24MHZ; 3562 incvalue = INCVALUE_24MHZ; 3563 shift = INCVALUE_SHIFT_24MHZ; 3564 adapter->cc.shift = shift; 3565 } else { 3566 /* Stable 38400KHz frequency */ 3567 incperiod = INCPERIOD_38400KHZ; 3568 incvalue = INCVALUE_38400KHZ; 3569 shift = INCVALUE_SHIFT_38400KHZ; 3570 adapter->cc.shift = shift; 3571 } 3572 break; 3573 case e1000_82574: 3574 case e1000_82583: 3575 /* Stable 25MHz frequency */ 3576 incperiod = INCPERIOD_25MHZ; 3577 incvalue = INCVALUE_25MHZ; 3578 shift = INCVALUE_SHIFT_25MHZ; 3579 adapter->cc.shift = shift; 3580 break; 3581 default: 3582 return -EINVAL; 3583 } 3584 3585 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | 3586 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); 3587 3588 return 0; 3589 } 3590 3591 /** 3592 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable 3593 * @adapter: board private structure 3594 * 3595 * Outgoing time stamping can be enabled and disabled. Play nice and 3596 * disable it when requested, although it shouldn't cause any overhead 3597 * when no packet needs it. At most one packet in the queue may be 3598 * marked for time stamping, otherwise it would be impossible to tell 3599 * for sure to which packet the hardware time stamp belongs. 3600 * 3601 * Incoming time stamping has to be configured via the hardware filters. 3602 * Not all combinations are supported, in particular event type has to be 3603 * specified. Matching the kind of event packet is not supported, with the 3604 * exception of "all V2 events regardless of level 2 or 4". 
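 * Returns -ERANGE for filter modes the hardware cannot provide. Where only
 * a broader filter is available, config->rx_filter is updated (e.g. to
 * HWTSTAMP_FILTER_PTP_V2_EVENT or HWTSTAMP_FILTER_ALL) so the caller can see
 * which mode was actually programmed.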
3605 **/ 3606 static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, 3607 struct hwtstamp_config *config) 3608 { 3609 struct e1000_hw *hw = &adapter->hw; 3610 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; 3611 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 3612 u32 rxmtrl = 0; 3613 u16 rxudp = 0; 3614 bool is_l4 = false; 3615 bool is_l2 = false; 3616 u32 regval; 3617 3618 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) 3619 return -EINVAL; 3620 3621 /* flags reserved for future extensions - must be zero */ 3622 if (config->flags) 3623 return -EINVAL; 3624 3625 switch (config->tx_type) { 3626 case HWTSTAMP_TX_OFF: 3627 tsync_tx_ctl = 0; 3628 break; 3629 case HWTSTAMP_TX_ON: 3630 break; 3631 default: 3632 return -ERANGE; 3633 } 3634 3635 switch (config->rx_filter) { 3636 case HWTSTAMP_FILTER_NONE: 3637 tsync_rx_ctl = 0; 3638 break; 3639 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 3640 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; 3641 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; 3642 is_l4 = true; 3643 break; 3644 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 3645 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; 3646 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; 3647 is_l4 = true; 3648 break; 3649 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 3650 /* Also time stamps V2 L2 Path Delay Request/Response */ 3651 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; 3652 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; 3653 is_l2 = true; 3654 break; 3655 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 3656 /* Also time stamps V2 L2 Path Delay Request/Response. */ 3657 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; 3658 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; 3659 is_l2 = true; 3660 break; 3661 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 3662 /* Hardware cannot filter just V2 L4 Sync messages; 3663 * fall-through to V2 (both L2 and L4) Sync. 3664 */ 3665 case HWTSTAMP_FILTER_PTP_V2_SYNC: 3666 /* Also time stamps V2 Path Delay Request/Response. */ 3667 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 3668 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; 3669 is_l2 = true; 3670 is_l4 = true; 3671 break; 3672 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 3673 /* Hardware cannot filter just V2 L4 Delay Request messages; 3674 * fall-through to V2 (both L2 and L4) Delay Request. 3675 */ 3676 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 3677 /* Also time stamps V2 Path Delay Request/Response. */ 3678 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 3679 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; 3680 is_l2 = true; 3681 is_l4 = true; 3682 break; 3683 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 3684 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 3685 /* Hardware cannot filter just V2 L4 or L2 Event messages; 3686 * fall-through to all V2 (both L2 and L4) Events. 3687 */ 3688 case HWTSTAMP_FILTER_PTP_V2_EVENT: 3689 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; 3690 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 3691 is_l2 = true; 3692 is_l4 = true; 3693 break; 3694 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 3695 /* For V1, the hardware can only filter Sync messages or 3696 * Delay Request messages but not both so fall-through to 3697 * time stamp all packets. 
3698 */ 3699 case HWTSTAMP_FILTER_NTP_ALL: 3700 case HWTSTAMP_FILTER_ALL: 3701 is_l2 = true; 3702 is_l4 = true; 3703 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; 3704 config->rx_filter = HWTSTAMP_FILTER_ALL; 3705 break; 3706 default: 3707 return -ERANGE; 3708 } 3709 3710 adapter->hwtstamp_config = *config; 3711 3712 /* enable/disable Tx h/w time stamping */ 3713 regval = er32(TSYNCTXCTL); 3714 regval &= ~E1000_TSYNCTXCTL_ENABLED; 3715 regval |= tsync_tx_ctl; 3716 ew32(TSYNCTXCTL, regval); 3717 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != 3718 (regval & E1000_TSYNCTXCTL_ENABLED)) { 3719 e_err("Timesync Tx Control register not set as expected\n"); 3720 return -EAGAIN; 3721 } 3722 3723 /* enable/disable Rx h/w time stamping */ 3724 regval = er32(TSYNCRXCTL); 3725 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); 3726 regval |= tsync_rx_ctl; 3727 ew32(TSYNCRXCTL, regval); 3728 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | 3729 E1000_TSYNCRXCTL_TYPE_MASK)) != 3730 (regval & (E1000_TSYNCRXCTL_ENABLED | 3731 E1000_TSYNCRXCTL_TYPE_MASK))) { 3732 e_err("Timesync Rx Control register not set as expected\n"); 3733 return -EAGAIN; 3734 } 3735 3736 /* L2: define ethertype filter for time stamped packets */ 3737 if (is_l2) 3738 rxmtrl |= ETH_P_1588; 3739 3740 /* define which PTP packets get time stamped */ 3741 ew32(RXMTRL, rxmtrl); 3742 3743 /* Filter by destination port */ 3744 if (is_l4) { 3745 rxudp = PTP_EV_PORT; 3746 cpu_to_be16s(&rxudp); 3747 } 3748 ew32(RXUDP, rxudp); 3749 3750 e1e_flush(); 3751 3752 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ 3753 er32(RXSTMPH); 3754 er32(TXSTMPH); 3755 3756 return 0; 3757 } 3758 3759 /** 3760 * e1000_configure - configure the hardware for Rx and Tx 3761 * @adapter: private board structure 3762 **/ 3763 static void e1000_configure(struct e1000_adapter *adapter) 3764 { 3765 struct e1000_ring *rx_ring = adapter->rx_ring; 3766 3767 e1000e_set_rx_mode(adapter->netdev); 3768 3769 e1000_restore_vlan(adapter); 3770 e1000_init_manageability_pt(adapter); 3771 3772 e1000_configure_tx(adapter); 3773 3774 if (adapter->netdev->features & NETIF_F_RXHASH) 3775 e1000e_setup_rss_hash(adapter); 3776 e1000_setup_rctl(adapter); 3777 e1000_configure_rx(adapter); 3778 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); 3779 } 3780 3781 /** 3782 * e1000e_power_up_phy - restore link in case the phy was powered down 3783 * @adapter: address of board private structure 3784 * 3785 * The phy may be powered down to save power and turn off link when the 3786 * driver is unloaded and wake on lan is not enabled (among others) 3787 * *** this routine MUST be followed by a call to e1000e_reset *** 3788 **/ 3789 void e1000e_power_up_phy(struct e1000_adapter *adapter) 3790 { 3791 if (adapter->hw.phy.ops.power_up) 3792 adapter->hw.phy.ops.power_up(&adapter->hw); 3793 3794 adapter->hw.mac.ops.setup_link(&adapter->hw); 3795 } 3796 3797 /** 3798 * e1000_power_down_phy - Power down the PHY 3799 * 3800 * Power down the PHY so no link is implied when interface is down. 3801 * The PHY cannot be powered down if management or WoL is active. 3802 */ 3803 static void e1000_power_down_phy(struct e1000_adapter *adapter) 3804 { 3805 if (adapter->hw.phy.ops.power_down) 3806 adapter->hw.phy.ops.power_down(&adapter->hw); 3807 } 3808 3809 /** 3810 * e1000_flush_tx_ring - remove all descriptors from the tx_ring 3811 * 3812 * We want to clear all pending descriptors from the TX ring. 3813 * zeroing happens when the HW reads the regs. 
We assign the ring itself as 3814 * the data of the next descriptor. We don't care about the data; we are about 3815 * to reset the HW. 3816 */ 3817 static void e1000_flush_tx_ring(struct e1000_adapter *adapter) 3818 { 3819 struct e1000_hw *hw = &adapter->hw; 3820 struct e1000_ring *tx_ring = adapter->tx_ring; 3821 struct e1000_tx_desc *tx_desc = NULL; 3822 u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; 3823 u16 size = 512; 3824 3825 tctl = er32(TCTL); 3826 ew32(TCTL, tctl | E1000_TCTL_EN); 3827 tdt = er32(TDT(0)); 3828 BUG_ON(tdt != tx_ring->next_to_use); 3829 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); 3830 tx_desc->buffer_addr = tx_ring->dma; 3831 3832 tx_desc->lower.data = cpu_to_le32(txd_lower | size); 3833 tx_desc->upper.data = 0; 3834 /* flush descriptors to memory before notifying the HW */ 3835 wmb(); 3836 tx_ring->next_to_use++; 3837 if (tx_ring->next_to_use == tx_ring->count) 3838 tx_ring->next_to_use = 0; 3839 ew32(TDT(0), tx_ring->next_to_use); 3840 mmiowb(); 3841 usleep_range(200, 250); 3842 } 3843 3844 /** 3845 * e1000_flush_rx_ring - remove all descriptors from the rx_ring 3846 * 3847 * Mark all descriptors in the RX ring as consumed and disable the rx ring 3848 */ 3849 static void e1000_flush_rx_ring(struct e1000_adapter *adapter) 3850 { 3851 u32 rctl, rxdctl; 3852 struct e1000_hw *hw = &adapter->hw; 3853 3854 rctl = er32(RCTL); 3855 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3856 e1e_flush(); 3857 usleep_range(100, 150); 3858 3859 rxdctl = er32(RXDCTL(0)); 3860 /* zero the lower 14 bits (prefetch and host thresholds) */ 3861 rxdctl &= 0xffffc000; 3862 3863 /* update thresholds: prefetch threshold to 31, host threshold to 1 3864 * and make sure the granularity is "descriptors" and not "cache lines" 3865 */ 3866 rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC); 3867 3868 ew32(RXDCTL(0), rxdctl); 3869 /* momentarily enable the RX ring for the changes to take effect */ 3870 ew32(RCTL, rctl | E1000_RCTL_EN); 3871 e1e_flush(); 3872 usleep_range(100, 150); 3873 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3874 } 3875 3876 /** 3877 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings 3878 * 3879 * In i219, the descriptor rings must be emptied before resetting the HW 3880 * or before changing the device state to D3 during runtime (runtime PM).
3881 * 3882 * Failure to do this will cause the HW to enter a unit hang state which can 3883 * only be released by PCI reset on the device 3884 * 3885 */ 3886 3887 static void e1000_flush_desc_rings(struct e1000_adapter *adapter) 3888 { 3889 u16 hang_state; 3890 u32 fext_nvm11, tdlen; 3891 struct e1000_hw *hw = &adapter->hw; 3892 3893 /* First, disable MULR fix in FEXTNVM11 */ 3894 fext_nvm11 = er32(FEXTNVM11); 3895 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; 3896 ew32(FEXTNVM11, fext_nvm11); 3897 /* do nothing if we're not in faulty state, or if the queue is empty */ 3898 tdlen = er32(TDLEN(0)); 3899 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, 3900 &hang_state); 3901 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) 3902 return; 3903 e1000_flush_tx_ring(adapter); 3904 /* recheck, maybe the fault is caused by the rx ring */ 3905 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, 3906 &hang_state); 3907 if (hang_state & FLUSH_DESC_REQUIRED) 3908 e1000_flush_rx_ring(adapter); 3909 } 3910 3911 /** 3912 * e1000e_systim_reset - reset the timesync registers after a hardware reset 3913 * @adapter: board private structure 3914 * 3915 * When the MAC is reset, all hardware bits for timesync will be reset to the 3916 * default values. This function will restore the settings last in place. 3917 * Since the clock SYSTIME registers are reset, we will simply restore the 3918 * cyclecounter to the kernel real clock time. 3919 **/ 3920 static void e1000e_systim_reset(struct e1000_adapter *adapter) 3921 { 3922 struct ptp_clock_info *info = &adapter->ptp_clock_info; 3923 struct e1000_hw *hw = &adapter->hw; 3924 unsigned long flags; 3925 u32 timinca; 3926 s32 ret_val; 3927 3928 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) 3929 return; 3930 3931 if (info->adjfreq) { 3932 /* restore the previous ptp frequency delta */ 3933 ret_val = info->adjfreq(info, adapter->ptp_delta); 3934 } else { 3935 /* set the default base frequency if no adjustment possible */ 3936 ret_val = e1000e_get_base_timinca(adapter, &timinca); 3937 if (!ret_val) 3938 ew32(TIMINCA, timinca); 3939 } 3940 3941 if (ret_val) { 3942 dev_warn(&adapter->pdev->dev, 3943 "Failed to restore TIMINCA clock rate delta: %d\n", 3944 ret_val); 3945 return; 3946 } 3947 3948 /* reset the systim ns time counter */ 3949 spin_lock_irqsave(&adapter->systim_lock, flags); 3950 timecounter_init(&adapter->tc, &adapter->cc, 3951 ktime_to_ns(ktime_get_real())); 3952 spin_unlock_irqrestore(&adapter->systim_lock, flags); 3953 3954 /* restore the previous hwtstamp configuration settings */ 3955 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); 3956 } 3957 3958 /** 3959 * e1000e_reset - bring the hardware into a known good state 3960 * 3961 * This function boots the hardware and enables some settings that 3962 * require a configuration cycle of the hardware - those cannot be 3963 * set/changed during runtime. After reset the device needs to be 3964 * properly configured for Rx, Tx etc. 
3965 */ 3966 void e1000e_reset(struct e1000_adapter *adapter) 3967 { 3968 struct e1000_mac_info *mac = &adapter->hw.mac; 3969 struct e1000_fc_info *fc = &adapter->hw.fc; 3970 struct e1000_hw *hw = &adapter->hw; 3971 u32 tx_space, min_tx_space, min_rx_space; 3972 u32 pba = adapter->pba; 3973 u16 hwm; 3974 3975 /* reset Packet Buffer Allocation to default */ 3976 ew32(PBA, pba); 3977 3978 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { 3979 /* To maintain wire speed transmits, the Tx FIFO should be 3980 * large enough to accommodate two full transmit packets, 3981 * rounded up to the next 1KB and expressed in KB. Likewise, 3982 * the Rx FIFO should be large enough to accommodate at least 3983 * one full receive packet and is similarly rounded up and 3984 * expressed in KB. 3985 */ 3986 pba = er32(PBA); 3987 /* upper 16 bits has Tx packet buffer allocation size in KB */ 3988 tx_space = pba >> 16; 3989 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3990 pba &= 0xffff; 3991 /* the Tx fifo also stores 16 bytes of information about the Tx 3992 * but don't include ethernet FCS because hardware appends it 3993 */ 3994 min_tx_space = (adapter->max_frame_size + 3995 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; 3996 min_tx_space = ALIGN(min_tx_space, 1024); 3997 min_tx_space >>= 10; 3998 /* software strips receive CRC, so leave room for it */ 3999 min_rx_space = adapter->max_frame_size; 4000 min_rx_space = ALIGN(min_rx_space, 1024); 4001 min_rx_space >>= 10; 4002 4003 /* If current Tx allocation is less than the min Tx FIFO size, 4004 * and the min Tx FIFO size is less than the current Rx FIFO 4005 * allocation, take space away from current Rx allocation 4006 */ 4007 if ((tx_space < min_tx_space) && 4008 ((min_tx_space - tx_space) < pba)) { 4009 pba -= min_tx_space - tx_space; 4010 4011 /* if short on Rx space, Rx wins and must trump Tx 4012 * adjustment 4013 */ 4014 if (pba < min_rx_space) 4015 pba = min_rx_space; 4016 } 4017 4018 ew32(PBA, pba); 4019 } 4020 4021 /* flow control settings 4022 * 4023 * The high water mark must be low enough to fit one full frame 4024 * (or the size used for early receive) above it in the Rx FIFO. 4025 * Set it to the lower of: 4026 * - 90% of the Rx FIFO size, and 4027 * - the full Rx FIFO size minus one full frame 4028 */ 4029 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 4030 fc->pause_time = 0xFFFF; 4031 else 4032 fc->pause_time = E1000_FC_PAUSE_TIME; 4033 fc->send_xon = true; 4034 fc->current_mode = fc->requested_mode; 4035 4036 switch (hw->mac.type) { 4037 case e1000_ich9lan: 4038 case e1000_ich10lan: 4039 if (adapter->netdev->mtu > ETH_DATA_LEN) { 4040 pba = 14; 4041 ew32(PBA, pba); 4042 fc->high_water = 0x2800; 4043 fc->low_water = fc->high_water - 8; 4044 break; 4045 } 4046 /* fall-through */ 4047 default: 4048 hwm = min(((pba << 10) * 9 / 10), 4049 ((pba << 10) - adapter->max_frame_size)); 4050 4051 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 4052 fc->low_water = fc->high_water - 8; 4053 break; 4054 case e1000_pchlan: 4055 /* Workaround PCH LOM adapter hangs with certain network 4056 * loads. If hangs persist, try disabling Tx flow control. 
4057 */ 4058 if (adapter->netdev->mtu > ETH_DATA_LEN) { 4059 fc->high_water = 0x3500; 4060 fc->low_water = 0x1500; 4061 } else { 4062 fc->high_water = 0x5000; 4063 fc->low_water = 0x3000; 4064 } 4065 fc->refresh_time = 0x1000; 4066 break; 4067 case e1000_pch2lan: 4068 case e1000_pch_lpt: 4069 case e1000_pch_spt: 4070 case e1000_pch_cnp: 4071 fc->refresh_time = 0x0400; 4072 4073 if (adapter->netdev->mtu <= ETH_DATA_LEN) { 4074 fc->high_water = 0x05C20; 4075 fc->low_water = 0x05048; 4076 fc->pause_time = 0x0650; 4077 break; 4078 } 4079 4080 pba = 14; 4081 ew32(PBA, pba); 4082 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; 4083 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; 4084 break; 4085 } 4086 4087 /* Alignment of Tx data is on an arbitrary byte boundary with the 4088 * maximum size per Tx descriptor limited only to the transmit 4089 * allocation of the packet buffer minus 96 bytes with an upper 4090 * limit of 24KB due to receive synchronization limitations. 4091 */ 4092 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, 4093 24 << 10); 4094 4095 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot 4096 * fit in receive buffer. 4097 */ 4098 if (adapter->itr_setting & 0x3) { 4099 if ((adapter->max_frame_size * 2) > (pba << 10)) { 4100 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 4101 dev_info(&adapter->pdev->dev, 4102 "Interrupt Throttle Rate off\n"); 4103 adapter->flags2 |= FLAG2_DISABLE_AIM; 4104 e1000e_write_itr(adapter, 0); 4105 } 4106 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 4107 dev_info(&adapter->pdev->dev, 4108 "Interrupt Throttle Rate on\n"); 4109 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 4110 adapter->itr = 20000; 4111 e1000e_write_itr(adapter, adapter->itr); 4112 } 4113 } 4114 4115 if (hw->mac.type >= e1000_pch_spt) 4116 e1000_flush_desc_rings(adapter); 4117 /* Allow time for pending master requests to run */ 4118 mac->ops.reset_hw(hw); 4119 4120 /* For parts with AMT enabled, let the firmware know 4121 * that the network interface is in control 4122 */ 4123 if (adapter->flags & FLAG_HAS_AMT) 4124 e1000e_get_hw_control(adapter); 4125 4126 ew32(WUC, 0); 4127 4128 if (mac->ops.init_hw(hw)) 4129 e_err("Hardware Error\n"); 4130 4131 e1000_update_mng_vlan(adapter); 4132 4133 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 4134 ew32(VET, ETH_P_8021Q); 4135 4136 e1000e_reset_adaptive(hw); 4137 4138 /* restore systim and hwtstamp settings */ 4139 e1000e_systim_reset(adapter); 4140 4141 /* Set EEE advertisement as appropriate */ 4142 if (adapter->flags2 & FLAG2_HAS_EEE) { 4143 s32 ret_val; 4144 u16 adv_addr; 4145 4146 switch (hw->phy.type) { 4147 case e1000_phy_82579: 4148 adv_addr = I82579_EEE_ADVERTISEMENT; 4149 break; 4150 case e1000_phy_i217: 4151 adv_addr = I217_EEE_ADVERTISEMENT; 4152 break; 4153 default: 4154 dev_err(&adapter->pdev->dev, 4155 "Invalid PHY type setting EEE advertisement\n"); 4156 return; 4157 } 4158 4159 ret_val = hw->phy.ops.acquire(hw); 4160 if (ret_val) { 4161 dev_err(&adapter->pdev->dev, 4162 "EEE advertisement - unable to acquire PHY\n"); 4163 return; 4164 } 4165 4166 e1000_write_emi_reg_locked(hw, adv_addr, 4167 hw->dev_spec.ich8lan.eee_disable ? 
4168 0 : adapter->eee_advert); 4169 4170 hw->phy.ops.release(hw); 4171 } 4172 4173 if (!netif_running(adapter->netdev) && 4174 !test_bit(__E1000_TESTING, &adapter->state)) 4175 e1000_power_down_phy(adapter); 4176 4177 e1000_get_phy_info(hw); 4178 4179 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 4180 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { 4181 u16 phy_data = 0; 4182 /* speed up time to link by disabling smart power down, ignore 4183 * the return value of this function because there is nothing 4184 * different we would do if it failed 4185 */ 4186 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 4187 phy_data &= ~IGP02E1000_PM_SPD; 4188 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 4189 } 4190 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { 4191 u32 reg; 4192 4193 /* Fextnvm7 @ 0xe4[2] = 1 */ 4194 reg = er32(FEXTNVM7); 4195 reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE; 4196 ew32(FEXTNVM7, reg); 4197 /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ 4198 reg = er32(FEXTNVM9); 4199 reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS | 4200 E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS; 4201 ew32(FEXTNVM9, reg); 4202 } 4203 4204 } 4205 4206 /** 4207 * e1000e_trigger_lsc - trigger an LSC interrupt 4208 * @adapter: 4209 * 4210 * Fire a link status change interrupt to start the watchdog. 4211 **/ 4212 static void e1000e_trigger_lsc(struct e1000_adapter *adapter) 4213 { 4214 struct e1000_hw *hw = &adapter->hw; 4215 4216 if (adapter->msix_entries) 4217 ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER); 4218 else 4219 ew32(ICS, E1000_ICS_LSC); 4220 } 4221 4222 void e1000e_up(struct e1000_adapter *adapter) 4223 { 4224 /* hardware has been reset, we need to reload some things */ 4225 e1000_configure(adapter); 4226 4227 clear_bit(__E1000_DOWN, &adapter->state); 4228 4229 if (adapter->msix_entries) 4230 e1000_configure_msix(adapter); 4231 e1000_irq_enable(adapter); 4232 4233 netif_start_queue(adapter->netdev); 4234 4235 e1000e_trigger_lsc(adapter); 4236 } 4237 4238 static void e1000e_flush_descriptors(struct e1000_adapter *adapter) 4239 { 4240 struct e1000_hw *hw = &adapter->hw; 4241 4242 if (!(adapter->flags2 & FLAG2_DMA_BURST)) 4243 return; 4244 4245 /* flush pending descriptor writebacks to memory */ 4246 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 4247 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 4248 4249 /* execute the writes immediately */ 4250 e1e_flush(); 4251 4252 /* due to rare timing issues, write to TIDV/RDTR again to ensure the 4253 * write is successful 4254 */ 4255 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 4256 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 4257 4258 /* execute the writes immediately */ 4259 e1e_flush(); 4260 } 4261 4262 static void e1000e_update_stats(struct e1000_adapter *adapter); 4263 4264 /** 4265 * e1000e_down - quiesce the device and optionally reset the hardware 4266 * @adapter: board private structure 4267 * @reset: boolean flag to reset the hardware or not 4268 */ 4269 void e1000e_down(struct e1000_adapter *adapter, bool reset) 4270 { 4271 struct net_device *netdev = adapter->netdev; 4272 struct e1000_hw *hw = &adapter->hw; 4273 u32 tctl, rctl; 4274 4275 /* signal that we're down so the interrupt handler does not 4276 * reschedule our watchdog timer 4277 */ 4278 set_bit(__E1000_DOWN, &adapter->state); 4279 4280 netif_carrier_off(netdev); 4281 4282 /* disable receives in the hardware */ 4283 rctl = er32(RCTL); 4284 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 4285 ew32(RCTL, rctl & ~E1000_RCTL_EN); 4286 /* flush and sleep below */ 4287 4288 
netif_stop_queue(netdev); 4289 4290 /* disable transmits in the hardware */ 4291 tctl = er32(TCTL); 4292 tctl &= ~E1000_TCTL_EN; 4293 ew32(TCTL, tctl); 4294 4295 /* flush both disables and wait for them to finish */ 4296 e1e_flush(); 4297 usleep_range(10000, 20000); 4298 4299 e1000_irq_disable(adapter); 4300 4301 napi_synchronize(&adapter->napi); 4302 4303 del_timer_sync(&adapter->watchdog_timer); 4304 del_timer_sync(&adapter->phy_info_timer); 4305 4306 spin_lock(&adapter->stats64_lock); 4307 e1000e_update_stats(adapter); 4308 spin_unlock(&adapter->stats64_lock); 4309 4310 e1000e_flush_descriptors(adapter); 4311 4312 adapter->link_speed = 0; 4313 adapter->link_duplex = 0; 4314 4315 /* Disable Si errata workaround on PCHx for jumbo frame flow */ 4316 if ((hw->mac.type >= e1000_pch2lan) && 4317 (adapter->netdev->mtu > ETH_DATA_LEN) && 4318 e1000_lv_jumbo_workaround_ich8lan(hw, false)) 4319 e_dbg("failed to disable jumbo frame workaround mode\n"); 4320 4321 if (!pci_channel_offline(adapter->pdev)) { 4322 if (reset) 4323 e1000e_reset(adapter); 4324 else if (hw->mac.type >= e1000_pch_spt) 4325 e1000_flush_desc_rings(adapter); 4326 } 4327 e1000_clean_tx_ring(adapter->tx_ring); 4328 e1000_clean_rx_ring(adapter->rx_ring); 4329 } 4330 4331 void e1000e_reinit_locked(struct e1000_adapter *adapter) 4332 { 4333 might_sleep(); 4334 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4335 usleep_range(1000, 2000); 4336 e1000e_down(adapter, true); 4337 e1000e_up(adapter); 4338 clear_bit(__E1000_RESETTING, &adapter->state); 4339 } 4340 4341 /** 4342 * e1000e_sanitize_systim - sanitize raw cycle counter reads 4343 * @hw: pointer to the HW structure 4344 * @systim: time value read, sanitized and returned 4345 * 4346 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: 4347 * check to see that the time is incrementing at a reasonable 4348 * rate and is a multiple of incvalue. 4349 **/ 4350 static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) 4351 { 4352 u64 time_delta, rem, temp; 4353 u64 systim_next; 4354 u32 incvalue; 4355 int i; 4356 4357 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; 4358 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { 4359 /* latch SYSTIMH on read of SYSTIML */ 4360 systim_next = (u64)er32(SYSTIML); 4361 systim_next |= (u64)er32(SYSTIMH) << 32; 4362 4363 time_delta = systim_next - systim; 4364 temp = time_delta; 4365 /* VMWare users have seen incvalue of zero, don't div / 0 */ 4366 rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); 4367 4368 systim = systim_next; 4369 4370 if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) 4371 break; 4372 } 4373 4374 return systim; 4375 } 4376 4377 /** 4378 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) 4379 * @cc: cyclecounter structure 4380 **/ 4381 static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) 4382 { 4383 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, 4384 cc); 4385 struct e1000_hw *hw = &adapter->hw; 4386 u32 systimel, systimeh; 4387 u64 systim; 4388 /* SYSTIMH latching upon SYSTIML read does not work well. 4389 * This means that if SYSTIML overflows after we read it but before 4390 * we read SYSTIMH, the value of SYSTIMH has been incremented and we 4391 * will experience a huge non linear increment in the systime value 4392 * to fix that we test for overflow and if true, we re-read systime. 
4393 */ 4394 systimel = er32(SYSTIML); 4395 systimeh = er32(SYSTIMH); 4396 /* Is systimel so large that overflow is possible? */ 4397 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { 4398 u32 systimel_2 = er32(SYSTIML); 4399 if (systimel > systimel_2) { 4400 /* There was an overflow; read SYSTIMH again and use 4401 * systimel_2 4402 */ 4403 systimeh = er32(SYSTIMH); 4404 systimel = systimel_2; 4405 } 4406 } 4407 systim = (u64)systimel; 4408 systim |= (u64)systimeh << 32; 4409 4410 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) 4411 systim = e1000e_sanitize_systim(hw, systim); 4412 4413 return systim; 4414 } 4415 4416 /** 4417 * e1000_sw_init - Initialize general software structures (struct e1000_adapter) 4418 * @adapter: board private structure to initialize 4419 * 4420 * e1000_sw_init initializes the Adapter private data structure. 4421 * Fields are initialized based on PCI device information and 4422 * OS network device settings (MTU size). 4423 **/ 4424 static int e1000_sw_init(struct e1000_adapter *adapter) 4425 { 4426 struct net_device *netdev = adapter->netdev; 4427 4428 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; 4429 adapter->rx_ps_bsize0 = 128; 4430 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 4431 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4432 adapter->tx_ring_count = E1000_DEFAULT_TXD; 4433 adapter->rx_ring_count = E1000_DEFAULT_RXD; 4434 4435 spin_lock_init(&adapter->stats64_lock); 4436 4437 e1000e_set_interrupt_capability(adapter); 4438 4439 if (e1000_alloc_queues(adapter)) 4440 return -ENOMEM; 4441 4442 /* Setup hardware time stamping cyclecounter */ 4443 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { 4444 adapter->cc.read = e1000e_cyclecounter_read; 4445 adapter->cc.mask = CYCLECOUNTER_MASK(64); 4446 adapter->cc.mult = 1; 4447 /* cc.shift set in e1000e_get_base_timinca() */ 4448 4449 spin_lock_init(&adapter->systim_lock); 4450 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); 4451 } 4452 4453 /* Explicitly disable IRQ since the NIC can be in any state. */ 4454 e1000_irq_disable(adapter); 4455 4456 set_bit(__E1000_DOWN, &adapter->state); 4457 return 0; 4458 } 4459 4460 /** 4461 * e1000_intr_msi_test - Interrupt Handler 4462 * @irq: interrupt number 4463 * @data: pointer to a network interface device structure 4464 **/ 4465 static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) 4466 { 4467 struct net_device *netdev = data; 4468 struct e1000_adapter *adapter = netdev_priv(netdev); 4469 struct e1000_hw *hw = &adapter->hw; 4470 u32 icr = er32(ICR); 4471 4472 e_dbg("icr is %08X\n", icr); 4473 if (icr & E1000_ICR_RXSEQ) { 4474 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 4475 /* Force memory writes to complete before acknowledging the 4476 * interrupt is handled.
4477 */ 4478 wmb(); 4479 } 4480 4481 return IRQ_HANDLED; 4482 } 4483 4484 /** 4485 * e1000_test_msi_interrupt - Returns 0 for successful test 4486 * @adapter: board private struct 4487 * 4488 * code flow taken from tg3.c 4489 **/ 4490 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) 4491 { 4492 struct net_device *netdev = adapter->netdev; 4493 struct e1000_hw *hw = &adapter->hw; 4494 int err; 4495 4496 /* poll_enable hasn't been called yet, so don't need disable */ 4497 /* clear any pending events */ 4498 er32(ICR); 4499 4500 /* free the real vector and request a test handler */ 4501 e1000_free_irq(adapter); 4502 e1000e_reset_interrupt_capability(adapter); 4503 4504 /* Assume that the test fails, if it succeeds then the test 4505 * MSI irq handler will unset this flag 4506 */ 4507 adapter->flags |= FLAG_MSI_TEST_FAILED; 4508 4509 err = pci_enable_msi(adapter->pdev); 4510 if (err) 4511 goto msi_test_failed; 4512 4513 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, 4514 netdev->name, netdev); 4515 if (err) { 4516 pci_disable_msi(adapter->pdev); 4517 goto msi_test_failed; 4518 } 4519 4520 /* Force memory writes to complete before enabling and firing an 4521 * interrupt. 4522 */ 4523 wmb(); 4524 4525 e1000_irq_enable(adapter); 4526 4527 /* fire an unusual interrupt on the test handler */ 4528 ew32(ICS, E1000_ICS_RXSEQ); 4529 e1e_flush(); 4530 msleep(100); 4531 4532 e1000_irq_disable(adapter); 4533 4534 rmb(); /* read flags after interrupt has been fired */ 4535 4536 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 4537 adapter->int_mode = E1000E_INT_MODE_LEGACY; 4538 e_info("MSI interrupt test failed, using legacy interrupt.\n"); 4539 } else { 4540 e_dbg("MSI interrupt test succeeded!\n"); 4541 } 4542 4543 free_irq(adapter->pdev->irq, netdev); 4544 pci_disable_msi(adapter->pdev); 4545 4546 msi_test_failed: 4547 e1000e_set_interrupt_capability(adapter); 4548 return e1000_request_irq(adapter); 4549 } 4550 4551 /** 4552 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored 4553 * @adapter: board private struct 4554 * 4555 * code flow taken from tg3.c, called with e1000 interrupts disabled. 4556 **/ 4557 static int e1000_test_msi(struct e1000_adapter *adapter) 4558 { 4559 int err; 4560 u16 pci_cmd; 4561 4562 if (!(adapter->flags & FLAG_MSI_ENABLED)) 4563 return 0; 4564 4565 /* disable SERR in case the MSI write causes a master abort */ 4566 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 4567 if (pci_cmd & PCI_COMMAND_SERR) 4568 pci_write_config_word(adapter->pdev, PCI_COMMAND, 4569 pci_cmd & ~PCI_COMMAND_SERR); 4570 4571 err = e1000_test_msi_interrupt(adapter); 4572 4573 /* re-enable SERR */ 4574 if (pci_cmd & PCI_COMMAND_SERR) { 4575 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 4576 pci_cmd |= PCI_COMMAND_SERR; 4577 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); 4578 } 4579 4580 return err; 4581 } 4582 4583 /** 4584 * e1000e_open - Called when a network interface is made active 4585 * @netdev: network interface device structure 4586 * 4587 * Returns 0 on success, negative value on failure 4588 * 4589 * The open entry point is called when a network interface is made 4590 * active by the system (IFF_UP). At this point all resources needed 4591 * for transmit and receive operations are allocated, the interrupt 4592 * handler is registered with the OS, the watchdog timer is started, 4593 * and the stack is notified that the interface is ready. 
4594 **/ 4595 int e1000e_open(struct net_device *netdev) 4596 { 4597 struct e1000_adapter *adapter = netdev_priv(netdev); 4598 struct e1000_hw *hw = &adapter->hw; 4599 struct pci_dev *pdev = adapter->pdev; 4600 int err; 4601 4602 /* disallow open during test */ 4603 if (test_bit(__E1000_TESTING, &adapter->state)) 4604 return -EBUSY; 4605 4606 pm_runtime_get_sync(&pdev->dev); 4607 4608 netif_carrier_off(netdev); 4609 4610 /* allocate transmit descriptors */ 4611 err = e1000e_setup_tx_resources(adapter->tx_ring); 4612 if (err) 4613 goto err_setup_tx; 4614 4615 /* allocate receive descriptors */ 4616 err = e1000e_setup_rx_resources(adapter->rx_ring); 4617 if (err) 4618 goto err_setup_rx; 4619 4620 /* If AMT is enabled, let the firmware know that the network 4621 * interface is now open and reset the part to a known state. 4622 */ 4623 if (adapter->flags & FLAG_HAS_AMT) { 4624 e1000e_get_hw_control(adapter); 4625 e1000e_reset(adapter); 4626 } 4627 4628 e1000e_power_up_phy(adapter); 4629 4630 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4631 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 4632 e1000_update_mng_vlan(adapter); 4633 4634 /* DMA latency requirement to workaround jumbo issue */ 4635 pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 4636 PM_QOS_DEFAULT_VALUE); 4637 4638 /* before we allocate an interrupt, we must be ready to handle it. 4639 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 4640 * as soon as we call pci_request_irq, so we have to setup our 4641 * clean_rx handler before we do so. 4642 */ 4643 e1000_configure(adapter); 4644 4645 err = e1000_request_irq(adapter); 4646 if (err) 4647 goto err_req_irq; 4648 4649 /* Work around PCIe errata with MSI interrupts causing some chipsets to 4650 * ignore e1000e MSI messages, which means we need to test our MSI 4651 * interrupt now 4652 */ 4653 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { 4654 err = e1000_test_msi(adapter); 4655 if (err) { 4656 e_err("Interrupt allocation failed\n"); 4657 goto err_req_irq; 4658 } 4659 } 4660 4661 /* From here on the code is the same as e1000e_up() */ 4662 clear_bit(__E1000_DOWN, &adapter->state); 4663 4664 napi_enable(&adapter->napi); 4665 4666 e1000_irq_enable(adapter); 4667 4668 adapter->tx_hang_recheck = false; 4669 netif_start_queue(netdev); 4670 4671 hw->mac.get_link_status = true; 4672 pm_runtime_put(&pdev->dev); 4673 4674 e1000e_trigger_lsc(adapter); 4675 4676 return 0; 4677 4678 err_req_irq: 4679 pm_qos_remove_request(&adapter->pm_qos_req); 4680 e1000e_release_hw_control(adapter); 4681 e1000_power_down_phy(adapter); 4682 e1000e_free_rx_resources(adapter->rx_ring); 4683 err_setup_rx: 4684 e1000e_free_tx_resources(adapter->tx_ring); 4685 err_setup_tx: 4686 e1000e_reset(adapter); 4687 pm_runtime_put_sync(&pdev->dev); 4688 4689 return err; 4690 } 4691 4692 /** 4693 * e1000e_close - Disables a network interface 4694 * @netdev: network interface device structure 4695 * 4696 * Returns 0, this is not allowed to fail 4697 * 4698 * The close entry point is called when an interface is de-activated 4699 * by the OS. The hardware is still under the drivers control, but 4700 * needs to be disabled. A global MAC reset is issued to stop the 4701 * hardware, and all transmit and receive resources are freed. 
4702 **/ 4703 int e1000e_close(struct net_device *netdev) 4704 { 4705 struct e1000_adapter *adapter = netdev_priv(netdev); 4706 struct pci_dev *pdev = adapter->pdev; 4707 int count = E1000_CHECK_RESET_COUNT; 4708 4709 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 4710 usleep_range(10000, 20000); 4711 4712 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 4713 4714 pm_runtime_get_sync(&pdev->dev); 4715 4716 if (!test_bit(__E1000_DOWN, &adapter->state)) { 4717 e1000e_down(adapter, true); 4718 e1000_free_irq(adapter); 4719 4720 /* Link status message must follow this format */ 4721 pr_info("%s NIC Link is Down\n", adapter->netdev->name); 4722 } 4723 4724 napi_disable(&adapter->napi); 4725 4726 e1000e_free_tx_resources(adapter->tx_ring); 4727 e1000e_free_rx_resources(adapter->rx_ring); 4728 4729 /* kill manageability vlan ID if supported, but not if a vlan with 4730 * the same ID is registered on the host OS (let 8021q kill it) 4731 */ 4732 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 4733 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 4734 adapter->mng_vlan_id); 4735 4736 /* If AMT is enabled, let the firmware know that the network 4737 * interface is now closed 4738 */ 4739 if ((adapter->flags & FLAG_HAS_AMT) && 4740 !test_bit(__E1000_TESTING, &adapter->state)) 4741 e1000e_release_hw_control(adapter); 4742 4743 pm_qos_remove_request(&adapter->pm_qos_req); 4744 4745 pm_runtime_put_sync(&pdev->dev); 4746 4747 return 0; 4748 } 4749 4750 /** 4751 * e1000_set_mac - Change the Ethernet Address of the NIC 4752 * @netdev: network interface device structure 4753 * @p: pointer to an address structure 4754 * 4755 * Returns 0 on success, negative on failure 4756 **/ 4757 static int e1000_set_mac(struct net_device *netdev, void *p) 4758 { 4759 struct e1000_adapter *adapter = netdev_priv(netdev); 4760 struct e1000_hw *hw = &adapter->hw; 4761 struct sockaddr *addr = p; 4762 4763 if (!is_valid_ether_addr(addr->sa_data)) 4764 return -EADDRNOTAVAIL; 4765 4766 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4767 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4768 4769 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4770 4771 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4772 /* activate the work around */ 4773 e1000e_set_laa_state_82571(&adapter->hw, 1); 4774 4775 /* Hold a copy of the LAA in RAR[14] This is done so that 4776 * between the time RAR[0] gets clobbered and the time it 4777 * gets fixed (in e1000_watchdog), the actual LAA is in one 4778 * of the RARs and no incoming packets directed to this port 4779 * are dropped. Eventually the LAA will be in RAR[0] and 4780 * RAR[14] 4781 */ 4782 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 4783 adapter->hw.mac.rar_entry_count - 1); 4784 } 4785 4786 return 0; 4787 } 4788 4789 /** 4790 * e1000e_update_phy_task - work thread to update phy 4791 * @work: pointer to our work struct 4792 * 4793 * this worker thread exists because we must acquire a 4794 * semaphore to read the phy, which we could msleep while 4795 * waiting for it, and we can't msleep in a timer. 
4796 **/ 4797 static void e1000e_update_phy_task(struct work_struct *work) 4798 { 4799 struct e1000_adapter *adapter = container_of(work, 4800 struct e1000_adapter, 4801 update_phy_task); 4802 struct e1000_hw *hw = &adapter->hw; 4803 4804 if (test_bit(__E1000_DOWN, &adapter->state)) 4805 return; 4806 4807 e1000_get_phy_info(hw); 4808 4809 /* Enable EEE on 82579 after link up */ 4810 if (hw->phy.type >= e1000_phy_82579) 4811 e1000_set_eee_pchlan(hw); 4812 } 4813 4814 /** 4815 * e1000_update_phy_info - timer call-back to update PHY info 4816 * @t: pointer to the adapter's phy_info_timer 4817 * 4818 * Need to wait a few seconds after link up to get diagnostic information from 4819 * the phy 4820 **/ 4821 static void e1000_update_phy_info(struct timer_list *t) 4822 { 4823 struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer); 4824 4825 if (test_bit(__E1000_DOWN, &adapter->state)) 4826 return; 4827 4828 schedule_work(&adapter->update_phy_task); 4829 } 4830 4831 /** 4832 * e1000e_update_phy_stats - Update the PHY statistics counters 4833 * @adapter: board private structure 4834 * 4835 * Read/clear the upper 16-bit PHY registers and read/accumulate lower 4836 **/ 4837 static void e1000e_update_phy_stats(struct e1000_adapter *adapter) 4838 { 4839 struct e1000_hw *hw = &adapter->hw; 4840 s32 ret_val; 4841 u16 phy_data; 4842 4843 ret_val = hw->phy.ops.acquire(hw); 4844 if (ret_val) 4845 return; 4846 4847 /* A page set is expensive so check if already on desired page. 4848 * If not, set to the page with the PHY status registers. 4849 */ 4850 hw->phy.addr = 1; 4851 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 4852 &phy_data); 4853 if (ret_val) 4854 goto release; 4855 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { 4856 ret_val = hw->phy.ops.set_page(hw, 4857 HV_STATS_PAGE << IGP_PAGE_SHIFT); 4858 if (ret_val) 4859 goto release; 4860 } 4861 4862 /* Single Collision Count */ 4863 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); 4864 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); 4865 if (!ret_val) 4866 adapter->stats.scc += phy_data; 4867 4868 /* Excessive Collision Count */ 4869 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); 4870 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); 4871 if (!ret_val) 4872 adapter->stats.ecol += phy_data; 4873 4874 /* Multiple Collision Count */ 4875 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); 4876 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); 4877 if (!ret_val) 4878 adapter->stats.mcc += phy_data; 4879 4880 /* Late Collision Count */ 4881 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); 4882 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); 4883 if (!ret_val) 4884 adapter->stats.latecol += phy_data; 4885 4886 /* Collision Count - also used for adaptive IFS */ 4887 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); 4888 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); 4889 if (!ret_val) 4890 hw->mac.collision_delta = phy_data; 4891 4892 /* Defer Count */ 4893 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); 4894 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); 4895 if (!ret_val) 4896 adapter->stats.dc += phy_data; 4897 4898 /* Transmit with no CRS */ 4899 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); 4900 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); 4901 if (!ret_val) 4902 adapter->stats.tncrs += phy_data; 4903 4904 release:
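/* Common exit: drop the PHY semaphore taken by hw->phy.ops.acquire() at the
 * top of this function; reached on both the error paths and the normal path.
 */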
4905 hw->phy.ops.release(hw); 4906 } 4907 4908 /** 4909 * e1000e_update_stats - Update the board statistics counters 4910 * @adapter: board private structure 4911 **/ 4912 static void e1000e_update_stats(struct e1000_adapter *adapter) 4913 { 4914 struct net_device *netdev = adapter->netdev; 4915 struct e1000_hw *hw = &adapter->hw; 4916 struct pci_dev *pdev = adapter->pdev; 4917 4918 /* Prevent stats update while adapter is being reset, or if the pci 4919 * connection is down. 4920 */ 4921 if (adapter->link_speed == 0) 4922 return; 4923 if (pci_channel_offline(pdev)) 4924 return; 4925 4926 adapter->stats.crcerrs += er32(CRCERRS); 4927 adapter->stats.gprc += er32(GPRC); 4928 adapter->stats.gorc += er32(GORCL); 4929 er32(GORCH); /* Clear gorc */ 4930 adapter->stats.bprc += er32(BPRC); 4931 adapter->stats.mprc += er32(MPRC); 4932 adapter->stats.roc += er32(ROC); 4933 4934 adapter->stats.mpc += er32(MPC); 4935 4936 /* Half-duplex statistics */ 4937 if (adapter->link_duplex == HALF_DUPLEX) { 4938 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { 4939 e1000e_update_phy_stats(adapter); 4940 } else { 4941 adapter->stats.scc += er32(SCC); 4942 adapter->stats.ecol += er32(ECOL); 4943 adapter->stats.mcc += er32(MCC); 4944 adapter->stats.latecol += er32(LATECOL); 4945 adapter->stats.dc += er32(DC); 4946 4947 hw->mac.collision_delta = er32(COLC); 4948 4949 if ((hw->mac.type != e1000_82574) && 4950 (hw->mac.type != e1000_82583)) 4951 adapter->stats.tncrs += er32(TNCRS); 4952 } 4953 adapter->stats.colc += hw->mac.collision_delta; 4954 } 4955 4956 adapter->stats.xonrxc += er32(XONRXC); 4957 adapter->stats.xontxc += er32(XONTXC); 4958 adapter->stats.xoffrxc += er32(XOFFRXC); 4959 adapter->stats.xofftxc += er32(XOFFTXC); 4960 adapter->stats.gptc += er32(GPTC); 4961 adapter->stats.gotc += er32(GOTCL); 4962 er32(GOTCH); /* Clear gotc */ 4963 adapter->stats.rnbc += er32(RNBC); 4964 adapter->stats.ruc += er32(RUC); 4965 4966 adapter->stats.mptc += er32(MPTC); 4967 adapter->stats.bptc += er32(BPTC); 4968 4969 /* used for adaptive IFS */ 4970 4971 hw->mac.tx_packet_delta = er32(TPT); 4972 adapter->stats.tpt += hw->mac.tx_packet_delta; 4973 4974 adapter->stats.algnerrc += er32(ALGNERRC); 4975 adapter->stats.rxerrc += er32(RXERRC); 4976 adapter->stats.cexterr += er32(CEXTERR); 4977 adapter->stats.tsctc += er32(TSCTC); 4978 adapter->stats.tsctfc += er32(TSCTFC); 4979 4980 /* Fill out the OS statistics structure */ 4981 netdev->stats.multicast = adapter->stats.mprc; 4982 netdev->stats.collisions = adapter->stats.colc; 4983 4984 /* Rx Errors */ 4985 4986 /* RLEC on some newer hardware can be incorrect so build 4987 * our own version based on RUC and ROC 4988 */ 4989 netdev->stats.rx_errors = adapter->stats.rxerrc + 4990 adapter->stats.crcerrs + adapter->stats.algnerrc + 4991 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; 4992 netdev->stats.rx_length_errors = adapter->stats.ruc + 4993 adapter->stats.roc; 4994 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4995 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4996 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4997 4998 /* Tx Errors */ 4999 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; 5000 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 5001 netdev->stats.tx_window_errors = adapter->stats.latecol; 5002 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 5003 5004 /* Tx Dropped needs to be maintained elsewhere */ 5005 5006 /* Management Stats */ 5007 adapter->stats.mgptc += er32(MGTPTC); 5008 
adapter->stats.mgprc += er32(MGTPRC); 5009 adapter->stats.mgpdc += er32(MGTPDC); 5010 5011 /* Correctable ECC Errors */ 5012 if (hw->mac.type >= e1000_pch_lpt) { 5013 u32 pbeccsts = er32(PBECCSTS); 5014 5015 adapter->corr_errors += 5016 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 5017 adapter->uncorr_errors += 5018 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 5019 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 5020 } 5021 } 5022 5023 /** 5024 * e1000_phy_read_status - Update the PHY register status snapshot 5025 * @adapter: board private structure 5026 **/ 5027 static void e1000_phy_read_status(struct e1000_adapter *adapter) 5028 { 5029 struct e1000_hw *hw = &adapter->hw; 5030 struct e1000_phy_regs *phy = &adapter->phy_regs; 5031 5032 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && 5033 (er32(STATUS) & E1000_STATUS_LU) && 5034 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 5035 int ret_val; 5036 5037 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); 5038 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); 5039 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); 5040 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); 5041 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); 5042 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); 5043 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); 5044 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); 5045 if (ret_val) 5046 e_warn("Error reading PHY register\n"); 5047 } else { 5048 /* Do not read PHY registers if link is not up 5049 * Set values to typical power-on defaults 5050 */ 5051 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); 5052 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | 5053 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | 5054 BMSR_ERCAP); 5055 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | 5056 ADVERTISE_ALL | ADVERTISE_CSMA); 5057 phy->lpa = 0; 5058 phy->expansion = EXPANSION_ENABLENPAGE; 5059 phy->ctrl1000 = ADVERTISE_1000FULL; 5060 phy->stat1000 = 0; 5061 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); 5062 } 5063 } 5064 5065 static void e1000_print_link_info(struct e1000_adapter *adapter) 5066 { 5067 struct e1000_hw *hw = &adapter->hw; 5068 u32 ctrl = er32(CTRL); 5069 5070 /* Link status message must follow this format for user tools */ 5071 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5072 adapter->netdev->name, adapter->link_speed, 5073 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", 5074 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : 5075 (ctrl & E1000_CTRL_RFCE) ? "Rx" : 5076 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); 5077 } 5078 5079 static bool e1000e_has_link(struct e1000_adapter *adapter) 5080 { 5081 struct e1000_hw *hw = &adapter->hw; 5082 bool link_active = false; 5083 s32 ret_val = 0; 5084 5085 /* get_link_status is set on LSC (link status) interrupt or 5086 * Rx sequence error interrupt. 
get_link_status will stay 5087 * true until the check_for_link establishes link 5088 * for copper adapters ONLY 5089 */ 5090 switch (hw->phy.media_type) { 5091 case e1000_media_type_copper: 5092 if (hw->mac.get_link_status) { 5093 ret_val = hw->mac.ops.check_for_link(hw); 5094 link_active = !hw->mac.get_link_status; 5095 } else { 5096 link_active = true; 5097 } 5098 break; 5099 case e1000_media_type_fiber: 5100 ret_val = hw->mac.ops.check_for_link(hw); 5101 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 5102 break; 5103 case e1000_media_type_internal_serdes: 5104 ret_val = hw->mac.ops.check_for_link(hw); 5105 link_active = hw->mac.serdes_has_link; 5106 break; 5107 default: 5108 case e1000_media_type_unknown: 5109 break; 5110 } 5111 5112 if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 5113 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 5114 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 5115 e_info("Gigabit has been disabled, downgrading speed\n"); 5116 } 5117 5118 return link_active; 5119 } 5120 5121 static void e1000e_enable_receives(struct e1000_adapter *adapter) 5122 { 5123 /* make sure the receive unit is started */ 5124 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && 5125 (adapter->flags & FLAG_RESTART_NOW)) { 5126 struct e1000_hw *hw = &adapter->hw; 5127 u32 rctl = er32(RCTL); 5128 5129 ew32(RCTL, rctl | E1000_RCTL_EN); 5130 adapter->flags &= ~FLAG_RESTART_NOW; 5131 } 5132 } 5133 5134 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) 5135 { 5136 struct e1000_hw *hw = &adapter->hw; 5137 5138 /* With 82574 controllers, PHY needs to be checked periodically 5139 * for hung state and reset, if two calls return true 5140 */ 5141 if (e1000_check_phy_82574(hw)) 5142 adapter->phy_hang_count++; 5143 else 5144 adapter->phy_hang_count = 0; 5145 5146 if (adapter->phy_hang_count > 1) { 5147 adapter->phy_hang_count = 0; 5148 e_dbg("PHY appears hung - resetting\n"); 5149 schedule_work(&adapter->reset_task); 5150 } 5151 } 5152 5153 /** 5154 * e1000_watchdog - Timer Call-back 5155 * @data: pointer to adapter cast into an unsigned long 5156 **/ 5157 static void e1000_watchdog(struct timer_list *t) 5158 { 5159 struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); 5160 5161 /* Do the rest outside of interrupt context */ 5162 schedule_work(&adapter->watchdog_task); 5163 5164 /* TODO: make this use queue_delayed_work() */ 5165 } 5166 5167 static void e1000_watchdog_task(struct work_struct *work) 5168 { 5169 struct e1000_adapter *adapter = container_of(work, 5170 struct e1000_adapter, 5171 watchdog_task); 5172 struct net_device *netdev = adapter->netdev; 5173 struct e1000_mac_info *mac = &adapter->hw.mac; 5174 struct e1000_phy_info *phy = &adapter->hw.phy; 5175 struct e1000_ring *tx_ring = adapter->tx_ring; 5176 struct e1000_hw *hw = &adapter->hw; 5177 u32 link, tctl; 5178 5179 if (test_bit(__E1000_DOWN, &adapter->state)) 5180 return; 5181 5182 link = e1000e_has_link(adapter); 5183 if ((netif_carrier_ok(netdev)) && link) { 5184 /* Cancel scheduled suspend requests. */ 5185 pm_runtime_resume(netdev->dev.parent); 5186 5187 e1000e_enable_receives(adapter); 5188 goto link_up; 5189 } 5190 5191 if ((e1000e_enable_tx_pkt_filtering(hw)) && 5192 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) 5193 e1000_update_mng_vlan(adapter); 5194 5195 if (link) { 5196 if (!netif_carrier_ok(netdev)) { 5197 bool txb2b = true; 5198 5199 /* Cancel scheduled suspend requests. 
*/ 5200 pm_runtime_resume(netdev->dev.parent); 5201 5202 /* update snapshot of PHY registers on LSC */ 5203 e1000_phy_read_status(adapter); 5204 mac->ops.get_link_up_info(&adapter->hw, 5205 &adapter->link_speed, 5206 &adapter->link_duplex); 5207 e1000_print_link_info(adapter); 5208 5209 /* check if SmartSpeed worked */ 5210 e1000e_check_downshift(hw); 5211 if (phy->speed_downgraded) 5212 netdev_warn(netdev, 5213 "Link Speed was downgraded by SmartSpeed\n"); 5214 5215 /* On supported PHYs, check for duplex mismatch only 5216 * if link has autonegotiated at 10/100 half 5217 */ 5218 if ((hw->phy.type == e1000_phy_igp_3 || 5219 hw->phy.type == e1000_phy_bm) && 5220 hw->mac.autoneg && 5221 (adapter->link_speed == SPEED_10 || 5222 adapter->link_speed == SPEED_100) && 5223 (adapter->link_duplex == HALF_DUPLEX)) { 5224 u16 autoneg_exp; 5225 5226 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); 5227 5228 if (!(autoneg_exp & EXPANSION_NWAY)) 5229 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); 5230 } 5231 5232 /* adjust timeout factor according to speed/duplex */ 5233 adapter->tx_timeout_factor = 1; 5234 switch (adapter->link_speed) { 5235 case SPEED_10: 5236 txb2b = false; 5237 adapter->tx_timeout_factor = 16; 5238 break; 5239 case SPEED_100: 5240 txb2b = false; 5241 adapter->tx_timeout_factor = 10; 5242 break; 5243 } 5244 5245 /* workaround: re-program speed mode bit after 5246 * link-up event 5247 */ 5248 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 5249 !txb2b) { 5250 u32 tarc0; 5251 5252 tarc0 = er32(TARC(0)); 5253 tarc0 &= ~SPEED_MODE_BIT; 5254 ew32(TARC(0), tarc0); 5255 } 5256 5257 /* disable TSO for pcie and 10/100 speeds, to avoid 5258 * some hardware issues 5259 */ 5260 if (!(adapter->flags & FLAG_TSO_FORCE)) { 5261 switch (adapter->link_speed) { 5262 case SPEED_10: 5263 case SPEED_100: 5264 e_info("10/100 speed: disabling TSO\n"); 5265 netdev->features &= ~NETIF_F_TSO; 5266 netdev->features &= ~NETIF_F_TSO6; 5267 break; 5268 case SPEED_1000: 5269 netdev->features |= NETIF_F_TSO; 5270 netdev->features |= NETIF_F_TSO6; 5271 break; 5272 default: 5273 /* oops */ 5274 break; 5275 } 5276 } 5277 5278 /* enable transmits in the hardware, need to do this 5279 * after setting TARC(0) 5280 */ 5281 tctl = er32(TCTL); 5282 tctl |= E1000_TCTL_EN; 5283 ew32(TCTL, tctl); 5284 5285 /* Perform any post-link-up configuration before 5286 * reporting link up. 5287 */ 5288 if (phy->ops.cfg_on_link_up) 5289 phy->ops.cfg_on_link_up(hw); 5290 5291 netif_carrier_on(netdev); 5292 5293 if (!test_bit(__E1000_DOWN, &adapter->state)) 5294 mod_timer(&adapter->phy_info_timer, 5295 round_jiffies(jiffies + 2 * HZ)); 5296 } 5297 } else { 5298 if (netif_carrier_ok(netdev)) { 5299 adapter->link_speed = 0; 5300 adapter->link_duplex = 0; 5301 /* Link status message must follow this format */ 5302 pr_info("%s NIC Link is Down\n", adapter->netdev->name); 5303 netif_carrier_off(netdev); 5304 if (!test_bit(__E1000_DOWN, &adapter->state)) 5305 mod_timer(&adapter->phy_info_timer, 5306 round_jiffies(jiffies + 2 * HZ)); 5307 5308 /* 8000ES2LAN requires a Rx packet buffer work-around 5309 * on link down event; reset the controller to flush 5310 * the Rx packet buffer. 
5311 */ 5312 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 5313 adapter->flags |= FLAG_RESTART_NOW; 5314 else 5315 pm_schedule_suspend(netdev->dev.parent, 5316 LINK_TIMEOUT); 5317 } 5318 } 5319 5320 link_up: 5321 spin_lock(&adapter->stats64_lock); 5322 e1000e_update_stats(adapter); 5323 5324 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 5325 adapter->tpt_old = adapter->stats.tpt; 5326 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 5327 adapter->colc_old = adapter->stats.colc; 5328 5329 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 5330 adapter->gorc_old = adapter->stats.gorc; 5331 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; 5332 adapter->gotc_old = adapter->stats.gotc; 5333 spin_unlock(&adapter->stats64_lock); 5334 5335 /* If the link is lost the controller stops DMA, but 5336 * if there is queued Tx work it cannot be done. So 5337 * reset the controller to flush the Tx packet buffers. 5338 */ 5339 if (!netif_carrier_ok(netdev) && 5340 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) 5341 adapter->flags |= FLAG_RESTART_NOW; 5342 5343 /* If reset is necessary, do it outside of interrupt context. */ 5344 if (adapter->flags & FLAG_RESTART_NOW) { 5345 schedule_work(&adapter->reset_task); 5346 /* return immediately since reset is imminent */ 5347 return; 5348 } 5349 5350 e1000e_update_adaptive(&adapter->hw); 5351 5352 /* Simple mode for Interrupt Throttle Rate (ITR) */ 5353 if (adapter->itr_setting == 4) { 5354 /* Symmetric Tx/Rx gets a reduced ITR=2000; 5355 * Total asymmetrical Tx or Rx gets ITR=8000; 5356 * everyone else is between 2000-8000. 5357 */ 5358 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 5359 u32 dif = (adapter->gotc > adapter->gorc ? 5360 adapter->gotc - adapter->gorc : 5361 adapter->gorc - adapter->gotc) / 10000; 5362 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 5363 5364 e1000e_write_itr(adapter, itr); 5365 } 5366 5367 /* Cause software interrupt to ensure Rx ring is cleaned */ 5368 if (adapter->msix_entries) 5369 ew32(ICS, adapter->rx_ring->ims_val); 5370 else 5371 ew32(ICS, E1000_ICS_RXDMT0); 5372 5373 /* flush pending descriptors to memory before detecting Tx hang */ 5374 e1000e_flush_descriptors(adapter); 5375 5376 /* Force detection of hung controller every watchdog period */ 5377 adapter->detect_tx_hung = true; 5378 5379 /* With 82571 controllers, LAA may be overwritten due to controller 5380 * reset from the other port. 
Set the appropriate LAA in RAR[0] 5381 */ 5382 if (e1000e_get_laa_state_82571(hw)) 5383 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); 5384 5385 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 5386 e1000e_check_82574_phy_workaround(adapter); 5387 5388 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ 5389 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { 5390 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && 5391 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { 5392 er32(RXSTMPH); 5393 adapter->rx_hwtstamp_cleared++; 5394 } else { 5395 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; 5396 } 5397 } 5398 5399 /* Reset the timer */ 5400 if (!test_bit(__E1000_DOWN, &adapter->state)) 5401 mod_timer(&adapter->watchdog_timer, 5402 round_jiffies(jiffies + 2 * HZ)); 5403 } 5404 5405 #define E1000_TX_FLAGS_CSUM 0x00000001 5406 #define E1000_TX_FLAGS_VLAN 0x00000002 5407 #define E1000_TX_FLAGS_TSO 0x00000004 5408 #define E1000_TX_FLAGS_IPV4 0x00000008 5409 #define E1000_TX_FLAGS_NO_FCS 0x00000010 5410 #define E1000_TX_FLAGS_HWTSTAMP 0x00000020 5411 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 5412 #define E1000_TX_FLAGS_VLAN_SHIFT 16 5413 5414 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, 5415 __be16 protocol) 5416 { 5417 struct e1000_context_desc *context_desc; 5418 struct e1000_buffer *buffer_info; 5419 unsigned int i; 5420 u32 cmd_length = 0; 5421 u16 ipcse = 0, mss; 5422 u8 ipcss, ipcso, tucss, tucso, hdr_len; 5423 int err; 5424 5425 if (!skb_is_gso(skb)) 5426 return 0; 5427 5428 err = skb_cow_head(skb, 0); 5429 if (err < 0) 5430 return err; 5431 5432 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5433 mss = skb_shinfo(skb)->gso_size; 5434 if (protocol == htons(ETH_P_IP)) { 5435 struct iphdr *iph = ip_hdr(skb); 5436 iph->tot_len = 0; 5437 iph->check = 0; 5438 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 5439 0, IPPROTO_TCP, 0); 5440 cmd_length = E1000_TXD_CMD_IP; 5441 ipcse = skb_transport_offset(skb) - 1; 5442 } else if (skb_is_gso_v6(skb)) { 5443 ipv6_hdr(skb)->payload_len = 0; 5444 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5445 &ipv6_hdr(skb)->daddr, 5446 0, IPPROTO_TCP, 0); 5447 ipcse = 0; 5448 } 5449 ipcss = skb_network_offset(skb); 5450 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 5451 tucss = skb_transport_offset(skb); 5452 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 5453 5454 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 5455 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 5456 5457 i = tx_ring->next_to_use; 5458 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5459 buffer_info = &tx_ring->buffer_info[i]; 5460 5461 context_desc->lower_setup.ip_fields.ipcss = ipcss; 5462 context_desc->lower_setup.ip_fields.ipcso = ipcso; 5463 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 5464 context_desc->upper_setup.tcp_fields.tucss = tucss; 5465 context_desc->upper_setup.tcp_fields.tucso = tucso; 5466 context_desc->upper_setup.tcp_fields.tucse = 0; 5467 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 5468 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 5469 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 5470 5471 buffer_info->time_stamp = jiffies; 5472 buffer_info->next_to_watch = i; 5473 5474 i++; 5475 if (i == tx_ring->count) 5476 i = 0; 5477 tx_ring->next_to_use = i; 5478 5479 return 1; 5480 } 5481 5482 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, 5483 __be16 protocol) 5484 { 5485 struct 
e1000_adapter *adapter = tx_ring->adapter; 5486 struct e1000_context_desc *context_desc; 5487 struct e1000_buffer *buffer_info; 5488 unsigned int i; 5489 u8 css; 5490 u32 cmd_len = E1000_TXD_CMD_DEXT; 5491 5492 if (skb->ip_summed != CHECKSUM_PARTIAL) 5493 return false; 5494 5495 switch (protocol) { 5496 case cpu_to_be16(ETH_P_IP): 5497 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 5498 cmd_len |= E1000_TXD_CMD_TCP; 5499 break; 5500 case cpu_to_be16(ETH_P_IPV6): 5501 /* XXX not handling all IPV6 headers */ 5502 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 5503 cmd_len |= E1000_TXD_CMD_TCP; 5504 break; 5505 default: 5506 if (unlikely(net_ratelimit())) 5507 e_warn("checksum_partial proto=%x!\n", 5508 be16_to_cpu(protocol)); 5509 break; 5510 } 5511 5512 css = skb_checksum_start_offset(skb); 5513 5514 i = tx_ring->next_to_use; 5515 buffer_info = &tx_ring->buffer_info[i]; 5516 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5517 5518 context_desc->lower_setup.ip_config = 0; 5519 context_desc->upper_setup.tcp_fields.tucss = css; 5520 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; 5521 context_desc->upper_setup.tcp_fields.tucse = 0; 5522 context_desc->tcp_seg_setup.data = 0; 5523 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 5524 5525 buffer_info->time_stamp = jiffies; 5526 buffer_info->next_to_watch = i; 5527 5528 i++; 5529 if (i == tx_ring->count) 5530 i = 0; 5531 tx_ring->next_to_use = i; 5532 5533 return true; 5534 } 5535 5536 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 5537 unsigned int first, unsigned int max_per_txd, 5538 unsigned int nr_frags) 5539 { 5540 struct e1000_adapter *adapter = tx_ring->adapter; 5541 struct pci_dev *pdev = adapter->pdev; 5542 struct e1000_buffer *buffer_info; 5543 unsigned int len = skb_headlen(skb); 5544 unsigned int offset = 0, size, count = 0, i; 5545 unsigned int f, bytecount, segs; 5546 5547 i = tx_ring->next_to_use; 5548 5549 while (len) { 5550 buffer_info = &tx_ring->buffer_info[i]; 5551 size = min(len, max_per_txd); 5552 5553 buffer_info->length = size; 5554 buffer_info->time_stamp = jiffies; 5555 buffer_info->next_to_watch = i; 5556 buffer_info->dma = dma_map_single(&pdev->dev, 5557 skb->data + offset, 5558 size, DMA_TO_DEVICE); 5559 buffer_info->mapped_as_page = false; 5560 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 5561 goto dma_error; 5562 5563 len -= size; 5564 offset += size; 5565 count++; 5566 5567 if (len) { 5568 i++; 5569 if (i == tx_ring->count) 5570 i = 0; 5571 } 5572 } 5573 5574 for (f = 0; f < nr_frags; f++) { 5575 const struct skb_frag_struct *frag; 5576 5577 frag = &skb_shinfo(skb)->frags[f]; 5578 len = skb_frag_size(frag); 5579 offset = 0; 5580 5581 while (len) { 5582 i++; 5583 if (i == tx_ring->count) 5584 i = 0; 5585 5586 buffer_info = &tx_ring->buffer_info[i]; 5587 size = min(len, max_per_txd); 5588 5589 buffer_info->length = size; 5590 buffer_info->time_stamp = jiffies; 5591 buffer_info->next_to_watch = i; 5592 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 5593 offset, size, 5594 DMA_TO_DEVICE); 5595 buffer_info->mapped_as_page = true; 5596 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 5597 goto dma_error; 5598 5599 len -= size; 5600 offset += size; 5601 count++; 5602 } 5603 } 5604 5605 segs = skb_shinfo(skb)->gso_segs ? 
: 1; 5606 /* multiply data chunks by size of headers */ 5607 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 5608 5609 tx_ring->buffer_info[i].skb = skb; 5610 tx_ring->buffer_info[i].segs = segs; 5611 tx_ring->buffer_info[i].bytecount = bytecount; 5612 tx_ring->buffer_info[first].next_to_watch = i; 5613 5614 return count; 5615 5616 dma_error: 5617 dev_err(&pdev->dev, "Tx DMA map failed\n"); 5618 buffer_info->dma = 0; 5619 if (count) 5620 count--; 5621 5622 while (count--) { 5623 if (i == 0) 5624 i += tx_ring->count; 5625 i--; 5626 buffer_info = &tx_ring->buffer_info[i]; 5627 e1000_put_txbuf(tx_ring, buffer_info, true); 5628 } 5629 5630 return 0; 5631 } 5632 5633 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) 5634 { 5635 struct e1000_adapter *adapter = tx_ring->adapter; 5636 struct e1000_tx_desc *tx_desc = NULL; 5637 struct e1000_buffer *buffer_info; 5638 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 5639 unsigned int i; 5640 5641 if (tx_flags & E1000_TX_FLAGS_TSO) { 5642 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 5643 E1000_TXD_CMD_TSE; 5644 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 5645 5646 if (tx_flags & E1000_TX_FLAGS_IPV4) 5647 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 5648 } 5649 5650 if (tx_flags & E1000_TX_FLAGS_CSUM) { 5651 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 5652 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 5653 } 5654 5655 if (tx_flags & E1000_TX_FLAGS_VLAN) { 5656 txd_lower |= E1000_TXD_CMD_VLE; 5657 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 5658 } 5659 5660 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 5661 txd_lower &= ~(E1000_TXD_CMD_IFCS); 5662 5663 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { 5664 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 5665 txd_upper |= E1000_TXD_EXTCMD_TSTAMP; 5666 } 5667 5668 i = tx_ring->next_to_use; 5669 5670 do { 5671 buffer_info = &tx_ring->buffer_info[i]; 5672 tx_desc = E1000_TX_DESC(*tx_ring, i); 5673 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 5674 tx_desc->lower.data = cpu_to_le32(txd_lower | 5675 buffer_info->length); 5676 tx_desc->upper.data = cpu_to_le32(txd_upper); 5677 5678 i++; 5679 if (i == tx_ring->count) 5680 i = 0; 5681 } while (--count > 0); 5682 5683 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 5684 5685 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 5686 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 5687 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 5688 5689 /* Force memory writes to complete before letting h/w 5690 * know there are new descriptors to fetch. (Only 5691 * applicable for weak-ordered memory model archs, 5692 * such as IA-64). 
5693 */ 5694 wmb(); 5695 5696 tx_ring->next_to_use = i; 5697 } 5698 5699 #define MINIMUM_DHCP_PACKET_SIZE 282 5700 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 5701 struct sk_buff *skb) 5702 { 5703 struct e1000_hw *hw = &adapter->hw; 5704 u16 length, offset; 5705 5706 if (skb_vlan_tag_present(skb) && 5707 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 5708 (adapter->hw.mng_cookie.status & 5709 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 5710 return 0; 5711 5712 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 5713 return 0; 5714 5715 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) 5716 return 0; 5717 5718 { 5719 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); 5720 struct udphdr *udp; 5721 5722 if (ip->protocol != IPPROTO_UDP) 5723 return 0; 5724 5725 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); 5726 if (ntohs(udp->dest) != 67) 5727 return 0; 5728 5729 offset = (u8 *)udp + 8 - skb->data; 5730 length = skb->len - offset; 5731 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); 5732 } 5733 5734 return 0; 5735 } 5736 5737 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5738 { 5739 struct e1000_adapter *adapter = tx_ring->adapter; 5740 5741 netif_stop_queue(adapter->netdev); 5742 /* Herbert's original patch had: 5743 * smp_mb__after_netif_stop_queue(); 5744 * but since that doesn't exist yet, just open code it. 5745 */ 5746 smp_mb(); 5747 5748 /* We need to check again in a case another CPU has just 5749 * made room available. 5750 */ 5751 if (e1000_desc_unused(tx_ring) < size) 5752 return -EBUSY; 5753 5754 /* A reprieve! */ 5755 netif_start_queue(adapter->netdev); 5756 ++adapter->restart_queue; 5757 return 0; 5758 } 5759 5760 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5761 { 5762 BUG_ON(size > tx_ring->count); 5763 5764 if (e1000_desc_unused(tx_ring) >= size) 5765 return 0; 5766 return __e1000_maybe_stop_tx(tx_ring, size); 5767 } 5768 5769 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 5770 struct net_device *netdev) 5771 { 5772 struct e1000_adapter *adapter = netdev_priv(netdev); 5773 struct e1000_ring *tx_ring = adapter->tx_ring; 5774 unsigned int first; 5775 unsigned int tx_flags = 0; 5776 unsigned int len = skb_headlen(skb); 5777 unsigned int nr_frags; 5778 unsigned int mss; 5779 int count = 0; 5780 int tso; 5781 unsigned int f; 5782 __be16 protocol = vlan_get_protocol(skb); 5783 5784 if (test_bit(__E1000_DOWN, &adapter->state)) { 5785 dev_kfree_skb_any(skb); 5786 return NETDEV_TX_OK; 5787 } 5788 5789 if (skb->len <= 0) { 5790 dev_kfree_skb_any(skb); 5791 return NETDEV_TX_OK; 5792 } 5793 5794 /* The minimum packet size with TCTL.PSP set is 17 bytes so 5795 * pad skb in order to meet this minimum size requirement 5796 */ 5797 if (skb_put_padto(skb, 17)) 5798 return NETDEV_TX_OK; 5799 5800 mss = skb_shinfo(skb)->gso_size; 5801 if (mss) { 5802 u8 hdr_len; 5803 5804 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data 5805 * points to just header, pull a few bytes of payload from 5806 * frags into skb->data 5807 */ 5808 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5809 /* we do this workaround for ES2LAN, but it is un-necessary, 5810 * avoiding it could save a lot of cycles 5811 */ 5812 if (skb->data_len && (hdr_len == len)) { 5813 unsigned int pull_size; 5814 5815 pull_size = min_t(unsigned int, 4, skb->data_len); 5816 if (!__pskb_pull_tail(skb, pull_size)) { 5817 e_err("__pskb_pull_tail failed.\n"); 5818 dev_kfree_skb_any(skb); 5819 return 
NETDEV_TX_OK; 5820 } 5821 len = skb_headlen(skb); 5822 } 5823 } 5824 5825 /* reserve a descriptor for the offload context */ 5826 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 5827 count++; 5828 count++; 5829 5830 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); 5831 5832 nr_frags = skb_shinfo(skb)->nr_frags; 5833 for (f = 0; f < nr_frags; f++) 5834 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), 5835 adapter->tx_fifo_limit); 5836 5837 if (adapter->hw.mac.tx_pkt_filtering) 5838 e1000_transfer_dhcp_info(adapter, skb); 5839 5840 /* need: count + 2 desc gap to keep tail from touching 5841 * head, otherwise try next time 5842 */ 5843 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5844 return NETDEV_TX_BUSY; 5845 5846 if (skb_vlan_tag_present(skb)) { 5847 tx_flags |= E1000_TX_FLAGS_VLAN; 5848 tx_flags |= (skb_vlan_tag_get(skb) << 5849 E1000_TX_FLAGS_VLAN_SHIFT); 5850 } 5851 5852 first = tx_ring->next_to_use; 5853 5854 tso = e1000_tso(tx_ring, skb, protocol); 5855 if (tso < 0) { 5856 dev_kfree_skb_any(skb); 5857 return NETDEV_TX_OK; 5858 } 5859 5860 if (tso) 5861 tx_flags |= E1000_TX_FLAGS_TSO; 5862 else if (e1000_tx_csum(tx_ring, skb, protocol)) 5863 tx_flags |= E1000_TX_FLAGS_CSUM; 5864 5865 /* Old method was to assume IPv4 packet by default if TSO was enabled. 5866 * 82571 hardware supports TSO capabilities for IPv6 as well... 5867 * no longer assume, we must. 5868 */ 5869 if (protocol == htons(ETH_P_IP)) 5870 tx_flags |= E1000_TX_FLAGS_IPV4; 5871 5872 if (unlikely(skb->no_fcs)) 5873 tx_flags |= E1000_TX_FLAGS_NO_FCS; 5874 5875 /* if count is 0 then mapping error has occurred */ 5876 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, 5877 nr_frags); 5878 if (count) { 5879 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 5880 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { 5881 if (!adapter->tx_hwtstamp_skb) { 5882 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 5883 tx_flags |= E1000_TX_FLAGS_HWTSTAMP; 5884 adapter->tx_hwtstamp_skb = skb_get(skb); 5885 adapter->tx_hwtstamp_start = jiffies; 5886 schedule_work(&adapter->tx_hwtstamp_work); 5887 } else { 5888 adapter->tx_hwtstamp_skipped++; 5889 } 5890 } 5891 5892 skb_tx_timestamp(skb); 5893 5894 netdev_sent_queue(netdev, skb->len); 5895 e1000_tx_queue(tx_ring, tx_flags, count); 5896 /* Make sure there is space in the ring for the next send. 
*/ 5897 e1000_maybe_stop_tx(tx_ring, 5898 (MAX_SKB_FRAGS * 5899 DIV_ROUND_UP(PAGE_SIZE, 5900 adapter->tx_fifo_limit) + 2)); 5901 5902 if (!skb->xmit_more || 5903 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 5904 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 5905 e1000e_update_tdt_wa(tx_ring, 5906 tx_ring->next_to_use); 5907 else 5908 writel(tx_ring->next_to_use, tx_ring->tail); 5909 5910 /* we need this if more than one processor can write 5911 * to our tail at a time, it synchronizes IO on 5912 *IA64/Altix systems 5913 */ 5914 mmiowb(); 5915 } 5916 } else { 5917 dev_kfree_skb_any(skb); 5918 tx_ring->buffer_info[first].time_stamp = 0; 5919 tx_ring->next_to_use = first; 5920 } 5921 5922 return NETDEV_TX_OK; 5923 } 5924 5925 /** 5926 * e1000_tx_timeout - Respond to a Tx Hang 5927 * @netdev: network interface device structure 5928 **/ 5929 static void e1000_tx_timeout(struct net_device *netdev) 5930 { 5931 struct e1000_adapter *adapter = netdev_priv(netdev); 5932 5933 /* Do the reset outside of interrupt context */ 5934 adapter->tx_timeout_count++; 5935 schedule_work(&adapter->reset_task); 5936 } 5937 5938 static void e1000_reset_task(struct work_struct *work) 5939 { 5940 struct e1000_adapter *adapter; 5941 adapter = container_of(work, struct e1000_adapter, reset_task); 5942 5943 /* don't run the task if already down */ 5944 if (test_bit(__E1000_DOWN, &adapter->state)) 5945 return; 5946 5947 if (!(adapter->flags & FLAG_RESTART_NOW)) { 5948 e1000e_dump(adapter); 5949 e_err("Reset adapter unexpectedly\n"); 5950 } 5951 e1000e_reinit_locked(adapter); 5952 } 5953 5954 /** 5955 * e1000_get_stats64 - Get System Network Statistics 5956 * @netdev: network interface device structure 5957 * @stats: rtnl_link_stats64 pointer 5958 * 5959 * Returns the address of the device statistics structure. 
5960 **/ 5961 void e1000e_get_stats64(struct net_device *netdev, 5962 struct rtnl_link_stats64 *stats) 5963 { 5964 struct e1000_adapter *adapter = netdev_priv(netdev); 5965 5966 spin_lock(&adapter->stats64_lock); 5967 e1000e_update_stats(adapter); 5968 /* Fill out the OS statistics structure */ 5969 stats->rx_bytes = adapter->stats.gorc; 5970 stats->rx_packets = adapter->stats.gprc; 5971 stats->tx_bytes = adapter->stats.gotc; 5972 stats->tx_packets = adapter->stats.gptc; 5973 stats->multicast = adapter->stats.mprc; 5974 stats->collisions = adapter->stats.colc; 5975 5976 /* Rx Errors */ 5977 5978 /* RLEC on some newer hardware can be incorrect so build 5979 * our own version based on RUC and ROC 5980 */ 5981 stats->rx_errors = adapter->stats.rxerrc + 5982 adapter->stats.crcerrs + adapter->stats.algnerrc + 5983 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; 5984 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; 5985 stats->rx_crc_errors = adapter->stats.crcerrs; 5986 stats->rx_frame_errors = adapter->stats.algnerrc; 5987 stats->rx_missed_errors = adapter->stats.mpc; 5988 5989 /* Tx Errors */ 5990 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; 5991 stats->tx_aborted_errors = adapter->stats.ecol; 5992 stats->tx_window_errors = adapter->stats.latecol; 5993 stats->tx_carrier_errors = adapter->stats.tncrs; 5994 5995 /* Tx Dropped needs to be maintained elsewhere */ 5996 5997 spin_unlock(&adapter->stats64_lock); 5998 } 5999 6000 /** 6001 * e1000_change_mtu - Change the Maximum Transfer Unit 6002 * @netdev: network interface device structure 6003 * @new_mtu: new value for maximum frame size 6004 * 6005 * Returns 0 on success, negative on failure 6006 **/ 6007 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 6008 { 6009 struct e1000_adapter *adapter = netdev_priv(netdev); 6010 int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 6011 6012 /* Jumbo frame support */ 6013 if ((new_mtu > ETH_DATA_LEN) && 6014 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 6015 e_err("Jumbo Frames not supported.\n"); 6016 return -EINVAL; 6017 } 6018 6019 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ 6020 if ((adapter->hw.mac.type >= e1000_pch2lan) && 6021 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 6022 (new_mtu > ETH_DATA_LEN)) { 6023 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); 6024 return -EINVAL; 6025 } 6026 6027 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 6028 usleep_range(1000, 2000); 6029 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 6030 adapter->max_frame_size = max_frame; 6031 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 6032 netdev->mtu = new_mtu; 6033 6034 pm_runtime_get_sync(netdev->dev.parent); 6035 6036 if (netif_running(netdev)) 6037 e1000e_down(adapter, true); 6038 6039 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 6040 * means we reserve 2 more, this pushes us to allocate from the next 6041 * larger slab size. 6042 * i.e. 
RXBUFFER_2048 --> size-4096 slab 6043 * However with the new *_jumbo_rx* routines, jumbo receives will use 6044 * fragmented skbs 6045 */ 6046 6047 if (max_frame <= 2048) 6048 adapter->rx_buffer_len = 2048; 6049 else 6050 adapter->rx_buffer_len = 4096; 6051 6052 /* adjust allocation if LPE protects us, and we aren't using SBP */ 6053 if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) 6054 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; 6055 6056 if (netif_running(netdev)) 6057 e1000e_up(adapter); 6058 else 6059 e1000e_reset(adapter); 6060 6061 pm_runtime_put_sync(netdev->dev.parent); 6062 6063 clear_bit(__E1000_RESETTING, &adapter->state); 6064 6065 return 0; 6066 } 6067 6068 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 6069 int cmd) 6070 { 6071 struct e1000_adapter *adapter = netdev_priv(netdev); 6072 struct mii_ioctl_data *data = if_mii(ifr); 6073 6074 if (adapter->hw.phy.media_type != e1000_media_type_copper) 6075 return -EOPNOTSUPP; 6076 6077 switch (cmd) { 6078 case SIOCGMIIPHY: 6079 data->phy_id = adapter->hw.phy.addr; 6080 break; 6081 case SIOCGMIIREG: 6082 e1000_phy_read_status(adapter); 6083 6084 switch (data->reg_num & 0x1F) { 6085 case MII_BMCR: 6086 data->val_out = adapter->phy_regs.bmcr; 6087 break; 6088 case MII_BMSR: 6089 data->val_out = adapter->phy_regs.bmsr; 6090 break; 6091 case MII_PHYSID1: 6092 data->val_out = (adapter->hw.phy.id >> 16); 6093 break; 6094 case MII_PHYSID2: 6095 data->val_out = (adapter->hw.phy.id & 0xFFFF); 6096 break; 6097 case MII_ADVERTISE: 6098 data->val_out = adapter->phy_regs.advertise; 6099 break; 6100 case MII_LPA: 6101 data->val_out = adapter->phy_regs.lpa; 6102 break; 6103 case MII_EXPANSION: 6104 data->val_out = adapter->phy_regs.expansion; 6105 break; 6106 case MII_CTRL1000: 6107 data->val_out = adapter->phy_regs.ctrl1000; 6108 break; 6109 case MII_STAT1000: 6110 data->val_out = adapter->phy_regs.stat1000; 6111 break; 6112 case MII_ESTATUS: 6113 data->val_out = adapter->phy_regs.estatus; 6114 break; 6115 default: 6116 return -EIO; 6117 } 6118 break; 6119 case SIOCSMIIREG: 6120 default: 6121 return -EOPNOTSUPP; 6122 } 6123 return 0; 6124 } 6125 6126 /** 6127 * e1000e_hwtstamp_ioctl - control hardware time stamping 6128 * @netdev: network interface device structure 6129 * @ifreq: interface request 6130 * 6131 * Outgoing time stamping can be enabled and disabled. Play nice and 6132 * disable it when requested, although it shouldn't cause any overhead 6133 * when no packet needs it. At most one packet in the queue may be 6134 * marked for time stamping, otherwise it would be impossible to tell 6135 * for sure to which packet the hardware time stamp belongs. 6136 * 6137 * Incoming time stamping has to be configured via the hardware filters. 6138 * Not all combinations are supported, in particular event type has to be 6139 * specified. Matching the kind of event packet is not supported, with the 6140 * exception of "all V2 events regardless of level 2 or 4". 
6141 **/ 6142 static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 6143 { 6144 struct e1000_adapter *adapter = netdev_priv(netdev); 6145 struct hwtstamp_config config; 6146 int ret_val; 6147 6148 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 6149 return -EFAULT; 6150 6151 ret_val = e1000e_config_hwtstamp(adapter, &config); 6152 if (ret_val) 6153 return ret_val; 6154 6155 switch (config.rx_filter) { 6156 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 6157 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 6158 case HWTSTAMP_FILTER_PTP_V2_SYNC: 6159 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 6160 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 6161 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 6162 /* With V2 type filters which specify a Sync or Delay Request, 6163 * Path Delay Request/Response messages are also time stamped 6164 * by hardware so notify the caller the requested packets plus 6165 * some others are time stamped. 6166 */ 6167 config.rx_filter = HWTSTAMP_FILTER_SOME; 6168 break; 6169 default: 6170 break; 6171 } 6172 6173 return copy_to_user(ifr->ifr_data, &config, 6174 sizeof(config)) ? -EFAULT : 0; 6175 } 6176 6177 static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) 6178 { 6179 struct e1000_adapter *adapter = netdev_priv(netdev); 6180 6181 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, 6182 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; 6183 } 6184 6185 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6186 { 6187 switch (cmd) { 6188 case SIOCGMIIPHY: 6189 case SIOCGMIIREG: 6190 case SIOCSMIIREG: 6191 return e1000_mii_ioctl(netdev, ifr, cmd); 6192 case SIOCSHWTSTAMP: 6193 return e1000e_hwtstamp_set(netdev, ifr); 6194 case SIOCGHWTSTAMP: 6195 return e1000e_hwtstamp_get(netdev, ifr); 6196 default: 6197 return -EOPNOTSUPP; 6198 } 6199 } 6200 6201 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) 6202 { 6203 struct e1000_hw *hw = &adapter->hw; 6204 u32 i, mac_reg, wuc; 6205 u16 phy_reg, wuc_enable; 6206 int retval; 6207 6208 /* copy MAC RARs to PHY RARs */ 6209 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 6210 6211 retval = hw->phy.ops.acquire(hw); 6212 if (retval) { 6213 e_err("Could not acquire PHY\n"); 6214 return retval; 6215 } 6216 6217 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ 6218 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 6219 if (retval) 6220 goto release; 6221 6222 /* copy MAC MTA to PHY MTA - only needed for pchlan */ 6223 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 6224 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 6225 hw->phy.ops.write_reg_page(hw, BM_MTA(i), 6226 (u16)(mac_reg & 0xFFFF)); 6227 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, 6228 (u16)((mac_reg >> 16) & 0xFFFF)); 6229 } 6230 6231 /* configure PHY Rx Control register */ 6232 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); 6233 mac_reg = er32(RCTL); 6234 if (mac_reg & E1000_RCTL_UPE) 6235 phy_reg |= BM_RCTL_UPE; 6236 if (mac_reg & E1000_RCTL_MPE) 6237 phy_reg |= BM_RCTL_MPE; 6238 phy_reg &= ~(BM_RCTL_MO_MASK); 6239 if (mac_reg & E1000_RCTL_MO_3) 6240 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 6241 << BM_RCTL_MO_SHIFT); 6242 if (mac_reg & E1000_RCTL_BAM) 6243 phy_reg |= BM_RCTL_BAM; 6244 if (mac_reg & E1000_RCTL_PMCF) 6245 phy_reg |= BM_RCTL_PMCF; 6246 mac_reg = er32(CTRL); 6247 if (mac_reg & E1000_CTRL_RFCE) 6248 phy_reg |= BM_RCTL_RFCE; 6249 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); 6250 6251 wuc = 
E1000_WUC_PME_EN; 6252 if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) 6253 wuc |= E1000_WUC_APME; 6254 6255 /* enable PHY wakeup in MAC register */ 6256 ew32(WUFC, wufc); 6257 ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | 6258 E1000_WUC_PME_STATUS | wuc)); 6259 6260 /* configure and enable PHY wakeup in PHY registers */ 6261 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); 6262 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); 6263 6264 /* activate PHY wakeup */ 6265 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 6266 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 6267 if (retval) 6268 e_err("Could not set PHY Host Wakeup bit\n"); 6269 release: 6270 hw->phy.ops.release(hw); 6271 6272 return retval; 6273 } 6274 6275 static void e1000e_flush_lpic(struct pci_dev *pdev) 6276 { 6277 struct net_device *netdev = pci_get_drvdata(pdev); 6278 struct e1000_adapter *adapter = netdev_priv(netdev); 6279 struct e1000_hw *hw = &adapter->hw; 6280 u32 ret_val; 6281 6282 pm_runtime_get_sync(netdev->dev.parent); 6283 6284 ret_val = hw->phy.ops.acquire(hw); 6285 if (ret_val) 6286 goto fl_out; 6287 6288 pr_info("EEE TX LPI TIMER: %08X\n", 6289 er32(LPIC) >> E1000_LPIC_LPIET_SHIFT); 6290 6291 hw->phy.ops.release(hw); 6292 6293 fl_out: 6294 pm_runtime_put_sync(netdev->dev.parent); 6295 } 6296 6297 static int e1000e_pm_freeze(struct device *dev) 6298 { 6299 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); 6300 struct e1000_adapter *adapter = netdev_priv(netdev); 6301 6302 netif_device_detach(netdev); 6303 6304 if (netif_running(netdev)) { 6305 int count = E1000_CHECK_RESET_COUNT; 6306 6307 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 6308 usleep_range(10000, 20000); 6309 6310 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 6311 6312 /* Quiesce the device without resetting the hardware */ 6313 e1000e_down(adapter, false); 6314 e1000_free_irq(adapter); 6315 } 6316 e1000e_reset_interrupt_capability(adapter); 6317 6318 /* Allow time for pending master requests to run */ 6319 e1000e_disable_pcie_master(&adapter->hw); 6320 6321 return 0; 6322 } 6323 6324 static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) 6325 { 6326 struct net_device *netdev = pci_get_drvdata(pdev); 6327 struct e1000_adapter *adapter = netdev_priv(netdev); 6328 struct e1000_hw *hw = &adapter->hw; 6329 u32 ctrl, ctrl_ext, rctl, status; 6330 /* Runtime suspend should only enable wakeup for link changes */ 6331 u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; 6332 int retval = 0; 6333 6334 status = er32(STATUS); 6335 if (status & E1000_STATUS_LU) 6336 wufc &= ~E1000_WUFC_LNKC; 6337 6338 if (wufc) { 6339 e1000_setup_rctl(adapter); 6340 e1000e_set_rx_mode(netdev); 6341 6342 /* turn on all-multi mode if wake on multicast is enabled */ 6343 if (wufc & E1000_WUFC_MC) { 6344 rctl = er32(RCTL); 6345 rctl |= E1000_RCTL_MPE; 6346 ew32(RCTL, rctl); 6347 } 6348 6349 ctrl = er32(CTRL); 6350 ctrl |= E1000_CTRL_ADVD3WUC; 6351 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 6352 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 6353 ew32(CTRL, ctrl); 6354 6355 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 6356 adapter->hw.phy.media_type == 6357 e1000_media_type_internal_serdes) { 6358 /* keep the laser running in D3 */ 6359 ctrl_ext = er32(CTRL_EXT); 6360 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 6361 ew32(CTRL_EXT, ctrl_ext); 6362 } 6363 6364 if (!runtime) 6365 e1000e_power_up_phy(adapter); 6366 6367 if (adapter->flags & FLAG_IS_ICH) 6368 e1000_suspend_workarounds_ich8lan(&adapter->hw); 6369 6370 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 6371 /* enable wakeup by the PHY */ 6372 retval = e1000_init_phy_wakeup(adapter, wufc); 6373 if (retval) 6374 return retval; 6375 } else { 6376 /* enable wakeup by the MAC */ 6377 ew32(WUFC, wufc); 6378 ew32(WUC, E1000_WUC_PME_EN); 6379 } 6380 } else { 6381 ew32(WUC, 0); 6382 ew32(WUFC, 0); 6383 6384 e1000_power_down_phy(adapter); 6385 } 6386 6387 if (adapter->hw.phy.type == e1000_phy_igp_3) { 6388 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 6389 } else if (hw->mac.type >= e1000_pch_lpt) { 6390 if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) 6391 /* ULP does not support wake from unicast, multicast 6392 * or broadcast. 6393 */ 6394 retval = e1000_enable_ulp_lpt_lp(hw, !runtime); 6395 6396 if (retval) 6397 return retval; 6398 } 6399 6400 /* Ensure that the appropriate bits are set in LPI_CTRL 6401 * for EEE in Sx 6402 */ 6403 if ((hw->phy.type >= e1000_phy_i217) && 6404 adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { 6405 u16 lpi_ctrl = 0; 6406 6407 retval = hw->phy.ops.acquire(hw); 6408 if (!retval) { 6409 retval = e1e_rphy_locked(hw, I82579_LPI_CTRL, 6410 &lpi_ctrl); 6411 if (!retval) { 6412 if (adapter->eee_advert & 6413 hw->dev_spec.ich8lan.eee_lp_ability & 6414 I82579_EEE_100_SUPPORTED) 6415 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; 6416 if (adapter->eee_advert & 6417 hw->dev_spec.ich8lan.eee_lp_ability & 6418 I82579_EEE_1000_SUPPORTED) 6419 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; 6420 6421 retval = e1e_wphy_locked(hw, I82579_LPI_CTRL, 6422 lpi_ctrl); 6423 } 6424 } 6425 hw->phy.ops.release(hw); 6426 } 6427 6428 /* Release control of h/w to f/w. If f/w is AMT enabled, this 6429 * would have already happened in close and is redundant. 6430 */ 6431 e1000e_release_hw_control(adapter); 6432 6433 pci_clear_master(pdev); 6434 6435 /* The pci-e switch on some quad port adapters will report a 6436 * correctable error when the MAC transitions from D0 to D3. To 6437 * prevent this we need to mask off the correctable errors on the 6438 * downstream port of the pci-e switch. 6439 * 6440 * We don't have the associated upstream bridge while assigning 6441 * the PCI device into guest. For example, the KVM on power is 6442 * one of the cases. 
6443 */ 6444 if (adapter->flags & FLAG_IS_QUAD_PORT) { 6445 struct pci_dev *us_dev = pdev->bus->self; 6446 u16 devctl; 6447 6448 if (!us_dev) 6449 return 0; 6450 6451 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); 6452 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, 6453 (devctl & ~PCI_EXP_DEVCTL_CERE)); 6454 6455 pci_save_state(pdev); 6456 pci_prepare_to_sleep(pdev); 6457 6458 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); 6459 } 6460 6461 return 0; 6462 } 6463 6464 /** 6465 * __e1000e_disable_aspm - Disable ASPM states 6466 * @pdev: pointer to PCI device struct 6467 * @state: bit-mask of ASPM states to disable 6468 * @locked: indication if this context holds pci_bus_sem locked. 6469 * 6470 * Some devices *must* have certain ASPM states disabled per hardware errata. 6471 **/ 6472 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) 6473 { 6474 struct pci_dev *parent = pdev->bus->self; 6475 u16 aspm_dis_mask = 0; 6476 u16 pdev_aspmc, parent_aspmc; 6477 6478 switch (state) { 6479 case PCIE_LINK_STATE_L0S: 6480 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: 6481 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; 6482 /* fall-through - can't have L1 without L0s */ 6483 case PCIE_LINK_STATE_L1: 6484 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; 6485 break; 6486 default: 6487 return; 6488 } 6489 6490 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); 6491 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; 6492 6493 if (parent) { 6494 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, 6495 &parent_aspmc); 6496 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; 6497 } 6498 6499 /* Nothing to do if the ASPM states to be disabled already are */ 6500 if (!(pdev_aspmc & aspm_dis_mask) && 6501 (!parent || !(parent_aspmc & aspm_dis_mask))) 6502 return; 6503 6504 dev_info(&pdev->dev, "Disabling ASPM %s %s\n", 6505 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? 6506 "L0s" : "", 6507 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? 6508 "L1" : ""); 6509 6510 #ifdef CONFIG_PCIEASPM 6511 if (locked) 6512 pci_disable_link_state_locked(pdev, state); 6513 else 6514 pci_disable_link_state(pdev, state); 6515 6516 /* Double-check ASPM control. If not disabled by the above, the 6517 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is 6518 * not enabled); override by writing PCI config space directly. 6519 */ 6520 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); 6521 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; 6522 6523 if (!(aspm_dis_mask & pdev_aspmc)) 6524 return; 6525 #endif 6526 6527 /* Both device and parent should have the same ASPM setting. 6528 * Disable ASPM in downstream component first and then upstream. 6529 */ 6530 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); 6531 6532 if (parent) 6533 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, 6534 aspm_dis_mask); 6535 } 6536 6537 /** 6538 * e1000e_disable_aspm - Disable ASPM states. 6539 * @pdev: pointer to PCI device struct 6540 * @state: bit-mask of ASPM states to disable 6541 * 6542 * This function acquires the pci_bus_sem! 6543 * Some devices *must* have certain ASPM states disabled per hardware errata. 6544 **/ 6545 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 6546 { 6547 __e1000e_disable_aspm(pdev, state, 0); 6548 } 6549 6550 /** 6551 * e1000e_disable_aspm_locked Disable ASPM states. 6552 * @pdev: pointer to PCI device struct 6553 * @state: bit-mask of ASPM states to disable 6554 * 6555 * This function must be called with pci_bus_sem acquired! 
6556 * Some devices *must* have certain ASPM states disabled per hardware errata. 6557 **/ 6558 static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) 6559 { 6560 __e1000e_disable_aspm(pdev, state, 1); 6561 } 6562 6563 #ifdef CONFIG_PM 6564 static int __e1000_resume(struct pci_dev *pdev) 6565 { 6566 struct net_device *netdev = pci_get_drvdata(pdev); 6567 struct e1000_adapter *adapter = netdev_priv(netdev); 6568 struct e1000_hw *hw = &adapter->hw; 6569 u16 aspm_disable_flag = 0; 6570 6571 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 6572 aspm_disable_flag = PCIE_LINK_STATE_L0S; 6573 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 6574 aspm_disable_flag |= PCIE_LINK_STATE_L1; 6575 if (aspm_disable_flag) 6576 e1000e_disable_aspm(pdev, aspm_disable_flag); 6577 6578 pci_set_master(pdev); 6579 6580 if (hw->mac.type >= e1000_pch2lan) 6581 e1000_resume_workarounds_pchlan(&adapter->hw); 6582 6583 e1000e_power_up_phy(adapter); 6584 6585 /* report the system wakeup cause from S3/S4 */ 6586 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 6587 u16 phy_data; 6588 6589 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); 6590 if (phy_data) { 6591 e_info("PHY Wakeup cause - %s\n", 6592 phy_data & E1000_WUS_EX ? "Unicast Packet" : 6593 phy_data & E1000_WUS_MC ? "Multicast Packet" : 6594 phy_data & E1000_WUS_BC ? "Broadcast Packet" : 6595 phy_data & E1000_WUS_MAG ? "Magic Packet" : 6596 phy_data & E1000_WUS_LNKC ? 6597 "Link Status Change" : "other"); 6598 } 6599 e1e_wphy(&adapter->hw, BM_WUS, ~0); 6600 } else { 6601 u32 wus = er32(WUS); 6602 6603 if (wus) { 6604 e_info("MAC Wakeup cause - %s\n", 6605 wus & E1000_WUS_EX ? "Unicast Packet" : 6606 wus & E1000_WUS_MC ? "Multicast Packet" : 6607 wus & E1000_WUS_BC ? "Broadcast Packet" : 6608 wus & E1000_WUS_MAG ? "Magic Packet" : 6609 wus & E1000_WUS_LNKC ? "Link Status Change" : 6610 "other"); 6611 } 6612 ew32(WUS, ~0); 6613 } 6614 6615 e1000e_reset(adapter); 6616 6617 e1000_init_manageability_pt(adapter); 6618 6619 /* If the controller has AMT, do not set DRV_LOAD until the interface 6620 * is up. For all other cases, let the f/w know that the h/w is now 6621 * under the control of the driver. 
6622 */ 6623 if (!(adapter->flags & FLAG_HAS_AMT)) 6624 e1000e_get_hw_control(adapter); 6625 6626 return 0; 6627 } 6628 6629 #ifdef CONFIG_PM_SLEEP 6630 static int e1000e_pm_thaw(struct device *dev) 6631 { 6632 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); 6633 struct e1000_adapter *adapter = netdev_priv(netdev); 6634 6635 e1000e_set_interrupt_capability(adapter); 6636 if (netif_running(netdev)) { 6637 u32 err = e1000_request_irq(adapter); 6638 6639 if (err) 6640 return err; 6641 6642 e1000e_up(adapter); 6643 } 6644 6645 netif_device_attach(netdev); 6646 6647 return 0; 6648 } 6649 6650 static int e1000e_pm_suspend(struct device *dev) 6651 { 6652 struct pci_dev *pdev = to_pci_dev(dev); 6653 int rc; 6654 6655 e1000e_flush_lpic(pdev); 6656 6657 e1000e_pm_freeze(dev); 6658 6659 rc = __e1000_shutdown(pdev, false); 6660 if (rc) 6661 e1000e_pm_thaw(dev); 6662 6663 return rc; 6664 } 6665 6666 static int e1000e_pm_resume(struct device *dev) 6667 { 6668 struct pci_dev *pdev = to_pci_dev(dev); 6669 int rc; 6670 6671 rc = __e1000_resume(pdev); 6672 if (rc) 6673 return rc; 6674 6675 return e1000e_pm_thaw(dev); 6676 } 6677 #endif /* CONFIG_PM_SLEEP */ 6678 6679 static int e1000e_pm_runtime_idle(struct device *dev) 6680 { 6681 struct pci_dev *pdev = to_pci_dev(dev); 6682 struct net_device *netdev = pci_get_drvdata(pdev); 6683 struct e1000_adapter *adapter = netdev_priv(netdev); 6684 u16 eee_lp; 6685 6686 eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability; 6687 6688 if (!e1000e_has_link(adapter)) { 6689 adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp; 6690 pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); 6691 } 6692 6693 return -EBUSY; 6694 } 6695 6696 static int e1000e_pm_runtime_resume(struct device *dev) 6697 { 6698 struct pci_dev *pdev = to_pci_dev(dev); 6699 struct net_device *netdev = pci_get_drvdata(pdev); 6700 struct e1000_adapter *adapter = netdev_priv(netdev); 6701 int rc; 6702 6703 rc = __e1000_resume(pdev); 6704 if (rc) 6705 return rc; 6706 6707 if (netdev->flags & IFF_UP) 6708 e1000e_up(adapter); 6709 6710 return rc; 6711 } 6712 6713 static int e1000e_pm_runtime_suspend(struct device *dev) 6714 { 6715 struct pci_dev *pdev = to_pci_dev(dev); 6716 struct net_device *netdev = pci_get_drvdata(pdev); 6717 struct e1000_adapter *adapter = netdev_priv(netdev); 6718 6719 if (netdev->flags & IFF_UP) { 6720 int count = E1000_CHECK_RESET_COUNT; 6721 6722 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) 6723 usleep_range(10000, 20000); 6724 6725 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 6726 6727 /* Down the device without resetting the hardware */ 6728 e1000e_down(adapter, false); 6729 } 6730 6731 if (__e1000_shutdown(pdev, true)) { 6732 e1000e_pm_runtime_resume(dev); 6733 return -EBUSY; 6734 } 6735 6736 return 0; 6737 } 6738 #endif /* CONFIG_PM */ 6739 6740 static void e1000_shutdown(struct pci_dev *pdev) 6741 { 6742 e1000e_flush_lpic(pdev); 6743 6744 e1000e_pm_freeze(&pdev->dev); 6745 6746 __e1000_shutdown(pdev, false); 6747 } 6748 6749 #ifdef CONFIG_NET_POLL_CONTROLLER 6750 6751 static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) 6752 { 6753 struct net_device *netdev = data; 6754 struct e1000_adapter *adapter = netdev_priv(netdev); 6755 6756 if (adapter->msix_entries) { 6757 int vector, msix_irq; 6758 6759 vector = 0; 6760 msix_irq = adapter->msix_entries[vector].vector; 6761 if (disable_hardirq(msix_irq)) 6762 e1000_intr_msix_rx(msix_irq, netdev); 6763 enable_irq(msix_irq); 6764 6765 vector++; 6766 msix_irq = 
adapter->msix_entries[vector].vector; 6767 if (disable_hardirq(msix_irq)) 6768 e1000_intr_msix_tx(msix_irq, netdev); 6769 enable_irq(msix_irq); 6770 6771 vector++; 6772 msix_irq = adapter->msix_entries[vector].vector; 6773 if (disable_hardirq(msix_irq)) 6774 e1000_msix_other(msix_irq, netdev); 6775 enable_irq(msix_irq); 6776 } 6777 6778 return IRQ_HANDLED; 6779 } 6780 6781 /** 6782 * e1000_netpoll 6783 * @netdev: network interface device structure 6784 * 6785 * Polling 'interrupt' - used by things like netconsole to send skbs 6786 * without having to re-enable interrupts. It's not called while 6787 * the interrupt routine is executing. 6788 */ 6789 static void e1000_netpoll(struct net_device *netdev) 6790 { 6791 struct e1000_adapter *adapter = netdev_priv(netdev); 6792 6793 switch (adapter->int_mode) { 6794 case E1000E_INT_MODE_MSIX: 6795 e1000_intr_msix(adapter->pdev->irq, netdev); 6796 break; 6797 case E1000E_INT_MODE_MSI: 6798 if (disable_hardirq(adapter->pdev->irq)) 6799 e1000_intr_msi(adapter->pdev->irq, netdev); 6800 enable_irq(adapter->pdev->irq); 6801 break; 6802 default: /* E1000E_INT_MODE_LEGACY */ 6803 if (disable_hardirq(adapter->pdev->irq)) 6804 e1000_intr(adapter->pdev->irq, netdev); 6805 enable_irq(adapter->pdev->irq); 6806 break; 6807 } 6808 } 6809 #endif 6810 6811 /** 6812 * e1000_io_error_detected - called when PCI error is detected 6813 * @pdev: Pointer to PCI device 6814 * @state: The current pci connection state 6815 * 6816 * This function is called after a PCI bus error affecting 6817 * this device has been detected. 6818 */ 6819 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 6820 pci_channel_state_t state) 6821 { 6822 struct net_device *netdev = pci_get_drvdata(pdev); 6823 struct e1000_adapter *adapter = netdev_priv(netdev); 6824 6825 netif_device_detach(netdev); 6826 6827 if (state == pci_channel_io_perm_failure) 6828 return PCI_ERS_RESULT_DISCONNECT; 6829 6830 if (netif_running(netdev)) 6831 e1000e_down(adapter, true); 6832 pci_disable_device(pdev); 6833 6834 /* Request a slot slot reset. */ 6835 return PCI_ERS_RESULT_NEED_RESET; 6836 } 6837 6838 /** 6839 * e1000_io_slot_reset - called after the pci bus has been reset. 6840 * @pdev: Pointer to PCI device 6841 * 6842 * Restart the card from scratch, as if from a cold-boot. Implementation 6843 * resembles the first-half of the e1000e_pm_resume routine. 
6844 */ 6845 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 6846 { 6847 struct net_device *netdev = pci_get_drvdata(pdev); 6848 struct e1000_adapter *adapter = netdev_priv(netdev); 6849 struct e1000_hw *hw = &adapter->hw; 6850 u16 aspm_disable_flag = 0; 6851 int err; 6852 pci_ers_result_t result; 6853 6854 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 6855 aspm_disable_flag = PCIE_LINK_STATE_L0S; 6856 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 6857 aspm_disable_flag |= PCIE_LINK_STATE_L1; 6858 if (aspm_disable_flag) 6859 e1000e_disable_aspm_locked(pdev, aspm_disable_flag); 6860 6861 err = pci_enable_device_mem(pdev); 6862 if (err) { 6863 dev_err(&pdev->dev, 6864 "Cannot re-enable PCI device after reset.\n"); 6865 result = PCI_ERS_RESULT_DISCONNECT; 6866 } else { 6867 pdev->state_saved = true; 6868 pci_restore_state(pdev); 6869 pci_set_master(pdev); 6870 6871 pci_enable_wake(pdev, PCI_D3hot, 0); 6872 pci_enable_wake(pdev, PCI_D3cold, 0); 6873 6874 e1000e_reset(adapter); 6875 ew32(WUS, ~0); 6876 result = PCI_ERS_RESULT_RECOVERED; 6877 } 6878 6879 pci_cleanup_aer_uncorrect_error_status(pdev); 6880 6881 return result; 6882 } 6883 6884 /** 6885 * e1000_io_resume - called when traffic can start flowing again. 6886 * @pdev: Pointer to PCI device 6887 * 6888 * This callback is called when the error recovery driver tells us that 6889 * its OK to resume normal operation. Implementation resembles the 6890 * second-half of the e1000e_pm_resume routine. 6891 */ 6892 static void e1000_io_resume(struct pci_dev *pdev) 6893 { 6894 struct net_device *netdev = pci_get_drvdata(pdev); 6895 struct e1000_adapter *adapter = netdev_priv(netdev); 6896 6897 e1000_init_manageability_pt(adapter); 6898 6899 if (netif_running(netdev)) 6900 e1000e_up(adapter); 6901 6902 netif_device_attach(netdev); 6903 6904 /* If the controller has AMT, do not set DRV_LOAD until the interface 6905 * is up. For all other cases, let the f/w know that the h/w is now 6906 * under the control of the driver. 6907 */ 6908 if (!(adapter->flags & FLAG_HAS_AMT)) 6909 e1000e_get_hw_control(adapter); 6910 } 6911 6912 static void e1000_print_device_info(struct e1000_adapter *adapter) 6913 { 6914 struct e1000_hw *hw = &adapter->hw; 6915 struct net_device *netdev = adapter->netdev; 6916 u32 ret_val; 6917 u8 pba_str[E1000_PBANUM_LENGTH]; 6918 6919 /* print bus type/speed/width info */ 6920 e_info("(PCI Express:2.5GT/s:%s) %pM\n", 6921 /* bus width */ 6922 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 6923 "Width x1"), 6924 /* MAC address */ 6925 netdev->dev_addr); 6926 e_info("Intel(R) PRO/%s Network Connection\n", 6927 (hw->phy.type == e1000_phy_ife) ? 
"10/100" : "1000"); 6928 ret_val = e1000_read_pba_string_generic(hw, pba_str, 6929 E1000_PBANUM_LENGTH); 6930 if (ret_val) 6931 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); 6932 e_info("MAC: %d, PHY: %d, PBA No: %s\n", 6933 hw->mac.type, hw->phy.type, pba_str); 6934 } 6935 6936 static void e1000_eeprom_checks(struct e1000_adapter *adapter) 6937 { 6938 struct e1000_hw *hw = &adapter->hw; 6939 int ret_val; 6940 u16 buf = 0; 6941 6942 if (hw->mac.type != e1000_82573) 6943 return; 6944 6945 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); 6946 le16_to_cpus(&buf); 6947 if (!ret_val && (!(buf & BIT(0)))) { 6948 /* Deep Smart Power Down (DSPD) */ 6949 dev_warn(&adapter->pdev->dev, 6950 "Warning: detected DSPD enabled in EEPROM\n"); 6951 } 6952 } 6953 6954 static netdev_features_t e1000_fix_features(struct net_device *netdev, 6955 netdev_features_t features) 6956 { 6957 struct e1000_adapter *adapter = netdev_priv(netdev); 6958 struct e1000_hw *hw = &adapter->hw; 6959 6960 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ 6961 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) 6962 features &= ~NETIF_F_RXFCS; 6963 6964 /* Since there is no support for separate Rx/Tx vlan accel 6965 * enable/disable make sure Tx flag is always in same state as Rx. 6966 */ 6967 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6968 features |= NETIF_F_HW_VLAN_CTAG_TX; 6969 else 6970 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 6971 6972 return features; 6973 } 6974 6975 static int e1000_set_features(struct net_device *netdev, 6976 netdev_features_t features) 6977 { 6978 struct e1000_adapter *adapter = netdev_priv(netdev); 6979 netdev_features_t changed = features ^ netdev->features; 6980 6981 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) 6982 adapter->flags |= FLAG_TSO_FORCE; 6983 6984 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | 6985 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | 6986 NETIF_F_RXALL))) 6987 return 0; 6988 6989 if (changed & NETIF_F_RXFCS) { 6990 if (features & NETIF_F_RXFCS) { 6991 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; 6992 } else { 6993 /* We need to take it back to defaults, which might mean 6994 * stripping is still disabled at the adapter level. 
6995 */ 6996 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) 6997 adapter->flags2 |= FLAG2_CRC_STRIPPING; 6998 else 6999 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; 7000 } 7001 } 7002 7003 netdev->features = features; 7004 7005 if (netif_running(netdev)) 7006 e1000e_reinit_locked(adapter); 7007 else 7008 e1000e_reset(adapter); 7009 7010 return 0; 7011 } 7012 7013 static const struct net_device_ops e1000e_netdev_ops = { 7014 .ndo_open = e1000e_open, 7015 .ndo_stop = e1000e_close, 7016 .ndo_start_xmit = e1000_xmit_frame, 7017 .ndo_get_stats64 = e1000e_get_stats64, 7018 .ndo_set_rx_mode = e1000e_set_rx_mode, 7019 .ndo_set_mac_address = e1000_set_mac, 7020 .ndo_change_mtu = e1000_change_mtu, 7021 .ndo_do_ioctl = e1000_ioctl, 7022 .ndo_tx_timeout = e1000_tx_timeout, 7023 .ndo_validate_addr = eth_validate_addr, 7024 7025 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, 7026 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, 7027 #ifdef CONFIG_NET_POLL_CONTROLLER 7028 .ndo_poll_controller = e1000_netpoll, 7029 #endif 7030 .ndo_set_features = e1000_set_features, 7031 .ndo_fix_features = e1000_fix_features, 7032 .ndo_features_check = passthru_features_check, 7033 }; 7034 7035 /** 7036 * e1000_probe - Device Initialization Routine 7037 * @pdev: PCI device information struct 7038 * @ent: entry in e1000_pci_tbl 7039 * 7040 * Returns 0 on success, negative on failure 7041 * 7042 * e1000_probe initializes an adapter identified by a pci_dev structure. 7043 * The OS initialization, configuring of the adapter private structure, 7044 * and a hardware reset occur. 7045 **/ 7046 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 7047 { 7048 struct net_device *netdev; 7049 struct e1000_adapter *adapter; 7050 struct e1000_hw *hw; 7051 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 7052 resource_size_t mmio_start, mmio_len; 7053 resource_size_t flash_start, flash_len; 7054 static int cards_found; 7055 u16 aspm_disable_flag = 0; 7056 int bars, i, err, pci_using_dac; 7057 u16 eeprom_data = 0; 7058 u16 eeprom_apme_mask = E1000_EEPROM_APME; 7059 s32 ret_val = 0; 7060 7061 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) 7062 aspm_disable_flag = PCIE_LINK_STATE_L0S; 7063 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) 7064 aspm_disable_flag |= PCIE_LINK_STATE_L1; 7065 if (aspm_disable_flag) 7066 e1000e_disable_aspm(pdev, aspm_disable_flag); 7067 7068 err = pci_enable_device_mem(pdev); 7069 if (err) 7070 return err; 7071 7072 pci_using_dac = 0; 7073 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7074 if (!err) { 7075 pci_using_dac = 1; 7076 } else { 7077 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7078 if (err) { 7079 dev_err(&pdev->dev, 7080 "No usable DMA configuration, aborting\n"); 7081 goto err_dma; 7082 } 7083 } 7084 7085 bars = pci_select_bars(pdev, IORESOURCE_MEM); 7086 err = pci_request_selected_regions_exclusive(pdev, bars, 7087 e1000e_driver_name); 7088 if (err) 7089 goto err_pci_reg; 7090 7091 /* AER (Advanced Error Reporting) hooks */ 7092 pci_enable_pcie_error_reporting(pdev); 7093 7094 pci_set_master(pdev); 7095 /* PCI config space info */ 7096 err = pci_save_state(pdev); 7097 if (err) 7098 goto err_alloc_etherdev; 7099 7100 err = -ENOMEM; 7101 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 7102 if (!netdev) 7103 goto err_alloc_etherdev; 7104 7105 SET_NETDEV_DEV(netdev, &pdev->dev); 7106 7107 netdev->irq = pdev->irq; 7108 7109 pci_set_drvdata(pdev, netdev); 7110 adapter = netdev_priv(netdev); 7111 hw = &adapter->hw; 7112 adapter->netdev = 
netdev; 7113 adapter->pdev = pdev; 7114 adapter->ei = ei; 7115 adapter->pba = ei->pba; 7116 adapter->flags = ei->flags; 7117 adapter->flags2 = ei->flags2; 7118 adapter->hw.adapter = adapter; 7119 adapter->hw.mac.type = ei->mac; 7120 adapter->max_hw_frame_size = ei->max_hw_frame_size; 7121 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 7122 7123 mmio_start = pci_resource_start(pdev, 0); 7124 mmio_len = pci_resource_len(pdev, 0); 7125 7126 err = -EIO; 7127 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 7128 if (!adapter->hw.hw_addr) 7129 goto err_ioremap; 7130 7131 if ((adapter->flags & FLAG_HAS_FLASH) && 7132 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && 7133 (hw->mac.type < e1000_pch_spt)) { 7134 flash_start = pci_resource_start(pdev, 1); 7135 flash_len = pci_resource_len(pdev, 1); 7136 adapter->hw.flash_address = ioremap(flash_start, flash_len); 7137 if (!adapter->hw.flash_address) 7138 goto err_flashmap; 7139 } 7140 7141 /* Set default EEE advertisement */ 7142 if (adapter->flags2 & FLAG2_HAS_EEE) 7143 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; 7144 7145 /* construct the net_device struct */ 7146 netdev->netdev_ops = &e1000e_netdev_ops; 7147 e1000e_set_ethtool_ops(netdev); 7148 netdev->watchdog_timeo = 5 * HZ; 7149 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); 7150 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 7151 7152 netdev->mem_start = mmio_start; 7153 netdev->mem_end = mmio_start + mmio_len; 7154 7155 adapter->bd_number = cards_found++; 7156 7157 e1000e_check_options(adapter); 7158 7159 /* setup adapter struct */ 7160 err = e1000_sw_init(adapter); 7161 if (err) 7162 goto err_sw_init; 7163 7164 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 7165 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 7166 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 7167 7168 err = ei->get_variants(adapter); 7169 if (err) 7170 goto err_hw_init; 7171 7172 if ((adapter->flags & FLAG_IS_ICH) && 7173 (adapter->flags & FLAG_READ_ONLY_NVM) && 7174 (hw->mac.type < e1000_pch_spt)) 7175 e1000e_write_protect_nvm_ich8lan(&adapter->hw); 7176 7177 hw->mac.ops.get_bus_info(&adapter->hw); 7178 7179 adapter->hw.phy.autoneg_wait_to_complete = 0; 7180 7181 /* Copper options */ 7182 if (adapter->hw.phy.media_type == e1000_media_type_copper) { 7183 adapter->hw.phy.mdix = AUTO_ALL_MODES; 7184 adapter->hw.phy.disable_polarity_correction = 0; 7185 adapter->hw.phy.ms_type = e1000_ms_hw_default; 7186 } 7187 7188 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) 7189 dev_info(&pdev->dev, 7190 "PHY reset is blocked due to SOL/IDER session.\n"); 7191 7192 /* Set initial default active device features */ 7193 netdev->features = (NETIF_F_SG | 7194 NETIF_F_HW_VLAN_CTAG_RX | 7195 NETIF_F_HW_VLAN_CTAG_TX | 7196 NETIF_F_TSO | 7197 NETIF_F_TSO6 | 7198 NETIF_F_RXHASH | 7199 NETIF_F_RXCSUM | 7200 NETIF_F_HW_CSUM); 7201 7202 /* Set user-changeable features (subset of all device features) */ 7203 netdev->hw_features = netdev->features; 7204 netdev->hw_features |= NETIF_F_RXFCS; 7205 netdev->priv_flags |= IFF_SUPP_NOFCS; 7206 netdev->hw_features |= NETIF_F_RXALL; 7207 7208 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) 7209 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 7210 7211 netdev->vlan_features |= (NETIF_F_SG | 7212 NETIF_F_TSO | 7213 NETIF_F_TSO6 | 7214 NETIF_F_HW_CSUM); 7215 7216 netdev->priv_flags |= IFF_UNICAST_FLT; 7217 7218 if (pci_using_dac) { 7219 netdev->features |= NETIF_F_HIGHDMA; 7220 netdev->vlan_features |= 
NETIF_F_HIGHDMA; 7221 } 7222 7223 /* MTU range: 68 - max_hw_frame_size */ 7224 netdev->min_mtu = ETH_MIN_MTU; 7225 netdev->max_mtu = adapter->max_hw_frame_size - 7226 (VLAN_ETH_HLEN + ETH_FCS_LEN); 7227 7228 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 7229 adapter->flags |= FLAG_MNG_PT_ENABLED; 7230 7231 /* before reading the NVM, reset the controller to 7232 * put the device in a known good starting state 7233 */ 7234 adapter->hw.mac.ops.reset_hw(&adapter->hw); 7235 7236 /* systems with ASPM and others may see the checksum fail on the first 7237 * attempt. Let's give it a few tries 7238 */ 7239 for (i = 0;; i++) { 7240 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 7241 break; 7242 if (i == 2) { 7243 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 7244 err = -EIO; 7245 goto err_eeprom; 7246 } 7247 } 7248 7249 e1000_eeprom_checks(adapter); 7250 7251 /* copy the MAC address */ 7252 if (e1000e_read_mac_addr(&adapter->hw)) 7253 dev_err(&pdev->dev, 7254 "NVM Read Error while reading MAC address\n"); 7255 7256 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 7257 7258 if (!is_valid_ether_addr(netdev->dev_addr)) { 7259 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", 7260 netdev->dev_addr); 7261 err = -EIO; 7262 goto err_eeprom; 7263 } 7264 7265 timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); 7266 timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); 7267 7268 INIT_WORK(&adapter->reset_task, e1000_reset_task); 7269 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 7270 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 7271 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 7272 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); 7273 7274 /* Initialize link parameters. 
User can change them with ethtool */ 7275 adapter->hw.mac.autoneg = 1; 7276 adapter->fc_autoneg = true; 7277 adapter->hw.fc.requested_mode = e1000_fc_default; 7278 adapter->hw.fc.current_mode = e1000_fc_default; 7279 adapter->hw.phy.autoneg_advertised = 0x2f; 7280 7281 /* Initial Wake on LAN setting - If APM wake is enabled in 7282 * the EEPROM, enable the ACPI Magic Packet filter 7283 */ 7284 if (adapter->flags & FLAG_APME_IN_WUC) { 7285 /* APME bit in EEPROM is mapped to WUC.APME */ 7286 eeprom_data = er32(WUC); 7287 eeprom_apme_mask = E1000_WUC_APME; 7288 if ((hw->mac.type > e1000_ich10lan) && 7289 (eeprom_data & E1000_WUC_PHY_WAKE)) 7290 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 7291 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 7292 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 7293 (adapter->hw.bus.func == 1)) 7294 ret_val = e1000_read_nvm(&adapter->hw, 7295 NVM_INIT_CONTROL3_PORT_B, 7296 1, &eeprom_data); 7297 else 7298 ret_val = e1000_read_nvm(&adapter->hw, 7299 NVM_INIT_CONTROL3_PORT_A, 7300 1, &eeprom_data); 7301 } 7302 7303 /* fetch WoL from EEPROM */ 7304 if (ret_val) 7305 e_dbg("NVM read error getting WoL initial values: %d\n", ret_val); 7306 else if (eeprom_data & eeprom_apme_mask) 7307 adapter->eeprom_wol |= E1000_WUFC_MAG; 7308 7309 /* now that we have the eeprom settings, apply the special cases 7310 * where the eeprom may be wrong or the board simply won't support 7311 * wake on lan on a particular port 7312 */ 7313 if (!(adapter->flags & FLAG_HAS_WOL)) 7314 adapter->eeprom_wol = 0; 7315 7316 /* initialize the wol settings based on the eeprom settings */ 7317 adapter->wol = adapter->eeprom_wol; 7318 7319 /* make sure adapter isn't asleep if manageability is enabled */ 7320 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || 7321 (hw->mac.ops.check_mng_mode(hw))) 7322 device_wakeup_enable(&pdev->dev); 7323 7324 /* save off EEPROM version number */ 7325 ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 7326 7327 if (ret_val) { 7328 e_dbg("NVM read error getting EEPROM version: %d\n", ret_val); 7329 adapter->eeprom_vers = 0; 7330 } 7331 7332 /* init PTP hardware clock */ 7333 e1000e_ptp_init(adapter); 7334 7335 /* reset the hardware with the new settings */ 7336 e1000e_reset(adapter); 7337 7338 /* If the controller has AMT, do not set DRV_LOAD until the interface 7339 * is up. For all other cases, let the f/w know that the h/w is now 7340 * under the control of the driver. 
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	e1000e_ptp_remove(adapter);

	/* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		cancel_work_sync(&adapter->tx_hwtstamp_work);
		if (adapter->tx_hwtstamp_skb) {
			dev_consume_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if ((adapter->hw.flash_address) &&
	    (adapter->hw.mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static const struct pci_device_id e1000_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
	  board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

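/* system sleep callbacks are compiled in only when CONFIG_PM_SLEEP is set;
 * runtime PM callbacks are always provided via SET_RUNTIME_PM_OPS()
 */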
static const struct dev_pm_ops e1000_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = e1000e_pm_suspend,
	.resume = e1000e_pm_resume,
	.freeze = e1000e_pm_freeze,
	.thaw = e1000e_pm_thaw,
	.poweroff = e1000e_pm_suspend,
	.restore = e1000e_pm_resume,
#endif
	SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
			   e1000e_pm_runtime_idle)
};

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");

	return pci_register_driver(&e1000_driver);
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */