1 /******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2014 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 26 27 *******************************************************************************/ 28 29 #include <linux/types.h> 30 #include <linux/module.h> 31 #include <linux/pci.h> 32 #include <linux/netdevice.h> 33 #include <linux/vmalloc.h> 34 #include <linux/string.h> 35 #include <linux/in.h> 36 #include <linux/interrupt.h> 37 #include <linux/ip.h> 38 #include <linux/tcp.h> 39 #include <linux/sctp.h> 40 #include <linux/pkt_sched.h> 41 #include <linux/ipv6.h> 42 #include <linux/slab.h> 43 #include <net/checksum.h> 44 #include <net/ip6_checksum.h> 45 #include <linux/ethtool.h> 46 #include <linux/if.h> 47 #include <linux/if_vlan.h> 48 #include <linux/if_macvlan.h> 49 #include <linux/if_bridge.h> 50 #include <linux/prefetch.h> 51 #include <scsi/fc/fc_fcoe.h> 52 53 #include "ixgbe.h" 54 #include "ixgbe_common.h" 55 #include "ixgbe_dcb_82599.h" 56 #include "ixgbe_sriov.h" 57 58 char ixgbe_driver_name[] = "ixgbe"; 59 static const char ixgbe_driver_string[] = 60 "Intel(R) 10 Gigabit PCI Express Network Driver"; 61 #ifdef IXGBE_FCOE 62 char ixgbe_default_device_descr[] = 63 "Intel(R) 10 Gigabit Network Connection"; 64 #else 65 static char ixgbe_default_device_descr[] = 66 "Intel(R) 10 Gigabit Network Connection"; 67 #endif 68 #define DRV_VERSION "3.19.1-k" 69 const char ixgbe_driver_version[] = DRV_VERSION; 70 static const char ixgbe_copyright[] = 71 "Copyright (c) 1999-2014 Intel Corporation."; 72 73 static const struct ixgbe_info *ixgbe_info_tbl[] = { 74 [board_82598] = &ixgbe_82598_info, 75 [board_82599] = &ixgbe_82599_info, 76 [board_X540] = &ixgbe_X540_info, 77 }; 78 79 /* ixgbe_pci_tbl - PCI Device ID Table 80 * 81 * Wildcard entries (PCI_ANY_ID) should come last 82 * Last entry must be all 0s 83 * 84 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 85 * Class, Class Mask, private data (not used) } 86 */ 87 static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { 88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, 89 
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, 90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, 91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, 92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, 93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, 94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, 95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, 96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, 97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, 98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, 99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, 100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, 101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, 102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, 103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, 104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, 105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, 106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, 107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, 108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, 109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, 110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, 111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, 112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, 113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, 114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, 115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, 116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, 117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, 118 /* required last entry */ 119 {0, } 120 }; 
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next = NULL,
	.priority = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);

/* Read a word from the PCIe capability of the device's parent (the upstream
 * switch/bridge port).  Returns 0 on success, -1 if there is no usable PCIe
 * parent or the read indicates the parent has been surprise-removed.
 */
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	/* all-ones read means the config space is gone (device removed) */
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

/* Fill in hw->bus width/speed from the PCIe link status of the parent bus,
 * for devices that sit behind an on-board switch.
 */
static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
	/* NOTE(review): offset 18 appears to be PCI_EXP_LNKSTA (0x12) —
	 * confirm and prefer the named constant over the magic number.
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

/* Log the available PCIe bandwidth and warn if the slot provides less than
 * @expected_gts GT/s of usable throughput for this adapter.
 */
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
		   max_gts);
	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		    "Unknown"),
		   width,
		   (speed == PCIE_SPEED_2_5GT ? "20%" :
		    speed == PCIE_SPEED_5_0GT ? "20%" :
		    speed == PCIE_SPEED_8_0GT ? "<2%" :
		    "Unknown"));

	if (max_gts < expected_gts) {
		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
			expected_gts);
		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
	}
}

/* Queue the service task unless the adapter is going down / being removed.
 * __IXGBE_SERVICE_SCHED guards against double-scheduling.
 */
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

/* Mark the adapter as surprise-removed: clear hw_addr so subsequent register
 * access is short-circuited, then kick the service task to clean up.
 */
static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

/* Called after a register read returned all-ones; confirm removal by reading
 * the STATUS register (unless that was the register just read).
 */
void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_STATUS) {
		ixgbe_remove_adapter(hw);
		return;
	}
	value = ixgbe_read_reg(hw, IXGBE_STATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
}

/* Probe PCI_VENDOR_ID of @pdev; an all-ones read means the device is gone,
 * in which case tear down the adapter and return true.
 */
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

/* Removal-safe PCI config word read; returns IXGBE_FAILED_READ_CFG_WORD if
 * the device is (or becomes) detached.
 */
u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
/* Removal-safe PCI config dword read; dword analogue of the word helper. */
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */

/* Removal-safe PCI config word write; silently dropped if device is gone. */
void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

/* Release the service-task scheduling bit at the end of a service run. */
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

/* register offset + printable name, for the debug register dump below */
struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};


/*
 * ixgbe_regdump - register printout routine
 *
 * For per-queue registers all 64 instances are read and printed 8 per row;
 * anything else is printed as a single name/value pair.
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 *
 * Verbosity is gated by the adapter's netif_msg_* level: hw enables the
 * register dump and ring summaries, tx_done/rx_status enable the full ring
 * dumps, and pktdata additionally hex-dumps buffer contents.
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;	/* raw view of a descriptor */
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name state "
			"trans_start last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s %s %s %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * 82598 Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN | POPTS | IDX | STA | DCMD |DTYP |  RSV |  DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31  24 23 20 19              0
	 *
	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 * 0 |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |          NXTSEQ           |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 *
	 * 82599+ Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
	 *
	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 * 0 |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |           RSV             |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s %s %s %s %s\n",
			"T [desc] [address 63:0 ] ",
			"[PlPOIdStDDt Ln] [bi->dma ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb);
				if (i == tx_ring->next_to_use &&
				    i == tx_ring->next_to_clean)
					pr_cont(" NTC/U\n");
				else if (i == tx_ring->next_to_use)
					pr_cont(" NTU\n");
				else if (i == tx_ring->next_to_clean)
					pr_cont(" NTC\n");
				else
					pr_cont("\n");

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						tx_buffer->skb->data,
						dma_unmap_len(tx_buffer, len),
						true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * 82598 Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * 82598 Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
	 *   | Packet   | IP |     |         |     | Type | Type |
	 *   | Checksum | Ident |  |         |     |      |      |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 *
	 * 82599+ Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
	 *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
	 *   |/ Flow Dir Flt ID  |   |          |     |      |      |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31          20 19                 0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s",
			"R [desc] [ PktBuf A0] ",
			"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
			"<-- Adv Rx Read format\n");
		pr_info("%s%s%s",
			"RWB[desc] [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb ] ",
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X] %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R [0x%03X] %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

/* Let firmware take over control of the hardware (clear DRV_LOAD). */
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

/* Tell firmware the driver has taken over the hardware (set DRV_LOAD). */
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 * The IVAR layout differs per MAC generation: 82598 packs four 8-bit entries
 * per register, 82599/X540 use separate misc vs. Rx/Tx register encodings.
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

/* Trigger software interrupts (EICS) for the queues in @qmask. */
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* 64 queue bits are split across two EICS registers */
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

/* Free the skb (if any) and DMA mapping held by a Tx buffer, then reset its
 * bookkeeping fields.  Head fragments are dma_unmap_single'd, paged
 * fragments dma_unmap_page'd.
 */
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/* Account link-level (802.3x) XOFF frames and disarm the Tx hang check on
 * all queues if any were received; only relevant in full/rx-pause modes.
 */
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

/* Account per-priority (PFC) XOFF frames and disarm the Tx hang check on
 * queues whose traffic class received pause; falls back to the link-level
 * path when DCB/PFC is not active.
 */
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

/* Packets completed on this ring so far (from the ring's stats). */
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

/* Number of descriptors still queued in hardware (TDH/TDT distance),
 * accounting for ring wrap; 0 when head == tail.
 */
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	u32 head, tail;

	/* rings owned by a macvlan offload keep the adapter elsewhere */
	if (ring->l2_accel_priv)
		adapter = ring->l2_accel_priv->real_adapter;
	else
		adapter = netdev_priv(ring->netdev);

	hw = &adapter->hw;
	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/* Detect a hung Tx queue: no completions since the last check while work is
 * still pending, twice in a row (the ARMED bit supplies the "twice").
 */
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	/* i is kept negative-offset from the ring end so "!i" detects wrap */
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
1135 1136 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { 1137 /* schedule immediate reset if we believe we hung */ 1138 struct ixgbe_hw *hw = &adapter->hw; 1139 e_err(drv, "Detected Tx Unit Hang\n" 1140 " Tx Queue <%d>\n" 1141 " TDH, TDT <%x>, <%x>\n" 1142 " next_to_use <%x>\n" 1143 " next_to_clean <%x>\n" 1144 "tx_buffer_info[next_to_clean]\n" 1145 " time_stamp <%lx>\n" 1146 " jiffies <%lx>\n", 1147 tx_ring->queue_index, 1148 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), 1149 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), 1150 tx_ring->next_to_use, i, 1151 tx_ring->tx_buffer_info[i].time_stamp, jiffies); 1152 1153 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 1154 1155 e_info(probe, 1156 "tx hang %d detected on queue %d, resetting adapter\n", 1157 adapter->tx_timeout_count + 1, tx_ring->queue_index); 1158 1159 /* schedule immediate reset if we believe we hung */ 1160 ixgbe_tx_timeout_reset(adapter); 1161 1162 /* the adapter is about to reset, no point in enabling stuff */ 1163 return true; 1164 } 1165 1166 netdev_tx_completed_queue(txring_txq(tx_ring), 1167 total_packets, total_bytes); 1168 1169 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 1170 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 1171 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 1172 /* Make sure that anybody stopping the queue after this 1173 * sees the new next_to_clean. 
1174 */ 1175 smp_mb(); 1176 if (__netif_subqueue_stopped(tx_ring->netdev, 1177 tx_ring->queue_index) 1178 && !test_bit(__IXGBE_DOWN, &adapter->state)) { 1179 netif_wake_subqueue(tx_ring->netdev, 1180 tx_ring->queue_index); 1181 ++tx_ring->tx_stats.restart_queue; 1182 } 1183 } 1184 1185 return !!budget; 1186 } 1187 1188 #ifdef CONFIG_IXGBE_DCA 1189 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 1190 struct ixgbe_ring *tx_ring, 1191 int cpu) 1192 { 1193 struct ixgbe_hw *hw = &adapter->hw; 1194 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); 1195 u16 reg_offset; 1196 1197 switch (hw->mac.type) { 1198 case ixgbe_mac_82598EB: 1199 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); 1200 break; 1201 case ixgbe_mac_82599EB: 1202 case ixgbe_mac_X540: 1203 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); 1204 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; 1205 break; 1206 default: 1207 /* for unknown hardware do not write register */ 1208 return; 1209 } 1210 1211 /* 1212 * We can enable relaxed ordering for reads, but not writes when 1213 * DCA is enabled. This is due to a known issue in some chipsets 1214 * which will cause the DCA tag to be cleared. 1215 */ 1216 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | 1217 IXGBE_DCA_TXCTRL_DATA_RRO_EN | 1218 IXGBE_DCA_TXCTRL_DESC_DCA_EN; 1219 1220 IXGBE_WRITE_REG(hw, reg_offset, txctrl); 1221 } 1222 1223 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 1224 struct ixgbe_ring *rx_ring, 1225 int cpu) 1226 { 1227 struct ixgbe_hw *hw = &adapter->hw; 1228 u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); 1229 u8 reg_idx = rx_ring->reg_idx; 1230 1231 1232 switch (hw->mac.type) { 1233 case ixgbe_mac_82599EB: 1234 case ixgbe_mac_X540: 1235 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; 1236 break; 1237 default: 1238 break; 1239 } 1240 1241 /* 1242 * We can enable relaxed ordering for reads, but not writes when 1243 * DCA is enabled. 
	 * This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

/* retarget DCA tags for every ring of this q_vector to the current cpu */
static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	/* nothing to do if we already target this cpu */
	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

/* enable DCA and (re)tag every q_vector; no-op unless the flag is set */
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		/* force a retag on the next ixgbe_update_dca() call */
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

/* DCA provider add/remove notification callback */
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/* copy the hardware RSS hash into the skb when RXHASH is enabled */
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	/* ring must be flagged for FCoE and the ETQF filter must match */
	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* hardware did not checksum the L4 payload; leave CHECKSUM_NONE */
	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* publish @val as the new next_to_use and notify hardware via the tail */
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	ixgbe_write_tail(rx_ring, val);
}

/* ensure @bi holds a DMA-mapped page; returns false on alloc/map failure */
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(dma))
		return true;

	/* alloc new page for storage */
	if (likely(!page)) {
		page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
					 bi->skb, ixgbe_rx_pg_order(rx_ring));
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_rx_page_failed++;
			return false;
		}
		bi->page = page;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
		bi->page = NULL;

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	/* offset i from the ring end so the wrap test is a cheap !i */
	i -= rx_ring->count;

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i)
		ixgbe_release_rx_desc(rx_ring, i);
}

/**
 * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_len: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int ixgbe_get_headlen(unsigned char *data,
				      unsigned int max_len)
{
	/* union cursor: advances through the frame as a byte pointer while
	 * allowing typed access at each recognized header layer */
	union {
		unsigned char *network;
		/* l2 headers */
		struct ethhdr *eth;
		struct vlan_hdr *vlan;
		/* l3 headers */
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	__be16 protocol;
	u8 nexthdr = 0;	/* default to not TCP */
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_len < ETH_HLEN)
		return max_len;

	/* initialize network frame pointer */
	hdr.network = data;

	/* set first protocol and move network header forward */
	protocol = hdr.eth->h_proto;
	hdr.network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (protocol == htons(ETH_P_8021Q)) {
		if ((hdr.network - data) > (max_len - VLAN_HLEN))
			return max_len;

		protocol = hdr.vlan->h_vlan_encapsulated_proto;
		hdr.network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (protocol == htons(ETH_P_IP)) {
		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
			return max_len;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return hdr.network - data;

		/* record next protocol if header is present */
		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
			nexthdr = hdr.ipv4->protocol;
	} else if (protocol == htons(ETH_P_IPV6)) {
		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
			return max_len;

		/* record next protocol */
		nexthdr = hdr.ipv6->nexthdr;
		hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
	} else if (protocol == htons(ETH_P_FCOE)) {
		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
			return max_len;
		hlen = FCOE_HEADER_LEN;
#endif
	} else {
		return hdr.network - data;
	}

	/* relocate pointer to start of L4 header */
	hdr.network += hlen;

	/* finally sort out TCP/UDP */
	if (nexthdr == IPPROTO_TCP) {
		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
			return max_len;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return hdr.network - data;

		hdr.network += hlen;
	} else if (nexthdr == IPPROTO_UDP) {
		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
			return max_len;

		hdr.network += sizeof(struct udphdr);
	}

	/*
	 * If everything has gone correctly hdr.network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((hdr.network - data) < max_len)
		return hdr.network - data;
	else
		return max_len;
}

/* derive gso_size for an RSC-coalesced frame from its append count */
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

/* account RSC coalescing stats and fix up gso metadata for the frame */
static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}

/**
 * ixgbe_process_skb_fields - Populate skb header fields
 *	from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, dev);
}

/* hand a completed skb up the stack via busy-poll, GRO, or netpoll path */
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;

	if (ixgbe_qv_busy_polling(q_vector))
		netif_receive_skb(skb);
	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		IXGBE_CB(skb)->page_released = false;
	} else {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}
	IXGBE_CB(skb)->dma = 0;
}

/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbe_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
		     !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}

/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 ixgbe_rx_bufsz(rx_ring),
					 DMA_FROM_DEVICE);
}

/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
				   ixgbe_rx_bufsz(rx_ring);
#endif

	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(page) == numa_node_id()))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_node_id()))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;

	/*
	 * since we are the only owner of the page and we need to
	 * increment it, just set the value to 2 in order to avoid
	 * an unnecessary locked operation
	 */
	atomic_set(&page->_count, 2);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}
/**
 * ixgbe_fetch_rx_buffer - fetch or build the skb for the current descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: current Rx descriptor (length/status already written back)
 *
 * Allocates a new skb (or continues a chained one left by a previous
 * non-EOP descriptor), syncs or unmaps the buffer page as appropriate,
 * and attaches the page data to the skb.  Returns NULL on allocation
 * failure so the caller can retry on the next poll.
 **/
static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/*
		 * we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		/*
		 * Delay unmapping of the first packet. It carries the
		 * header information, HW may still access the header
		 * after the writeback.  Only unmap it when EOP is
		 * reached
		 */
		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
			goto dma_sync;

		IXGBE_CB(skb)->dma = rx_buffer->dma;
	} else {
		/* chained packet: delayed unmap/sync happens at EOP only */
		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}

	/* pull page into skb */
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		/* the page has been released from the ring */
		IXGBE_CB(skb)->page_released = true;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring),
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->skb = NULL;
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}

/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
2075 * 2076 * Returns amount of work completed 2077 **/ 2078 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 2079 struct ixgbe_ring *rx_ring, 2080 const int budget) 2081 { 2082 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 2083 #ifdef IXGBE_FCOE 2084 struct ixgbe_adapter *adapter = q_vector->adapter; 2085 int ddp_bytes; 2086 unsigned int mss = 0; 2087 #endif /* IXGBE_FCOE */ 2088 u16 cleaned_count = ixgbe_desc_unused(rx_ring); 2089 2090 while (likely(total_rx_packets < budget)) { 2091 union ixgbe_adv_rx_desc *rx_desc; 2092 struct sk_buff *skb; 2093 2094 /* return some buffers to hardware, one at a time is too slow */ 2095 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 2096 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); 2097 cleaned_count = 0; 2098 } 2099 2100 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); 2101 2102 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) 2103 break; 2104 2105 /* 2106 * This memory barrier is needed to keep us from reading 2107 * any other fields out of the rx_desc until we know the 2108 * RXD_STAT_DD bit is set 2109 */ 2110 rmb(); 2111 2112 /* retrieve a buffer from the ring */ 2113 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); 2114 2115 /* exit if we failed to retrieve a buffer */ 2116 if (!skb) 2117 break; 2118 2119 cleaned_count++; 2120 2121 /* place incomplete frames back on ring for completion */ 2122 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) 2123 continue; 2124 2125 /* verify the packet layout is correct */ 2126 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) 2127 continue; 2128 2129 /* probably a little skewed due to removing CRC */ 2130 total_rx_bytes += skb->len; 2131 2132 /* populate checksum, timestamp, VLAN, and protocol */ 2133 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); 2134 2135 #ifdef IXGBE_FCOE 2136 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 2137 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { 2138 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); 2139 /* 
include DDPed FCoE data */ 2140 if (ddp_bytes > 0) { 2141 if (!mss) { 2142 mss = rx_ring->netdev->mtu - 2143 sizeof(struct fcoe_hdr) - 2144 sizeof(struct fc_frame_header) - 2145 sizeof(struct fcoe_crc_eof); 2146 if (mss > 512) 2147 mss &= ~511; 2148 } 2149 total_rx_bytes += ddp_bytes; 2150 total_rx_packets += DIV_ROUND_UP(ddp_bytes, 2151 mss); 2152 } 2153 if (!ddp_bytes) { 2154 dev_kfree_skb_any(skb); 2155 continue; 2156 } 2157 } 2158 2159 #endif /* IXGBE_FCOE */ 2160 skb_mark_napi_id(skb, &q_vector->napi); 2161 ixgbe_rx_skb(q_vector, skb); 2162 2163 /* update budget accounting */ 2164 total_rx_packets++; 2165 } 2166 2167 u64_stats_update_begin(&rx_ring->syncp); 2168 rx_ring->stats.packets += total_rx_packets; 2169 rx_ring->stats.bytes += total_rx_bytes; 2170 u64_stats_update_end(&rx_ring->syncp); 2171 q_vector->rx.total_packets += total_rx_packets; 2172 q_vector->rx.total_bytes += total_rx_bytes; 2173 2174 if (cleaned_count) 2175 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); 2176 2177 return total_rx_packets; 2178 } 2179 2180 #ifdef CONFIG_NET_RX_BUSY_POLL 2181 /* must be called with local_bh_disable()d */ 2182 static int ixgbe_low_latency_recv(struct napi_struct *napi) 2183 { 2184 struct ixgbe_q_vector *q_vector = 2185 container_of(napi, struct ixgbe_q_vector, napi); 2186 struct ixgbe_adapter *adapter = q_vector->adapter; 2187 struct ixgbe_ring *ring; 2188 int found = 0; 2189 2190 if (test_bit(__IXGBE_DOWN, &adapter->state)) 2191 return LL_FLUSH_FAILED; 2192 2193 if (!ixgbe_qv_lock_poll(q_vector)) 2194 return LL_FLUSH_BUSY; 2195 2196 ixgbe_for_each_ring(ring, q_vector->rx) { 2197 found = ixgbe_clean_rx_irq(q_vector, ring, 4); 2198 #ifdef BP_EXTENDED_STATS 2199 if (found) 2200 ring->stats.cleaned += found; 2201 else 2202 ring->stats.misses++; 2203 #endif 2204 if (found) 2205 break; 2206 } 2207 2208 ixgbe_qv_unlock_poll(q_vector); 2209 2210 return found; 2211 } 2212 #endif /* CONFIG_NET_RX_BUSY_POLL */ 2213 2214 /** 2215 * ixgbe_configure_msix - Configure MSI-X 
hardware 2216 * @adapter: board private structure 2217 * 2218 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X 2219 * interrupts. 2220 **/ 2221 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 2222 { 2223 struct ixgbe_q_vector *q_vector; 2224 int v_idx; 2225 u32 mask; 2226 2227 /* Populate MSIX to EITR Select */ 2228 if (adapter->num_vfs > 32) { 2229 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; 2230 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); 2231 } 2232 2233 /* 2234 * Populate the IVAR table and set the ITR values to the 2235 * corresponding register. 2236 */ 2237 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 2238 struct ixgbe_ring *ring; 2239 q_vector = adapter->q_vector[v_idx]; 2240 2241 ixgbe_for_each_ring(ring, q_vector->rx) 2242 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); 2243 2244 ixgbe_for_each_ring(ring, q_vector->tx) 2245 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); 2246 2247 ixgbe_write_eitr(q_vector); 2248 } 2249 2250 switch (adapter->hw.mac.type) { 2251 case ixgbe_mac_82598EB: 2252 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 2253 v_idx); 2254 break; 2255 case ixgbe_mac_82599EB: 2256 case ixgbe_mac_X540: 2257 ixgbe_set_ivar(adapter, -1, 1, v_idx); 2258 break; 2259 default: 2260 break; 2261 } 2262 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 2263 2264 /* set up to autoclear timer, and the vectors */ 2265 mask = IXGBE_EIMS_ENABLE_MASK; 2266 mask &= ~(IXGBE_EIMS_OTHER | 2267 IXGBE_EIMS_MAILBOX | 2268 IXGBE_EIMS_LSC); 2269 2270 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 2271 } 2272 2273 enum latency_range { 2274 lowest_latency = 0, 2275 low_latency = 1, 2276 bulk_latency = 2, 2277 latency_invalid = 255 2278 }; 2279 2280 /** 2281 * ixgbe_update_itr - update the dynamic ITR value based on statistics 2282 * @q_vector: structure containing interrupt and ring information 2283 * @ring_container: structure containing ring performance data 2284 * 2285 * 
Stores a new ITR value based on packets and byte 2286 * counts during the last interrupt. The advantage of per interrupt 2287 * computation is faster updates and more accurate ITR for the current 2288 * traffic pattern. Constants in this function were computed 2289 * based on theoretical maximum wire speed and thresholds were set based 2290 * on testing data as well as attempting to minimize response time 2291 * while increasing bulk throughput. 2292 * this functionality is controlled by the InterruptThrottleRate module 2293 * parameter (see ixgbe_param.c) 2294 **/ 2295 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, 2296 struct ixgbe_ring_container *ring_container) 2297 { 2298 int bytes = ring_container->total_bytes; 2299 int packets = ring_container->total_packets; 2300 u32 timepassed_us; 2301 u64 bytes_perint; 2302 u8 itr_setting = ring_container->itr; 2303 2304 if (packets == 0) 2305 return; 2306 2307 /* simple throttlerate management 2308 * 0-10MB/s lowest (100000 ints/s) 2309 * 10-20MB/s low (20000 ints/s) 2310 * 20-1249MB/s bulk (8000 ints/s) 2311 */ 2312 /* what was last interrupt timeslice? 
*/ 2313 timepassed_us = q_vector->itr >> 2; 2314 if (timepassed_us == 0) 2315 return; 2316 2317 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 2318 2319 switch (itr_setting) { 2320 case lowest_latency: 2321 if (bytes_perint > 10) 2322 itr_setting = low_latency; 2323 break; 2324 case low_latency: 2325 if (bytes_perint > 20) 2326 itr_setting = bulk_latency; 2327 else if (bytes_perint <= 10) 2328 itr_setting = lowest_latency; 2329 break; 2330 case bulk_latency: 2331 if (bytes_perint <= 20) 2332 itr_setting = low_latency; 2333 break; 2334 } 2335 2336 /* clear work counters since we have the values we need */ 2337 ring_container->total_bytes = 0; 2338 ring_container->total_packets = 0; 2339 2340 /* write updated itr to ring container */ 2341 ring_container->itr = itr_setting; 2342 } 2343 2344 /** 2345 * ixgbe_write_eitr - write EITR register in hardware specific way 2346 * @q_vector: structure containing interrupt and ring information 2347 * 2348 * This function is made to be called by ethtool and by the driver 2349 * when it needs to update EITR registers at runtime. Hardware 2350 * specific quirks/differences are taken care of here. 
2351 */ 2352 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) 2353 { 2354 struct ixgbe_adapter *adapter = q_vector->adapter; 2355 struct ixgbe_hw *hw = &adapter->hw; 2356 int v_idx = q_vector->v_idx; 2357 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 2358 2359 switch (adapter->hw.mac.type) { 2360 case ixgbe_mac_82598EB: 2361 /* must write high and low 16 bits to reset counter */ 2362 itr_reg |= (itr_reg << 16); 2363 break; 2364 case ixgbe_mac_82599EB: 2365 case ixgbe_mac_X540: 2366 /* 2367 * set the WDIS bit to not clear the timer bits and cause an 2368 * immediate assertion of the interrupt 2369 */ 2370 itr_reg |= IXGBE_EITR_CNT_WDIS; 2371 break; 2372 default: 2373 break; 2374 } 2375 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 2376 } 2377 2378 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) 2379 { 2380 u32 new_itr = q_vector->itr; 2381 u8 current_itr; 2382 2383 ixgbe_update_itr(q_vector, &q_vector->tx); 2384 ixgbe_update_itr(q_vector, &q_vector->rx); 2385 2386 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 2387 2388 switch (current_itr) { 2389 /* counts and packets in update_itr are dependent on these numbers */ 2390 case lowest_latency: 2391 new_itr = IXGBE_100K_ITR; 2392 break; 2393 case low_latency: 2394 new_itr = IXGBE_20K_ITR; 2395 break; 2396 case bulk_latency: 2397 new_itr = IXGBE_8K_ITR; 2398 break; 2399 default: 2400 break; 2401 } 2402 2403 if (new_itr != q_vector->itr) { 2404 /* do an exponential smoothing */ 2405 new_itr = (10 * new_itr * q_vector->itr) / 2406 ((9 * new_itr) + q_vector->itr); 2407 2408 /* save the algorithm value here */ 2409 q_vector->itr = new_itr; 2410 2411 ixgbe_write_eitr(q_vector); 2412 } 2413 } 2414 2415 /** 2416 * ixgbe_check_overtemp_subtask - check for over temperature 2417 * @adapter: pointer to adapter 2418 **/ 2419 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) 2420 { 2421 struct ixgbe_hw *hw = &adapter->hw; 2422 u32 eicr = adapter->interrupt_event; 2423 2424 if 
(test_bit(__IXGBE_DOWN, &adapter->state)) 2425 return; 2426 2427 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 2428 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) 2429 return; 2430 2431 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; 2432 2433 switch (hw->device_id) { 2434 case IXGBE_DEV_ID_82599_T3_LOM: 2435 /* 2436 * Since the warning interrupt is for both ports 2437 * we don't have to check if: 2438 * - This interrupt wasn't for our port. 2439 * - We may have missed the interrupt so always have to 2440 * check if we got a LSC 2441 */ 2442 if (!(eicr & IXGBE_EICR_GPI_SDP0) && 2443 !(eicr & IXGBE_EICR_LSC)) 2444 return; 2445 2446 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { 2447 u32 speed; 2448 bool link_up = false; 2449 2450 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2451 2452 if (link_up) 2453 return; 2454 } 2455 2456 /* Check if this is not due to overtemp */ 2457 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) 2458 return; 2459 2460 break; 2461 default: 2462 if (!(eicr & IXGBE_EICR_GPI_SDP0)) 2463 return; 2464 break; 2465 } 2466 e_crit(drv, 2467 "Network adapter has been stopped because it has over heated. " 2468 "Restart the computer. 
If the problem persists, " 2469 "power off the system and replace the adapter\n"); 2470 2471 adapter->interrupt_event = 0; 2472 } 2473 2474 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 2475 { 2476 struct ixgbe_hw *hw = &adapter->hw; 2477 2478 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 2479 (eicr & IXGBE_EICR_GPI_SDP1)) { 2480 e_crit(probe, "Fan has stopped, replace the adapter\n"); 2481 /* write to clear the interrupt */ 2482 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 2483 } 2484 } 2485 2486 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) 2487 { 2488 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) 2489 return; 2490 2491 switch (adapter->hw.mac.type) { 2492 case ixgbe_mac_82599EB: 2493 /* 2494 * Need to check link state so complete overtemp check 2495 * on service task 2496 */ 2497 if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) && 2498 (!test_bit(__IXGBE_DOWN, &adapter->state))) { 2499 adapter->interrupt_event = eicr; 2500 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; 2501 ixgbe_service_event_schedule(adapter); 2502 return; 2503 } 2504 return; 2505 case ixgbe_mac_X540: 2506 if (!(eicr & IXGBE_EICR_TS)) 2507 return; 2508 break; 2509 default: 2510 return; 2511 } 2512 2513 e_crit(drv, 2514 "Network adapter has been stopped because it has over heated. " 2515 "Restart the computer. 
If the problem persists, " 2516 "power off the system and replace the adapter\n"); 2517 } 2518 2519 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) 2520 { 2521 struct ixgbe_hw *hw = &adapter->hw; 2522 2523 if (eicr & IXGBE_EICR_GPI_SDP2) { 2524 /* Clear the interrupt */ 2525 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); 2526 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2527 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; 2528 ixgbe_service_event_schedule(adapter); 2529 } 2530 } 2531 2532 if (eicr & IXGBE_EICR_GPI_SDP1) { 2533 /* Clear the interrupt */ 2534 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 2535 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2536 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 2537 ixgbe_service_event_schedule(adapter); 2538 } 2539 } 2540 } 2541 2542 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) 2543 { 2544 struct ixgbe_hw *hw = &adapter->hw; 2545 2546 adapter->lsc_int++; 2547 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2548 adapter->link_check_timeout = jiffies; 2549 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2550 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2551 IXGBE_WRITE_FLUSH(hw); 2552 ixgbe_service_event_schedule(adapter); 2553 } 2554 } 2555 2556 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, 2557 u64 qmask) 2558 { 2559 u32 mask; 2560 struct ixgbe_hw *hw = &adapter->hw; 2561 2562 switch (hw->mac.type) { 2563 case ixgbe_mac_82598EB: 2564 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 2565 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 2566 break; 2567 case ixgbe_mac_82599EB: 2568 case ixgbe_mac_X540: 2569 mask = (qmask & 0xFFFFFFFF); 2570 if (mask) 2571 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 2572 mask = (qmask >> 32); 2573 if (mask) 2574 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 2575 break; 2576 default: 2577 break; 2578 } 2579 /* skip the flush */ 2580 } 2581 2582 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, 
2583 u64 qmask) 2584 { 2585 u32 mask; 2586 struct ixgbe_hw *hw = &adapter->hw; 2587 2588 switch (hw->mac.type) { 2589 case ixgbe_mac_82598EB: 2590 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 2591 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 2592 break; 2593 case ixgbe_mac_82599EB: 2594 case ixgbe_mac_X540: 2595 mask = (qmask & 0xFFFFFFFF); 2596 if (mask) 2597 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 2598 mask = (qmask >> 32); 2599 if (mask) 2600 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 2601 break; 2602 default: 2603 break; 2604 } 2605 /* skip the flush */ 2606 } 2607 2608 /** 2609 * ixgbe_irq_enable - Enable default interrupt generation settings 2610 * @adapter: board private structure 2611 **/ 2612 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, 2613 bool flush) 2614 { 2615 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 2616 2617 /* don't reenable LSC while waiting for link */ 2618 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) 2619 mask &= ~IXGBE_EIMS_LSC; 2620 2621 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 2622 switch (adapter->hw.mac.type) { 2623 case ixgbe_mac_82599EB: 2624 mask |= IXGBE_EIMS_GPI_SDP0; 2625 break; 2626 case ixgbe_mac_X540: 2627 mask |= IXGBE_EIMS_TS; 2628 break; 2629 default: 2630 break; 2631 } 2632 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2633 mask |= IXGBE_EIMS_GPI_SDP1; 2634 switch (adapter->hw.mac.type) { 2635 case ixgbe_mac_82599EB: 2636 mask |= IXGBE_EIMS_GPI_SDP1; 2637 mask |= IXGBE_EIMS_GPI_SDP2; 2638 case ixgbe_mac_X540: 2639 mask |= IXGBE_EIMS_ECC; 2640 mask |= IXGBE_EIMS_MAILBOX; 2641 break; 2642 default: 2643 break; 2644 } 2645 2646 if (adapter->hw.mac.type == ixgbe_mac_X540) 2647 mask |= IXGBE_EIMS_TIMESYNC; 2648 2649 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && 2650 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) 2651 mask |= IXGBE_EIMS_FLOW_DIR; 2652 2653 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 2654 if (queues) 2655 
ixgbe_irq_enable_queues(adapter, ~0); 2656 if (flush) 2657 IXGBE_WRITE_FLUSH(&adapter->hw); 2658 } 2659 2660 static irqreturn_t ixgbe_msix_other(int irq, void *data) 2661 { 2662 struct ixgbe_adapter *adapter = data; 2663 struct ixgbe_hw *hw = &adapter->hw; 2664 u32 eicr; 2665 2666 /* 2667 * Workaround for Silicon errata. Use clear-by-write instead 2668 * of clear-by-read. Reading with EICS will return the 2669 * interrupt causes without clearing, which later be done 2670 * with the write to EICR. 2671 */ 2672 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2673 2674 /* The lower 16bits of the EICR register are for the queue interrupts 2675 * which should be masked here in order to not accidently clear them if 2676 * the bits are high when ixgbe_msix_other is called. There is a race 2677 * condition otherwise which results in possible performance loss 2678 * especially if the ixgbe_msix_other interrupt is triggering 2679 * consistently (as it would when PPS is turned on for the X540 device) 2680 */ 2681 eicr &= 0xFFFF0000; 2682 2683 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2684 2685 if (eicr & IXGBE_EICR_LSC) 2686 ixgbe_check_lsc(adapter); 2687 2688 if (eicr & IXGBE_EICR_MAILBOX) 2689 ixgbe_msg_task(adapter); 2690 2691 switch (hw->mac.type) { 2692 case ixgbe_mac_82599EB: 2693 case ixgbe_mac_X540: 2694 if (eicr & IXGBE_EICR_ECC) { 2695 e_info(link, "Received ECC Err, initiating reset\n"); 2696 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; 2697 ixgbe_service_event_schedule(adapter); 2698 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2699 } 2700 /* Handle Flow Director Full threshold interrupt */ 2701 if (eicr & IXGBE_EICR_FLOW_DIR) { 2702 int reinit_count = 0; 2703 int i; 2704 for (i = 0; i < adapter->num_tx_queues; i++) { 2705 struct ixgbe_ring *ring = adapter->tx_ring[i]; 2706 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, 2707 &ring->state)) 2708 reinit_count++; 2709 } 2710 if (reinit_count) { 2711 /* no more flow director interrupts until after init */ 2712 
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); 2713 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 2714 ixgbe_service_event_schedule(adapter); 2715 } 2716 } 2717 ixgbe_check_sfp_event(adapter, eicr); 2718 ixgbe_check_overtemp_event(adapter, eicr); 2719 break; 2720 default: 2721 break; 2722 } 2723 2724 ixgbe_check_fan_failure(adapter, eicr); 2725 2726 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 2727 ixgbe_ptp_check_pps_event(adapter, eicr); 2728 2729 /* re-enable the original interrupt state, no lsc, no queues */ 2730 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2731 ixgbe_irq_enable(adapter, false, false); 2732 2733 return IRQ_HANDLED; 2734 } 2735 2736 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) 2737 { 2738 struct ixgbe_q_vector *q_vector = data; 2739 2740 /* EIAM disabled interrupts (on this vector) for us */ 2741 2742 if (q_vector->rx.ring || q_vector->tx.ring) 2743 napi_schedule(&q_vector->napi); 2744 2745 return IRQ_HANDLED; 2746 } 2747 2748 /** 2749 * ixgbe_poll - NAPI Rx polling callback 2750 * @napi: structure for representing this polling device 2751 * @budget: how many packets driver is allowed to clean 2752 * 2753 * This function is used for legacy and MSI, NAPI mode 2754 **/ 2755 int ixgbe_poll(struct napi_struct *napi, int budget) 2756 { 2757 struct ixgbe_q_vector *q_vector = 2758 container_of(napi, struct ixgbe_q_vector, napi); 2759 struct ixgbe_adapter *adapter = q_vector->adapter; 2760 struct ixgbe_ring *ring; 2761 int per_ring_budget; 2762 bool clean_complete = true; 2763 2764 #ifdef CONFIG_IXGBE_DCA 2765 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2766 ixgbe_update_dca(q_vector); 2767 #endif 2768 2769 ixgbe_for_each_ring(ring, q_vector->tx) 2770 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); 2771 2772 if (!ixgbe_qv_lock_napi(q_vector)) 2773 return budget; 2774 2775 /* attempt to distribute budget to each queue fairly, but don't allow 2776 * the budget to go below 1 because we'll exit polling */ 2777 if 
(q_vector->rx.count > 1) 2778 per_ring_budget = max(budget/q_vector->rx.count, 1); 2779 else 2780 per_ring_budget = budget; 2781 2782 ixgbe_for_each_ring(ring, q_vector->rx) 2783 clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, 2784 per_ring_budget) < per_ring_budget); 2785 2786 ixgbe_qv_unlock_napi(q_vector); 2787 /* If all work not completed, return budget and keep polling */ 2788 if (!clean_complete) 2789 return budget; 2790 2791 /* all work done, exit the polling mode */ 2792 napi_complete(napi); 2793 if (adapter->rx_itr_setting & 1) 2794 ixgbe_set_itr(q_vector); 2795 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2796 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); 2797 2798 return 0; 2799 } 2800 2801 /** 2802 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts 2803 * @adapter: board private structure 2804 * 2805 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests 2806 * interrupts from the kernel. 2807 **/ 2808 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) 2809 { 2810 struct net_device *netdev = adapter->netdev; 2811 int vector, err; 2812 int ri = 0, ti = 0; 2813 2814 for (vector = 0; vector < adapter->num_q_vectors; vector++) { 2815 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 2816 struct msix_entry *entry = &adapter->msix_entries[vector]; 2817 2818 if (q_vector->tx.ring && q_vector->rx.ring) { 2819 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2820 "%s-%s-%d", netdev->name, "TxRx", ri++); 2821 ti++; 2822 } else if (q_vector->rx.ring) { 2823 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2824 "%s-%s-%d", netdev->name, "rx", ri++); 2825 } else if (q_vector->tx.ring) { 2826 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2827 "%s-%s-%d", netdev->name, "tx", ti++); 2828 } else { 2829 /* skip this unused q_vector */ 2830 continue; 2831 } 2832 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, 2833 q_vector->name, q_vector); 2834 if (err) { 2835 e_err(probe, 
"request_irq failed for MSIX interrupt " 2836 "Error: %d\n", err); 2837 goto free_queue_irqs; 2838 } 2839 /* If Flow Director is enabled, set interrupt affinity */ 2840 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 2841 /* assign the mask for this irq */ 2842 irq_set_affinity_hint(entry->vector, 2843 &q_vector->affinity_mask); 2844 } 2845 } 2846 2847 err = request_irq(adapter->msix_entries[vector].vector, 2848 ixgbe_msix_other, 0, netdev->name, adapter); 2849 if (err) { 2850 e_err(probe, "request_irq for msix_other failed: %d\n", err); 2851 goto free_queue_irqs; 2852 } 2853 2854 return 0; 2855 2856 free_queue_irqs: 2857 while (vector) { 2858 vector--; 2859 irq_set_affinity_hint(adapter->msix_entries[vector].vector, 2860 NULL); 2861 free_irq(adapter->msix_entries[vector].vector, 2862 adapter->q_vector[vector]); 2863 } 2864 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2865 pci_disable_msix(adapter->pdev); 2866 kfree(adapter->msix_entries); 2867 adapter->msix_entries = NULL; 2868 return err; 2869 } 2870 2871 /** 2872 * ixgbe_intr - legacy mode Interrupt Handler 2873 * @irq: interrupt number 2874 * @data: pointer to a network interface device structure 2875 **/ 2876 static irqreturn_t ixgbe_intr(int irq, void *data) 2877 { 2878 struct ixgbe_adapter *adapter = data; 2879 struct ixgbe_hw *hw = &adapter->hw; 2880 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 2881 u32 eicr; 2882 2883 /* 2884 * Workaround for silicon errata #26 on 82598. Mask the interrupt 2885 * before the read of EICR. 2886 */ 2887 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 2888 2889 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read 2890 * therefore no explicit interrupt disable is necessary */ 2891 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 2892 if (!eicr) { 2893 /* 2894 * shared interrupt alert! 2895 * make sure interrupts are enabled because the read will 2896 * have disabled interrupts due to EIAM 2897 * finish the workaround of silicon errata on 82598. 
Unmask 2898 * the interrupt that we masked before the EICR read. 2899 */ 2900 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2901 ixgbe_irq_enable(adapter, true, true); 2902 return IRQ_NONE; /* Not our interrupt */ 2903 } 2904 2905 if (eicr & IXGBE_EICR_LSC) 2906 ixgbe_check_lsc(adapter); 2907 2908 switch (hw->mac.type) { 2909 case ixgbe_mac_82599EB: 2910 ixgbe_check_sfp_event(adapter, eicr); 2911 /* Fall through */ 2912 case ixgbe_mac_X540: 2913 if (eicr & IXGBE_EICR_ECC) { 2914 e_info(link, "Received ECC Err, initiating reset\n"); 2915 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; 2916 ixgbe_service_event_schedule(adapter); 2917 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2918 } 2919 ixgbe_check_overtemp_event(adapter, eicr); 2920 break; 2921 default: 2922 break; 2923 } 2924 2925 ixgbe_check_fan_failure(adapter, eicr); 2926 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 2927 ixgbe_ptp_check_pps_event(adapter, eicr); 2928 2929 /* would disable interrupts here but EIAM disabled it */ 2930 napi_schedule(&q_vector->napi); 2931 2932 /* 2933 * re-enable link(maybe) and non-queue interrupts, no flush. 2934 * ixgbe_poll will re-enable the queue interrupts 2935 */ 2936 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2937 ixgbe_irq_enable(adapter, false, false); 2938 2939 return IRQ_HANDLED; 2940 } 2941 2942 /** 2943 * ixgbe_request_irq - initialize interrupts 2944 * @adapter: board private structure 2945 * 2946 * Attempts to configure interrupts using the best available 2947 * capabilities of the hardware and kernel. 
2948 **/ 2949 static int ixgbe_request_irq(struct ixgbe_adapter *adapter) 2950 { 2951 struct net_device *netdev = adapter->netdev; 2952 int err; 2953 2954 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2955 err = ixgbe_request_msix_irqs(adapter); 2956 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) 2957 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, 2958 netdev->name, adapter); 2959 else 2960 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, 2961 netdev->name, adapter); 2962 2963 if (err) 2964 e_err(probe, "request_irq failed, Error %d\n", err); 2965 2966 return err; 2967 } 2968 2969 static void ixgbe_free_irq(struct ixgbe_adapter *adapter) 2970 { 2971 int vector; 2972 2973 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 2974 free_irq(adapter->pdev->irq, adapter); 2975 return; 2976 } 2977 2978 for (vector = 0; vector < adapter->num_q_vectors; vector++) { 2979 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 2980 struct msix_entry *entry = &adapter->msix_entries[vector]; 2981 2982 /* free only the irqs that were actually requested */ 2983 if (!q_vector->rx.ring && !q_vector->tx.ring) 2984 continue; 2985 2986 /* clear the affinity_mask in the IRQ descriptor */ 2987 irq_set_affinity_hint(entry->vector, NULL); 2988 2989 free_irq(entry->vector, q_vector); 2990 } 2991 2992 free_irq(adapter->msix_entries[vector++].vector, adapter); 2993 } 2994 2995 /** 2996 * ixgbe_irq_disable - Mask off interrupt generation on the NIC 2997 * @adapter: board private structure 2998 **/ 2999 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 3000 { 3001 switch (adapter->hw.mac.type) { 3002 case ixgbe_mac_82598EB: 3003 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3004 break; 3005 case ixgbe_mac_82599EB: 3006 case ixgbe_mac_X540: 3007 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3008 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3009 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3010 break; 3011 default: 3012 break; 
3013 } 3014 IXGBE_WRITE_FLUSH(&adapter->hw); 3015 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3016 int vector; 3017 3018 for (vector = 0; vector < adapter->num_q_vectors; vector++) 3019 synchronize_irq(adapter->msix_entries[vector].vector); 3020 3021 synchronize_irq(adapter->msix_entries[vector++].vector); 3022 } else { 3023 synchronize_irq(adapter->pdev->irq); 3024 } 3025 } 3026 3027 /** 3028 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts 3029 * 3030 **/ 3031 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) 3032 { 3033 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 3034 3035 ixgbe_write_eitr(q_vector); 3036 3037 ixgbe_set_ivar(adapter, 0, 0, 0); 3038 ixgbe_set_ivar(adapter, 1, 0, 0); 3039 3040 e_info(hw, "Legacy interrupt IVAR setup done\n"); 3041 } 3042 3043 /** 3044 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset 3045 * @adapter: board private structure 3046 * @ring: structure containing ring specific data 3047 * 3048 * Configure the Tx descriptor ring after a reset. 
 **/
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
	IXGBE_WRITE_FLUSH(hw);

	/* program descriptor base (split across two 32-bit registers),
	 * ring length in bytes, and reset head/tail to 0
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
			(tdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
	/* cache the mapped tail register address for the hot xmit path */
	ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);

	/*
	 * set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when:
	 * - ITR is 0 as it could cause false TX hangs
	 * - ITR is set to > 100k int/sec and BQL is enabled
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
#if IS_ENABLED(CONFIG_BQL)
	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
#else
	if (!ring->q_vector || (ring->q_vector->itr < 8))
#endif
		txdctl |= (1 << 16);	/* WTHRESH = 1 */
	else
		txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/*
	 * Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	/* reinitialize flowdirector state */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		ring->atr_sample_rate = adapter->atr_sample_rate;
		ring->atr_count = 0;
		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
	} else {
		ring->atr_sample_rate = 0;
	}

	/* initialize XPS (once per ring; the bit guards re-runs) */
	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
		struct ixgbe_q_vector *q_vector = ring->q_vector;

		if (q_vector)
			netif_set_xps_queue(ring->netdev,
					    &q_vector->affinity_mask,
					    ring->queue_index);
	}

	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

	/* enable queue */
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* poll to verify queue is enabled (up to ~10-20ms total) */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbe_setup_mtqc - program the multiple Tx queue control register
 * @adapter: board private structure
 *
 * Selects the Tx pool/TC layout (MTQC) based on the SR-IOV flag and
 * the number of configured traffic classes.  No-op on 82598, which
 * has no MTQC register.  The Tx descriptor arbiter is disabled around
 * the MTQC write as the hardware requires.
 **/
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rttdcs, mtqc;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* disable the arbiter while setting MTQC */
	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	rttdcs |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

	/* set transmit pool layout */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		mtqc = IXGBE_MTQC_VT_ENA;
		if (tcs > 4)
			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
		else if (tcs > 1)
			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		else if (adapter->ring_feature[RING_F_RSS].indices == 4)
			mtqc |= IXGBE_MTQC_32VF;
		else
			mtqc |= IXGBE_MTQC_64VF;
	} else {
		if (tcs > 4)
			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
		else if (tcs > 1)
			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		else
			mtqc = IXGBE_MTQC_64Q_1PB;
	}

	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);

	/* Enable Security TX Buffer IFG for multiple pb */
	if (tcs) {
		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
		sectx |= IXGBE_SECTX_DCB;
		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
	}

	/* re-enable the arbiter */
	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 dmatxctl;
	u32 i;

	ixgbe_setup_mtqc(adapter);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* DMATXCTL.EN must be before Tx queues are enabled */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
	}

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/* set the SRRCTL drop-enable bit for one Rx ring */
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
				 struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 reg_idx = ring->reg_idx;
	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));

	srrctl |= IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

/* clear the SRRCTL drop-enable bit for one Rx ring */
static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 reg_idx = ring->reg_idx;
	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));

	srrctl &= ~IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

/*
 * ixgbe_set_rx_drop_en - apply the Rx drop-enable policy to all rings
 * (exported when DCB is built in so the DCB code can re-apply it)
 */
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#else
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	/*
	 * We should set the drop enable bit if:
	 *  SR-IOV is enabled
	 *   or
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 *  This allows us to avoid head of line blocking for security
	 *  and performance reasons.
	 */
	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
	    !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

/* program split receive control (SRRCTL) for one Rx ring:
 * header size, packet buffer size and descriptor type
 */
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;
	u8 reg_idx = rx_ring->reg_idx;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		u16 mask = adapter->ring_feature[RING_F_RSS].mask;

		/*
		 * if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
		reg_idx &= mask;
	}

	/* configure header buffer length, needed for RSC */
	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* configure descriptor type */
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

/* program the RSS key, redirection table and MRQC queue-distribution mode */
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 mrqc = 0, reta = 0;
	u32 rxcsum;
	int i, j;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

	/*
	 * Program table for at least 2 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have. We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
		rss_i = 2;

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

	/* Fill out redirection table */
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == rss_i)
			j = 0;
		/* reta = 4-byte sliding window of
		 * 0x00..(indices-1)(indices-1)00..etc. */
		reta = (reta << 8) | (j * 0x11);
		/* RETA registers hold 4 entries each; write every 4th pass */
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/* Disable indicating checksum in descriptor, enables RSS hash */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		if (adapter->ring_feature[RING_F_RSS].mask)
			mrqc = IXGBE_MRQC_RSSEN;
	} else {
		u8 tcs = netdev_get_num_tc(adapter->netdev);

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			if (tcs > 4)
				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */
			else if (tcs > 1)
				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */
			else if (adapter->ring_feature[RING_F_RSS].indices == 4)
				mrqc = IXGBE_MRQC_VMDQRSS32EN;
			else
				mrqc = IXGBE_MRQC_VMDQRSS64EN;
		} else {
			if (tcs > 4)
				mrqc = IXGBE_MRQC_RTRSS8TCEN;
			else if (tcs > 1)
				mrqc = IXGBE_MRQC_RTRSS4TCEN;
			else
				mrqc = IXGBE_MRQC_RSSEN;
		}
	}

	/* Perform hash on these packet types */
	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
		IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		IXGBE_MRQC_RSS_FIELD_IPV6 |
		IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}

/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rscctrl;
	u8 reg_idx = ring->reg_idx;

	if (!ring_is_rsc_enabled(ring))
		return;

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}

#define IXGBE_MAX_RX_DESC_POLL 10

/* poll until the hardware reports RXDCTL.ENABLE set for the ring,
 * warning if it does not come up within the polling budget
 */
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (ixgbe_removed(hw->hw_addr))
		return;
	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
		      "the polling period\n", reg_idx);
	}
}

void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (ixgbe_removed(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	/* RXDCTL.EN may not change on 82598 if link is down, so skip poll */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
		      "the polling period\n", reg_idx);
	}
}

void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	ixgbe_disable_rx_queue(adapter, ring);

	/* program descriptor base, length, and reset head/tail */
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
	/* cache the mapped tail register address for the rx refill path */
	ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);

	ixgbe_configure_srrctl(adapter, ring);
	ixgbe_configure_rscctl(adapter, ring);

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 burst writeback up to two cache lines
		 */
		rxdctl &= ~0x3FFFFF;
		rxdctl |=  0x080420;
	}

	/* enable receive descriptor ring */
	rxdctl |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	ixgbe_rx_desc_queue_enable(adapter, ring);
	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}

/* program packet-split receive type (PSRTYPE) for every active pool */
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
	u16 pool;

	/* PSRTYPE must be initialized in non 82598 adapters */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* bits 29:30 encode how many RSS queues the PF uses */
	if (rss_i > 3)
		psrtype |= 2 << 29;
	else if (rss_i > 1)
		psrtype |= 1 << 29;

	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}

/* configure VMDq/SR-IOV pool layout, PF pool enables and anti-spoofing */
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_offset, vf_shift;
	u32 gcr_ext, vmdctl;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
	vmdctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* the 64 pool-enable bits are split across two 32-bit registers */
	vf_shift = VMDQ_P(0) % 32;
	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;

	/* Enable only the PF's pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
	if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));

	/*
	 * Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
	switch (adapter->ring_feature[RING_F_VMDQ].mask) {
	case IXGBE_82599_VMDQ_8Q_MASK:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
		break;
	case IXGBE_82599_VMDQ_4Q_MASK:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
		break;
	default:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);


	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
					  adapter->num_vfs);
	/* For VFs that have spoof checking turned off */
	for (i = 0; i < adapter->num_vfs; i++) {
		if (!adapter->vfinfo[i].spoofchk_enabled)
			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
	}
}

/* program the max frame size and per-ring RSC enable state */
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	struct ixgbe_ring *rx_ring;
	int i;
	u32 mhadd, hlreg0;

#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/*
	 * Propagate the adapter-wide RSC flag to each Rx ring so the
	 * per-ring configuration code picks it up
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = adapter->rx_ring[i];
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			set_ring_rsc_enabled(rx_ring);
		else
			clear_ring_rsc_enabled(rx_ring);
	}
}

/* program RDRXCTL with per-MAC required defaults (MVMEN, RSC, CRC strip) */
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		break;
	default:
		/* We should do nothing since we don't know this hardware */
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 rxctrl, rfctl;

	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);

	/* RSC Setup */
	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	rfctl &= ~IXGBE_RFCTL_RSC_DIS;
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
		rfctl |= IXGBE_RFCTL_RSC_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);

	/* Program registers for the distribution of queues */
	ixgbe_setup_mrqc(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbe_set_rx_buffer_len(adapter);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);

	/* disable drop enable for 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;

	/* enable all receives */
	rxctrl |= IXGBE_RXCTRL_RXEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
}

/* ndo_vlan_rx_add_vid: add @vid to the HW VLAN filter table and track it */
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
	set_bit(vid, adapter->active_vlans);

	return 0;
}

/* ndo_vlan_rx_kill_vid: remove @vid from the HW VLAN filter table */
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}

/**
 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl |= IXGBE_VLNCTRL_VFE;
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 *
 * On 82598 stripping is a global VLNCTRL bit; on 82599/X540 it is a
 * per-queue RXDCTL bit, so each Rx ring is updated individually
 * (rings owned by an L2 accel pool are skipped).
 */
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct ixgbe_ring *ring = adapter->rx_ring[i];

			if (ring->l2_accel_priv)
				continue;
			j = ring->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 *
 * Mirror of ixgbe_vlan_strip_disable: global bit on 82598, per-queue
 * RXDCTL bit on 82599/X540.
 */
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct ixgbe_ring *ring = adapter->rx_ring[i];

			if (ring->l2_accel_priv)
				continue;
			j = ring->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

/* re-add VID 0 plus every VLAN tracked in active_vlans after a reset */
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	u16 vid;

	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                  0 on no addresses written
 *                  X on writing X addresses to the RAR table
 **/
static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int rar_entries = hw->mac.num_rar_entries - 1;
	int count = 0;

	/* In SR-IOV/VMDQ modes significantly less RAR entries are available */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rar_entries = IXGBE_MAX_PF_MACVLANS - 1;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		/* return error if we do not support writing to RAR table */
		if (!hw->mac.ops.set_rar)
			return -ENOMEM;

		/* entries are consumed from the top of the table downward */
		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
					    VMDQ_P(0), IXGBE_RAH_AV);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--)
		hw->mac.ops.clear_rar(hw, rar_entries);

	return count;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	int count;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = true;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
		/* Only disable hardware filter vlans in promiscuous mode
		 * if SR-IOV and VMDQ are disabled - otherwise ensure
		 * that hardware VLAN filters remain enabled.
		 */
		if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
					IXGBE_FLAG_SRIOV_ENABLED)))
			ixgbe_vlan_filter_disable(adapter);
		else
			ixgbe_vlan_filter_enable(adapter);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		}
		ixgbe_vlan_filter_enable(adapter);
		hw->addr_ctrl.user_set_promisc = false;
	}

	/*
	 * Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	count = ixgbe_write_uc_addr_list(netdev);
	if (count < 0) {
		fctrl |= IXGBE_FCTRL_UPE;
		vmolr |= IXGBE_VMOLR_ROPE;
	}

	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	hw->mac.ops.update_mc_addr_list(hw, netdev);
	vmolr |= IXGBE_VMOLR_ROMPE;

	if (adapter->num_vfs)
		ixgbe_restore_vf_multicasts(adapter);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* merge with existing VMOLR, keeping only our managed bits */
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
	}

	/* This is useful for sniffing bad packets.
*/ 3977 if (adapter->netdev->features & NETIF_F_RXALL) { 3978 /* UPE and MPE will be handled by normal PROMISC logic 3979 * in e1000e_set_rx_mode */ 3980 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ 3981 IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ 3982 IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ 3983 3984 fctrl &= ~(IXGBE_FCTRL_DPF); 3985 /* NOTE: VLAN filtering is disabled by setting PROMISC */ 3986 } 3987 3988 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3989 3990 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 3991 ixgbe_vlan_strip_enable(adapter); 3992 else 3993 ixgbe_vlan_strip_disable(adapter); 3994 } 3995 3996 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 3997 { 3998 int q_idx; 3999 4000 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { 4001 ixgbe_qv_init_lock(adapter->q_vector[q_idx]); 4002 napi_enable(&adapter->q_vector[q_idx]->napi); 4003 } 4004 } 4005 4006 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) 4007 { 4008 int q_idx; 4009 4010 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { 4011 napi_disable(&adapter->q_vector[q_idx]->napi); 4012 while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { 4013 pr_info("QV %d locked\n", q_idx); 4014 usleep_range(1000, 20000); 4015 } 4016 } 4017 } 4018 4019 #ifdef CONFIG_IXGBE_DCB 4020 /** 4021 * ixgbe_configure_dcb - Configure DCB hardware 4022 * @adapter: ixgbe adapter struct 4023 * 4024 * This is called by the driver on open to configure the DCB hardware. 4025 * This is also called by the gennetlink interface when reconfiguring 4026 * the DCB state. 
4027 */ 4028 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 4029 { 4030 struct ixgbe_hw *hw = &adapter->hw; 4031 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 4032 4033 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { 4034 if (hw->mac.type == ixgbe_mac_82598EB) 4035 netif_set_gso_max_size(adapter->netdev, 65536); 4036 return; 4037 } 4038 4039 if (hw->mac.type == ixgbe_mac_82598EB) 4040 netif_set_gso_max_size(adapter->netdev, 32768); 4041 4042 #ifdef IXGBE_FCOE 4043 if (adapter->netdev->features & NETIF_F_FCOE_MTU) 4044 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 4045 #endif 4046 4047 /* reconfigure the hardware */ 4048 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { 4049 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, 4050 DCB_TX_CONFIG); 4051 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, 4052 DCB_RX_CONFIG); 4053 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); 4054 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { 4055 ixgbe_dcb_hw_ets(&adapter->hw, 4056 adapter->ixgbe_ieee_ets, 4057 max_frame); 4058 ixgbe_dcb_hw_pfc_config(&adapter->hw, 4059 adapter->ixgbe_ieee_pfc->pfc_en, 4060 adapter->ixgbe_ieee_ets->prio_tc); 4061 } 4062 4063 /* Enable RSS Hash per TC */ 4064 if (hw->mac.type != ixgbe_mac_82598EB) { 4065 u32 msb = 0; 4066 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; 4067 4068 while (rss_i) { 4069 msb++; 4070 rss_i >>= 1; 4071 } 4072 4073 /* write msb to all 8 TCs in one write */ 4074 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); 4075 } 4076 } 4077 #endif 4078 4079 /* Additional bittime to account for IXGBE framing */ 4080 #define IXGBE_ETH_FRAMING 20 4081 4082 /** 4083 * ixgbe_hpbthresh - calculate high water mark for flow control 4084 * 4085 * @adapter: board private structure to calculate for 4086 * @pb: packet buffer to calculate 4087 */ 4088 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) 4089 { 4090 struct ixgbe_hw *hw = 
&adapter->hw; 4091 struct net_device *dev = adapter->netdev; 4092 int link, tc, kb, marker; 4093 u32 dv_id, rx_pba; 4094 4095 /* Calculate max LAN frame size */ 4096 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; 4097 4098 #ifdef IXGBE_FCOE 4099 /* FCoE traffic class uses FCOE jumbo frames */ 4100 if ((dev->features & NETIF_F_FCOE_MTU) && 4101 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && 4102 (pb == ixgbe_fcoe_get_tc(adapter))) 4103 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; 4104 4105 #endif 4106 /* Calculate delay value for device */ 4107 switch (hw->mac.type) { 4108 case ixgbe_mac_X540: 4109 dv_id = IXGBE_DV_X540(link, tc); 4110 break; 4111 default: 4112 dv_id = IXGBE_DV(link, tc); 4113 break; 4114 } 4115 4116 /* Loopback switch introduces additional latency */ 4117 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 4118 dv_id += IXGBE_B2BT(tc); 4119 4120 /* Delay value is calculated in bit times convert to KB */ 4121 kb = IXGBE_BT2KB(dv_id); 4122 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10; 4123 4124 marker = rx_pba - kb; 4125 4126 /* It is possible that the packet buffer is not large enough 4127 * to provide required headroom. In this case throw an error 4128 * to user and a do the best we can. 4129 */ 4130 if (marker < 0) { 4131 e_warn(drv, "Packet Buffer(%i) can not provide enough" 4132 "headroom to support flow control." 
4133 "Decrease MTU or number of traffic classes\n", pb); 4134 marker = tc + 1; 4135 } 4136 4137 return marker; 4138 } 4139 4140 /** 4141 * ixgbe_lpbthresh - calculate low water mark for for flow control 4142 * 4143 * @adapter: board private structure to calculate for 4144 * @pb: packet buffer to calculate 4145 */ 4146 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) 4147 { 4148 struct ixgbe_hw *hw = &adapter->hw; 4149 struct net_device *dev = adapter->netdev; 4150 int tc; 4151 u32 dv_id; 4152 4153 /* Calculate max LAN frame size */ 4154 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; 4155 4156 /* Calculate delay value for device */ 4157 switch (hw->mac.type) { 4158 case ixgbe_mac_X540: 4159 dv_id = IXGBE_LOW_DV_X540(tc); 4160 break; 4161 default: 4162 dv_id = IXGBE_LOW_DV(tc); 4163 break; 4164 } 4165 4166 /* Delay value is calculated in bit times convert to KB */ 4167 return IXGBE_BT2KB(dv_id); 4168 } 4169 4170 /* 4171 * ixgbe_pbthresh_setup - calculate and setup high low water marks 4172 */ 4173 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) 4174 { 4175 struct ixgbe_hw *hw = &adapter->hw; 4176 int num_tc = netdev_get_num_tc(adapter->netdev); 4177 int i; 4178 4179 if (!num_tc) 4180 num_tc = 1; 4181 4182 hw->fc.low_water = ixgbe_lpbthresh(adapter); 4183 4184 for (i = 0; i < num_tc; i++) { 4185 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); 4186 4187 /* Low water marks must not be larger than high water marks */ 4188 if (hw->fc.low_water > hw->fc.high_water[i]) 4189 hw->fc.low_water = 0; 4190 } 4191 } 4192 4193 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) 4194 { 4195 struct ixgbe_hw *hw = &adapter->hw; 4196 int hdrm; 4197 u8 tc = netdev_get_num_tc(adapter->netdev); 4198 4199 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 4200 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 4201 hdrm = 32 << adapter->fdir_pballoc; 4202 else 4203 hdrm = 0; 4204 4205 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); 4206 
	ixgbe_pbthresh_setup(adapter);
}

/**
 * ixgbe_fdir_filter_restore - replay saved Flow Director perfect filters
 * @adapter: board private structure
 *
 * Re-writes the software list of perfect filters into the hardware,
 * restoring the input mask first if any filters exist (e.g. after a
 * reset cleared the filter table).
 */
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	if (!hlist_empty(&adapter->fdir_filter_list))
		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);

	hlist_for_each_entry_safe(filter, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* drop actions map to the drop queue, everything else maps
		 * to the hardware register index of the chosen Rx ring
		 */
		ixgbe_fdir_write_perfect_filter_82599(hw,
				&filter->filter,
				filter->sw_idx,
				(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[filter->action]->reg_idx);
	}

	spin_unlock(&adapter->fdir_perfect_lock);
}

/* Program the VMOLR (VM offload) register for one VMDq pool backing a
 * macvlan offload device, based on the macvlan netdev's Rx mode flags.
 */
static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
				      struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vmolr;

	/* No unicast promiscuous support for VMDQ devices.
 */
	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
	vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);

	/* clear the affected bit */
	vmolr &= ~IXGBE_VMOLR_MPE;

	if (dev->flags & IFF_ALLMULTI) {
		/* allmulti: receive every multicast frame */
		vmolr |= IXGBE_VMOLR_MPE;
	} else {
		/* otherwise rely on the multicast table */
		vmolr |= IXGBE_VMOLR_ROMPE;
		hw->mac.ops.update_mc_addr_list(hw, dev);
	}
	ixgbe_write_uc_addr_list(adapter->netdev);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}

/**
 * ixgbe_add_mac_filter - program a unicast MAC filter for a pool
 * @adapter: board private structure
 * @addr: MAC address to install
 * @pool: VMDq pool the address belongs to
 *
 * RAR entries for pools are consumed from the top of the table downward.
 */
static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
				 u8 *addr, u16 pool)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int entry;

	entry = hw->mac.num_rar_entries - pool;
	hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
}

/* Program packet-split receive type (PSRTYPE) for the pool backing a
 * macvlan offload device. No-op on 82598, which lacks per-pool PSRTYPE.
 */
static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
	struct ixgbe_adapter *adapter = vadapter->real_adapter;
	int rss_i = adapter->num_rx_queues_per_pool;
	struct ixgbe_hw *hw = &adapter->hw;
	u16 pool = vadapter->pool;
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* bits 31:29 encode the RSS field width for the pool */
	if (rss_i > 3)
		psrtype |= 2 << 29;
	else if (rss_i > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];
		if
 (rx_buffer->skb) {
			struct sk_buff *skb = rx_buffer->skb;
			/* unmap the page still held by a partially
			 * assembled skb before freeing it
			 */
			if (IXGBE_CB(skb)->page_released) {
				dma_unmap_page(dev,
					       IXGBE_CB(skb)->dma,
					       ixgbe_rx_bufsz(rx_ring),
					       DMA_FROM_DEVICE);
				IXGBE_CB(skb)->page_released = false;
			}
			dev_kfree_skb(skb);
		}
		rx_buffer->skb = NULL;
		if (rx_buffer->dma)
			dma_unmap_page(dev, rx_buffer->dma,
				       ixgbe_rx_pg_size(rx_ring),
				       DMA_FROM_DEVICE);
		rx_buffer->dma = 0;
		if (rx_buffer->page)
			__free_pages(rx_buffer->page,
				     ixgbe_rx_pg_order(rx_ring));
		rx_buffer->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/* Quiesce one Rx ring used by a macvlan offload pool: stop the queue,
 * wait for DMA to settle, mask its interrupt, drop its buffers and
 * detach it from the accelerator.
 */
static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
				   struct ixgbe_ring *rx_ring)
{
	struct ixgbe_adapter *adapter = vadapter->real_adapter;
	int index = rx_ring->queue_index + vadapter->rx_base_queue;

	/* shutdown specific queue receive and wait for dma to settle */
	ixgbe_disable_rx_queue(adapter, rx_ring);
	usleep_range(10000, 20000);
	ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
	ixgbe_clean_rx_ring(rx_ring);
	rx_ring->l2_accel_priv = NULL;
}

/* Tear down the Rx/Tx rings backing a macvlan offload netdev and hand
 * them back to the PF netdev. Always returns 0.
 */
static int ixgbe_fwd_ring_down(struct net_device *vdev,
			       struct ixgbe_fwd_adapter *accel)
{
	struct ixgbe_adapter *adapter = accel->real_adapter;
	unsigned int rxbase = accel->rx_base_queue;
	unsigned int txbase = accel->tx_base_queue;
	int i;

	netif_tx_stop_all_queues(vdev);

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
	}

	for (i = 0; i <
 adapter->num_rx_queues_per_pool; i++) {
		/* Tx rings use the same per-pool queue count as Rx */
		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
	}


	return 0;
}

/* Attach a macvlan offload netdev to its reserved pool of Rx/Tx rings
 * and program the pool's filters and Rx mode. On queue-count failure
 * the rings are torn back down via ixgbe_fwd_ring_down().
 */
static int ixgbe_fwd_ring_up(struct net_device *vdev,
			     struct ixgbe_fwd_adapter *accel)
{
	struct ixgbe_adapter *adapter = accel->real_adapter;
	unsigned int rxbase, txbase, queues;
	int i, baseq, err = 0;

	/* nothing to do unless the pool is actually reserved */
	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
		return 0;

	baseq = accel->pool * adapter->num_rx_queues_per_pool;
	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
		   accel->pool, adapter->num_rx_pools,
		   baseq, baseq + adapter->num_rx_queues_per_pool,
		   adapter->fwd_bitmask);

	accel->netdev = vdev;
	accel->rx_base_queue = rxbase = baseq;
	accel->tx_base_queue = txbase = baseq;

	/* quiesce the rings before re-pointing them at the macvlan dev */
	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->rx_ring[rxbase + i]->netdev = vdev;
		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
	}

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->tx_ring[txbase + i]->netdev = vdev;
		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
	}

	queues = min_t(unsigned int,
		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
	err = netif_set_real_num_tx_queues(vdev, queues);
	if (err)
		goto fwd_queue_err;

	err = netif_set_real_num_rx_queues(vdev, queues);
	if (err)
		goto fwd_queue_err;

	if (is_valid_ether_addr(vdev->dev_addr))
		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);

	ixgbe_fwd_psrtype(accel);
	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
	return err;
fwd_queue_err:
	ixgbe_fwd_ring_down(vdev, accel);
	return err;
}

/* Re-run ring bring-up for every macvlan offload upper device, e.g.
 * after the hardware has been reset. Errors are skipped per-device.
 */
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
	struct net_device *upper;
	struct list_head *iter;
	int err;

	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
		if (netif_is_macvlan(upper)) {
			struct macvlan_dev *dfwd = netdev_priv(upper);
			struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;

			if (dfwd->fwd_priv) {
				err = ixgbe_fwd_ring_up(upper, vadapter);
				if (err)
					continue;
			}
		}
	}
}

/* Top-level hardware configuration: packet buffers, DCB, SR-IOV,
 * Rx mode/VLANs, Flow Director, FCoE and finally the Tx/Rx rings.
 */
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
	ixgbe_configure_dcb(adapter);
#endif
	/*
	 * We must restore virtualization before VLANs or else
	 * the VLVF registers will not be populated
	 */
	ixgbe_configure_virtualization(adapter);

	ixgbe_set_rx_mode(adapter->netdev);
	ixgbe_restore_vlan(adapter);

	/* Rx buffering is disabled around Flow Director init on
	 * 82599/X540
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hw->mac.ops.disable_rx_buff(hw);
		break;
	default:
		break;
	}

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		ixgbe_init_fdir_signature_82599(&adapter->hw,
						adapter->fdir_pballoc);
	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
		ixgbe_init_fdir_perfect_82599(&adapter->hw,
					      adapter->fdir_pballoc);
		ixgbe_fdir_filter_restore(adapter);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hw->mac.ops.enable_rx_buff(hw);
		break;
	default:
		break;
	}

#ifdef IXGBE_FCOE
	/* configure FCoE L2 filters, redirection table, and Rx control */
	ixgbe_configure_fcoe(adapter);

#endif /* IXGBE_FCOE */
	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	ixgbe_configure_dfwd(adapter);
}

/* Return true when the attached PHY/module is an SFP or QSFP type
 * (including ixgbe_phy_nl, but only on 82598 parts).
 */
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_active_unknown:
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		return true;
	case ixgbe_phy_nl:
		if (hw->mac.type == ixgbe_mac_82598EB)
			return true;
		/* fall through */
	default:
		return false;
	}
}

/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
	/*
	 * We are assuming the worst case scenario here, and that
	 * is that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.
As such the best
	 * solution is to just start searching as soon as we start
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
}

/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
	u32 speed;
	bool autoneg, link_up = false;
	u32 ret = IXGBE_ERR_LINK_SETUP;

	if (hw->mac.ops.check_link)
		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (ret)
		goto link_cfg_out;

	/* use advertised speed if set, otherwise query capabilities */
	speed = hw->phy.autoneg_advertised;
	if ((!speed) && (hw->mac.ops.get_link_capabilities))
		ret = hw->mac.ops.get_link_capabilities(hw, &speed,
							&autoneg);
	if (ret)
		goto link_cfg_out;

	if (hw->mac.ops.setup_link)
		ret = hw->mac.ops.setup_link(hw, speed, link_up);
link_cfg_out:
	return ret;
}

/* Program the GPIE register: interrupt mode (MSI-X vs legacy),
 * auto-masking, SR-IOV VT mode, thermal sensor and fan fail /
 * SDP interrupt enables.
 */
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie = 0;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
		       IXGBE_GPIE_OCD;
		gpie |= IXGBE_GPIE_EIAME;
		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
			break;
		}
	} else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx
 interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* XXX: to interrupt immediately for EICS writes, enable this */
	/* gpie |= IXGBE_GPIE_EIMEN; */

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		gpie &= ~IXGBE_GPIE_VTMODE_MASK;

		/* VT mode mirrors the VMDq queue mask in use */
		switch (adapter->ring_feature[RING_F_VMDQ].mask) {
		case IXGBE_82599_VMDQ_8Q_MASK:
			gpie |= IXGBE_GPIE_VTMODE_16;
			break;
		case IXGBE_82599_VMDQ_4Q_MASK:
			gpie |= IXGBE_GPIE_VTMODE_32;
			break;
		default:
			gpie |= IXGBE_GPIE_VTMODE_64;
			break;
		}
	}

	/* Enable Thermal over heat sensor interrupt */
	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			gpie |= IXGBE_SDP0_GPIEN;
			break;
		case ixgbe_mac_X540:
			gpie |= IXGBE_EIMS_TS;
			break;
		default:
			break;
		}
	}

	/* Enable fan failure interrupt */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		gpie |= IXGBE_SDP1_GPIEN;

	/* 82599 also takes SDP1/SDP2 interrupts */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP1_GPIEN;
		gpie |= IXGBE_SDP2_GPIEN;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}

/* Final stage of interface bring-up: take hardware control, enable
 * interrupts and NAPI, configure the link and kick the service timer.
 */
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;
	u32 ctrl_ext;

	ixgbe_get_hw_control(adapter);
	ixgbe_setup_gpie(adapter);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	/* enable the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.enable_tx_laser)
		hw->mac.ops.enable_tx_laser(hw);

	/* order the state-bit clear after prior writes */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	if (ixgbe_is_sfp(hw)) {
		ixgbe_sfp_link_config(adapter);
	} else {
		err = ixgbe_non_sfp_link_config(hw);
		if (err)
			e_err(probe, "link_config FAILED %d\n", err);
	}

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_irq_enable(adapter, true, true);

	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(drv, "Fan has stopped, replace the adapter\n");
	}

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->service_timer, jiffies);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/* Take the interface down and back up while holding the
 * __IXGBE_RESETTING state bit; must not be called from IRQ context.
 */
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	/* put off any impending NetWatchDogTimeout */
	adapter->netdev->trans_start = jiffies;

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	ixgbe_down(adapter);
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset. The
	 * two second wait is based upon the watchdog timer cycle in
	 * the VF driver.
4727 */ 4728 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 4729 msleep(2000); 4730 ixgbe_up(adapter); 4731 clear_bit(__IXGBE_RESETTING, &adapter->state); 4732 } 4733 4734 void ixgbe_up(struct ixgbe_adapter *adapter) 4735 { 4736 /* hardware has been reset, we need to reload some things */ 4737 ixgbe_configure(adapter); 4738 4739 ixgbe_up_complete(adapter); 4740 } 4741 4742 void ixgbe_reset(struct ixgbe_adapter *adapter) 4743 { 4744 struct ixgbe_hw *hw = &adapter->hw; 4745 int err; 4746 4747 if (ixgbe_removed(hw->hw_addr)) 4748 return; 4749 /* lock SFP init bit to prevent race conditions with the watchdog */ 4750 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 4751 usleep_range(1000, 2000); 4752 4753 /* clear all SFP and link config related flags while holding SFP_INIT */ 4754 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | 4755 IXGBE_FLAG2_SFP_NEEDS_RESET); 4756 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4757 4758 err = hw->mac.ops.init_hw(hw); 4759 switch (err) { 4760 case 0: 4761 case IXGBE_ERR_SFP_NOT_PRESENT: 4762 case IXGBE_ERR_SFP_NOT_SUPPORTED: 4763 break; 4764 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 4765 e_dev_err("master disable timed out\n"); 4766 break; 4767 case IXGBE_ERR_EEPROM_VERSION: 4768 /* We are running on a pre-production device, log a warning */ 4769 e_dev_warn("This device is a pre-production adapter/LOM. " 4770 "Please be aware there may be issues associated with " 4771 "your hardware. If you are experiencing problems " 4772 "please contact your Intel or hardware " 4773 "representative who provided you with this " 4774 "hardware.\n"); 4775 break; 4776 default: 4777 e_dev_err("Hardware Error: %d\n", err); 4778 } 4779 4780 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 4781 4782 /* reprogram the RAR[0] in case user changed it. 
 */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);

	/* update SAN MAC vmdq pool selection */
	if (hw->mac.san_mac_rar_index)
		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));

	/* PTP state is lost on reset; reprogram it if it was running */
	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_reset(adapter);
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	/* reset BQL accounting for this queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}

/* Free every software-tracked Flow Director filter entry. */
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	hlist_for_each_entry_safe(filter, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	adapter->fdir_filter_count = 0;

	spin_unlock(&adapter->fdir_perfect_lock);
}

/* Bring the interface fully down: stop Rx, quiesce queues and upper
 * macvlan devices, disable interrupts/NAPI, notify VFs, halt Tx and
 * reset the hardware. Idempotent via the __IXGBE_DOWN state bit.
 */
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *upper;
	struct list_head *iter;
	u32 rxctrl;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable receives */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		/* this call also flushes the previous write */
		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	/* disable any upper devices */
	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
		if (netif_is_macvlan(upper)) {
			struct macvlan_dev *vlan = netdev_priv(upper);

			if (vlan->fwd_priv) {
				netif_tx_stop_all_queues(upper);
				netif_carrier_off(upper);
				netif_tx_disable(upper);
			}
		}
	}

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);

	adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
			     IXGBE_FLAG2_RESET_REQUESTED);
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;

	del_timer_sync(&adapter->service_timer);

	if
 (adapter->num_vfs) {
		/* Clear EITR Select mapping */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);

		/* Mark all the VFs as inactive */
		for (i = 0 ; i < adapter->num_vfs; i++)
			adapter->vfinfo[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		ixgbe_ping_all_vfs(adapter);

		/* Disable all VFTE/VFRE TX/RX */
		ixgbe_disable_tx_rx(adapter);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	}

	/* Disable the Tx DMA engine on 82599 and X540 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
				 ~IXGBE_DMATXCTL_TE));
		break;
	default:
		break;
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	ixgbe_setup_dca(adapter);
#endif
}

/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	ixgbe_tx_timeout_reset(adapter);
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the
 Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss, fdir;
	u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set common capability flags and settings */
	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
	adapter->ring_feature[RING_F_RSS].limit = rss;
	adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
	adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
	adapter->max_q_vectors = MAX_Q_VECTORS_82599;
	adapter->atr_sample_rate = 20;
	fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
	adapter->ring_feature[RING_F_FDIR].limit = fdir;
	adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef CONFIG_IXGBE_DCA
	adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
#ifdef IXGBE_FCOE
	adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
	/* Default traffic class to use for FCoE */
	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */

	/* Set MAC specific capability flags and exceptions */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has no RSC or Flow Director support */
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;

		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;

		adapter->max_q_vectors = MAX_Q_VECTORS_82598;
		adapter->ring_feature[RING_F_FDIR].limit = 0;
		adapter->atr_sample_rate = 0;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
		adapter->fcoe.up = 0;
#endif /* IXGBE_DCB */
#endif /* IXGBE_FCOE */
		break;
	case ixgbe_mac_82599EB:
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		break;
	case ixgbe_mac_X540:
		/* X540 thermal sensor availability is firmware-reported */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		break;
	default:
		break;
	}

#ifdef IXGBE_FCOE
	/* FCoE support exists, always init the FCoE lock */
	spin_lock_init(&adapter->fcoe.lock);

#endif
	/* n-tuple support exists, always init our spinlock */
	spin_lock_init(&adapter->fdir_perfect_lock);

#ifdef CONFIG_IXGBE_DCB
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
		break;
	default:
		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
		break;
	}

	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		/* 12/13% splits sum to 100% over 8 TCs */
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &adapter->dcb_cfg.tc_config[0];
	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;

	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
	       sizeof(adapter->temp_dcb_cfg));

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
	ixgbe_pbthresh_setup(adapter);
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);

#ifdef CONFIG_PCI_IOV
	if (max_vfs > 0)
		e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");

	/* assign number of SR-IOV VFs */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
			adapter->num_vfs = 0;
			e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
		} else {
			adapter->num_vfs = max_vfs;
		}
	}
#endif /* CONFIG_PCI_IOV */

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* set default work limits */
	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		e_dev_err("EEPROM initialization failed\n");
		return -EIO;
	}

	/* PF holds first pool slot */
	set_bit(0, &adapter->fwd_bitmask);
	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int numa_node = -1;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;

	if (tx_ring->q_vector)
		numa_node = tx_ring->q_vector->numa_node;

	/* try NUMA-local allocation first, then fall back to any node */
	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	/* steer the coherent allocation to the q_vector's node, then
	 * restore the device's original node and retry unconstrained
	 * if that failed
	 */
	set_dev_node(dev, numa_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size, 5193 &tx_ring->dma, GFP_KERNEL); 5194 if (!tx_ring->desc) 5195 goto err; 5196 5197 tx_ring->next_to_use = 0; 5198 tx_ring->next_to_clean = 0; 5199 return 0; 5200 5201 err: 5202 vfree(tx_ring->tx_buffer_info); 5203 tx_ring->tx_buffer_info = NULL; 5204 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); 5205 return -ENOMEM; 5206 } 5207 5208 /** 5209 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources 5210 * @adapter: board private structure 5211 * 5212 * If this function returns with an error, then it's possible one or 5213 * more of the rings is populated (while the rest are not). It is the 5214 * callers duty to clean those orphaned rings. 5215 * 5216 * Return 0 on success, negative on failure 5217 **/ 5218 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) 5219 { 5220 int i, err = 0; 5221 5222 for (i = 0; i < adapter->num_tx_queues; i++) { 5223 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); 5224 if (!err) 5225 continue; 5226 5227 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 5228 goto err_setup_tx; 5229 } 5230 5231 return 0; 5232 err_setup_tx: 5233 /* rewind the index freeing the rings as we go */ 5234 while (i--) 5235 ixgbe_free_tx_resources(adapter->tx_ring[i]); 5236 return err; 5237 } 5238 5239 /** 5240 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 5241 * @rx_ring: rx descriptor ring (for a specific queue) to setup 5242 * 5243 * Returns 0 on success, negative on failure 5244 **/ 5245 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) 5246 { 5247 struct device *dev = rx_ring->dev; 5248 int orig_node = dev_to_node(dev); 5249 int numa_node = -1; 5250 int size; 5251 5252 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 5253 5254 if (rx_ring->q_vector) 5255 numa_node = rx_ring->q_vector->numa_node; 5256 5257 rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); 5258 if (!rx_ring->rx_buffer_info) 5259 rx_ring->rx_buffer_info = vzalloc(size); 5260 if 
	    (!rx_ring->rx_buffer_info)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	/* prefer a NUMA-local descriptor area; retry on any node on failure */
	set_dev_node(dev, numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
						   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	/* vfree(NULL) is a no-op, so this is safe on either failure path */
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;

		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

#ifdef IXGBE_FCOE
	/* on FCoE DDP failure, i == num_rx_queues and the rewind below
	 * frees every ring that was just set up
	 */
	err = ixgbe_setup_fcoe_ddp_resources(adapter);
	if (!err)
#endif
		return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ixgbe_free_rx_resources(adapter->rx_ring[i]);
	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

#ifdef IXGBE_FCOE
	/* FCoE DDP resources are shared across rings; release them first */
	ixgbe_free_fcoe_ddp_resources(adapter);

#endif
	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when MTU greater than 1500 is configured. So display a
	 * warning that legacy VFs will be disabled.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
	    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err, queues;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts.
*/ 5484 if (adapter->num_rx_pools > 1) 5485 queues = adapter->num_rx_queues_per_pool; 5486 else 5487 queues = adapter->num_tx_queues; 5488 5489 err = netif_set_real_num_tx_queues(netdev, queues); 5490 if (err) 5491 goto err_set_queues; 5492 5493 if (adapter->num_rx_pools > 1 && 5494 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) 5495 queues = IXGBE_MAX_L2A_QUEUES; 5496 else 5497 queues = adapter->num_rx_queues; 5498 err = netif_set_real_num_rx_queues(netdev, queues); 5499 if (err) 5500 goto err_set_queues; 5501 5502 ixgbe_ptp_init(adapter); 5503 5504 ixgbe_up_complete(adapter); 5505 5506 return 0; 5507 5508 err_set_queues: 5509 ixgbe_free_irq(adapter); 5510 err_req_irq: 5511 ixgbe_free_all_rx_resources(adapter); 5512 err_setup_rx: 5513 ixgbe_free_all_tx_resources(adapter); 5514 err_setup_tx: 5515 ixgbe_reset(adapter); 5516 5517 return err; 5518 } 5519 5520 /** 5521 * ixgbe_close - Disables a network interface 5522 * @netdev: network interface device structure 5523 * 5524 * Returns 0, this is not allowed to fail 5525 * 5526 * The close entry point is called when an interface is de-activated 5527 * by the OS. The hardware is still under the drivers control, but 5528 * needs to be disabled. A global MAC reset is issued to stop the 5529 * hardware, and all transmit and receive resources are freed. 
5530 **/ 5531 static int ixgbe_close(struct net_device *netdev) 5532 { 5533 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5534 5535 ixgbe_ptp_stop(adapter); 5536 5537 ixgbe_down(adapter); 5538 ixgbe_free_irq(adapter); 5539 5540 ixgbe_fdir_filter_exit(adapter); 5541 5542 ixgbe_free_all_tx_resources(adapter); 5543 ixgbe_free_all_rx_resources(adapter); 5544 5545 ixgbe_release_hw_control(adapter); 5546 5547 return 0; 5548 } 5549 5550 #ifdef CONFIG_PM 5551 static int ixgbe_resume(struct pci_dev *pdev) 5552 { 5553 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 5554 struct net_device *netdev = adapter->netdev; 5555 u32 err; 5556 5557 adapter->hw.hw_addr = adapter->io_addr; 5558 pci_set_power_state(pdev, PCI_D0); 5559 pci_restore_state(pdev); 5560 /* 5561 * pci_restore_state clears dev->state_saved so call 5562 * pci_save_state to restore it. 5563 */ 5564 pci_save_state(pdev); 5565 5566 err = pci_enable_device_mem(pdev); 5567 if (err) { 5568 e_dev_err("Cannot enable PCI device from suspend\n"); 5569 return err; 5570 } 5571 smp_mb__before_atomic(); 5572 clear_bit(__IXGBE_DISABLED, &adapter->state); 5573 pci_set_master(pdev); 5574 5575 pci_wake_from_d3(pdev, false); 5576 5577 ixgbe_reset(adapter); 5578 5579 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 5580 5581 rtnl_lock(); 5582 err = ixgbe_init_interrupt_scheme(adapter); 5583 if (!err && netif_running(netdev)) 5584 err = ixgbe_open(netdev); 5585 5586 rtnl_unlock(); 5587 5588 if (err) 5589 return err; 5590 5591 netif_device_attach(netdev); 5592 5593 return 0; 5594 } 5595 #endif /* CONFIG_PM */ 5596 5597 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 5598 { 5599 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 5600 struct net_device *netdev = adapter->netdev; 5601 struct ixgbe_hw *hw = &adapter->hw; 5602 u32 ctrl, fctrl; 5603 u32 wufc = adapter->wol; 5604 #ifdef CONFIG_PM 5605 int retval = 0; 5606 #endif 5607 5608 netif_device_detach(netdev); 5609 5610 rtnl_lock(); 5611 if 
	    (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}
	rtnl_unlock();

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (hw->mac.ops.stop_link_on_d3)
		hw->mac.ops.stop_link_on_d3(hw);

	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* enable the optics for 82599 SFP+ fiber as we can WoL */
		if (hw->mac.ops.enable_tx_laser)
			hw->mac.ops.enable_tx_laser(hw);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		/* no wake sources configured: disable wake-up entirely */
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 never arms D3 wake */
		pci_wake_from_d3(pdev, false);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		pci_wake_from_d3(pdev, !!wufc);
		break;
	default:
		break;
	}

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
/* PM suspend entry point: tear down and enter the appropriate D-state */
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

/* reboot/poweroff hook; only transition to D3hot on a real power-off */
static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;

	/* skip while the rings are being torn down or reconfigured */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	/* aggregate per-ring Rx software counters into the adapter struct */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed =
	    alloc_rx_buff_failed;
	adapter->hw_csum_rx_error = hw_csum_rx_error;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);

	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			break;
		default:
			break;
		}
	}

	/*16 register reads */
	for (i = 0; i < 16; i++) {
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		if ((hw->mac.type == ixgbe_mac_82599EB) ||
		    (hw->mac.type == ixgbe_mac_X540)) {
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
		}
	}

	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
		break;
	case ixgbe_mac_X540:
		/* OS2BMC stats are X540 only*/
		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
		/* fall through - X540 also reads the 82599 counters below */
	case ixgbe_mac_82599EB:
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
		/* Add up per cpu counters for total ddp aloc fail */
		if (adapter->fcoe.ddp_pool) {
			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
			struct ixgbe_fcoe_ddp_pool *ddp_pool;
			unsigned int cpu;
			u64 noddp = 0, noddp_ext_buff = 0;
			for_each_possible_cpu(cpu) {
				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
				noddp += ddp_pool->noddp;
				noddp_ext_buff += ddp_pool->noddp_ext_buff;
			}
			hwstats->fcoe_noddp = noddp;
			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
		}
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hwstats->bprc += bprc;
	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hwstats->mprc -= bprc;
	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hwstats->lxofftxc += lxoff;
	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hwstats->ptc64 -= xon_off_tot;
	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_crc_errors = hwstats->crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}

/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* do nothing if we are not using signature filters */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
		return;

	adapter->fdir_overflow++;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
		/* re-enable flow director interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, "
		      "ignored adding FDIR ATR filters\n");
	}
}

/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 */
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/* If we're down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
				(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
	} else {
		/* get one bit for every active tx/rx interrupt vector */
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct ixgbe_q_vector *qv = adapter->q_vector[i];
			if (qv->rx.ring || qv->tx.ring)
				eics |= ((u64)1 << i);
		}
	}

	/* Cause software interrupt to ensure rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

}

/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 *
 * Refreshes adapter->link_up/link_speed from the MAC and enables flow
 * control when appropriate.
 **/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	if (hw->mac.ops.check_link) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/* always assume link is up, if no check link function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	/* enable link-level flow control unless DCB priority flow control
	 * is active (the two are mutually exclusive)
	 */
	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
		hw->mac.ops.fc_enable(hw);
		ixgbe_set_rx_drop_en(adapter);
	}

	if (link_up ||
	    time_after(jiffies, (adapter->link_check_timeout +
				 IXGBE_TRY_LINK_TIMEOUT))) {
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		/* re-enable the link-status-change interrupt (same bit
		 * position in EIMS/EIMC)
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

/* derive the default user priority for VFs from the IEEE DCB app table */
static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	struct net_device *netdev = adapter->netdev;
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = 0,
	};
	u8 up = 0;

	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
		up = dcb_ieee_getapp_mask(netdev, &app);

	/* lowest set priority bit in the mask, else 0 */
	adapter->default_up = (up > 1) ?
	    (ffs(up) - 1) : 0;
#endif
}

/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *upper;
	struct list_head *iter;
	u32 link_speed = adapter->link_speed;
	bool flow_rx, flow_tx;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;

	/* read back the MAC-specific flow-control state for the log line */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB: {
		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
	}
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB: {
		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
	}
		break;
	default:
		flow_tx = false;
		flow_rx = false;
		break;
	}

	adapter->last_rx_ptp_check = jiffies;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
	       "10 Gbps" :
	       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
	       "1 Gbps" :
	       (link_speed == IXGBE_LINK_SPEED_100_FULL ?
	       "100 Mbps" :
	       "unknown speed"))),
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));

	netif_carrier_on(netdev);
	ixgbe_check_vf_rate_limit(adapter);

	/* enable transmits */
	netif_tx_wake_all_queues(adapter->netdev);

	/* enable any upper devices */
	rtnl_lock();
	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
		if (netif_is_macvlan(upper)) {
			struct macvlan_dev *vlan = netdev_priv(upper);

			/* only macvlans offloaded onto our queues */
			if (vlan->fwd_priv)
				netif_tx_wake_all_queues(upper);
		}
	}
	rtnl_unlock();

	/* update the default user priority for VFs */
	ixgbe_update_default_up(adapter);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}

/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	/* poll for SFP+ cable when link is down */
	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}

/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
	int i;
	int some_tx_pending = 0;

	if
	    (!netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			e_warn(drv, "initiating reset to clear Tx work after link loss\n");
			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		}
	}
}

/* warn if the hardware reports VF MAC/VLAN spoofing attempts */
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
	u32 ssvpc;

	/* Do not perform spoof check for 82598 or if not in IOV mode */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		return;

	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
	if (!ssvpc)
		return;

	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}

/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
	/* if interface is down, removing or resetting, do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbe_watchdog_link_is_up(adapter);
	else
		ixgbe_watchdog_link_is_down(adapter);

	ixgbe_spoof_check(adapter);
	ixgbe_update_stats(adapter);

	ixgbe_watchdog_flush_tx(adapter);
}

/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 err;

	/* not searching for SFP so there is nothing to do here */
	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable.
		 */
		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	}

	/* exit on error */
	if (err)
		goto sfp_out;

	/* exit if reset not needed */
	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		goto sfp_out;

	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);

sfp_out:
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported "
			  "SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a "
			  "supported module.\n");
		unregister_netdev(adapter->netdev);
	}
}

/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 speed;
	bool autoneg = false;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	speed = hw->phy.autoneg_advertised;
	if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);

		/* setup the highest link when no autoneg */
		if (!autoneg) {
			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
				speed = IXGBE_LINK_SPEED_10GB_FULL;
		}
	}

	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, speed, true);

	/* force the watchdog to re-check link state soon */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}

#ifdef CONFIG_PCI_IOV
/*
 * ixgbe_check_for_bad_vf - detect a VF whose DMA writes were aborted
 * @adapter: pointer to the device adapter structure
 *
 * Uses the 82599 CIAA/CIAD configuration-access debug registers to read
 * each VF's PCI status word; a Received Master Abort means the VF issued
 * a bad DMA write, so a VFLR is issued to just that VF instead of
 * requiring a full slot reset.
 */
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
	int vf;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 gpc;
	u32 ciaa, ciad;

	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
		return;
	/*
	 * Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */

	for (vf = 0; vf < adapter->num_vfs; vf++) {
		/* bit 31 enables debug-mode config access for this VF */
		ciaa = (vf << 16) | 0x80000000;
		/* 32 bit read so align, we really want status at offset 6 */
		ciaa |= PCI_COMMAND;
		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
		ciaa &= 0x7FFFFFFF;
		/* disable debug mode asap after reading data */
		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		/* Get the upper 16 bits which will be the PCI status reg */
		ciad >>= 16;
		if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
			netdev_err(netdev, "VF %d Hung DMA\n", vf);
			/* Issue VFLR */
			ciaa = (vf << 16) | 0x80000000;
			ciaa |= 0xA8;	/* device-control register offset */
			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
			ciad = 0x00008000;  /* VFLR */
			IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
			ciaa &= 0x7FFFFFFF;
			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		}
	}
}

#endif
/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 *
 * Reschedules itself (fast poll while waiting for link, very fast poll
 * while VF DMA-hang checking, slow poll otherwise) and kicks the
 * service task when the accumulated interval has elapsed.
 **/
static void ixgbe_service_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	unsigned long next_event_offset;
	bool ready = true;

	/* poll faster when waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

#ifdef CONFIG_PCI_IOV
	/*
	 * don't bother with SR-IOV VF DMA hang check if there are
	 * no VFs or the link is down
	 */
	if (!adapter->num_vfs ||
	    (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		goto normal_timer_service;

	/* If we have VFs allocated then we must check for DMA hangs */
	ixgbe_check_for_bad_vf(adapter);
	next_event_offset = HZ / 50;
	adapter->timer_event_accumulator++;

	/* only schedule the service task on every 100th fast tick */
	if (adapter->timer_event_accumulator >= 100)
		adapter->timer_event_accumulator = 0;
	else
		ready = false;

normal_timer_service:
#endif
	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

	if (ready)
		ixgbe_service_event_schedule(adapter);
}

/*
 * ixgbe_reset_subtask - perform a deferred adapter reset if one was
 * requested (e.g. by the Tx hang or link-loss watchdog paths)
 */
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	adapter->tx_timeout_count++;

	rtnl_lock();
	ixgbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 *
 * Runs every periodic maintenance subtask in a fixed order; bails out
 * early (taking the interface down) if the device has been surprise
 * removed.
 **/
static void ixgbe_service_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     service_task);
	if (ixgbe_removed(adapter->hw.hw_addr)) {
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbe_down(adapter);
			rtnl_unlock();
		}
		ixgbe_service_event_complete(adapter);
		return;
	}
	ixgbe_reset_subtask(adapter);
	ixgbe_sfp_detection_subtask(adapter);
	ixgbe_sfp_link_config_subtask(adapter);
	ixgbe_check_overtemp_subtask(adapter);
	ixgbe_watchdog_subtask(adapter);
	ixgbe_fdir_reinit_subtask(adapter);
	ixgbe_check_hang_subtask(adapter);

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
		ixgbe_ptp_overflow_check(adapter);
		ixgbe_ptp_rx_hang(adapter);
	}

	ixgbe_service_event_complete(adapter);
}

/*
 * ixgbe_tso - set up a TSO context descriptor for a GSO skb
 * Returns 1 if a context descriptor was written, 0 if TSO does not
 * apply, or a negative errno from skb_cow_head().
 */
static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
		     u8
	     *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* headers must be writable so the checksum fields can be primed */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		/* zero tot_len/check and seed the TCP pseudo-header
		 * checksum; the hardware fills in the rest per segment */
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 0 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
			  mss_l4len_idx);

	return 1;
}

/*
 * ixgbe_tx_csum - set up a checksum-offload context descriptor
 * Writes a context descriptor describing the L3/L4 headers when the
 * stack asked for CHECKSUM_PARTIAL (or when a VLAN/CC context is
 * needed anyway).
 */
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
			  struct ixgbe_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* no checksum needed; still emit a context descriptor
		 * if a HW VLAN tag or check-context bit must be carried */
		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
		    !(first->tx_flags & IXGBE_TX_FLAGS_CC))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
			  type_tucmd, mss_l4len_idx);
}

/* Translate a set bit in _input/_flag into _result using only a
 * multiply or divide by a (compile-time constant) power of two --
 * avoids a branch per flag when building descriptor command words. */
#define IXGBE_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

/*
 * ixgbe_tx_cmd_type - build the cmd_type_len word for a data descriptor
 * from the per-packet tx_flags.
 */
static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		       IXGBE_ADVTXD_DCMD_DEXT |
		       IXGBE_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
				   IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
				   IXGBE_ADVTXD_DCMD_TSE);

	/* set timestamp bit if present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
				   IXGBE_ADVTXD_MAC_TSTAMP);

	/* insert frame checksum (XOR clears IFCS when skb->no_fcs is set) */
	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

/*
 * ixgbe_tx_olinfo_status - build the olinfo_status word for the first
 * data descriptor of a packet.
 */
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				   u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* enable L4 checksum for TSO and TX checksum offload */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CSUM,
					IXGBE_ADVTXD_POPTS_TXSM);

	/* enble IPv4 checksum for TSO */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_IPV4,
					IXGBE_ADVTXD_POPTS_IXSM);

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CC,
					IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
		       IXGBE_TXD_CMD_RS)

/*
 * ixgbe_tx_map - map skb data for DMA and post the data descriptors
 * for one packet; on mapping failure all buffers back to @first are
 * unwound.
 */
static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
			 struct ixgbe_tx_buffer *first,
			 const u8 hdr_len)
{
	struct sk_buff
	*skb = first->skb;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBE_TX_DESC(tx_ring, i);

	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

#ifdef IXGBE_FCOE
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		/* the FCoE CRC/EOF trailer is not DMA'd from the skb */
		if (data_len < sizeof(struct fcoe_crc_eof)) {
			size -= sizeof(struct fcoe_crc_eof) - data_len;
			data_len = 0;
		} else {
			data_len -= sizeof(struct fcoe_crc_eof);
		}
	}

#endif
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	/* walk the linear data then each page fragment, splitting any
	 * region larger than IXGBE_MAX_DATA_PER_TXD across descriptors */
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

#ifdef IXGBE_FCOE
		size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
		size = skb_frag_size(frag);
#endif
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IXGBE_TXD_CMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbe_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/*
 * ixgbe_atr - sample a Tx flow for an Application Targeted Routing
 * (Flow Director) signature filter, so Rx for the same TCP flow lands
 * on the CPU that transmitted it.
 */
static void ixgbe_atr(struct ixgbe_ring *ring,
		      struct ixgbe_tx_buffer *first)
{
	struct ixgbe_q_vector *q_vector = ring->q_vector;
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	__be16 vlan_id;

	/* if ring doesn't have a interrupt vector, cannot perform ATR */
	if (!q_vector)
		return;

	/* do nothing if sampling is disabled */
	if (!ring->atr_sample_rate)
		return;

	ring->atr_count++;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(first->skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if ((first->protocol != htons(ETH_P_IPV6) ||
	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
	    (first->protocol != htons(ETH_P_IP) ||
	     hdr.ipv4->protocol != IPPROTO_TCP))
		return;

	th = tcp_hdr(first->skb);

	/* skip this packet since it is invalid or the socket is closing */
	if (!th || th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
		return;

	/* reset sample count */
	ring->atr_count = 0;

	vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
	input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
		common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
	else
		common.port.src ^= th->dest ^ first->protocol;
	common.port.dst ^= th->source;

	if (first->protocol == htons(ETH_P_IP)) {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
	} else {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		/* fold all 128 bits of each address into the hash dword */
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
	}

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
}

/*
 * __ixgbe_maybe_stop_tx - stop the subqueue, then re-check for space
 * freed by a racing clean; returns -EBUSY if the queue stays stopped.
 */
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(ixgbe_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/* fast-path wrapper: only take the slow stop/start path when the ring
 * really is short of descriptors */
static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	if (likely(ixgbe_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(tx_ring, size);
}

/*
 * ixgbe_select_queue - ndo_select_queue hook; steers macvlan-offload
 * traffic to its reserved queue range and FCoE/FIP frames onto the
 * FCoE ring set, deferring to the stack's fallback otherwise.
 */
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv, select_queue_fallback_t fallback)
{
	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter;
	struct ixgbe_ring_feature *f;
	int txq;
#endif

	if (fwd_adapter)
		return skb->queue_mapping + fwd_adapter->tx_base_queue;

#ifdef IXGBE_FCOE

	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_FCOE):
	case htons(ETH_P_FIP):
		adapter = netdev_priv(dev);

		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
			break;
		/* fall through - FCoE not enabled, use normal queues */
	default:
		return fallback(dev, skb);
	}

	f = &adapter->ring_feature[RING_F_FCOE];

	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
					   smp_processor_id();

	while (txq >= f->indices)
		txq -= f->indices;

	return txq + f->offset;
#else
	return fallback(dev, skb);
#endif
}

/*
 * ixgbe_xmit_frame_ring - main transmit routine for one ring
 * Reserves descriptors, records VLAN/PTP/DCB metadata in tx_flags,
 * performs TSO/checksum offload setup and hands the skb to
 * ixgbe_tx_map().
 */
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = skb->protocol;
	u8 hdr_len = 0;

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			goto out_drop;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				  IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
	}

	/* only one outstanding Tx timestamp request at a time */
	if (unlikely(skb_shinfo(skb)->tx_flags
		     & SKBTX_HW_TSTAMP &&
		     !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
					    &adapter->state))) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

		/* schedule check for Tx timestamp */
		adapter->ptp_tx_skb = skb_get(skb);
		adapter->ptp_tx_start = jiffies;
		schedule_work(&adapter->ptp_tx_work);
	}

	skb_tx_timestamp(skb);

#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		tx_flags |= IXGBE_TX_FLAGS_CC;

#endif
	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
					IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;

			/* header must be writable to patch the PCP in place */
			if (skb_cow_head(skb, 0))
				goto out_drop;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 IXGBE_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
	if ((protocol == htons(ETH_P_FCOE)) &&
	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
		if (tso < 0)
			goto out_drop;

		goto xmit_fcoe;
	}

#endif /* IXGBE_FCOE */
	tso = ixgbe_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbe_tx_csum(tx_ring, first);

	/* add the ATR filter if ATR is on */
	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
		ixgbe_atr(tx_ring, first);

#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
	ixgbe_tx_map(tx_ring, first, hdr_len);

	/* stop the queue now if the next packet could not fit */
	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/*
 * __ixgbe_xmit_frame - pad undersized skbs and dispatch to a Tx ring
 * @ring: optional explicit ring (macvlan offload); NULL selects by
 *        skb->queue_mapping
 */
static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
				      struct net_device *netdev,
				      struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		if (skb_pad(skb, 17 - skb->len))
			return NETDEV_TX_OK;
		skb->len = 17;
		skb_set_tail_pointer(skb, 17);
	}

	tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];

	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

/* ndo_start_xmit entry point */
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	return __ixgbe_xmit_frame(skb, netdev, NULL);
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* program receive-address register 0 for the PF's pool */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);

	return 0;
} 7186 7187 static int 7188 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) 7189 { 7190 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7191 struct ixgbe_hw *hw = &adapter->hw; 7192 u16 value; 7193 int rc; 7194 7195 if (prtad != hw->phy.mdio.prtad) 7196 return -EINVAL; 7197 rc = hw->phy.ops.read_reg(hw, addr, devad, &value); 7198 if (!rc) 7199 rc = value; 7200 return rc; 7201 } 7202 7203 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, 7204 u16 addr, u16 value) 7205 { 7206 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7207 struct ixgbe_hw *hw = &adapter->hw; 7208 7209 if (prtad != hw->phy.mdio.prtad) 7210 return -EINVAL; 7211 return hw->phy.ops.write_reg(hw, addr, devad, value); 7212 } 7213 7214 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) 7215 { 7216 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7217 7218 switch (cmd) { 7219 case SIOCSHWTSTAMP: 7220 return ixgbe_ptp_set_ts_config(adapter, req); 7221 case SIOCGHWTSTAMP: 7222 return ixgbe_ptp_get_ts_config(adapter, req); 7223 default: 7224 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 7225 } 7226 } 7227 7228 /** 7229 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding 7230 * netdev->dev_addrs 7231 * @netdev: network interface device structure 7232 * 7233 * Returns non-zero on failure 7234 **/ 7235 static int ixgbe_add_sanmac_netdev(struct net_device *dev) 7236 { 7237 int err = 0; 7238 struct ixgbe_adapter *adapter = netdev_priv(dev); 7239 struct ixgbe_hw *hw = &adapter->hw; 7240 7241 if (is_valid_ether_addr(hw->mac.san_addr)) { 7242 rtnl_lock(); 7243 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); 7244 rtnl_unlock(); 7245 7246 /* update SAN MAC vmdq pool selection */ 7247 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 7248 } 7249 return err; 7250 } 7251 7252 /** 7253 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding 7254 * 
netdev->dev_addrs 7255 * @netdev: network interface device structure 7256 * 7257 * Returns non-zero on failure 7258 **/ 7259 static int ixgbe_del_sanmac_netdev(struct net_device *dev) 7260 { 7261 int err = 0; 7262 struct ixgbe_adapter *adapter = netdev_priv(dev); 7263 struct ixgbe_mac_info *mac = &adapter->hw.mac; 7264 7265 if (is_valid_ether_addr(mac->san_addr)) { 7266 rtnl_lock(); 7267 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); 7268 rtnl_unlock(); 7269 } 7270 return err; 7271 } 7272 7273 #ifdef CONFIG_NET_POLL_CONTROLLER 7274 /* 7275 * Polling 'interrupt' - used by things like netconsole to send skbs 7276 * without having to re-enable interrupts. It's not called while 7277 * the interrupt routine is executing. 7278 */ 7279 static void ixgbe_netpoll(struct net_device *netdev) 7280 { 7281 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7282 int i; 7283 7284 /* if interface is down do nothing */ 7285 if (test_bit(__IXGBE_DOWN, &adapter->state)) 7286 return; 7287 7288 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 7289 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 7290 for (i = 0; i < adapter->num_q_vectors; i++) 7291 ixgbe_msix_clean_rings(0, adapter->q_vector[i]); 7292 } else { 7293 ixgbe_intr(adapter->pdev->irq, netdev); 7294 } 7295 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 7296 } 7297 7298 #endif 7299 static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, 7300 struct rtnl_link_stats64 *stats) 7301 { 7302 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7303 int i; 7304 7305 rcu_read_lock(); 7306 for (i = 0; i < adapter->num_rx_queues; i++) { 7307 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); 7308 u64 bytes, packets; 7309 unsigned int start; 7310 7311 if (ring) { 7312 do { 7313 start = u64_stats_fetch_begin_irq(&ring->syncp); 7314 packets = ring->stats.packets; 7315 bytes = ring->stats.bytes; 7316 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 7317 stats->rx_packets += packets; 7318 
			stats->rx_bytes += bytes;
		}
	}

	/* same consistent-snapshot dance for the TX rings */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
			stats->tx_packets += packets;
			stats->tx_bytes += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast = netdev->stats.multicast;
	stats->rx_errors = netdev->stats.rx_errors;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
	return stats;
}

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: pointer to ixgbe_adapter
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
 * 802.1Q priority maps to a packet buffer that exists.
 */
static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg, rsave;
	int i;

	/* 82598 have a static priority to TC mapping that can not
	 * be changed so no validation is needed.
7364 */ 7365 if (hw->mac.type == ixgbe_mac_82598EB) 7366 return; 7367 7368 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); 7369 rsave = reg; 7370 7371 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 7372 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); 7373 7374 /* If up2tc is out of bounds default to zero */ 7375 if (up2tc > tc) 7376 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); 7377 } 7378 7379 if (reg != rsave) 7380 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); 7381 7382 return; 7383 } 7384 7385 /** 7386 * ixgbe_set_prio_tc_map - Configure netdev prio tc map 7387 * @adapter: Pointer to adapter struct 7388 * 7389 * Populate the netdev user priority to tc map 7390 */ 7391 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) 7392 { 7393 struct net_device *dev = adapter->netdev; 7394 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; 7395 struct ieee_ets *ets = adapter->ixgbe_ieee_ets; 7396 u8 prio; 7397 7398 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { 7399 u8 tc = 0; 7400 7401 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) 7402 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); 7403 else if (ets) 7404 tc = ets->prio_tc[prio]; 7405 7406 netdev_set_prio_tc_map(dev, prio, tc); 7407 } 7408 } 7409 7410 #endif /* CONFIG_IXGBE_DCB */ 7411 /** 7412 * ixgbe_setup_tc - configure net_device for multiple traffic classes 7413 * 7414 * @netdev: net device to configure 7415 * @tc: number of traffic classes to enable 7416 */ 7417 int ixgbe_setup_tc(struct net_device *dev, u8 tc) 7418 { 7419 struct ixgbe_adapter *adapter = netdev_priv(dev); 7420 struct ixgbe_hw *hw = &adapter->hw; 7421 bool pools; 7422 7423 /* Hardware supports up to 8 traffic classes */ 7424 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || 7425 (hw->mac.type == ixgbe_mac_82598EB && 7426 tc < MAX_TRAFFIC_CLASS)) 7427 return -EINVAL; 7428 7429 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); 7430 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) 7431 return -EBUSY; 7432 7433 /* Hardware has to 
reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbe_close(dev);
	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_IXGBE_DCB
	if (tc) {
		/* enabling DCB: program the prio->tc map before reinit */
		netdev_set_num_tc(dev, tc);
		ixgbe_set_prio_tc_map(adapter);

		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;

		/* 82598 cannot do link flow control together with DCB;
		 * stash the requested mode so it can be restored later
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
		}
	} else {
		/* disabling DCB: undo the map and restore flow control */
		netdev_reset_tc(dev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	ixgbe_validate_rtr(adapter, tc);

#endif /* CONFIG_IXGBE_DCB */
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		return ixgbe_open(dev);

	return 0;
}

#ifdef CONFIG_PCI_IOV
/* re-run TC/queue setup after the SR-IOV VF count changed; needs RTNL
 * because ixgbe_setup_tc() may close and reopen the interface
 */
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
	rtnl_unlock();
}

#endif
/* full reinit if the interface is up, plain hardware reset otherwise */
void ixgbe_do_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);
}

/*
 * ixgbe_fix_features - mask out feature bits this adapter cannot honor
 * before the stack commits them.
 */
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	/*
Turn off LRO if not RSC capable */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
		features &= ~NETIF_F_LRO;

	return features;
}

/*
 * ixgbe_set_features - apply the feature set the stack decided on, resetting
 * the adapter when a change (RSC/LRO, flow director, RXALL) requires it.
 */
static int ixgbe_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	/* bits that differ between the current and the requested feature set */
	netdev_features_t changed = netdev->features ^ features;
	bool need_reset = false;

	/* Make sure RSC matches LRO, reset if change */
	if (!(features & NETIF_F_LRO)) {
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			need_reset = true;
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		/* RSC needs a sufficiently large interrupt throttle rate;
		 * itr setting of 1 means "dynamic", which is also acceptable
		 */
		if (adapter->rx_itr_setting == 1 ||
		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			need_reset = true;
		} else if ((changed ^ features) & NETIF_F_LRO) {
			/* NOTE(review): (changed ^ features) reconstructs the
			 * OLD feature set, so this warns only when LRO was
			 * already set before this call — presumably to avoid
			 * warning on a no-op request; confirm intent.
			 */
			e_info(probe, "rx-usecs set too low, "
			       "disabling RSC\n");
		}
	}

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
7539 */ 7540 switch (features & NETIF_F_NTUPLE) { 7541 case NETIF_F_NTUPLE: 7542 /* turn off ATR, enable perfect filters and reset */ 7543 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 7544 need_reset = true; 7545 7546 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 7547 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 7548 break; 7549 default: 7550 /* turn off perfect filters, enable ATR and reset */ 7551 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 7552 need_reset = true; 7553 7554 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 7555 7556 /* We cannot enable ATR if SR-IOV is enabled */ 7557 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7558 break; 7559 7560 /* We cannot enable ATR if we have 2 or more traffic classes */ 7561 if (netdev_get_num_tc(netdev) > 1) 7562 break; 7563 7564 /* We cannot enable ATR if RSS is disabled */ 7565 if (adapter->ring_feature[RING_F_RSS].limit <= 1) 7566 break; 7567 7568 /* A sample rate of 0 indicates ATR disabled */ 7569 if (!adapter->atr_sample_rate) 7570 break; 7571 7572 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 7573 break; 7574 } 7575 7576 if (features & NETIF_F_HW_VLAN_CTAG_RX) 7577 ixgbe_vlan_strip_enable(adapter); 7578 else 7579 ixgbe_vlan_strip_disable(adapter); 7580 7581 if (changed & NETIF_F_RXALL) 7582 need_reset = true; 7583 7584 netdev->features = features; 7585 if (need_reset) 7586 ixgbe_do_reset(netdev); 7587 7588 return 0; 7589 } 7590 7591 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 7592 struct net_device *dev, 7593 const unsigned char *addr, 7594 u16 flags) 7595 { 7596 struct ixgbe_adapter *adapter = netdev_priv(dev); 7597 int err; 7598 7599 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 7600 return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); 7601 7602 /* Hardware does not support aging addresses so if a 7603 * ndm_state is given only allow permanent addresses 7604 */ 7605 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 7606 pr_info("%s: FDB 
only supports static addresses\n", 7607 ixgbe_driver_name); 7608 return -EINVAL; 7609 } 7610 7611 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { 7612 u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; 7613 7614 if (netdev_uc_count(dev) < rar_uc_entries) 7615 err = dev_uc_add_excl(dev, addr); 7616 else 7617 err = -ENOMEM; 7618 } else if (is_multicast_ether_addr(addr)) { 7619 err = dev_mc_add_excl(dev, addr); 7620 } else { 7621 err = -EINVAL; 7622 } 7623 7624 /* Only return duplicate errors if NLM_F_EXCL is set */ 7625 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 7626 err = 0; 7627 7628 return err; 7629 } 7630 7631 static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 7632 struct nlmsghdr *nlh) 7633 { 7634 struct ixgbe_adapter *adapter = netdev_priv(dev); 7635 struct nlattr *attr, *br_spec; 7636 int rem; 7637 7638 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 7639 return -EOPNOTSUPP; 7640 7641 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7642 7643 nla_for_each_nested(attr, br_spec, rem) { 7644 __u16 mode; 7645 u32 reg = 0; 7646 7647 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7648 continue; 7649 7650 mode = nla_get_u16(attr); 7651 if (mode == BRIDGE_MODE_VEPA) { 7652 reg = 0; 7653 adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB; 7654 } else if (mode == BRIDGE_MODE_VEB) { 7655 reg = IXGBE_PFDTXGSWC_VT_LBEN; 7656 adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; 7657 } else 7658 return -EINVAL; 7659 7660 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg); 7661 7662 e_info(drv, "enabling bridge mode: %s\n", 7663 mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); 7664 } 7665 7666 return 0; 7667 } 7668 7669 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7670 struct net_device *dev, 7671 u32 filter_mask) 7672 { 7673 struct ixgbe_adapter *adapter = netdev_priv(dev); 7674 u16 mode; 7675 7676 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 7677 return 0; 7678 7679 if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) 7680 mode = BRIDGE_MODE_VEB; 7681 else 7682 mode = BRIDGE_MODE_VEPA; 7683 7684 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); 7685 } 7686 7687 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) 7688 { 7689 struct ixgbe_fwd_adapter *fwd_adapter = NULL; 7690 struct ixgbe_adapter *adapter = netdev_priv(pdev); 7691 unsigned int limit; 7692 int pool, err; 7693 7694 #ifdef CONFIG_RPS 7695 if (vdev->num_rx_queues != vdev->num_tx_queues) { 7696 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", 7697 vdev->name); 7698 return ERR_PTR(-EINVAL); 7699 } 7700 #endif 7701 /* Check for hardware restriction on number of rx/tx queues */ 7702 if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || 7703 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { 7704 netdev_info(pdev, 7705 "%s: Supports RX/TX Queue counts 1,2, and 4\n", 7706 pdev->name); 7707 return ERR_PTR(-EINVAL); 7708 } 7709 7710 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 7711 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || 7712 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) 7713 return ERR_PTR(-EBUSY); 7714 7715 fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL); 7716 if (!fwd_adapter) 7717 return ERR_PTR(-ENOMEM); 7718 7719 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); 7720 adapter->num_rx_pools++; 7721 set_bit(pool, &adapter->fwd_bitmask); 7722 limit = find_last_bit(&adapter->fwd_bitmask, 32); 7723 7724 /* Enable VMDq flag so device will be set in VM mode */ 7725 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; 7726 
	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;

	/* Force reinit of ring allocation with VMDQ enabled */
	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
	if (err)
		goto fwd_add_err;
	fwd_adapter->pool = pool;
	fwd_adapter->real_adapter = adapter;
	err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
	if (err)
		goto fwd_add_err;
	netif_tx_start_all_queues(vdev);
	return fwd_adapter;
fwd_add_err:
	/* unwind counter and free adapter struct */
	netdev_info(pdev,
		    "%s: dfwd hardware acceleration failed\n", vdev->name);
	clear_bit(pool, &adapter->fwd_bitmask);
	adapter->num_rx_pools--;
	kfree(fwd_adapter);
	return ERR_PTR(err);
}

/*
 * ixgbe_fwd_del - release the VMDq pool backing a macvlan offload device
 * and shrink the ring allocation back down.
 */
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
	struct ixgbe_fwd_adapter *fwd_adapter = priv;
	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
	unsigned int limit;

	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
	adapter->num_rx_pools--;

	/* recompute the VMDq limit from the highest pool still in use */
	limit = find_last_bit(&adapter->fwd_bitmask, 32);
	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
		   fwd_adapter->pool, adapter->num_rx_pools,
		   fwd_adapter->rx_base_queue,
		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
		   adapter->fwd_bitmask);
	kfree(fwd_adapter);
}

/* net_device_ops dispatch table wiring the stack to the handlers above */
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
	.ndo_setup_tc		= ixgbe_setup_tc,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
	.ndo_fcoe_get_hbainfo	= ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
	.ndo_dfwd_add_station	= ixgbe_fwd_add,
	.ndo_dfwd_del_station	= ixgbe_fwd_del,
};

/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 **/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
	struct list_head *entry;
	int physfns = 0;

	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these with
	 * the correct number of functions.
	 */
	if (ixgbe_pcie_from_parent(&adapter->hw)) {
		physfns = 4;
	} else {
		/* walk every device on our bus, counting physical functions
		 * (NOTE(review): list_for_each_entry() would be the idiomatic
		 * form of this open-coded traversal)
		 */
		list_for_each(entry, &adapter->pdev->bus_list) {
			struct pci_dev *pdev =
				list_entry(entry, struct pci_dev, bus_list);
			/* don't count virtual functions */
			if (!pdev->is_virtfn)
				physfns++;
		}
	}

	return physfns;
}

/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: adapter specific details
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 **/
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			u16 subdevice_id)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
	int is_wol_supported = 0;

	switch (device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices could supports WOL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
		case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
			if (hw->bus.func != 0)
				break;
			/* fallthrough */
		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
		case IXGBE_SUBDEV_ID_82599_SFP:
		case IXGBE_SUBDEV_ID_82599_RNDC:
		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
		case IXGBE_SUBDEV_ID_82599_LOM_SFP:
			is_wol_supported = 1;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599EN_SFP:
		/* Only this subdevice supports WOL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
			is_wol_supported = 1;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			is_wol_supported = 1;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		is_wol_supported = 1;
		break;
	case IXGBE_DEV_ID_X540T:
	case IXGBE_DEV_ID_X540T1:
		/* check eeprom to see if enabled wol */
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0))) {
			is_wol_supported = 1;
		}
		break;
	}

	return is_wol_supported;
}

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;		/* running board index across probes */
	int i, err, pci_using_dac, expected_gts;
	unsigned int indices = MAX_TX_QUEUES;
	u8 part_str[IXGBE_PBANUM_LENGTH];
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA, fall back to 32-bit */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
		indices = 4 * MAX_TRAFFIC_CLASS;
#else
		indices = IXGBE_MAX_RSS_INDICES;
#endif
	}

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* map BAR0 registers */
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	/* temporary name until register_netdev() assigns ethN */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* bail early if the device fell off the bus (surprise removal) */
	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_ioremap;
	}
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make it possible the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* clear any stale wake-up status bits */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	/* a missing SFP module is tolerated on 82598 (module may be
	 * inserted later); an unsupported module is fatal
	 */
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		goto skip_sriov;
	/* Mailbox */
	ixgbe_init_mbx_params_pf(hw);
	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
	ixgbe_enable_sriov(adapter);
skip_sriov:

#endif
	/* baseline offload feature set */
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM;

	/* hw_features = user-toggleable superset of the enabled features */
	netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		netdev->features |= NETIF_F_SCTP_CSUM;
		netdev->hw_features |= NETIF_F_SCTP_CSUM |
				       NETIF_F_NTUPLE;
		break;
	default:
		break;
	}

	netdev->hw_features |= NETIF_F_RXALL;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		unsigned int fcoe_l;

		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			/* the caps bit being SET means FCoE offloads are
			 * disabled on this part
			 */
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}


		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	/* re-check for surprise removal before arming the service task */
	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->service_task,
ixgbe_service_task);
	set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* WOL not supported for all devices */
	adapter->wol = 0;
	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
					      pdev->subsystem_device);
	if (hw->wol_enabled)
		adapter->wol = IXGBE_WUFC_MAG;	/* wake on magic packet */

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);
	if (ixgbe_pcie_from_parent(hw))
		ixgbe_get_parent_bus_info(adapter);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
		break;
	default:
		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
		break;
	}
	ixgbe_check_minimum_link(adapter, expected_gts);

	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
	if (err)
		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	e_dev_info("%pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);
	cards_found++;

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */

	ixgbe_dbg_adapter_init(adapter);

	/* Need link setup for MNG FW, else wait for IXGBE_UP */
	if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw,
			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
			true);

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	/* NOTE(review): reaching err_dma happens before the netdev/adapter
	 * are allocated in probe (head of function not visible here), so
	 * dereferencing adapter->state on this path looks unsafe — later
	 * upstream kernels guard this with a local flag; confirm against
	 * the full probe body.
	 */
	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	ixgbe_dbg_adapter_exit(adapter);

	/* Mark the adapter as going away and wait for any outstanding
	 * service task to finish before tearing anything down, so no
	 * deferred work races with the teardown below.
	 */
	set_bit(__IXGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);


#ifdef CONFIG_IXGBE_DCA
	/* Drop our DCA requester registration and turn DCA off in hw */
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	/*
	 * Only disable SR-IOV on unload if the user specified the now
	 * deprecated max_vfs module parameter.
	 */
	if (max_vfs)
		ixgbe_disable_sriov(adapter);
#endif
	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->io_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
			      IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	/* NOTE(review): adapter is the netdev's private data, which was
	 * just released by free_netdev() above, so this read of
	 * adapter->state looks like a use-after-free — later upstream
	 * kernels latch the DISABLED bit into a local before freeing;
	 * confirm.
	 */
	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	/* VF error detection only applies when SR-IOV is active; 82598
	 * has no VFs.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	/* Walk up to the root port so we can read its AER header log */
	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	/* Pull the TLP header log of the offending transaction */
	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
	if (ixgbe_removed(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	/* Low bit of the requester ID selects the PF; only act when the
	 * offending VF belongs to this function.
	 */
	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
				"%8.8x\tdw3: %8.8x\n",
		dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
		if (vfdev) {
			e_dev_err("Issuing VFLR to VF %d\n", vf);
			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbe_down(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		/* Re-arm register access: clear the DISABLED flag and
		 * restore the mapped BAR pointer before touching the hw.
		 */
		smp_mb__before_atomic();
		clear_bit(__IXGBE_DISABLED, &adapter->state);
		adapter->hw.hw_addr = adapter->io_addr;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		/* Clear any pending wake-up status bits */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	/* While VF errors are outstanding just consume one reference per
	 * resume call and defer the actual restart.
	 */
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}

/* PCI AER recovery callbacks (see Documentation/PCI/pci-error-recovery) */
static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	/* debugfs must exist before any adapter can register with it */
	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
/* DCA notifier callback: fan the event out to every bound ixgbe device */
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */