/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.4.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
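/* Adapter hardware info, indexed by the board_* values that appear as
 * driver_data in ixgbe_pci_tbl below.
 */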
static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
					     &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
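 *
 * Return: true if the PCIe bandwidth info should be read from the parent bus.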
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
		   max_gts);
	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		    "Unknown"),
		   width,
		   (speed == PCIE_SPEED_2_5GT ? "20%" :
		    speed == PCIE_SPEED_5_0GT ? "20%" :
		    speed == PCIE_SPEED_8_0GT ? "<2%" :
		    "Unknown"));

	if (max_gts < expected_gts) {
		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
			   expected_gts);
		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
	}
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbe_wq, &adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
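	 * (on a failed read, ixgbe_read_reg() calls back into this function).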
	 */
	if (reg == IXGBE_STATUS) {
		ixgbe_remove_adapter(hw);
		return;
	}
	value = ixgbe_read_reg(hw, IXGBE_STATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
}

/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. When a read returns all
 * ones it cross-checks the status register to confirm whether the device has
 * actually been removed. It avoids reading from the hardware if a removal was
 * previously detected, in which case it returns IXGBE_FAILED_READ_REG
 * (all ones).
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	if (unlikely(hw->phy.nw_mng_if_sel &
		     IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
		struct ixgbe_adapter *adapter;
		int i;

		for (i = 0; i < 200; ++i) {
			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
			if (likely(!value))
				goto writes_completed;
			if (value == IXGBE_FAILED_READ_REG) {
				ixgbe_remove_adapter(hw);
				return IXGBE_FAILED_READ_REG;
			}
			udelay(5);
		}

		adapter = hw->back;
		e_warn(hw, "register writes incomplete %08x\n", value);
	}

writes_completed:
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbe_check_remove(hw, reg);
	return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};
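/* Registers printed by ixgbe_regdump(); the per-queue entries list index 0
 * and ixgbe_regdump() expands them to all 64 queue instances.
 */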
static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name state "
			"trans_start last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			dev_trans_start(netdev),
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s %s %s %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * 82598 Advanced Transmit Descriptor
	 * +--------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] |
	 * +--------------------------------------------------------------+
	 * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
	 * +--------------------------------------------------------------+
	 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
	 *
	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
	 * +--------------------------------------------------------------+
	 * 0 | RSV [63:0] |
	 * +--------------------------------------------------------------+
	 * 8 | RSV | STA | NXTSEQ |
	 * +--------------------------------------------------------------+
	 * 63 36 35 32 31 0
	 *
	 * 82599+ Advanced Transmit Descriptor
	 * +--------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] |
	 * +--------------------------------------------------------------+
	 * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
	 * +--------------------------------------------------------------+
	 * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
	 *
	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
	 * +--------------------------------------------------------------+
	 * 0 | RSV [63:0] |
	 * +--------------------------------------------------------------+
	 * 8 | RSV | STA | RSV |
	 * +--------------------------------------------------------------+
	 * 63 36 35 32 31 0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s %s %s %s %s\n",
			"T [desc] [address 63:0 ] ",
			"[PlPOIdStDDt Ln] [bi->dma ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb);
				if (i == tx_ring->next_to_use &&
				    i == tx_ring->next_to_clean)
					pr_cont(" NTC/U\n");
				else if (i == tx_ring->next_to_use)
					pr_cont(" NTU\n");
				else if (i == tx_ring->next_to_clean)
					pr_cont(" NTC\n");
				else
					pr_cont("\n");

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						tx_buffer->skb->data,
						dma_unmap_len(tx_buffer, len),
						true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * 82598 Advanced Receive Descriptor (Read) Format
	 * 63 1 0
	 * +-----------------------------------------------------+
	 * 0 | Packet Buffer Address [63:1] |A0/NSE|
	 * +----------------------------------------------+------+
	 * 8 | Header Buffer Address [63:1] | DD |
	 * +-----------------------------------------------------+
	 *
	 *
	 * 82598 Advanced Receive Descriptor (Write-Back) Format
	 *
	 * 63 48 47 32 31 30 21 20 16 15 4 3 0
	 * +------------------------------------------------------+
	 * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS |
	 * | Packet | IP | | | | Type | Type |
	 * | Checksum | Ident | | | | | |
	 * +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 * +------------------------------------------------------+
	 * 63 48 47 32 31 20 19 0
	 *
	 * 82599+ Advanced Receive Descriptor (Read) Format
	 * 63 1 0
	 * +-----------------------------------------------------+
	 * 0 | Packet Buffer Address [63:1] |A0/NSE|
	 * +----------------------------------------------+------+
	 * 8 | Header Buffer Address [63:1] | DD |
	 * +-----------------------------------------------------+
	 *
	 *
	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
	 *
	 * 63 48 47 32 31 30 21 20 17 16 4 3 0
	 * +------------------------------------------------------+
	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
	 * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
	 * |/ Flow Dir Flt ID | | | | | |
	 * +------------------------------------------------------+
	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
	 * +------------------------------------------------------+
	 * 63 48 47 32 31 20 19 0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s",
			"R [desc] [ PktBuf A0] ",
			"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
			"<-- Adv Rx Read format\n");
		pr_info("%s%s%s",
			"RWB[desc] [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb ] ",
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X] %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R [0x%03X] %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}
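/* Fire a software interrupt, via the EICS register(s), for every queue set
 * in qmask.
 */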
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}
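/* Number of descriptors the hardware has yet to process, derived from the
 * ring's head (TDH) and tail (TDT) registers.
 */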
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	u32 head, tail;

	if (ring->l2_accel_priv)
		adapter = ring->l2_accel_priv->real_adapter;
	else
		adapter = netdev_priv(ring->netdev);

	hw = &adapter->hw;
	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);
	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;
	/* reset the countdown */
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

	return false;
}

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}

/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 **/
static int ixgbe_tx_maxrate(struct net_device *netdev,
			    int queue_index, u32 maxrate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = ixgbe_link_mbps(adapter);

	if (!maxrate)
		return 0;

	/* Calculate the rate factor values to set */
	bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
	bcnrc_val /= maxrate;

	/* clear everything but the rate factor */
	bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
		     IXGBE_RTTBCNRC_RF_DEC_MASK;

	/* enable the rate scheduler */
	bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;

	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);

	return 0;
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring, int napi_budget)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			" Tx Queue <%d>\n"
			" TDH, TDT <%x>, <%x>\n"
			" next_to_use <%x>\n"
			" next_to_clean <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			" time_stamp <%lx>\n"
			" jiffies <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);
		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = 0;
	u16 reg_offset;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		txctrl = dca3_get_tag(tx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = 0;
	u8 reg_idx = rx_ring->reg_idx;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		rxctrl = dca3_get_tag(rx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	/* always use CB2 mode, difference is masked in the CB driver */
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_MODE_CB2);
	else
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_MODE_CB2);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_DISABLE);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	bool encap_pkt = false;

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check for VXLAN and Geneve packets */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
		encap_pkt = true;
		skb->encapsulation = 1;
	}

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (encap_pkt) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
			return;

		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}
		/* If we checked the outer header let the stack know */
		skb->csum_level = 1;
	}
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.upper.status_error = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}

/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u32 flags = rx_ring->q_vector->adapter->flags;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
		ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, dev);
}

static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	skb_mark_napi_id(skb, &q_vector->napi);
	if (ixgbe_qv_busy_polling(q_vector))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail. The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb. The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
1829 */ 1830 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, 1831 struct sk_buff *skb) 1832 { 1833 /* if the page was released unmap it, else just sync our portion */ 1834 if (unlikely(IXGBE_CB(skb)->page_released)) { 1835 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, 1836 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); 1837 IXGBE_CB(skb)->page_released = false; 1838 } else { 1839 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 1840 1841 dma_sync_single_range_for_cpu(rx_ring->dev, 1842 IXGBE_CB(skb)->dma, 1843 frag->page_offset, 1844 ixgbe_rx_bufsz(rx_ring), 1845 DMA_FROM_DEVICE); 1846 } 1847 IXGBE_CB(skb)->dma = 0; 1848 } 1849 1850 /** 1851 * ixgbe_cleanup_headers - Correct corrupted or empty headers 1852 * @rx_ring: rx descriptor ring packet is being transacted on 1853 * @rx_desc: pointer to the EOP Rx descriptor 1854 * @skb: pointer to current skb being fixed 1855 * 1856 * Check for corrupted packet headers caused by senders on the local L2 1857 * embedded NIC switch not setting up their Tx Descriptors right. These 1858 * should be very rare. 1859 * 1860 * Also address the case where we are pulling data in on pages only 1861 * and as such no data is present in the skb header. 1862 * 1863 * In addition if skb is not at least 60 bytes we need to pad it so that 1864 * it is large enough to qualify as a valid Ethernet frame. 1865 * 1866 * Returns true if an error was encountered and skb was freed. 1867 **/ 1868 static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, 1869 union ixgbe_adv_rx_desc *rx_desc, 1870 struct sk_buff *skb) 1871 { 1872 struct net_device *netdev = rx_ring->netdev; 1873 1874 /* verify that the packet does not have any known errors */ 1875 if (unlikely(ixgbe_test_staterr(rx_desc, 1876 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && 1877 !(netdev->features & NETIF_F_RXALL))) { 1878 dev_kfree_skb_any(skb); 1879 return true; 1880 } 1881 1882 /* place header in linear portion of buffer */ 1883 if (skb_is_nonlinear(skb)) 1884 ixgbe_pull_tail(rx_ring, skb); 1885 1886 #ifdef IXGBE_FCOE 1887 /* do not attempt to pad FCoE Frames as this will disrupt DDP */ 1888 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) 1889 return false; 1890 1891 #endif 1892 /* if eth_skb_pad returns an error the skb was freed */ 1893 if (eth_skb_pad(skb)) 1894 return true; 1895 1896 return false; 1897 } 1898 1899 /** 1900 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring 1901 * @rx_ring: rx descriptor ring to store buffers on 1902 * @old_buff: donor buffer to have page reused 1903 * 1904 * Synchronizes page for reuse by the adapter 1905 **/ 1906 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, 1907 struct ixgbe_rx_buffer *old_buff) 1908 { 1909 struct ixgbe_rx_buffer *new_buff; 1910 u16 nta = rx_ring->next_to_alloc; 1911 1912 new_buff = &rx_ring->rx_buffer_info[nta]; 1913 1914 /* update, and store next to alloc */ 1915 nta++; 1916 rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; 1917 1918 /* transfer page from old buffer to new buffer */ 1919 *new_buff = *old_buff; 1920 1921 /* sync the buffer for use by the device */ 1922 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, 1923 new_buff->page_offset, 1924 ixgbe_rx_bufsz(rx_ring), 1925 DMA_FROM_DEVICE); 1926 } 1927 1928 static inline bool ixgbe_page_is_reserved(struct page *page) 1929 { 1930 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 1931 } 1932 1933 /** 1934 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff 1935 * @rx_ring: rx descriptor ring to transact packets on 1936 * @rx_buffer: buffer containing page to add 1937 * @rx_desc: descriptor containing length of buffer written by hardware 1938 * @skb: sk_buff to place the data into 1939 * 1940 * This function will add the data contained in rx_buffer->page to the skb. 1941 * This is done either through a direct copy if the data in the buffer is 1942 * less than the skb header size, otherwise it will just attach the page as 1943 * a frag to the skb. 1944 * 1945 * The function will then update the page offset if necessary and return 1946 * true if the buffer can be reused by the adapter. 1947 **/ 1948 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, 1949 struct ixgbe_rx_buffer *rx_buffer, 1950 union ixgbe_adv_rx_desc *rx_desc, 1951 struct sk_buff *skb) 1952 { 1953 struct page *page = rx_buffer->page; 1954 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); 1955 #if (PAGE_SIZE < 8192) 1956 unsigned int truesize = ixgbe_rx_bufsz(rx_ring); 1957 #else 1958 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); 1959 unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - 1960 ixgbe_rx_bufsz(rx_ring); 1961 #endif 1962 1963 if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { 1964 unsigned char *va = page_address(page) + rx_buffer->page_offset; 1965 1966 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 1967 1968 /* page is not reserved, we can reuse buffer as-is */ 1969 if (likely(!ixgbe_page_is_reserved(page))) 1970 return true; 1971 1972 /* this page cannot be reused so discard it */ 1973 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); 1974 return false; 1975 } 1976 1977 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 1978 rx_buffer->page_offset, size, truesize); 1979 1980 /* avoid re-using remote pages */ 1981 if (unlikely(ixgbe_page_is_reserved(page))) 1982 return false; 1983 1984 #if (PAGE_SIZE < 8192) 1985 /* if we are only owner of page we can reuse it */ 1986 if (unlikely(page_count(page) != 1)) 1987 return false; 1988 1989 /* flip page offset to other buffer */ 1990 rx_buffer->page_offset ^= truesize; 1991 #else 1992 /* move offset up to the next cache line */ 1993 rx_buffer->page_offset += truesize; 1994 1995 if (rx_buffer->page_offset > last_offset) 1996 return false; 1997 #endif 1998 1999 /* Even if we own the page, we are not allowed to use atomic_set() 2000 * This would break get_page_unless_zero() users. 
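	 * Taking an extra reference here instead keeps the page alive while
	 * the stack owns one half of it via the skb frag and the other half
	 * is handed back to the ring for reuse by ixgbe_reuse_rx_page().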
	 */
	page_ref_inc(page);

	return true;
}

static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     IXGBE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/*
		 * we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		/*
		 * Delay unmapping of the first packet. It carries the
		 * header information, HW may still access the header
		 * after the writeback. Only unmap it when EOP is
		 * reached
		 */
		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
			goto dma_sync;

		IXGBE_CB(skb)->dma = rx_buffer->dma;
	} else {
		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		rx_buffer->skb = NULL;
	}

	/* pull page into skb */
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		/* the page has been released from the ring */
		IXGBE_CB(skb)->page_released = true;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring),
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	return skb;
}

/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
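 * Rx pages are kept DMA-mapped across packets where possible: completed
 * buffers are only synced for CPU or device ownership and recycled by
 * flipping between the two halves of each page, so a full unmap/remap is
 * needed only when a page cannot be reused.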
2100 * 2101 * Returns amount of work completed 2102 **/ 2103 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 2104 struct ixgbe_ring *rx_ring, 2105 const int budget) 2106 { 2107 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 2108 #ifdef IXGBE_FCOE 2109 struct ixgbe_adapter *adapter = q_vector->adapter; 2110 int ddp_bytes; 2111 unsigned int mss = 0; 2112 #endif /* IXGBE_FCOE */ 2113 u16 cleaned_count = ixgbe_desc_unused(rx_ring); 2114 2115 while (likely(total_rx_packets < budget)) { 2116 union ixgbe_adv_rx_desc *rx_desc; 2117 struct sk_buff *skb; 2118 2119 /* return some buffers to hardware, one at a time is too slow */ 2120 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 2121 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); 2122 cleaned_count = 0; 2123 } 2124 2125 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); 2126 2127 if (!rx_desc->wb.upper.status_error) 2128 break; 2129 2130 /* This memory barrier is needed to keep us from reading 2131 * any other fields out of the rx_desc until we know the 2132 * descriptor has been written back 2133 */ 2134 dma_rmb(); 2135 2136 /* retrieve a buffer from the ring */ 2137 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); 2138 2139 /* exit if we failed to retrieve a buffer */ 2140 if (!skb) 2141 break; 2142 2143 cleaned_count++; 2144 2145 /* place incomplete frames back on ring for completion */ 2146 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) 2147 continue; 2148 2149 /* verify the packet layout is correct */ 2150 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) 2151 continue; 2152 2153 /* probably a little skewed due to removing CRC */ 2154 total_rx_bytes += skb->len; 2155 2156 /* populate checksum, timestamp, VLAN, and protocol */ 2157 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); 2158 2159 #ifdef IXGBE_FCOE 2160 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 2161 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { 2162 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); 2163 /* include DDPed FCoE data */ 2164 if (ddp_bytes > 0) { 2165 if (!mss) { 2166 mss = rx_ring->netdev->mtu - 2167 sizeof(struct fcoe_hdr) - 2168 sizeof(struct fc_frame_header) - 2169 sizeof(struct fcoe_crc_eof); 2170 if (mss > 512) 2171 mss &= ~511; 2172 } 2173 total_rx_bytes += ddp_bytes; 2174 total_rx_packets += DIV_ROUND_UP(ddp_bytes, 2175 mss); 2176 } 2177 if (!ddp_bytes) { 2178 dev_kfree_skb_any(skb); 2179 continue; 2180 } 2181 } 2182 2183 #endif /* IXGBE_FCOE */ 2184 ixgbe_rx_skb(q_vector, skb); 2185 2186 /* update budget accounting */ 2187 total_rx_packets++; 2188 } 2189 2190 u64_stats_update_begin(&rx_ring->syncp); 2191 rx_ring->stats.packets += total_rx_packets; 2192 rx_ring->stats.bytes += total_rx_bytes; 2193 u64_stats_update_end(&rx_ring->syncp); 2194 q_vector->rx.total_packets += total_rx_packets; 2195 q_vector->rx.total_bytes += total_rx_bytes; 2196 2197 return total_rx_packets; 2198 } 2199 2200 #ifdef CONFIG_NET_RX_BUSY_POLL 2201 /* must be called with local_bh_disable()d */ 2202 static int ixgbe_low_latency_recv(struct napi_struct *napi) 2203 { 2204 struct ixgbe_q_vector *q_vector = 2205 container_of(napi, struct ixgbe_q_vector, napi); 2206 struct ixgbe_adapter *adapter = q_vector->adapter; 2207 struct ixgbe_ring *ring; 2208 int found = 0; 2209 2210 if (test_bit(__IXGBE_DOWN, &adapter->state)) 2211 return LL_FLUSH_FAILED; 2212 2213 if (!ixgbe_qv_lock_poll(q_vector)) 2214 return LL_FLUSH_BUSY; 2215 2216 ixgbe_for_each_ring(ring, q_vector->rx) { 2217 found = ixgbe_clean_rx_irq(q_vector, ring, 4); 2218 #ifdef BP_EXTENDED_STATS 2219 if 
(found) 2220 ring->stats.cleaned += found; 2221 else 2222 ring->stats.misses++; 2223 #endif 2224 if (found) 2225 break; 2226 } 2227 2228 ixgbe_qv_unlock_poll(q_vector); 2229 2230 return found; 2231 } 2232 #endif /* CONFIG_NET_RX_BUSY_POLL */ 2233 2234 /** 2235 * ixgbe_configure_msix - Configure MSI-X hardware 2236 * @adapter: board private structure 2237 * 2238 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X 2239 * interrupts. 2240 **/ 2241 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 2242 { 2243 struct ixgbe_q_vector *q_vector; 2244 int v_idx; 2245 u32 mask; 2246 2247 /* Populate MSIX to EITR Select */ 2248 if (adapter->num_vfs > 32) { 2249 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; 2250 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); 2251 } 2252 2253 /* 2254 * Populate the IVAR table and set the ITR values to the 2255 * corresponding register. 2256 */ 2257 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 2258 struct ixgbe_ring *ring; 2259 q_vector = adapter->q_vector[v_idx]; 2260 2261 ixgbe_for_each_ring(ring, q_vector->rx) 2262 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); 2263 2264 ixgbe_for_each_ring(ring, q_vector->tx) 2265 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); 2266 2267 ixgbe_write_eitr(q_vector); 2268 } 2269 2270 switch (adapter->hw.mac.type) { 2271 case ixgbe_mac_82598EB: 2272 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 2273 v_idx); 2274 break; 2275 case ixgbe_mac_82599EB: 2276 case ixgbe_mac_X540: 2277 case ixgbe_mac_X550: 2278 case ixgbe_mac_X550EM_x: 2279 case ixgbe_mac_x550em_a: 2280 ixgbe_set_ivar(adapter, -1, 1, v_idx); 2281 break; 2282 default: 2283 break; 2284 } 2285 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 2286 2287 /* set up to autoclear timer, and the vectors */ 2288 mask = IXGBE_EIMS_ENABLE_MASK; 2289 mask &= ~(IXGBE_EIMS_OTHER | 2290 IXGBE_EIMS_MAILBOX | 2291 IXGBE_EIMS_LSC); 2292 2293 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 2294 } 2295 2296 enum latency_range { 2297 lowest_latency = 0, 2298 low_latency = 1, 2299 bulk_latency = 2, 2300 latency_invalid = 255 2301 }; 2302 2303 /** 2304 * ixgbe_update_itr - update the dynamic ITR value based on statistics 2305 * @q_vector: structure containing interrupt and ring information 2306 * @ring_container: structure containing ring performance data 2307 * 2308 * Stores a new ITR value based on packets and byte 2309 * counts during the last interrupt. The advantage of per interrupt 2310 * computation is faster updates and more accurate ITR for the current 2311 * traffic pattern. Constants in this function were computed 2312 * based on theoretical maximum wire speed and thresholds were set based 2313 * on testing data as well as attempting to minimize response time 2314 * while increasing bulk throughput. 2315 * this functionality is controlled by the InterruptThrottleRate module 2316 * parameter (see ixgbe_param.c) 2317 **/ 2318 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, 2319 struct ixgbe_ring_container *ring_container) 2320 { 2321 int bytes = ring_container->total_bytes; 2322 int packets = ring_container->total_packets; 2323 u32 timepassed_us; 2324 u64 bytes_perint; 2325 u8 itr_setting = ring_container->itr; 2326 2327 if (packets == 0) 2328 return; 2329 2330 /* simple throttlerate management 2331 * 0-10MB/s lowest (100000 ints/s) 2332 * 10-20MB/s low (20000 ints/s) 2333 * 20-1249MB/s bulk (12000 ints/s) 2334 */ 2335 /* what was last interrupt timeslice? 
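	 * (q_vector->itr holds the EITR interval in register units; the
	 * >> 2 below converts it to microseconds, so bytes_perint works
	 * out to roughly bytes per usec, i.e. MB/s, matching the
	 * thresholds in the table above)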
*/ 2336 timepassed_us = q_vector->itr >> 2; 2337 if (timepassed_us == 0) 2338 return; 2339 2340 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 2341 2342 switch (itr_setting) { 2343 case lowest_latency: 2344 if (bytes_perint > 10) 2345 itr_setting = low_latency; 2346 break; 2347 case low_latency: 2348 if (bytes_perint > 20) 2349 itr_setting = bulk_latency; 2350 else if (bytes_perint <= 10) 2351 itr_setting = lowest_latency; 2352 break; 2353 case bulk_latency: 2354 if (bytes_perint <= 20) 2355 itr_setting = low_latency; 2356 break; 2357 } 2358 2359 /* clear work counters since we have the values we need */ 2360 ring_container->total_bytes = 0; 2361 ring_container->total_packets = 0; 2362 2363 /* write updated itr to ring container */ 2364 ring_container->itr = itr_setting; 2365 } 2366 2367 /** 2368 * ixgbe_write_eitr - write EITR register in hardware specific way 2369 * @q_vector: structure containing interrupt and ring information 2370 * 2371 * This function is made to be called by ethtool and by the driver 2372 * when it needs to update EITR registers at runtime. Hardware 2373 * specific quirks/differences are taken care of here. 2374 */ 2375 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) 2376 { 2377 struct ixgbe_adapter *adapter = q_vector->adapter; 2378 struct ixgbe_hw *hw = &adapter->hw; 2379 int v_idx = q_vector->v_idx; 2380 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 2381 2382 switch (adapter->hw.mac.type) { 2383 case ixgbe_mac_82598EB: 2384 /* must write high and low 16 bits to reset counter */ 2385 itr_reg |= (itr_reg << 16); 2386 break; 2387 case ixgbe_mac_82599EB: 2388 case ixgbe_mac_X540: 2389 case ixgbe_mac_X550: 2390 case ixgbe_mac_X550EM_x: 2391 case ixgbe_mac_x550em_a: 2392 /* 2393 * set the WDIS bit to not clear the timer bits and cause an 2394 * immediate assertion of the interrupt 2395 */ 2396 itr_reg |= IXGBE_EITR_CNT_WDIS; 2397 break; 2398 default: 2399 break; 2400 } 2401 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 2402 } 2403 2404 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) 2405 { 2406 u32 new_itr = q_vector->itr; 2407 u8 current_itr; 2408 2409 ixgbe_update_itr(q_vector, &q_vector->tx); 2410 ixgbe_update_itr(q_vector, &q_vector->rx); 2411 2412 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 2413 2414 switch (current_itr) { 2415 /* counts and packets in update_itr are dependent on these numbers */ 2416 case lowest_latency: 2417 new_itr = IXGBE_100K_ITR; 2418 break; 2419 case low_latency: 2420 new_itr = IXGBE_20K_ITR; 2421 break; 2422 case bulk_latency: 2423 new_itr = IXGBE_12K_ITR; 2424 break; 2425 default: 2426 break; 2427 } 2428 2429 if (new_itr != q_vector->itr) { 2430 /* do an exponential smoothing */ 2431 new_itr = (10 * new_itr * q_vector->itr) / 2432 ((9 * new_itr) + q_vector->itr); 2433 2434 /* save the algorithm value here */ 2435 q_vector->itr = new_itr; 2436 2437 ixgbe_write_eitr(q_vector); 2438 } 2439 } 2440 2441 /** 2442 * ixgbe_check_overtemp_subtask - check for over temperature 2443 * @adapter: pointer to adapter 2444 **/ 2445 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) 2446 { 2447 struct ixgbe_hw *hw = &adapter->hw; 2448 u32 eicr = adapter->interrupt_event; 2449 2450 if (test_bit(__IXGBE_DOWN, &adapter->state)) 2451 return; 2452 2453 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 2454 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) 2455 return; 2456 2457 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; 2458 2459 switch (hw->device_id) { 2460 case 
IXGBE_DEV_ID_82599_T3_LOM: 2461 /* 2462 * Since the warning interrupt is for both ports 2463 * we don't have to check if: 2464 * - This interrupt wasn't for our port. 2465 * - We may have missed the interrupt so always have to 2466 * check if we got a LSC 2467 */ 2468 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && 2469 !(eicr & IXGBE_EICR_LSC)) 2470 return; 2471 2472 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { 2473 u32 speed; 2474 bool link_up = false; 2475 2476 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2477 2478 if (link_up) 2479 return; 2480 } 2481 2482 /* Check if this is not due to overtemp */ 2483 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) 2484 return; 2485 2486 break; 2487 default: 2488 if (adapter->hw.mac.type >= ixgbe_mac_X540) 2489 return; 2490 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) 2491 return; 2492 break; 2493 } 2494 e_crit(drv, "%s\n", ixgbe_overheat_msg); 2495 2496 adapter->interrupt_event = 0; 2497 } 2498 2499 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 2500 { 2501 struct ixgbe_hw *hw = &adapter->hw; 2502 2503 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 2504 (eicr & IXGBE_EICR_GPI_SDP1(hw))) { 2505 e_crit(probe, "Fan has stopped, replace the adapter\n"); 2506 /* write to clear the interrupt */ 2507 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); 2508 } 2509 } 2510 2511 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) 2512 { 2513 struct ixgbe_hw *hw = &adapter->hw; 2514 2515 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) 2516 return; 2517 2518 switch (adapter->hw.mac.type) { 2519 case ixgbe_mac_82599EB: 2520 /* 2521 * Need to check link state so complete overtemp check 2522 * on service task 2523 */ 2524 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || 2525 (eicr & IXGBE_EICR_LSC)) && 2526 (!test_bit(__IXGBE_DOWN, &adapter->state))) { 2527 adapter->interrupt_event = eicr; 2528 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; 2529 ixgbe_service_event_schedule(adapter); 2530 return; 2531 } 2532 return; 2533 case ixgbe_mac_X540: 2534 if (!(eicr & IXGBE_EICR_TS)) 2535 return; 2536 break; 2537 default: 2538 return; 2539 } 2540 2541 e_crit(drv, "%s\n", ixgbe_overheat_msg); 2542 } 2543 2544 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 2545 { 2546 switch (hw->mac.type) { 2547 case ixgbe_mac_82598EB: 2548 if (hw->phy.type == ixgbe_phy_nl) 2549 return true; 2550 return false; 2551 case ixgbe_mac_82599EB: 2552 case ixgbe_mac_X550EM_x: 2553 case ixgbe_mac_x550em_a: 2554 switch (hw->mac.ops.get_media_type(hw)) { 2555 case ixgbe_media_type_fiber: 2556 case ixgbe_media_type_fiber_qsfp: 2557 return true; 2558 default: 2559 return false; 2560 } 2561 default: 2562 return false; 2563 } 2564 } 2565 2566 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) 2567 { 2568 struct ixgbe_hw *hw = &adapter->hw; 2569 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); 2570 2571 if (!ixgbe_is_sfp(hw)) 2572 return; 2573 2574 /* Later MAC's use different SDP */ 2575 if (hw->mac.type >= ixgbe_mac_X540) 2576 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 2577 2578 if (eicr & eicr_mask) { 2579 /* Clear the interrupt */ 2580 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2581 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2582 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; 2583 adapter->sfp_poll_time = 0; 2584 ixgbe_service_event_schedule(adapter); 2585 } 2586 } 2587 2588 if (adapter->hw.mac.type == ixgbe_mac_82599EB && 2589 (eicr & IXGBE_EICR_GPI_SDP1(hw))) { 2590 /* Clear the 
interrupt */ 2591 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); 2592 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2593 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 2594 ixgbe_service_event_schedule(adapter); 2595 } 2596 } 2597 } 2598 2599 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) 2600 { 2601 struct ixgbe_hw *hw = &adapter->hw; 2602 2603 adapter->lsc_int++; 2604 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2605 adapter->link_check_timeout = jiffies; 2606 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2607 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2608 IXGBE_WRITE_FLUSH(hw); 2609 ixgbe_service_event_schedule(adapter); 2610 } 2611 } 2612 2613 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, 2614 u64 qmask) 2615 { 2616 u32 mask; 2617 struct ixgbe_hw *hw = &adapter->hw; 2618 2619 switch (hw->mac.type) { 2620 case ixgbe_mac_82598EB: 2621 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 2622 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 2623 break; 2624 case ixgbe_mac_82599EB: 2625 case ixgbe_mac_X540: 2626 case ixgbe_mac_X550: 2627 case ixgbe_mac_X550EM_x: 2628 case ixgbe_mac_x550em_a: 2629 mask = (qmask & 0xFFFFFFFF); 2630 if (mask) 2631 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 2632 mask = (qmask >> 32); 2633 if (mask) 2634 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 2635 break; 2636 default: 2637 break; 2638 } 2639 /* skip the flush */ 2640 } 2641 2642 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, 2643 u64 qmask) 2644 { 2645 u32 mask; 2646 struct ixgbe_hw *hw = &adapter->hw; 2647 2648 switch (hw->mac.type) { 2649 case ixgbe_mac_82598EB: 2650 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 2651 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 2652 break; 2653 case ixgbe_mac_82599EB: 2654 case ixgbe_mac_X540: 2655 case ixgbe_mac_X550: 2656 case ixgbe_mac_X550EM_x: 2657 case ixgbe_mac_x550em_a: 2658 mask = (qmask & 0xFFFFFFFF); 2659 if (mask) 2660 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 2661 mask = (qmask >> 32); 2662 if (mask) 2663 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 2664 break; 2665 default: 2666 break; 2667 } 2668 /* skip the flush */ 2669 } 2670 2671 /** 2672 * ixgbe_irq_enable - Enable default interrupt generation settings 2673 * @adapter: board private structure 2674 **/ 2675 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, 2676 bool flush) 2677 { 2678 struct ixgbe_hw *hw = &adapter->hw; 2679 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 2680 2681 /* don't reenable LSC while waiting for link */ 2682 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) 2683 mask &= ~IXGBE_EIMS_LSC; 2684 2685 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 2686 switch (adapter->hw.mac.type) { 2687 case ixgbe_mac_82599EB: 2688 mask |= IXGBE_EIMS_GPI_SDP0(hw); 2689 break; 2690 case ixgbe_mac_X540: 2691 case ixgbe_mac_X550: 2692 case ixgbe_mac_X550EM_x: 2693 case ixgbe_mac_x550em_a: 2694 mask |= IXGBE_EIMS_TS; 2695 break; 2696 default: 2697 break; 2698 } 2699 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2700 mask |= IXGBE_EIMS_GPI_SDP1(hw); 2701 switch (adapter->hw.mac.type) { 2702 case ixgbe_mac_82599EB: 2703 mask |= IXGBE_EIMS_GPI_SDP1(hw); 2704 mask |= IXGBE_EIMS_GPI_SDP2(hw); 2705 /* fall through */ 2706 case ixgbe_mac_X540: 2707 case ixgbe_mac_X550: 2708 case ixgbe_mac_X550EM_x: 2709 case ixgbe_mac_x550em_a: 2710 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || 2711 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || 2712 adapter->hw.device_id == 
IXGBE_DEV_ID_X550EM_A_SFP_N) 2713 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); 2714 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) 2715 mask |= IXGBE_EICR_GPI_SDP0_X540; 2716 mask |= IXGBE_EIMS_ECC; 2717 mask |= IXGBE_EIMS_MAILBOX; 2718 break; 2719 default: 2720 break; 2721 } 2722 2723 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && 2724 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) 2725 mask |= IXGBE_EIMS_FLOW_DIR; 2726 2727 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 2728 if (queues) 2729 ixgbe_irq_enable_queues(adapter, ~0); 2730 if (flush) 2731 IXGBE_WRITE_FLUSH(&adapter->hw); 2732 } 2733 2734 static irqreturn_t ixgbe_msix_other(int irq, void *data) 2735 { 2736 struct ixgbe_adapter *adapter = data; 2737 struct ixgbe_hw *hw = &adapter->hw; 2738 u32 eicr; 2739 2740 /* 2741 * Workaround for Silicon errata. Use clear-by-write instead 2742 * of clear-by-read. Reading with EICS will return the 2743 * interrupt causes without clearing, which later be done 2744 * with the write to EICR. 2745 */ 2746 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2747 2748 /* The lower 16bits of the EICR register are for the queue interrupts 2749 * which should be masked here in order to not accidentally clear them if 2750 * the bits are high when ixgbe_msix_other is called. There is a race 2751 * condition otherwise which results in possible performance loss 2752 * especially if the ixgbe_msix_other interrupt is triggering 2753 * consistently (as it would when PPS is turned on for the X540 device) 2754 */ 2755 eicr &= 0xFFFF0000; 2756 2757 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2758 2759 if (eicr & IXGBE_EICR_LSC) 2760 ixgbe_check_lsc(adapter); 2761 2762 if (eicr & IXGBE_EICR_MAILBOX) 2763 ixgbe_msg_task(adapter); 2764 2765 switch (hw->mac.type) { 2766 case ixgbe_mac_82599EB: 2767 case ixgbe_mac_X540: 2768 case ixgbe_mac_X550: 2769 case ixgbe_mac_X550EM_x: 2770 case ixgbe_mac_x550em_a: 2771 if (hw->phy.type == ixgbe_phy_x550em_ext_t && 2772 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2773 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; 2774 ixgbe_service_event_schedule(adapter); 2775 IXGBE_WRITE_REG(hw, IXGBE_EICR, 2776 IXGBE_EICR_GPI_SDP0_X540); 2777 } 2778 if (eicr & IXGBE_EICR_ECC) { 2779 e_info(link, "Received ECC Err, initiating reset\n"); 2780 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 2781 ixgbe_service_event_schedule(adapter); 2782 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 2783 } 2784 /* Handle Flow Director Full threshold interrupt */ 2785 if (eicr & IXGBE_EICR_FLOW_DIR) { 2786 int reinit_count = 0; 2787 int i; 2788 for (i = 0; i < adapter->num_tx_queues; i++) { 2789 struct ixgbe_ring *ring = adapter->tx_ring[i]; 2790 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, 2791 &ring->state)) 2792 reinit_count++; 2793 } 2794 if (reinit_count) { 2795 /* no more flow director interrupts until after init */ 2796 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); 2797 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 2798 ixgbe_service_event_schedule(adapter); 2799 } 2800 } 2801 ixgbe_check_sfp_event(adapter, eicr); 2802 ixgbe_check_overtemp_event(adapter, eicr); 2803 break; 2804 default: 2805 break; 2806 } 2807 2808 ixgbe_check_fan_failure(adapter, eicr); 2809 2810 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 2811 ixgbe_ptp_check_pps_event(adapter); 2812 2813 /* re-enable the original interrupt state, no lsc, no queues */ 2814 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2815 ixgbe_irq_enable(adapter, false, false); 2816 2817 return IRQ_HANDLED; 2818 } 2819 2820 static irqreturn_t 
ixgbe_msix_clean_rings(int irq, void *data) 2821 { 2822 struct ixgbe_q_vector *q_vector = data; 2823 2824 /* EIAM disabled interrupts (on this vector) for us */ 2825 2826 if (q_vector->rx.ring || q_vector->tx.ring) 2827 napi_schedule_irqoff(&q_vector->napi); 2828 2829 return IRQ_HANDLED; 2830 } 2831 2832 /** 2833 * ixgbe_poll - NAPI Rx polling callback 2834 * @napi: structure for representing this polling device 2835 * @budget: how many packets driver is allowed to clean 2836 * 2837 * This function is used for legacy and MSI, NAPI mode 2838 **/ 2839 int ixgbe_poll(struct napi_struct *napi, int budget) 2840 { 2841 struct ixgbe_q_vector *q_vector = 2842 container_of(napi, struct ixgbe_q_vector, napi); 2843 struct ixgbe_adapter *adapter = q_vector->adapter; 2844 struct ixgbe_ring *ring; 2845 int per_ring_budget, work_done = 0; 2846 bool clean_complete = true; 2847 2848 #ifdef CONFIG_IXGBE_DCA 2849 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2850 ixgbe_update_dca(q_vector); 2851 #endif 2852 2853 ixgbe_for_each_ring(ring, q_vector->tx) { 2854 if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) 2855 clean_complete = false; 2856 } 2857 2858 /* Exit if we are called by netpoll or busy polling is active */ 2859 if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) 2860 return budget; 2861 2862 /* attempt to distribute budget to each queue fairly, but don't allow 2863 * the budget to go below 1 because we'll exit polling */ 2864 if (q_vector->rx.count > 1) 2865 per_ring_budget = max(budget/q_vector->rx.count, 1); 2866 else 2867 per_ring_budget = budget; 2868 2869 ixgbe_for_each_ring(ring, q_vector->rx) { 2870 int cleaned = ixgbe_clean_rx_irq(q_vector, ring, 2871 per_ring_budget); 2872 2873 work_done += cleaned; 2874 if (cleaned >= per_ring_budget) 2875 clean_complete = false; 2876 } 2877 2878 ixgbe_qv_unlock_napi(q_vector); 2879 /* If all work not completed, return budget and keep polling */ 2880 if (!clean_complete) 2881 return budget; 2882 2883 /* all work done, exit the polling mode */ 2884 napi_complete_done(napi, work_done); 2885 if (adapter->rx_itr_setting & 1) 2886 ixgbe_set_itr(q_vector); 2887 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2888 ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); 2889 2890 return min(work_done, budget - 1); 2891 } 2892 2893 /** 2894 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts 2895 * @adapter: board private structure 2896 * 2897 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests 2898 * interrupts from the kernel. 
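 * One vector is requested per active queue q_vector, with the final
 * MSI-X entry reserved for the "other" causes (link, mailbox, etc.).
 * On any failure every vector requested so far is freed again, MSI-X is
 * disabled and the error is returned to the caller.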
2899 **/ 2900 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) 2901 { 2902 struct net_device *netdev = adapter->netdev; 2903 int vector, err; 2904 int ri = 0, ti = 0; 2905 2906 for (vector = 0; vector < adapter->num_q_vectors; vector++) { 2907 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 2908 struct msix_entry *entry = &adapter->msix_entries[vector]; 2909 2910 if (q_vector->tx.ring && q_vector->rx.ring) { 2911 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2912 "%s-%s-%d", netdev->name, "TxRx", ri++); 2913 ti++; 2914 } else if (q_vector->rx.ring) { 2915 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2916 "%s-%s-%d", netdev->name, "rx", ri++); 2917 } else if (q_vector->tx.ring) { 2918 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2919 "%s-%s-%d", netdev->name, "tx", ti++); 2920 } else { 2921 /* skip this unused q_vector */ 2922 continue; 2923 } 2924 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, 2925 q_vector->name, q_vector); 2926 if (err) { 2927 e_err(probe, "request_irq failed for MSIX interrupt " 2928 "Error: %d\n", err); 2929 goto free_queue_irqs; 2930 } 2931 /* If Flow Director is enabled, set interrupt affinity */ 2932 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 2933 /* assign the mask for this irq */ 2934 irq_set_affinity_hint(entry->vector, 2935 &q_vector->affinity_mask); 2936 } 2937 } 2938 2939 err = request_irq(adapter->msix_entries[vector].vector, 2940 ixgbe_msix_other, 0, netdev->name, adapter); 2941 if (err) { 2942 e_err(probe, "request_irq for msix_other failed: %d\n", err); 2943 goto free_queue_irqs; 2944 } 2945 2946 return 0; 2947 2948 free_queue_irqs: 2949 while (vector) { 2950 vector--; 2951 irq_set_affinity_hint(adapter->msix_entries[vector].vector, 2952 NULL); 2953 free_irq(adapter->msix_entries[vector].vector, 2954 adapter->q_vector[vector]); 2955 } 2956 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2957 pci_disable_msix(adapter->pdev); 2958 kfree(adapter->msix_entries); 2959 adapter->msix_entries = NULL; 2960 return err; 2961 } 2962 2963 /** 2964 * ixgbe_intr - legacy mode Interrupt Handler 2965 * @irq: interrupt number 2966 * @data: pointer to a network interface device structure 2967 **/ 2968 static irqreturn_t ixgbe_intr(int irq, void *data) 2969 { 2970 struct ixgbe_adapter *adapter = data; 2971 struct ixgbe_hw *hw = &adapter->hw; 2972 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 2973 u32 eicr; 2974 2975 /* 2976 * Workaround for silicon errata #26 on 82598. Mask the interrupt 2977 * before the read of EICR. 2978 */ 2979 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 2980 2981 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read 2982 * therefore no explicit interrupt disable is necessary */ 2983 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 2984 if (!eicr) { 2985 /* 2986 * shared interrupt alert! 2987 * make sure interrupts are enabled because the read will 2988 * have disabled interrupts due to EIAM 2989 * finish the workaround of silicon errata on 82598. Unmask 2990 * the interrupt that we masked before the EICR read. 
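	 * Returning IRQ_NONE below tells the kernel that the interrupt
	 * came from another device sharing the line, not from us.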
2991 */ 2992 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2993 ixgbe_irq_enable(adapter, true, true); 2994 return IRQ_NONE; /* Not our interrupt */ 2995 } 2996 2997 if (eicr & IXGBE_EICR_LSC) 2998 ixgbe_check_lsc(adapter); 2999 3000 switch (hw->mac.type) { 3001 case ixgbe_mac_82599EB: 3002 ixgbe_check_sfp_event(adapter, eicr); 3003 /* Fall through */ 3004 case ixgbe_mac_X540: 3005 case ixgbe_mac_X550: 3006 case ixgbe_mac_X550EM_x: 3007 case ixgbe_mac_x550em_a: 3008 if (eicr & IXGBE_EICR_ECC) { 3009 e_info(link, "Received ECC Err, initiating reset\n"); 3010 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 3011 ixgbe_service_event_schedule(adapter); 3012 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 3013 } 3014 ixgbe_check_overtemp_event(adapter, eicr); 3015 break; 3016 default: 3017 break; 3018 } 3019 3020 ixgbe_check_fan_failure(adapter, eicr); 3021 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 3022 ixgbe_ptp_check_pps_event(adapter); 3023 3024 /* would disable interrupts here but EIAM disabled it */ 3025 napi_schedule_irqoff(&q_vector->napi); 3026 3027 /* 3028 * re-enable link(maybe) and non-queue interrupts, no flush. 3029 * ixgbe_poll will re-enable the queue interrupts 3030 */ 3031 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3032 ixgbe_irq_enable(adapter, false, false); 3033 3034 return IRQ_HANDLED; 3035 } 3036 3037 /** 3038 * ixgbe_request_irq - initialize interrupts 3039 * @adapter: board private structure 3040 * 3041 * Attempts to configure interrupts using the best available 3042 * capabilities of the hardware and kernel. 3043 **/ 3044 static int ixgbe_request_irq(struct ixgbe_adapter *adapter) 3045 { 3046 struct net_device *netdev = adapter->netdev; 3047 int err; 3048 3049 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 3050 err = ixgbe_request_msix_irqs(adapter); 3051 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) 3052 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, 3053 netdev->name, adapter); 3054 else 3055 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, 3056 netdev->name, adapter); 3057 3058 if (err) 3059 e_err(probe, "request_irq failed, Error %d\n", err); 3060 3061 return err; 3062 } 3063 3064 static void ixgbe_free_irq(struct ixgbe_adapter *adapter) 3065 { 3066 int vector; 3067 3068 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 3069 free_irq(adapter->pdev->irq, adapter); 3070 return; 3071 } 3072 3073 for (vector = 0; vector < adapter->num_q_vectors; vector++) { 3074 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 3075 struct msix_entry *entry = &adapter->msix_entries[vector]; 3076 3077 /* free only the irqs that were actually requested */ 3078 if (!q_vector->rx.ring && !q_vector->tx.ring) 3079 continue; 3080 3081 /* clear the affinity_mask in the IRQ descriptor */ 3082 irq_set_affinity_hint(entry->vector, NULL); 3083 3084 free_irq(entry->vector, q_vector); 3085 } 3086 3087 free_irq(adapter->msix_entries[vector].vector, adapter); 3088 } 3089 3090 /** 3091 * ixgbe_irq_disable - Mask off interrupt generation on the NIC 3092 * @adapter: board private structure 3093 **/ 3094 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 3095 { 3096 switch (adapter->hw.mac.type) { 3097 case ixgbe_mac_82598EB: 3098 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3099 break; 3100 case ixgbe_mac_82599EB: 3101 case ixgbe_mac_X540: 3102 case ixgbe_mac_X550: 3103 case ixgbe_mac_X550EM_x: 3104 case ixgbe_mac_x550em_a: 3105 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3107 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3108 break; 3109 default: 3110 break; 3111 } 3112 IXGBE_WRITE_FLUSH(&adapter->hw); 3113 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3114 int vector; 3115 3116 for (vector = 0; vector < adapter->num_q_vectors; vector++) 3117 synchronize_irq(adapter->msix_entries[vector].vector); 3118 3119 synchronize_irq(adapter->msix_entries[vector++].vector); 3120 } else { 3121 synchronize_irq(adapter->pdev->irq); 3122 } 3123 } 3124 3125 /** 3126 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts 3127 * 3128 **/ 3129 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) 3130 { 3131 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 3132 3133 ixgbe_write_eitr(q_vector); 3134 3135 ixgbe_set_ivar(adapter, 0, 0, 0); 3136 ixgbe_set_ivar(adapter, 1, 0, 0); 3137 3138 e_info(hw, "Legacy interrupt IVAR setup done\n"); 3139 } 3140 3141 /** 3142 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset 3143 * @adapter: board private structure 3144 * @ring: structure containing ring specific data 3145 * 3146 * Configure the Tx descriptor ring after a reset. 3147 **/ 3148 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, 3149 struct ixgbe_ring *ring) 3150 { 3151 struct ixgbe_hw *hw = &adapter->hw; 3152 u64 tdba = ring->dma; 3153 int wait_loop = 10; 3154 u32 txdctl = IXGBE_TXDCTL_ENABLE; 3155 u8 reg_idx = ring->reg_idx; 3156 3157 /* disable queue to avoid issues while updating state */ 3158 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); 3159 IXGBE_WRITE_FLUSH(hw); 3160 3161 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), 3162 (tdba & DMA_BIT_MASK(32))); 3163 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); 3164 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), 3165 ring->count * sizeof(union ixgbe_adv_tx_desc)); 3166 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); 3167 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); 3168 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); 3169 3170 /* 3171 * set WTHRESH to encourage burst writeback, it should not be set 3172 * higher than 1 when: 3173 * - ITR is 0 as it could cause false TX hangs 3174 * - ITR is set to > 100k int/sec and BQL is enabled 3175 * 3176 * In order to avoid issues WTHRESH + PTHRESH should always be equal 3177 * to or less than the number of on chip descriptors, which is 3178 * currently 40. 
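	 * (e.g. the WTHRESH = 8 and PTHRESH = 32 values programmed below
	 * sum to exactly 40)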
3179 */ 3180 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) 3181 txdctl |= 1u << 16; /* WTHRESH = 1 */ 3182 else 3183 txdctl |= 8u << 16; /* WTHRESH = 8 */ 3184 3185 /* 3186 * Setting PTHRESH to 32 both improves performance 3187 * and avoids a TX hang with DFP enabled 3188 */ 3189 txdctl |= (1u << 8) | /* HTHRESH = 1 */ 3190 32; /* PTHRESH = 32 */ 3191 3192 /* reinitialize flowdirector state */ 3193 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 3194 ring->atr_sample_rate = adapter->atr_sample_rate; 3195 ring->atr_count = 0; 3196 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); 3197 } else { 3198 ring->atr_sample_rate = 0; 3199 } 3200 3201 /* initialize XPS */ 3202 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { 3203 struct ixgbe_q_vector *q_vector = ring->q_vector; 3204 3205 if (q_vector) 3206 netif_set_xps_queue(ring->netdev, 3207 &q_vector->affinity_mask, 3208 ring->queue_index); 3209 } 3210 3211 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); 3212 3213 /* enable queue */ 3214 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); 3215 3216 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 3217 if (hw->mac.type == ixgbe_mac_82598EB && 3218 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 3219 return; 3220 3221 /* poll to verify queue is enabled */ 3222 do { 3223 usleep_range(1000, 2000); 3224 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 3225 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); 3226 if (!wait_loop) 3227 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); 3228 } 3229 3230 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) 3231 { 3232 struct ixgbe_hw *hw = &adapter->hw; 3233 u32 rttdcs, mtqc; 3234 u8 tcs = netdev_get_num_tc(adapter->netdev); 3235 3236 if (hw->mac.type == ixgbe_mac_82598EB) 3237 return; 3238 3239 /* disable the arbiter while setting MTQC */ 3240 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 3241 rttdcs |= IXGBE_RTTDCS_ARBDIS; 3242 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3243 3244 /* set transmit pool layout */ 3245 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3246 mtqc = IXGBE_MTQC_VT_ENA; 3247 if (tcs > 4) 3248 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; 3249 else if (tcs > 1) 3250 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; 3251 else if (adapter->ring_feature[RING_F_VMDQ].mask == 3252 IXGBE_82599_VMDQ_4Q_MASK) 3253 mtqc |= IXGBE_MTQC_32VF; 3254 else 3255 mtqc |= IXGBE_MTQC_64VF; 3256 } else { 3257 if (tcs > 4) 3258 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; 3259 else if (tcs > 1) 3260 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; 3261 else 3262 mtqc = IXGBE_MTQC_64Q_1PB; 3263 } 3264 3265 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); 3266 3267 /* Enable Security TX Buffer IFG for multiple pb */ 3268 if (tcs) { 3269 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 3270 sectx |= IXGBE_SECTX_DCB; 3271 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); 3272 } 3273 3274 /* re-enable the arbiter */ 3275 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 3276 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3277 } 3278 3279 /** 3280 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset 3281 * @adapter: board private structure 3282 * 3283 * Configure the Tx unit of the MAC after a reset. 
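 * This programs the MTQC transmit pool/TC layout via ixgbe_setup_mtqc(),
 * enables the Tx DMA engine (DMATXCTL.TE) on non-82598 parts, and then
 * configures every Tx ring (base address, length, head/tail, thresholds).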
3284 **/ 3285 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 3286 { 3287 struct ixgbe_hw *hw = &adapter->hw; 3288 u32 dmatxctl; 3289 u32 i; 3290 3291 ixgbe_setup_mtqc(adapter); 3292 3293 if (hw->mac.type != ixgbe_mac_82598EB) { 3294 /* DMATXCTL.EN must be before Tx queues are enabled */ 3295 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 3296 dmatxctl |= IXGBE_DMATXCTL_TE; 3297 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 3298 } 3299 3300 /* Setup the HW Tx Head and Tail descriptor pointers */ 3301 for (i = 0; i < adapter->num_tx_queues; i++) 3302 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); 3303 } 3304 3305 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, 3306 struct ixgbe_ring *ring) 3307 { 3308 struct ixgbe_hw *hw = &adapter->hw; 3309 u8 reg_idx = ring->reg_idx; 3310 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); 3311 3312 srrctl |= IXGBE_SRRCTL_DROP_EN; 3313 3314 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); 3315 } 3316 3317 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, 3318 struct ixgbe_ring *ring) 3319 { 3320 struct ixgbe_hw *hw = &adapter->hw; 3321 u8 reg_idx = ring->reg_idx; 3322 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); 3323 3324 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3325 3326 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); 3327 } 3328 3329 #ifdef CONFIG_IXGBE_DCB 3330 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) 3331 #else 3332 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) 3333 #endif 3334 { 3335 int i; 3336 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; 3337 3338 if (adapter->ixgbe_ieee_pfc) 3339 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); 3340 3341 /* 3342 * We should set the drop enable bit if: 3343 * SR-IOV is enabled 3344 * or 3345 * Number of Rx queues > 1 and flow control is disabled 3346 * 3347 * This allows us to avoid head of line blocking for security 3348 * and performance reasons. 
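	 * Otherwise the drop enable bit is left clear so that, when flow
	 * control is active, pause frames can throttle the sender rather
	 * than the ring silently dropping received frames.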
3349 */ 3350 if (adapter->num_vfs || (adapter->num_rx_queues > 1 && 3351 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { 3352 for (i = 0; i < adapter->num_rx_queues; i++) 3353 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); 3354 } else { 3355 for (i = 0; i < adapter->num_rx_queues; i++) 3356 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); 3357 } 3358 } 3359 3360 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 3361 3362 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 3363 struct ixgbe_ring *rx_ring) 3364 { 3365 struct ixgbe_hw *hw = &adapter->hw; 3366 u32 srrctl; 3367 u8 reg_idx = rx_ring->reg_idx; 3368 3369 if (hw->mac.type == ixgbe_mac_82598EB) { 3370 u16 mask = adapter->ring_feature[RING_F_RSS].mask; 3371 3372 /* 3373 * if VMDq is not active we must program one srrctl register 3374 * per RSS queue since we have enabled RDRXCTL.MVMEN 3375 */ 3376 reg_idx &= mask; 3377 } 3378 3379 /* configure header buffer length, needed for RSC */ 3380 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; 3381 3382 /* configure the packet buffer length */ 3383 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 3384 3385 /* configure descriptor type */ 3386 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 3387 3388 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); 3389 } 3390 3391 /** 3392 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries 3393 * @adapter: device handle 3394 * 3395 * - 82598/82599/X540: 128 3396 * - X550(non-SRIOV mode): 512 3397 * - X550(SRIOV mode): 64 3398 */ 3399 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) 3400 { 3401 if (adapter->hw.mac.type < ixgbe_mac_X550) 3402 return 128; 3403 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 3404 return 64; 3405 else 3406 return 512; 3407 } 3408 3409 /** 3410 * ixgbe_store_reta - Write the RETA table to HW 3411 * @adapter: device handle 3412 * 3413 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 3414 */ 3415 void ixgbe_store_reta(struct ixgbe_adapter *adapter) 3416 { 3417 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3418 struct ixgbe_hw *hw = &adapter->hw; 3419 u32 reta = 0; 3420 u32 indices_multi; 3421 u8 *indir_tbl = adapter->rss_indir_tbl; 3422 3423 /* Fill out the redirection table as follows: 3424 * - 82598: 8 bit wide entries containing pair of 4 bit RSS 3425 * indices. 3426 * - 82599/X540: 8 bit wide entries containing 4 bit RSS index 3427 * - X550: 8 bit wide entries containing 6 bit RSS index 3428 */ 3429 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 3430 indices_multi = 0x11; 3431 else 3432 indices_multi = 0x1; 3433 3434 /* Write redirection table to HW */ 3435 for (i = 0; i < reta_entries; i++) { 3436 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; 3437 if ((i & 3) == 3) { 3438 if (i < 128) 3439 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3440 else 3441 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 3442 reta); 3443 reta = 0; 3444 } 3445 } 3446 } 3447 3448 /** 3449 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) 3450 * @adapter: device handle 3451 * 3452 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
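 * Unlike ixgbe_store_reta(), the table is written through the per-pool
 * PFVFRETA registers of the PF's pool, packing four 8-bit entries into
 * each 32-bit register write.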
3453 */ 3454 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) 3455 { 3456 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3457 struct ixgbe_hw *hw = &adapter->hw; 3458 u32 vfreta = 0; 3459 unsigned int pf_pool = adapter->num_vfs; 3460 3461 /* Write redirection table to HW */ 3462 for (i = 0; i < reta_entries; i++) { 3463 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; 3464 if ((i & 3) == 3) { 3465 IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), 3466 vfreta); 3467 vfreta = 0; 3468 } 3469 } 3470 } 3471 3472 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) 3473 { 3474 struct ixgbe_hw *hw = &adapter->hw; 3475 u32 i, j; 3476 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3477 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 3478 3479 /* Program table for at least 4 queues w/ SR-IOV so that VFs can 3480 * make full use of any rings they may have. We will use the 3481 * PSRTYPE register to control how many rings we use within the PF. 3482 */ 3483 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) 3484 rss_i = 4; 3485 3486 /* Fill out hash function seeds */ 3487 for (i = 0; i < 10; i++) 3488 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); 3489 3490 /* Fill out redirection table */ 3491 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); 3492 3493 for (i = 0, j = 0; i < reta_entries; i++, j++) { 3494 if (j == rss_i) 3495 j = 0; 3496 3497 adapter->rss_indir_tbl[i] = j; 3498 } 3499 3500 ixgbe_store_reta(adapter); 3501 } 3502 3503 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) 3504 { 3505 struct ixgbe_hw *hw = &adapter->hw; 3506 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 3507 unsigned int pf_pool = adapter->num_vfs; 3508 int i, j; 3509 3510 /* Fill out hash function seeds */ 3511 for (i = 0; i < 10; i++) 3512 IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), 3513 adapter->rss_key[i]); 3514 3515 /* Fill out the redirection table */ 3516 for (i = 0, j = 0; i < 64; i++, j++) { 3517 if (j == rss_i) 3518 j = 0; 3519 3520 adapter->rss_indir_tbl[i] = j; 3521 } 3522 3523 ixgbe_store_vfreta(adapter); 3524 } 3525 3526 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 3527 { 3528 struct ixgbe_hw *hw = &adapter->hw; 3529 u32 mrqc = 0, rss_field = 0, vfmrqc = 0; 3530 u32 rxcsum; 3531 3532 /* Disable indicating checksum in descriptor, enables RSS hash */ 3533 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3534 rxcsum |= IXGBE_RXCSUM_PCSD; 3535 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3536 3537 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3538 if (adapter->ring_feature[RING_F_RSS].mask) 3539 mrqc = IXGBE_MRQC_RSSEN; 3540 } else { 3541 u8 tcs = netdev_get_num_tc(adapter->netdev); 3542 3543 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3544 if (tcs > 4) 3545 mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ 3546 else if (tcs > 1) 3547 mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ 3548 else if (adapter->ring_feature[RING_F_VMDQ].mask == 3549 IXGBE_82599_VMDQ_4Q_MASK) 3550 mrqc = IXGBE_MRQC_VMDQRSS32EN; 3551 else 3552 mrqc = IXGBE_MRQC_VMDQRSS64EN; 3553 } else { 3554 if (tcs > 4) 3555 mrqc = IXGBE_MRQC_RTRSS8TCEN; 3556 else if (tcs > 1) 3557 mrqc = IXGBE_MRQC_RTRSS4TCEN; 3558 else 3559 mrqc = IXGBE_MRQC_RSSEN; 3560 } 3561 } 3562 3563 /* Perform hash on these packet types */ 3564 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | 3565 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 3566 IXGBE_MRQC_RSS_FIELD_IPV6 | 3567 IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 3568 3569 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 
3570 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 3571 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 3572 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 3573 3574 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); 3575 if ((hw->mac.type >= ixgbe_mac_X550) && 3576 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { 3577 unsigned int pf_pool = adapter->num_vfs; 3578 3579 /* Enable VF RSS mode */ 3580 mrqc |= IXGBE_MRQC_MULTIPLE_RSS; 3581 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3582 3583 /* Setup RSS through the VF registers */ 3584 ixgbe_setup_vfreta(adapter); 3585 vfmrqc = IXGBE_MRQC_RSSEN; 3586 vfmrqc |= rss_field; 3587 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); 3588 } else { 3589 ixgbe_setup_reta(adapter); 3590 mrqc |= rss_field; 3591 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3592 } 3593 } 3594 3595 /** 3596 * ixgbe_configure_rscctl - enable RSC for the indicated ring 3597 * @adapter: address of board private structure 3598 * @index: index of ring to set 3599 **/ 3600 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 3601 struct ixgbe_ring *ring) 3602 { 3603 struct ixgbe_hw *hw = &adapter->hw; 3604 u32 rscctrl; 3605 u8 reg_idx = ring->reg_idx; 3606 3607 if (!ring_is_rsc_enabled(ring)) 3608 return; 3609 3610 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); 3611 rscctrl |= IXGBE_RSCCTL_RSCEN; 3612 /* 3613 * we must limit the number of descriptors so that the 3614 * total size of max desc * buf_len is not greater 3615 * than 65536 3616 */ 3617 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 3618 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); 3619 } 3620 3621 #define IXGBE_MAX_RX_DESC_POLL 10 3622 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 3623 struct ixgbe_ring *ring) 3624 { 3625 struct ixgbe_hw *hw = &adapter->hw; 3626 int wait_loop = IXGBE_MAX_RX_DESC_POLL; 3627 u32 rxdctl; 3628 u8 reg_idx = ring->reg_idx; 3629 3630 if (ixgbe_removed(hw->hw_addr)) 3631 return; 3632 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 3633 if (hw->mac.type == ixgbe_mac_82598EB && 3634 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 3635 return; 3636 3637 do { 3638 usleep_range(1000, 2000); 3639 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3640 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); 3641 3642 if (!wait_loop) { 3643 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " 3644 "the polling period\n", reg_idx); 3645 } 3646 } 3647 3648 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, 3649 struct ixgbe_ring *ring) 3650 { 3651 struct ixgbe_hw *hw = &adapter->hw; 3652 int wait_loop = IXGBE_MAX_RX_DESC_POLL; 3653 u32 rxdctl; 3654 u8 reg_idx = ring->reg_idx; 3655 3656 if (ixgbe_removed(hw->hw_addr)) 3657 return; 3658 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3659 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 3660 3661 /* write value back with RXDCTL.ENABLE bit cleared */ 3662 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 3663 3664 if (hw->mac.type == ixgbe_mac_82598EB && 3665 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 3666 return; 3667 3668 /* the hardware may take up to 100us to really disable the rx queue */ 3669 do { 3670 udelay(10); 3671 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3672 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); 3673 3674 if (!wait_loop) { 3675 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " 3676 "the polling period\n", reg_idx); 3677 } 3678 } 3679 3680 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 3681 struct ixgbe_ring 
*ring) 3682 { 3683 struct ixgbe_hw *hw = &adapter->hw; 3684 u64 rdba = ring->dma; 3685 u32 rxdctl; 3686 u8 reg_idx = ring->reg_idx; 3687 3688 /* disable queue to avoid issues while updating state */ 3689 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3690 ixgbe_disable_rx_queue(adapter, ring); 3691 3692 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); 3693 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); 3694 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), 3695 ring->count * sizeof(union ixgbe_adv_rx_desc)); 3696 /* Force flushing of IXGBE_RDLEN to prevent MDD */ 3697 IXGBE_WRITE_FLUSH(hw); 3698 3699 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); 3700 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); 3701 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); 3702 3703 ixgbe_configure_srrctl(adapter, ring); 3704 ixgbe_configure_rscctl(adapter, ring); 3705 3706 if (hw->mac.type == ixgbe_mac_82598EB) { 3707 /* 3708 * enable cache line friendly hardware writes: 3709 * PTHRESH=32 descriptors (half the internal cache), 3710 * this also removes ugly rx_no_buffer_count increment 3711 * HTHRESH=4 descriptors (to minimize latency on fetch) 3712 * WTHRESH=8 burst writeback up to two cache lines 3713 */ 3714 rxdctl &= ~0x3FFFFF; 3715 rxdctl |= 0x080420; 3716 } 3717 3718 /* enable receive descriptor ring */ 3719 rxdctl |= IXGBE_RXDCTL_ENABLE; 3720 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 3721 3722 ixgbe_rx_desc_queue_enable(adapter, ring); 3723 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); 3724 } 3725 3726 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) 3727 { 3728 struct ixgbe_hw *hw = &adapter->hw; 3729 int rss_i = adapter->ring_feature[RING_F_RSS].indices; 3730 u16 pool; 3731 3732 /* PSRTYPE must be initialized in non 82598 adapters */ 3733 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 3734 IXGBE_PSRTYPE_UDPHDR | 3735 IXGBE_PSRTYPE_IPV4HDR | 3736 IXGBE_PSRTYPE_L2HDR | 3737 IXGBE_PSRTYPE_IPV6HDR; 3738 3739 if (hw->mac.type == ixgbe_mac_82598EB) 3740 return; 3741 3742 if (rss_i > 3) 3743 psrtype |= 2u << 29; 3744 else if (rss_i > 1) 3745 psrtype |= 1u << 29; 3746 3747 for_each_set_bit(pool, &adapter->fwd_bitmask, 32) 3748 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); 3749 } 3750 3751 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) 3752 { 3753 struct ixgbe_hw *hw = &adapter->hw; 3754 u32 reg_offset, vf_shift; 3755 u32 gcr_ext, vmdctl; 3756 int i; 3757 3758 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 3759 return; 3760 3761 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3762 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; 3763 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; 3764 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; 3765 vmdctl |= IXGBE_VT_CTL_REPLEN; 3766 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); 3767 3768 vf_shift = VMDQ_P(0) % 32; 3769 reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; 3770 3771 /* Enable only the PF's pool for Tx/Rx */ 3772 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); 3773 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); 3774 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); 3775 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); 3776 if (adapter->bridge_mode == BRIDGE_MODE_VEB) 3777 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 3778 3779 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ 3780 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); 3781 3782 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 3783 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; 3784 3785 /* 3786 * Set up VF register offsets for selected VT Mode, 3787 * i.e. 16, 32 or 64 VFs for SR-IOV 3788 */ 3789 switch (adapter->ring_feature[RING_F_VMDQ].mask) { 3790 case IXGBE_82599_VMDQ_8Q_MASK: 3791 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; 3792 break; 3793 case IXGBE_82599_VMDQ_4Q_MASK: 3794 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; 3795 break; 3796 default: 3797 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; 3798 break; 3799 } 3800 3801 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3802 3803 for (i = 0; i < adapter->num_vfs; i++) { 3804 /* configure spoof checking */ 3805 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, 3806 adapter->vfinfo[i].spoofchk_enabled); 3807 3808 /* Enable/Disable RSS query feature */ 3809 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, 3810 adapter->vfinfo[i].rss_query_enabled); 3811 } 3812 } 3813 3814 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) 3815 { 3816 struct ixgbe_hw *hw = &adapter->hw; 3817 struct net_device *netdev = adapter->netdev; 3818 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3819 struct ixgbe_ring *rx_ring; 3820 int i; 3821 u32 mhadd, hlreg0; 3822 3823 #ifdef IXGBE_FCOE 3824 /* adjust max frame to be able to do baby jumbo for FCoE */ 3825 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 3826 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) 3827 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; 3828 3829 #endif /* IXGBE_FCOE */ 3830 3831 /* adjust max frame to be at least the size of a standard frame */ 3832 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 3833 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); 3834 3835 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3836 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 3837 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3838 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; 3839 3840 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3841 } 3842 3843 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3844 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ 3845 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 3846 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 3847 3848 /* 3849 * Flag each Rx ring for RSC based on the global RSC setting; the 3850 * per-ring registers are programmed later in ixgbe_configure_rx_ring() 3851 */ 3852 for (i = 0; i < adapter->num_rx_queues; i++) { 3853 rx_ring = adapter->rx_ring[i]; 3854 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 3855 set_ring_rsc_enabled(rx_ring); 3856 else 3857 clear_ring_rsc_enabled(rx_ring); 3858 } 3859 } 3860 3861 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) 3862 { 3863 struct ixgbe_hw *hw = &adapter->hw; 3864 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 3865 3866 switch (hw->mac.type) { 3867 case ixgbe_mac_82598EB: 3868 /* 3869 * For VMDq support of different descriptor types or 3870 * buffer sizes through the use of multiple SRRCTL 3871 * registers, RDRXCTL.MVMEN must be set to 1 3872 * 3873 *
also, the manual doesn't mention it clearly but DCA hints 3874 * will only use queue 0's tags unless this bit is set. Side 3875 * effects of setting this bit are only that SRRCTL must be 3876 * fully programmed [0..15] 3877 */ 3878 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 3879 break; 3880 case ixgbe_mac_X550: 3881 case ixgbe_mac_X550EM_x: 3882 case ixgbe_mac_x550em_a: 3883 if (adapter->num_vfs) 3884 rdrxctl |= IXGBE_RDRXCTL_PSP; 3885 /* fall through for older HW */ 3886 case ixgbe_mac_82599EB: 3887 case ixgbe_mac_X540: 3888 /* Disable RSC for ACK packets */ 3889 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 3890 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 3891 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 3892 /* hardware requires some bits to be set by default */ 3893 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); 3894 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 3895 break; 3896 default: 3897 /* We should do nothing since we don't know this hardware */ 3898 return; 3899 } 3900 3901 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 3902 } 3903 3904 /** 3905 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 3906 * @adapter: board private structure 3907 * 3908 * Configure the Rx unit of the MAC after a reset. 3909 **/ 3910 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 3911 { 3912 struct ixgbe_hw *hw = &adapter->hw; 3913 int i; 3914 u32 rxctrl, rfctl; 3915 3916 /* disable receives while setting up the descriptors */ 3917 hw->mac.ops.disable_rx(hw); 3918 3919 ixgbe_setup_psrtype(adapter); 3920 ixgbe_setup_rdrxctl(adapter); 3921 3922 /* RSC Setup */ 3923 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); 3924 rfctl &= ~IXGBE_RFCTL_RSC_DIS; 3925 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) 3926 rfctl |= IXGBE_RFCTL_RSC_DIS; 3927 3928 /* disable NFS filtering */ 3929 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); 3930 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); 3931 3932 /* Program registers for the distribution of queues */ 3933 ixgbe_setup_mrqc(adapter); 3934 3935 /* set_rx_buffer_len must be called before ring initialization */ 3936 ixgbe_set_rx_buffer_len(adapter); 3937 3938 /* 3939 * Setup the HW Rx Head and Tail Descriptor Pointers and 3940 * the Base and Length of the Rx Descriptor Ring 3941 */ 3942 for (i = 0; i < adapter->num_rx_queues; i++) 3943 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); 3944 3945 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3946 /* disable drop enable for 82598 parts */ 3947 if (hw->mac.type == ixgbe_mac_82598EB) 3948 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3949 3950 /* enable all receives */ 3951 rxctrl |= IXGBE_RXCTRL_RXEN; 3952 hw->mac.ops.enable_rx_dma(hw, rxctrl); 3953 } 3954 3955 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, 3956 __be16 proto, u16 vid) 3957 { 3958 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3959 struct ixgbe_hw *hw = &adapter->hw; 3960 3961 /* add VID to filter table */ 3962 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 3963 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); 3964 3965 set_bit(vid, adapter->active_vlans); 3966 3967 return 0; 3968 } 3969 3970 static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) 3971 { 3972 u32 vlvf; 3973 int idx; 3974 3975 /* short cut the special case */ 3976 if (vlan == 0) 3977 return 0; 3978 3979 /* Search for the vlan id in the VLVF entries */ 3980 for (idx = IXGBE_VLVF_ENTRIES; --idx;) { 3981 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); 3982 if ((vlvf & VLAN_VID_MASK) == vlan) 3983 break; 3984 } 3985 3986 return idx; 3987 } 
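/*
 * Worked example (illustrative only, nothing below references it): each VLVF
 * entry owns a pair of VLVFB pool-enable registers, i.e. 64 pool bits per
 * VLAN filter.  The helpers that follow locate pool 'p' for VLVF entry 'idx'
 * with:
 *
 *	word = idx * 2 + p / 32;
 *	bit  = p % 32;
 *
 * so, assuming pool 40 on VLVF entry 3, the pool bit lives in IXGBE_VLVFB(7),
 * bit 8.
 */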
3988 3989 void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) 3990 { 3991 struct ixgbe_hw *hw = &adapter->hw; 3992 u32 bits, word; 3993 int idx; 3994 3995 idx = ixgbe_find_vlvf_entry(hw, vid); 3996 if (!idx) 3997 return; 3998 3999 /* See if any other pools are set for this VLAN filter 4000 * entry other than the PF. 4001 */ 4002 word = idx * 2 + (VMDQ_P(0) / 32); 4003 bits = ~BIT(VMDQ_P(0) % 32); 4004 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); 4005 4006 /* Disable the filter so this falls into the default pool. */ 4007 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { 4008 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 4009 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); 4010 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); 4011 } 4012 } 4013 4014 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, 4015 __be16 proto, u16 vid) 4016 { 4017 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4018 struct ixgbe_hw *hw = &adapter->hw; 4019 4020 /* remove VID from filter table */ 4021 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 4022 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); 4023 4024 clear_bit(vid, adapter->active_vlans); 4025 4026 return 0; 4027 } 4028 4029 /** 4030 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping 4031 * @adapter: driver data 4032 */ 4033 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) 4034 { 4035 struct ixgbe_hw *hw = &adapter->hw; 4036 u32 vlnctrl; 4037 int i, j; 4038 4039 switch (hw->mac.type) { 4040 case ixgbe_mac_82598EB: 4041 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4042 vlnctrl &= ~IXGBE_VLNCTRL_VME; 4043 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4044 break; 4045 case ixgbe_mac_82599EB: 4046 case ixgbe_mac_X540: 4047 case ixgbe_mac_X550: 4048 case ixgbe_mac_X550EM_x: 4049 case ixgbe_mac_x550em_a: 4050 for (i = 0; i < adapter->num_rx_queues; i++) { 4051 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4052 4053 if (ring->l2_accel_priv) 4054 continue; 4055 j = ring->reg_idx; 4056 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 4057 vlnctrl &= ~IXGBE_RXDCTL_VME; 4058 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 4059 } 4060 break; 4061 default: 4062 break; 4063 } 4064 } 4065 4066 /** 4067 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping 4068 * @adapter: driver data 4069 */ 4070 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) 4071 { 4072 struct ixgbe_hw *hw = &adapter->hw; 4073 u32 vlnctrl; 4074 int i, j; 4075 4076 switch (hw->mac.type) { 4077 case ixgbe_mac_82598EB: 4078 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4079 vlnctrl |= IXGBE_VLNCTRL_VME; 4080 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4081 break; 4082 case ixgbe_mac_82599EB: 4083 case ixgbe_mac_X540: 4084 case ixgbe_mac_X550: 4085 case ixgbe_mac_X550EM_x: 4086 case ixgbe_mac_x550em_a: 4087 for (i = 0; i < adapter->num_rx_queues; i++) { 4088 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4089 4090 if (ring->l2_accel_priv) 4091 continue; 4092 j = ring->reg_idx; 4093 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 4094 vlnctrl |= IXGBE_RXDCTL_VME; 4095 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 4096 } 4097 break; 4098 default: 4099 break; 4100 } 4101 } 4102 4103 static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) 4104 { 4105 struct ixgbe_hw *hw = &adapter->hw; 4106 u32 vlnctrl, i; 4107 4108 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4109 4110 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { 4111 /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ 
4112 vlnctrl |= IXGBE_VLNCTRL_VFE; 4113 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4114 } else { 4115 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 4116 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4117 return; 4118 } 4119 4120 /* Nothing to do for 82598 */ 4121 if (hw->mac.type == ixgbe_mac_82598EB) 4122 return; 4123 4124 /* We are already in VLAN promisc, nothing to do */ 4125 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) 4126 return; 4127 4128 /* Set flag so we don't redo unnecessary work */ 4129 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; 4130 4131 /* Add PF to all active pools */ 4132 for (i = IXGBE_VLVF_ENTRIES; --i;) { 4133 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); 4134 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); 4135 4136 vlvfb |= BIT(VMDQ_P(0) % 32); 4137 IXGBE_WRITE_REG(hw, reg_offset, vlvfb); 4138 } 4139 4140 /* Set all bits in the VLAN filter table array */ 4141 for (i = hw->mac.vft_size; i--;) 4142 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); 4143 } 4144 4145 #define VFTA_BLOCK_SIZE 8 4146 static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) 4147 { 4148 struct ixgbe_hw *hw = &adapter->hw; 4149 u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; 4150 u32 vid_start = vfta_offset * 32; 4151 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); 4152 u32 i, vid, word, bits; 4153 4154 for (i = IXGBE_VLVF_ENTRIES; --i;) { 4155 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); 4156 4157 /* pull VLAN ID from VLVF */ 4158 vid = vlvf & VLAN_VID_MASK; 4159 4160 /* only concern ourselves with a certain range */ 4161 if (vid < vid_start || vid >= vid_end) 4162 continue; 4163 4164 if (vlvf) { 4165 /* record VLAN ID in VFTA */ 4166 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); 4167 4168 /* if PF is part of this then continue */ 4169 if (test_bit(vid, adapter->active_vlans)) 4170 continue; 4171 } 4172 4173 /* remove PF from the pool */ 4174 word = i * 2 + VMDQ_P(0) / 32; 4175 bits = ~BIT(VMDQ_P(0) % 32); 4176 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); 4177 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); 4178 } 4179 4180 /* extract values from active_vlans and write back to VFTA */ 4181 for (i = VFTA_BLOCK_SIZE; i--;) { 4182 vid = (vfta_offset + i) * 32; 4183 word = vid / BITS_PER_LONG; 4184 bits = vid % BITS_PER_LONG; 4185 4186 vfta[i] |= adapter->active_vlans[word] >> bits; 4187 4188 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); 4189 } 4190 } 4191 4192 static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) 4193 { 4194 struct ixgbe_hw *hw = &adapter->hw; 4195 u32 vlnctrl, i; 4196 4197 /* Enable VLAN filtering */ 4198 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4199 vlnctrl |= IXGBE_VLNCTRL_VFE; 4200 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4201 4202 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || 4203 hw->mac.type == ixgbe_mac_82598EB) 4204 return; 4205 4206 /* We are not in VLAN promisc, nothing to do */ 4207 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 4208 return; 4209 4210 /* Set flag so we don't redo unnecessary work */ 4211 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; 4212 4213 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) 4214 ixgbe_scrub_vfta(adapter, i); 4215 } 4216 4217 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 4218 { 4219 u16 vid = 1; 4220 4221 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); 4222 4223 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) 4224 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 4225 } 4226 4227 /** 4228 * ixgbe_write_mc_addr_list -
write multicast addresses to MTA 4229 * @netdev: network interface device structure 4230 * 4231 * Writes multicast address list to the MTA hash table. 4232 * Returns: -ENOMEM on failure 4233 * 0 on no addresses written 4234 * X on writing X addresses to MTA 4235 **/ 4236 static int ixgbe_write_mc_addr_list(struct net_device *netdev) 4237 { 4238 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4239 struct ixgbe_hw *hw = &adapter->hw; 4240 4241 if (!netif_running(netdev)) 4242 return 0; 4243 4244 if (hw->mac.ops.update_mc_addr_list) 4245 hw->mac.ops.update_mc_addr_list(hw, netdev); 4246 else 4247 return -ENOMEM; 4248 4249 #ifdef CONFIG_PCI_IOV 4250 ixgbe_restore_vf_multicasts(adapter); 4251 #endif 4252 4253 return netdev_mc_count(netdev); 4254 } 4255 4256 #ifdef CONFIG_PCI_IOV 4257 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) 4258 { 4259 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4260 struct ixgbe_hw *hw = &adapter->hw; 4261 int i; 4262 4263 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4264 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; 4265 4266 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) 4267 hw->mac.ops.set_rar(hw, i, 4268 mac_table->addr, 4269 mac_table->pool, 4270 IXGBE_RAH_AV); 4271 else 4272 hw->mac.ops.clear_rar(hw, i); 4273 } 4274 } 4275 4276 #endif 4277 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) 4278 { 4279 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4280 struct ixgbe_hw *hw = &adapter->hw; 4281 int i; 4282 4283 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4284 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) 4285 continue; 4286 4287 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; 4288 4289 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) 4290 hw->mac.ops.set_rar(hw, i, 4291 mac_table->addr, 4292 mac_table->pool, 4293 IXGBE_RAH_AV); 4294 else 4295 hw->mac.ops.clear_rar(hw, i); 4296 } 4297 } 4298 4299 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) 4300 { 4301 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4302 struct ixgbe_hw *hw = &adapter->hw; 4303 int i; 4304 4305 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4306 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; 4307 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; 4308 } 4309 4310 ixgbe_sync_mac_table(adapter); 4311 } 4312 4313 static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) 4314 { 4315 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4316 struct ixgbe_hw *hw = &adapter->hw; 4317 int i, count = 0; 4318 4319 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4320 /* do not count default RAR as available */ 4321 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) 4322 continue; 4323 4324 /* only count unused and addresses that belong to us */ 4325 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { 4326 if (mac_table->pool != pool) 4327 continue; 4328 } 4329 4330 count++; 4331 } 4332 4333 return count; 4334 } 4335 4336 /* this function destroys the first RAR entry */ 4337 static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) 4338 { 4339 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4340 struct ixgbe_hw *hw = &adapter->hw; 4341 4342 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); 4343 mac_table->pool = VMDQ_P(0); 4344 4345 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; 4346 4347 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, 4348 IXGBE_RAH_AV); 4349 } 4350 4351 int 
ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, 4352 const u8 *addr, u16 pool) 4353 { 4354 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4355 struct ixgbe_hw *hw = &adapter->hw; 4356 int i; 4357 4358 if (is_zero_ether_addr(addr)) 4359 return -EINVAL; 4360 4361 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4362 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) 4363 continue; 4364 4365 ether_addr_copy(mac_table->addr, addr); 4366 mac_table->pool = pool; 4367 4368 mac_table->state |= IXGBE_MAC_STATE_MODIFIED | 4369 IXGBE_MAC_STATE_IN_USE; 4370 4371 ixgbe_sync_mac_table(adapter); 4372 4373 return i; 4374 } 4375 4376 return -ENOMEM; 4377 } 4378 4379 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, 4380 const u8 *addr, u16 pool) 4381 { 4382 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4383 struct ixgbe_hw *hw = &adapter->hw; 4384 int i; 4385 4386 if (is_zero_ether_addr(addr)) 4387 return -EINVAL; 4388 4389 /* search table for addr, if found clear IN_USE flag and sync */ 4390 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4391 /* we can only delete an entry if it is in use */ 4392 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) 4393 continue; 4394 /* we only care about entries that belong to the given pool */ 4395 if (mac_table->pool != pool) 4396 continue; 4397 /* we only care about a specific MAC address */ 4398 if (!ether_addr_equal(addr, mac_table->addr)) 4399 continue; 4400 4401 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; 4402 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; 4403 4404 ixgbe_sync_mac_table(adapter); 4405 4406 return 0; 4407 } 4408 4409 return -ENOMEM; 4410 } 4411 /** 4412 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table 4413 * @netdev: network interface device structure 4414 * 4415 * Writes unicast address list to the RAR table. 4416 * Returns: -ENOMEM on failure/insufficient address space 4417 * 0 on no addresses written 4418 * X on writing X addresses to the RAR table 4419 **/ 4420 static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) 4421 { 4422 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4423 int count = 0; 4424 4425 /* return ENOMEM indicating insufficient memory for addresses */ 4426 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) 4427 return -ENOMEM; 4428 4429 if (!netdev_uc_empty(netdev)) { 4430 struct netdev_hw_addr *ha; 4431 netdev_for_each_uc_addr(ha, netdev) { 4432 ixgbe_del_mac_filter(adapter, ha->addr, vfn); 4433 ixgbe_add_mac_filter(adapter, ha->addr, vfn); 4434 count++; 4435 } 4436 } 4437 return count; 4438 } 4439 4440 static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) 4441 { 4442 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4443 int ret; 4444 4445 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); 4446 4447 return min_t(int, ret, 0); 4448 } 4449 4450 static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) 4451 { 4452 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4453 4454 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); 4455 4456 return 0; 4457 } 4458 4459 /** 4460 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 4461 * @netdev: network interface device structure 4462 * 4463 * The set_rx_method entry point is called whenever the unicast/multicast 4464 * address list or the network interface flags are updated. This routine is 4465 * responsible for configuring the hardware for proper unicast, multicast and 4466 * promiscuous mode. 
4467 **/ 4468 void ixgbe_set_rx_mode(struct net_device *netdev) 4469 { 4470 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4471 struct ixgbe_hw *hw = &adapter->hw; 4472 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; 4473 netdev_features_t features = netdev->features; 4474 int count; 4475 4476 /* Check for Promiscuous and All Multicast modes */ 4477 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4478 4479 /* set all bits that we expect to always be set */ 4480 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ 4481 fctrl |= IXGBE_FCTRL_BAM; 4482 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 4483 fctrl |= IXGBE_FCTRL_PMCF; 4484 4485 /* clear the bits we are changing the status of */ 4486 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4487 if (netdev->flags & IFF_PROMISC) { 4488 hw->addr_ctrl.user_set_promisc = true; 4489 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4490 vmolr |= IXGBE_VMOLR_MPE; 4491 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4492 } else { 4493 if (netdev->flags & IFF_ALLMULTI) { 4494 fctrl |= IXGBE_FCTRL_MPE; 4495 vmolr |= IXGBE_VMOLR_MPE; 4496 } 4497 hw->addr_ctrl.user_set_promisc = false; 4498 } 4499 4500 /* 4501 * Write addresses to available RAR registers, if there is not 4502 * sufficient space to store all the addresses then enable 4503 * unicast promiscuous mode 4504 */ 4505 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { 4506 fctrl |= IXGBE_FCTRL_UPE; 4507 vmolr |= IXGBE_VMOLR_ROPE; 4508 } 4509 4510 /* Write addresses to the MTA, if the attempt fails 4511 * then we should just turn on promiscuous mode so 4512 * that we can at least receive multicast traffic 4513 */ 4514 count = ixgbe_write_mc_addr_list(netdev); 4515 if (count < 0) { 4516 fctrl |= IXGBE_FCTRL_MPE; 4517 vmolr |= IXGBE_VMOLR_MPE; 4518 } else if (count) { 4519 vmolr |= IXGBE_VMOLR_ROMPE; 4520 } 4521 4522 if (hw->mac.type != ixgbe_mac_82598EB) { 4523 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & 4524 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | 4525 IXGBE_VMOLR_ROPE); 4526 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); 4527 } 4528 4529 /* This is useful for sniffing bad packets. 
*/ 4530 if (features & NETIF_F_RXALL) { 4531 /* UPE and MPE will be handled by normal PROMISC logic 4532 * above in ixgbe_set_rx_mode */ 4533 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ 4534 IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ 4535 IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ 4536 4537 fctrl &= ~(IXGBE_FCTRL_DPF); 4538 /* NOTE: VLAN filtering is disabled by setting PROMISC */ 4539 } 4540 4541 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4542 4543 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4544 ixgbe_vlan_strip_enable(adapter); 4545 else 4546 ixgbe_vlan_strip_disable(adapter); 4547 4548 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 4549 ixgbe_vlan_promisc_disable(adapter); 4550 else 4551 ixgbe_vlan_promisc_enable(adapter); 4552 } 4553 4554 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 4555 { 4556 int q_idx; 4557 4558 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { 4559 ixgbe_qv_init_lock(adapter->q_vector[q_idx]); 4560 napi_enable(&adapter->q_vector[q_idx]->napi); 4561 } 4562 } 4563 4564 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) 4565 { 4566 int q_idx; 4567 4568 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { 4569 napi_disable(&adapter->q_vector[q_idx]->napi); 4570 while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { 4571 pr_info("QV %d locked\n", q_idx); 4572 usleep_range(1000, 20000); 4573 } 4574 } 4575 } 4576 4577 static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) 4578 { 4579 struct ixgbe_hw *hw = &adapter->hw; 4580 u32 vxlanctrl; 4581 4582 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE | 4583 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) 4584 return; 4585 4586 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask; 4587 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); 4588 4589 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) 4590 adapter->vxlan_port = 0; 4591 4592 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) 4593 adapter->geneve_port = 0; 4594 } 4595 4596 #ifdef CONFIG_IXGBE_DCB 4597 /** 4598 * ixgbe_configure_dcb - Configure DCB hardware 4599 * @adapter: ixgbe adapter struct 4600 * 4601 * This is called by the driver on open to configure the DCB hardware. 4602 * This is also called by the gennetlink interface when reconfiguring 4603 * the DCB state.
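 *
 * It adjusts gso_max_size on 82598, programs the per-TC credits (CEE) or the
 * ETS and PFC parameters (IEEE), and on later MACs writes the per-TC RSS
 * queue count to RQTC.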
4604 */ 4605 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 4606 { 4607 struct ixgbe_hw *hw = &adapter->hw; 4608 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 4609 4610 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { 4611 if (hw->mac.type == ixgbe_mac_82598EB) 4612 netif_set_gso_max_size(adapter->netdev, 65536); 4613 return; 4614 } 4615 4616 if (hw->mac.type == ixgbe_mac_82598EB) 4617 netif_set_gso_max_size(adapter->netdev, 32768); 4618 4619 #ifdef IXGBE_FCOE 4620 if (adapter->netdev->features & NETIF_F_FCOE_MTU) 4621 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 4622 #endif 4623 4624 /* reconfigure the hardware */ 4625 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { 4626 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, 4627 DCB_TX_CONFIG); 4628 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, 4629 DCB_RX_CONFIG); 4630 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); 4631 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { 4632 ixgbe_dcb_hw_ets(&adapter->hw, 4633 adapter->ixgbe_ieee_ets, 4634 max_frame); 4635 ixgbe_dcb_hw_pfc_config(&adapter->hw, 4636 adapter->ixgbe_ieee_pfc->pfc_en, 4637 adapter->ixgbe_ieee_ets->prio_tc); 4638 } 4639 4640 /* Enable RSS Hash per TC */ 4641 if (hw->mac.type != ixgbe_mac_82598EB) { 4642 u32 msb = 0; 4643 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; 4644 4645 while (rss_i) { 4646 msb++; 4647 rss_i >>= 1; 4648 } 4649 4650 /* write msb to all 8 TCs in one write */ 4651 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); 4652 } 4653 } 4654 #endif 4655 4656 /* Additional bittime to account for IXGBE framing */ 4657 #define IXGBE_ETH_FRAMING 20 4658 4659 /** 4660 * ixgbe_hpbthresh - calculate high water mark for flow control 4661 * 4662 * @adapter: board private structure to calculate for 4663 * @pb: packet buffer to calculate 4664 */ 4665 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) 4666 { 4667 struct ixgbe_hw *hw = &adapter->hw; 4668 struct net_device *dev = adapter->netdev; 4669 int link, tc, kb, marker; 4670 u32 dv_id, rx_pba; 4671 4672 /* Calculate max LAN frame size */ 4673 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; 4674 4675 #ifdef IXGBE_FCOE 4676 /* FCoE traffic class uses FCOE jumbo frames */ 4677 if ((dev->features & NETIF_F_FCOE_MTU) && 4678 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && 4679 (pb == ixgbe_fcoe_get_tc(adapter))) 4680 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; 4681 #endif 4682 4683 /* Calculate delay value for device */ 4684 switch (hw->mac.type) { 4685 case ixgbe_mac_X540: 4686 case ixgbe_mac_X550: 4687 case ixgbe_mac_X550EM_x: 4688 case ixgbe_mac_x550em_a: 4689 dv_id = IXGBE_DV_X540(link, tc); 4690 break; 4691 default: 4692 dv_id = IXGBE_DV(link, tc); 4693 break; 4694 } 4695 4696 /* Loopback switch introduces additional latency */ 4697 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 4698 dv_id += IXGBE_B2BT(tc); 4699 4700 /* Delay value is calculated in bit times, convert to KB */ 4701 kb = IXGBE_BT2KB(dv_id); 4702 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10; 4703 4704 marker = rx_pba - kb; 4705 4706 /* It is possible that the packet buffer is not large enough 4707 * to provide the required headroom. In this case log a warning 4708 * to the user and do the best we can. 4709 */ 4710 if (marker < 0) { 4711 e_warn(drv, "Packet Buffer(%i) cannot provide enough " 4712 "headroom to support flow control. "
"Decrease MTU or number of traffic classes\n", pb); 4714 marker = tc + 1; 4715 } 4716 4717 return marker; 4718 } 4719 4720 /** 4721 * ixgbe_lpbthresh - calculate low water mark for flow control 4722 * 4723 * @adapter: board private structure to calculate for 4724 * @pb: packet buffer to calculate 4725 */ 4726 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) 4727 { 4728 struct ixgbe_hw *hw = &adapter->hw; 4729 struct net_device *dev = adapter->netdev; 4730 int tc; 4731 u32 dv_id; 4732 4733 /* Calculate max LAN frame size */ 4734 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; 4735 4736 #ifdef IXGBE_FCOE 4737 /* FCoE traffic class uses FCOE jumbo frames */ 4738 if ((dev->features & NETIF_F_FCOE_MTU) && 4739 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && 4740 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) 4741 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; 4742 #endif 4743 4744 /* Calculate delay value for device */ 4745 switch (hw->mac.type) { 4746 case ixgbe_mac_X540: 4747 case ixgbe_mac_X550: 4748 case ixgbe_mac_X550EM_x: 4749 case ixgbe_mac_x550em_a: 4750 dv_id = IXGBE_LOW_DV_X540(tc); 4751 break; 4752 default: 4753 dv_id = IXGBE_LOW_DV(tc); 4754 break; 4755 } 4756 4757 /* Delay value is calculated in bit times, convert to KB */ 4758 return IXGBE_BT2KB(dv_id); 4759 } 4760 4761 /* 4762 * ixgbe_pbthresh_setup - calculate and set up the high and low water marks 4763 */ 4764 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) 4765 { 4766 struct ixgbe_hw *hw = &adapter->hw; 4767 int num_tc = netdev_get_num_tc(adapter->netdev); 4768 int i; 4769 4770 if (!num_tc) 4771 num_tc = 1; 4772 4773 for (i = 0; i < num_tc; i++) { 4774 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); 4775 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); 4776 4777 /* Low water marks must not be larger than high water marks */ 4778 if (hw->fc.low_water[i] > hw->fc.high_water[i]) 4779 hw->fc.low_water[i] = 0; 4780 } 4781 4782 for (; i < MAX_TRAFFIC_CLASS; i++) 4783 hw->fc.high_water[i] = 0; 4784 } 4785 4786 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) 4787 { 4788 struct ixgbe_hw *hw = &adapter->hw; 4789 int hdrm; 4790 u8 tc = netdev_get_num_tc(adapter->netdev); 4791 4792 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 4793 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 4794 hdrm = 32 << adapter->fdir_pballoc; 4795 else 4796 hdrm = 0; 4797 4798 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); 4799 ixgbe_pbthresh_setup(adapter); 4800 } 4801 4802 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) 4803 { 4804 struct ixgbe_hw *hw = &adapter->hw; 4805 struct hlist_node *node2; 4806 struct ixgbe_fdir_filter *filter; 4807 4808 spin_lock(&adapter->fdir_perfect_lock); 4809 4810 if (!hlist_empty(&adapter->fdir_filter_list)) 4811 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); 4812 4813 hlist_for_each_entry_safe(filter, node2, 4814 &adapter->fdir_filter_list, fdir_node) { 4815 ixgbe_fdir_write_perfect_filter_82599(hw, 4816 &filter->filter, 4817 filter->sw_idx, 4818 (filter->action == IXGBE_FDIR_DROP_QUEUE) ? 4819 IXGBE_FDIR_DROP_QUEUE : 4820 adapter->rx_ring[filter->action]->reg_idx); 4821 } 4822 4823 spin_unlock(&adapter->fdir_perfect_lock); 4824 } 4825 4826 static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, 4827 struct ixgbe_adapter *adapter) 4828 { 4829 struct ixgbe_hw *hw = &adapter->hw; 4830 u32 vmolr; 4831 4832 /* No unicast promiscuous support for VMDQ devices.
*/ 4833 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); 4834 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); 4835 4836 /* clear the affected bit */ 4837 vmolr &= ~IXGBE_VMOLR_MPE; 4838 4839 if (dev->flags & IFF_ALLMULTI) { 4840 vmolr |= IXGBE_VMOLR_MPE; 4841 } else { 4842 vmolr |= IXGBE_VMOLR_ROMPE; 4843 hw->mac.ops.update_mc_addr_list(hw, dev); 4844 } 4845 ixgbe_write_uc_addr_list(adapter->netdev, pool); 4846 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 4847 } 4848 4849 static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) 4850 { 4851 struct ixgbe_adapter *adapter = vadapter->real_adapter; 4852 int rss_i = adapter->num_rx_queues_per_pool; 4853 struct ixgbe_hw *hw = &adapter->hw; 4854 u16 pool = vadapter->pool; 4855 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 4856 IXGBE_PSRTYPE_UDPHDR | 4857 IXGBE_PSRTYPE_IPV4HDR | 4858 IXGBE_PSRTYPE_L2HDR | 4859 IXGBE_PSRTYPE_IPV6HDR; 4860 4861 if (hw->mac.type == ixgbe_mac_82598EB) 4862 return; 4863 4864 if (rss_i > 3) 4865 psrtype |= 2u << 29; 4866 else if (rss_i > 1) 4867 psrtype |= 1u << 29; 4868 4869 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); 4870 } 4871 4872 /** 4873 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 4874 * @rx_ring: ring to free buffers from 4875 **/ 4876 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) 4877 { 4878 struct device *dev = rx_ring->dev; 4879 unsigned long size; 4880 u16 i; 4881 4882 /* ring already cleared, nothing to do */ 4883 if (!rx_ring->rx_buffer_info) 4884 return; 4885 4886 /* Free all the Rx ring sk_buffs */ 4887 for (i = 0; i < rx_ring->count; i++) { 4888 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; 4889 4890 if (rx_buffer->skb) { 4891 struct sk_buff *skb = rx_buffer->skb; 4892 if (IXGBE_CB(skb)->page_released) 4893 dma_unmap_page(dev, 4894 IXGBE_CB(skb)->dma, 4895 ixgbe_rx_bufsz(rx_ring), 4896 DMA_FROM_DEVICE); 4897 dev_kfree_skb(skb); 4898 rx_buffer->skb = NULL; 4899 } 4900 4901 if (!rx_buffer->page) 4902 continue; 4903 4904 dma_unmap_page(dev, rx_buffer->dma, 4905 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); 4906 __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); 4907 4908 rx_buffer->page = NULL; 4909 } 4910 4911 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 4912 memset(rx_ring->rx_buffer_info, 0, size); 4913 4914 /* Zero out the descriptor ring */ 4915 memset(rx_ring->desc, 0, rx_ring->size); 4916 4917 rx_ring->next_to_alloc = 0; 4918 rx_ring->next_to_clean = 0; 4919 rx_ring->next_to_use = 0; 4920 } 4921 4922 static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, 4923 struct ixgbe_ring *rx_ring) 4924 { 4925 struct ixgbe_adapter *adapter = vadapter->real_adapter; 4926 int index = rx_ring->queue_index + vadapter->rx_base_queue; 4927 4928 /* shutdown specific queue receive and wait for dma to settle */ 4929 ixgbe_disable_rx_queue(adapter, rx_ring); 4930 usleep_range(10000, 20000); 4931 ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); 4932 ixgbe_clean_rx_ring(rx_ring); 4933 rx_ring->l2_accel_priv = NULL; 4934 } 4935 4936 static int ixgbe_fwd_ring_down(struct net_device *vdev, 4937 struct ixgbe_fwd_adapter *accel) 4938 { 4939 struct ixgbe_adapter *adapter = accel->real_adapter; 4940 unsigned int rxbase = accel->rx_base_queue; 4941 unsigned int txbase = accel->tx_base_queue; 4942 int i; 4943 4944 netif_tx_stop_all_queues(vdev); 4945 4946 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4947 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); 4948 adapter->rx_ring[rxbase + i]->netdev = 
adapter->netdev; 4949 } 4950 4951 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4952 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; 4953 adapter->tx_ring[txbase + i]->netdev = adapter->netdev; 4954 } 4955 4956 4957 return 0; 4958 } 4959 4960 static int ixgbe_fwd_ring_up(struct net_device *vdev, 4961 struct ixgbe_fwd_adapter *accel) 4962 { 4963 struct ixgbe_adapter *adapter = accel->real_adapter; 4964 unsigned int rxbase, txbase, queues; 4965 int i, baseq, err = 0; 4966 4967 if (!test_bit(accel->pool, &adapter->fwd_bitmask)) 4968 return 0; 4969 4970 baseq = accel->pool * adapter->num_rx_queues_per_pool; 4971 netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", 4972 accel->pool, adapter->num_rx_pools, 4973 baseq, baseq + adapter->num_rx_queues_per_pool, 4974 adapter->fwd_bitmask); 4975 4976 accel->netdev = vdev; 4977 accel->rx_base_queue = rxbase = baseq; 4978 accel->tx_base_queue = txbase = baseq; 4979 4980 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) 4981 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); 4982 4983 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4984 adapter->rx_ring[rxbase + i]->netdev = vdev; 4985 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; 4986 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); 4987 } 4988 4989 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 4990 adapter->tx_ring[txbase + i]->netdev = vdev; 4991 adapter->tx_ring[txbase + i]->l2_accel_priv = accel; 4992 } 4993 4994 queues = min_t(unsigned int, 4995 adapter->num_rx_queues_per_pool, vdev->num_tx_queues); 4996 err = netif_set_real_num_tx_queues(vdev, queues); 4997 if (err) 4998 goto fwd_queue_err; 4999 5000 err = netif_set_real_num_rx_queues(vdev, queues); 5001 if (err) 5002 goto fwd_queue_err; 5003 5004 if (is_valid_ether_addr(vdev->dev_addr)) 5005 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); 5006 5007 ixgbe_fwd_psrtype(accel); 5008 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); 5009 return err; 5010 fwd_queue_err: 5011 ixgbe_fwd_ring_down(vdev, accel); 5012 return err; 5013 } 5014 5015 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) 5016 { 5017 struct net_device *upper; 5018 struct list_head *iter; 5019 int err; 5020 5021 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { 5022 if (netif_is_macvlan(upper)) { 5023 struct macvlan_dev *dfwd = netdev_priv(upper); 5024 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; 5025 5026 if (dfwd->fwd_priv) { 5027 err = ixgbe_fwd_ring_up(upper, vadapter); 5028 if (err) 5029 continue; 5030 } 5031 } 5032 } 5033 } 5034 5035 static void ixgbe_configure(struct ixgbe_adapter *adapter) 5036 { 5037 struct ixgbe_hw *hw = &adapter->hw; 5038 5039 ixgbe_configure_pb(adapter); 5040 #ifdef CONFIG_IXGBE_DCB 5041 ixgbe_configure_dcb(adapter); 5042 #endif 5043 /* 5044 * We must restore virtualization before VLANs or else 5045 * the VLVF registers will not be populated 5046 */ 5047 ixgbe_configure_virtualization(adapter); 5048 5049 ixgbe_set_rx_mode(adapter->netdev); 5050 ixgbe_restore_vlan(adapter); 5051 5052 switch (hw->mac.type) { 5053 case ixgbe_mac_82599EB: 5054 case ixgbe_mac_X540: 5055 hw->mac.ops.disable_rx_buff(hw); 5056 break; 5057 default: 5058 break; 5059 } 5060 5061 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 5062 ixgbe_init_fdir_signature_82599(&adapter->hw, 5063 adapter->fdir_pballoc); 5064 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 5065 ixgbe_init_fdir_perfect_82599(&adapter->hw, 5066 adapter->fdir_pballoc); 
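/* re-program the perfect filters saved in adapter->fdir_filter_list
 * (added earlier, e.g. via ethtool ntuple rules) now that the mask is set
 */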
5067 ixgbe_fdir_filter_restore(adapter); 5068 } 5069 5070 switch (hw->mac.type) { 5071 case ixgbe_mac_82599EB: 5072 case ixgbe_mac_X540: 5073 hw->mac.ops.enable_rx_buff(hw); 5074 break; 5075 default: 5076 break; 5077 } 5078 5079 #ifdef CONFIG_IXGBE_DCA 5080 /* configure DCA */ 5081 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) 5082 ixgbe_setup_dca(adapter); 5083 #endif /* CONFIG_IXGBE_DCA */ 5084 5085 #ifdef IXGBE_FCOE 5086 /* configure FCoE L2 filters, redirection table, and Rx control */ 5087 ixgbe_configure_fcoe(adapter); 5088 5089 #endif /* IXGBE_FCOE */ 5090 ixgbe_configure_tx(adapter); 5091 ixgbe_configure_rx(adapter); 5092 ixgbe_configure_dfwd(adapter); 5093 } 5094 5095 /** 5096 * ixgbe_sfp_link_config - set up SFP+ link 5097 * @adapter: pointer to private adapter struct 5098 **/ 5099 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) 5100 { 5101 /* 5102 * We are assuming the worst case scenario here, and that 5103 * is that an SFP was inserted/removed after the reset 5104 * but before SFP detection was enabled. As such the best 5105 * solution is to just start searching as soon as we start 5106 */ 5107 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 5108 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 5109 5110 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; 5111 adapter->sfp_poll_time = 0; 5112 } 5113 5114 /** 5115 * ixgbe_non_sfp_link_config - set up non-SFP+ link 5116 * @hw: pointer to private hardware struct 5117 * 5118 * Returns 0 on success, negative on failure 5119 **/ 5120 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) 5121 { 5122 u32 speed; 5123 bool autoneg, link_up = false; 5124 int ret = IXGBE_ERR_LINK_SETUP; 5125 5126 if (hw->mac.ops.check_link) 5127 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); 5128 5129 if (ret) 5130 return ret; 5131 5132 speed = hw->phy.autoneg_advertised; 5133 if ((!speed) && (hw->mac.ops.get_link_capabilities)) 5134 ret = hw->mac.ops.get_link_capabilities(hw, &speed, 5135 &autoneg); 5136 if (ret) 5137 return ret; 5138 5139 if (hw->mac.ops.setup_link) 5140 ret = hw->mac.ops.setup_link(hw, speed, link_up); 5141 5142 return ret; 5143 } 5144 5145 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) 5146 { 5147 struct ixgbe_hw *hw = &adapter->hw; 5148 u32 gpie = 0; 5149 5150 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 5151 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 5152 IXGBE_GPIE_OCD; 5153 gpie |= IXGBE_GPIE_EIAME; 5154 /* 5155 * use EIAM to auto-mask when MSI-X interrupt is asserted 5156 * this saves a register write for every interrupt 5157 */ 5158 switch (hw->mac.type) { 5159 case ixgbe_mac_82598EB: 5160 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5161 break; 5162 case ixgbe_mac_82599EB: 5163 case ixgbe_mac_X540: 5164 case ixgbe_mac_X550: 5165 case ixgbe_mac_X550EM_x: 5166 case ixgbe_mac_x550em_a: 5167 default: 5168 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5169 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5170 break; 5171 } 5172 } else { 5173 /* legacy interrupts, use EIAM to auto-mask when reading EICR, 5174 * specifically only auto mask tx and rx interrupts */ 5175 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5176 } 5177 5178 /* XXX: to interrupt immediately for EICS writes, enable this */ 5179 /* gpie |= IXGBE_GPIE_EIMEN; */ 5180 5181 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 5182 gpie &= ~IXGBE_GPIE_VTMODE_MASK; 5183 5184 switch (adapter->ring_feature[RING_F_VMDQ].mask) { 5185 case IXGBE_82599_VMDQ_8Q_MASK: 5186 gpie |= IXGBE_GPIE_VTMODE_16; 
5187 break; 5188 case IXGBE_82599_VMDQ_4Q_MASK: 5189 gpie |= IXGBE_GPIE_VTMODE_32; 5190 break; 5191 default: 5192 gpie |= IXGBE_GPIE_VTMODE_64; 5193 break; 5194 } 5195 } 5196 5197 /* Enable Thermal over heat sensor interrupt */ 5198 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 5199 switch (adapter->hw.mac.type) { 5200 case ixgbe_mac_82599EB: 5201 gpie |= IXGBE_SDP0_GPIEN_8259X; 5202 break; 5203 default: 5204 break; 5205 } 5206 } 5207 5208 /* Enable fan failure interrupt */ 5209 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 5210 gpie |= IXGBE_SDP1_GPIEN(hw); 5211 5212 switch (hw->mac.type) { 5213 case ixgbe_mac_82599EB: 5214 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; 5215 break; 5216 case ixgbe_mac_X550EM_x: 5217 case ixgbe_mac_x550em_a: 5218 gpie |= IXGBE_SDP0_GPIEN_X540; 5219 break; 5220 default: 5221 break; 5222 } 5223 5224 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 5225 } 5226 5227 static void ixgbe_up_complete(struct ixgbe_adapter *adapter) 5228 { 5229 struct ixgbe_hw *hw = &adapter->hw; 5230 int err; 5231 u32 ctrl_ext; 5232 5233 ixgbe_get_hw_control(adapter); 5234 ixgbe_setup_gpie(adapter); 5235 5236 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 5237 ixgbe_configure_msix(adapter); 5238 else 5239 ixgbe_configure_msi_and_legacy(adapter); 5240 5241 /* enable the optics for 82599 SFP+ fiber */ 5242 if (hw->mac.ops.enable_tx_laser) 5243 hw->mac.ops.enable_tx_laser(hw); 5244 5245 if (hw->phy.ops.set_phy_power) 5246 hw->phy.ops.set_phy_power(hw, true); 5247 5248 smp_mb__before_atomic(); 5249 clear_bit(__IXGBE_DOWN, &adapter->state); 5250 ixgbe_napi_enable_all(adapter); 5251 5252 if (ixgbe_is_sfp(hw)) { 5253 ixgbe_sfp_link_config(adapter); 5254 } else { 5255 err = ixgbe_non_sfp_link_config(hw); 5256 if (err) 5257 e_err(probe, "link_config FAILED %d\n", err); 5258 } 5259 5260 /* clear any pending interrupts, may auto mask */ 5261 IXGBE_READ_REG(hw, IXGBE_EICR); 5262 ixgbe_irq_enable(adapter, true, true); 5263 5264 /* 5265 * If this adapter has a fan, check to see if we had a failure 5266 * before we enabled the interrupt. 5267 */ 5268 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 5269 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 5270 if (esdp & IXGBE_ESDP_SDP1) 5271 e_crit(drv, "Fan has stopped, replace the adapter\n"); 5272 } 5273 5274 /* bring the link up in the watchdog, this could race with our first 5275 * link up interrupt but shouldn't be a problem */ 5276 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 5277 adapter->link_check_timeout = jiffies; 5278 mod_timer(&adapter->service_timer, jiffies); 5279 5280 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 5281 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5282 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 5283 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5284 } 5285 5286 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) 5287 { 5288 WARN_ON(in_interrupt()); 5289 /* put off any impending NetWatchDogTimeout */ 5290 netif_trans_update(adapter->netdev); 5291 5292 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 5293 usleep_range(1000, 2000); 5294 ixgbe_down(adapter); 5295 /* 5296 * If SR-IOV enabled then wait a bit before bringing the adapter 5297 * back up to give the VFs time to respond to the reset. The 5298 * two second wait is based upon the watchdog timer cycle in 5299 * the VF driver. 
5300 */ 5301 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 5302 msleep(2000); 5303 ixgbe_up(adapter); 5304 clear_bit(__IXGBE_RESETTING, &adapter->state); 5305 } 5306 5307 void ixgbe_up(struct ixgbe_adapter *adapter) 5308 { 5309 /* hardware has been reset, we need to reload some things */ 5310 ixgbe_configure(adapter); 5311 5312 ixgbe_up_complete(adapter); 5313 } 5314 5315 void ixgbe_reset(struct ixgbe_adapter *adapter) 5316 { 5317 struct ixgbe_hw *hw = &adapter->hw; 5318 struct net_device *netdev = adapter->netdev; 5319 int err; 5320 5321 if (ixgbe_removed(hw->hw_addr)) 5322 return; 5323 /* lock SFP init bit to prevent race conditions with the watchdog */ 5324 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 5325 usleep_range(1000, 2000); 5326 5327 /* clear all SFP and link config related flags while holding SFP_INIT */ 5328 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | 5329 IXGBE_FLAG2_SFP_NEEDS_RESET); 5330 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 5331 5332 err = hw->mac.ops.init_hw(hw); 5333 switch (err) { 5334 case 0: 5335 case IXGBE_ERR_SFP_NOT_PRESENT: 5336 case IXGBE_ERR_SFP_NOT_SUPPORTED: 5337 break; 5338 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 5339 e_dev_err("master disable timed out\n"); 5340 break; 5341 case IXGBE_ERR_EEPROM_VERSION: 5342 /* We are running on a pre-production device, log a warning */ 5343 e_dev_warn("This device is a pre-production adapter/LOM. " 5344 "Please be aware there may be issues associated with " 5345 "your hardware. If you are experiencing problems " 5346 "please contact your Intel or hardware " 5347 "representative who provided you with this " 5348 "hardware.\n"); 5349 break; 5350 default: 5351 e_dev_err("Hardware Error: %d\n", err); 5352 } 5353 5354 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 5355 5356 /* flush entries out of MAC table */ 5357 ixgbe_flush_sw_mac_table(adapter); 5358 __dev_uc_unsync(netdev, NULL); 5359 5360 /* do not flush user set addresses */ 5361 ixgbe_mac_set_default_filter(adapter); 5362 5363 /* update SAN MAC vmdq pool selection */ 5364 if (hw->mac.san_mac_rar_index) 5365 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 5366 5367 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 5368 ixgbe_ptp_reset(adapter); 5369 5370 if (hw->phy.ops.set_phy_power) { 5371 if (!netif_running(adapter->netdev) && !adapter->wol) 5372 hw->phy.ops.set_phy_power(hw, false); 5373 else 5374 hw->phy.ops.set_phy_power(hw, true); 5375 } 5376 } 5377 5378 /** 5379 * ixgbe_clean_tx_ring - Free Tx Buffers 5380 * @tx_ring: ring to be cleaned 5381 **/ 5382 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) 5383 { 5384 struct ixgbe_tx_buffer *tx_buffer_info; 5385 unsigned long size; 5386 u16 i; 5387 5388 /* ring already cleared, nothing to do */ 5389 if (!tx_ring->tx_buffer_info) 5390 return; 5391 5392 /* Free all the Tx ring sk_buffs */ 5393 for (i = 0; i < tx_ring->count; i++) { 5394 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5395 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 5396 } 5397 5398 netdev_tx_reset_queue(txring_txq(tx_ring)); 5399 5400 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 5401 memset(tx_ring->tx_buffer_info, 0, size); 5402 5403 /* Zero out the descriptor ring */ 5404 memset(tx_ring->desc, 0, tx_ring->size); 5405 5406 tx_ring->next_to_use = 0; 5407 tx_ring->next_to_clean = 0; 5408 } 5409 5410 /** 5411 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues 5412 * @adapter: board private structure 5413 **/ 5414 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) 
5415 { 5416 int i; 5417 5418 for (i = 0; i < adapter->num_rx_queues; i++) 5419 ixgbe_clean_rx_ring(adapter->rx_ring[i]); 5420 } 5421 5422 /** 5423 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues 5424 * @adapter: board private structure 5425 **/ 5426 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) 5427 { 5428 int i; 5429 5430 for (i = 0; i < adapter->num_tx_queues; i++) 5431 ixgbe_clean_tx_ring(adapter->tx_ring[i]); 5432 } 5433 5434 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) 5435 { 5436 struct hlist_node *node2; 5437 struct ixgbe_fdir_filter *filter; 5438 5439 spin_lock(&adapter->fdir_perfect_lock); 5440 5441 hlist_for_each_entry_safe(filter, node2, 5442 &adapter->fdir_filter_list, fdir_node) { 5443 hlist_del(&filter->fdir_node); 5444 kfree(filter); 5445 } 5446 adapter->fdir_filter_count = 0; 5447 5448 spin_unlock(&adapter->fdir_perfect_lock); 5449 } 5450 5451 void ixgbe_down(struct ixgbe_adapter *adapter) 5452 { 5453 struct net_device *netdev = adapter->netdev; 5454 struct ixgbe_hw *hw = &adapter->hw; 5455 struct net_device *upper; 5456 struct list_head *iter; 5457 int i; 5458 5459 /* signal that we are down to the interrupt handler */ 5460 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) 5461 return; /* do nothing if already down */ 5462 5463 /* disable receives */ 5464 hw->mac.ops.disable_rx(hw); 5465 5466 /* disable all enabled rx queues */ 5467 for (i = 0; i < adapter->num_rx_queues; i++) 5468 /* this call also flushes the previous write */ 5469 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); 5470 5471 usleep_range(10000, 20000); 5472 5473 netif_tx_stop_all_queues(netdev); 5474 5475 /* call carrier off first to avoid false dev_watchdog timeouts */ 5476 netif_carrier_off(netdev); 5477 netif_tx_disable(netdev); 5478 5479 /* disable any upper devices */ 5480 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { 5481 if (netif_is_macvlan(upper)) { 5482 struct macvlan_dev *vlan = netdev_priv(upper); 5483 5484 if (vlan->fwd_priv) { 5485 netif_tx_stop_all_queues(upper); 5486 netif_carrier_off(upper); 5487 netif_tx_disable(upper); 5488 } 5489 } 5490 } 5491 5492 ixgbe_irq_disable(adapter); 5493 5494 ixgbe_napi_disable_all(adapter); 5495 5496 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 5497 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 5498 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 5499 5500 del_timer_sync(&adapter->service_timer); 5501 5502 if (adapter->num_vfs) { 5503 /* Clear EITR Select mapping */ 5504 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); 5505 5506 /* Mark all the VFs as inactive */ 5507 for (i = 0 ; i < adapter->num_vfs; i++) 5508 adapter->vfinfo[i].clear_to_send = false; 5509 5510 /* ping all the active vfs to let them know we are going down */ 5511 ixgbe_ping_all_vfs(adapter); 5512 5513 /* Disable all VFTE/VFRE TX/RX */ 5514 ixgbe_disable_tx_rx(adapter); 5515 } 5516 5517 /* disable transmits in the hardware now that interrupts are off */ 5518 for (i = 0; i < adapter->num_tx_queues; i++) { 5519 u8 reg_idx = adapter->tx_ring[i]->reg_idx; 5520 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 5521 } 5522 5523 /* Disable the Tx DMA engine on 82599 and later MAC */ 5524 switch (hw->mac.type) { 5525 case ixgbe_mac_82599EB: 5526 case ixgbe_mac_X540: 5527 case ixgbe_mac_X550: 5528 case ixgbe_mac_X550EM_x: 5529 case ixgbe_mac_x550em_a: 5530 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 5531 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 5532 ~IXGBE_DMATXCTL_TE)); 5533 break; 5534 default: 
5535 break; 5536 } 5537 5538 if (!pci_channel_offline(adapter->pdev)) 5539 ixgbe_reset(adapter); 5540 5541 /* power down the optics for 82599 SFP+ fiber */ 5542 if (hw->mac.ops.disable_tx_laser) 5543 hw->mac.ops.disable_tx_laser(hw); 5544 5545 ixgbe_clean_all_tx_rings(adapter); 5546 ixgbe_clean_all_rx_rings(adapter); 5547 } 5548 5549 /** 5550 * ixgbe_tx_timeout - Respond to a Tx Hang 5551 * @netdev: network interface device structure 5552 **/ 5553 static void ixgbe_tx_timeout(struct net_device *netdev) 5554 { 5555 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5556 5557 /* Do the reset outside of interrupt context */ 5558 ixgbe_tx_timeout_reset(adapter); 5559 } 5560 5561 #ifdef CONFIG_IXGBE_DCB 5562 static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) 5563 { 5564 struct ixgbe_hw *hw = &adapter->hw; 5565 struct tc_configuration *tc; 5566 int j; 5567 5568 switch (hw->mac.type) { 5569 case ixgbe_mac_82598EB: 5570 case ixgbe_mac_82599EB: 5571 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; 5572 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; 5573 break; 5574 case ixgbe_mac_X540: 5575 case ixgbe_mac_X550: 5576 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; 5577 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; 5578 break; 5579 case ixgbe_mac_X550EM_x: 5580 case ixgbe_mac_x550em_a: 5581 default: 5582 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; 5583 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; 5584 break; 5585 } 5586 5587 /* Configure DCB traffic classes */ 5588 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { 5589 tc = &adapter->dcb_cfg.tc_config[j]; 5590 tc->path[DCB_TX_CONFIG].bwg_id = 0; 5591 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); 5592 tc->path[DCB_RX_CONFIG].bwg_id = 0; 5593 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); 5594 tc->dcb_pfc = pfc_disabled; 5595 } 5596 5597 /* Initialize default user to priority mapping, UPx->TC0 */ 5598 tc = &adapter->dcb_cfg.tc_config[0]; 5599 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 5600 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 5601 5602 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; 5603 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 5604 adapter->dcb_cfg.pfc_mode_enable = false; 5605 adapter->dcb_set_bitmap = 0x00; 5606 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) 5607 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; 5608 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 5609 sizeof(adapter->temp_dcb_cfg)); 5610 } 5611 #endif 5612 5613 /** 5614 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) 5615 * @adapter: board private structure to initialize 5616 * 5617 * ixgbe_sw_init initializes the Adapter private data structure. 5618 * Fields are initialized based on PCI device information and 5619 * OS network device settings (MTU size). 
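 *
 * Returns 0 on success, or a negative error code (-ENOMEM, -EIO) on failure.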
5620 **/ 5621 static int ixgbe_sw_init(struct ixgbe_adapter *adapter) 5622 { 5623 struct ixgbe_hw *hw = &adapter->hw; 5624 struct pci_dev *pdev = adapter->pdev; 5625 unsigned int rss, fdir; 5626 u32 fwsm; 5627 int i; 5628 5629 /* PCI config space info */ 5630 5631 hw->vendor_id = pdev->vendor; 5632 hw->device_id = pdev->device; 5633 hw->revision_id = pdev->revision; 5634 hw->subsystem_vendor_id = pdev->subsystem_vendor; 5635 hw->subsystem_device_id = pdev->subsystem_device; 5636 5637 /* Set common capability flags and settings */ 5638 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); 5639 adapter->ring_feature[RING_F_RSS].limit = rss; 5640 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 5641 adapter->max_q_vectors = MAX_Q_VECTORS_82599; 5642 adapter->atr_sample_rate = 20; 5643 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); 5644 adapter->ring_feature[RING_F_FDIR].limit = fdir; 5645 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; 5646 #ifdef CONFIG_IXGBE_DCA 5647 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; 5648 #endif 5649 #ifdef CONFIG_IXGBE_DCB 5650 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; 5651 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 5652 #endif 5653 #ifdef IXGBE_FCOE 5654 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 5655 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 5656 #ifdef CONFIG_IXGBE_DCB 5657 /* Default traffic class to use for FCoE */ 5658 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 5659 #endif /* CONFIG_IXGBE_DCB */ 5660 #endif /* IXGBE_FCOE */ 5661 5662 /* initialize static ixgbe jump table entries */ 5663 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), 5664 GFP_KERNEL); 5665 if (!adapter->jump_tables[0]) 5666 return -ENOMEM; 5667 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; 5668 5669 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) 5670 adapter->jump_tables[i] = NULL; 5671 5672 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * 5673 hw->mac.num_rar_entries, 5674 GFP_ATOMIC); 5675 if (!adapter->mac_table) 5676 return -ENOMEM; 5677 5678 /* Set MAC specific capability flags and exceptions */ 5679 switch (hw->mac.type) { 5680 case ixgbe_mac_82598EB: 5681 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; 5682 5683 if (hw->device_id == IXGBE_DEV_ID_82598AT) 5684 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 5685 5686 adapter->max_q_vectors = MAX_Q_VECTORS_82598; 5687 adapter->ring_feature[RING_F_FDIR].limit = 0; 5688 adapter->atr_sample_rate = 0; 5689 adapter->fdir_pballoc = 0; 5690 #ifdef IXGBE_FCOE 5691 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 5692 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 5693 #ifdef CONFIG_IXGBE_DCB 5694 adapter->fcoe.up = 0; 5695 #endif /* IXGBE_DCB */ 5696 #endif /* IXGBE_FCOE */ 5697 break; 5698 case ixgbe_mac_82599EB: 5699 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 5700 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5701 break; 5702 case ixgbe_mac_X540: 5703 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); 5704 if (fwsm & IXGBE_FWSM_TS_ENABLED) 5705 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5706 break; 5707 case ixgbe_mac_x550em_a: 5708 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; 5709 /* fall through */ 5710 case ixgbe_mac_X550EM_x: 5711 #ifdef CONFIG_IXGBE_DCB 5712 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; 5713 #endif 5714 #ifdef IXGBE_FCOE 5715 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 5716 #ifdef CONFIG_IXGBE_DCB 5717 adapter->fcoe.up = 0; 5718 #endif /* IXGBE_DCB */ 5719 #endif /* IXGBE_FCOE */ 5720 /* Fall Through */ 5721 case ixgbe_mac_X550: 5722 #ifdef CONFIG_IXGBE_DCA 5723 
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; 5724 #endif 5725 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; 5726 break; 5727 default: 5728 break; 5729 } 5730 5731 #ifdef IXGBE_FCOE 5732 /* FCoE support exists, always init the FCoE lock */ 5733 spin_lock_init(&adapter->fcoe.lock); 5734 5735 #endif 5736 /* n-tuple support exists, always init our spinlock */ 5737 spin_lock_init(&adapter->fdir_perfect_lock); 5738 5739 #ifdef CONFIG_IXGBE_DCB 5740 ixgbe_init_dcb(adapter); 5741 #endif 5742 5743 /* default flow control settings */ 5744 hw->fc.requested_mode = ixgbe_fc_full; 5745 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ 5746 ixgbe_pbthresh_setup(adapter); 5747 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 5748 hw->fc.send_xon = true; 5749 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); 5750 5751 #ifdef CONFIG_PCI_IOV 5752 if (max_vfs > 0) 5753 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); 5754 5755 /* assign number of SR-IOV VFs */ 5756 if (hw->mac.type != ixgbe_mac_82598EB) { 5757 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { 5758 adapter->num_vfs = 0; 5759 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); 5760 } else { 5761 adapter->num_vfs = max_vfs; 5762 } 5763 } 5764 #endif /* CONFIG_PCI_IOV */ 5765 5766 /* enable itr by default in dynamic mode */ 5767 adapter->rx_itr_setting = 1; 5768 adapter->tx_itr_setting = 1; 5769 5770 /* set default ring sizes */ 5771 adapter->tx_ring_count = IXGBE_DEFAULT_TXD; 5772 adapter->rx_ring_count = IXGBE_DEFAULT_RXD; 5773 5774 /* set default work limits */ 5775 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; 5776 5777 /* initialize eeprom parameters */ 5778 if (ixgbe_init_eeprom_params_generic(hw)) { 5779 e_dev_err("EEPROM initialization failed\n"); 5780 return -EIO; 5781 } 5782 5783 /* PF holds first pool slot */ 5784 set_bit(0, &adapter->fwd_bitmask); 5785 set_bit(__IXGBE_DOWN, &adapter->state); 5786 5787 return 0; 5788 } 5789 5790 /** 5791 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 5792 * @tx_ring: tx descriptor ring (for a specific queue) to setup 5793 * 5794 * Return 0 on success, negative on failure 5795 **/ 5796 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) 5797 { 5798 struct device *dev = tx_ring->dev; 5799 int orig_node = dev_to_node(dev); 5800 int ring_node = -1; 5801 int size; 5802 5803 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 5804 5805 if (tx_ring->q_vector) 5806 ring_node = tx_ring->q_vector->numa_node; 5807 5808 tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); 5809 if (!tx_ring->tx_buffer_info) 5810 tx_ring->tx_buffer_info = vzalloc(size); 5811 if (!tx_ring->tx_buffer_info) 5812 goto err; 5813 5814 u64_stats_init(&tx_ring->syncp); 5815 5816 /* round up to nearest 4K */ 5817 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 5818 tx_ring->size = ALIGN(tx_ring->size, 4096); 5819 5820 set_dev_node(dev, ring_node); 5821 tx_ring->desc = dma_alloc_coherent(dev, 5822 tx_ring->size, 5823 &tx_ring->dma, 5824 GFP_KERNEL); 5825 set_dev_node(dev, orig_node); 5826 if (!tx_ring->desc) 5827 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 5828 &tx_ring->dma, GFP_KERNEL); 5829 if (!tx_ring->desc) 5830 goto err; 5831 5832 tx_ring->next_to_use = 0; 5833 tx_ring->next_to_clean = 0; 5834 return 0; 5835 5836 err: 5837 vfree(tx_ring->tx_buffer_info); 5838 tx_ring->tx_buffer_info = NULL; 5839 dev_err(dev, "Unable to allocate memory for the Tx 
descriptor ring\n"); 5840 return -ENOMEM; 5841 } 5842 5843 /** 5844 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources 5845 * @adapter: board private structure 5846 * 5847 * If this function returns with an error, then it's possible one or 5848 * more of the rings is populated (while the rest are not). It is the 5849 * callers duty to clean those orphaned rings. 5850 * 5851 * Return 0 on success, negative on failure 5852 **/ 5853 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) 5854 { 5855 int i, err = 0; 5856 5857 for (i = 0; i < adapter->num_tx_queues; i++) { 5858 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); 5859 if (!err) 5860 continue; 5861 5862 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 5863 goto err_setup_tx; 5864 } 5865 5866 return 0; 5867 err_setup_tx: 5868 /* rewind the index freeing the rings as we go */ 5869 while (i--) 5870 ixgbe_free_tx_resources(adapter->tx_ring[i]); 5871 return err; 5872 } 5873 5874 /** 5875 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 5876 * @rx_ring: rx descriptor ring (for a specific queue) to setup 5877 * 5878 * Returns 0 on success, negative on failure 5879 **/ 5880 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) 5881 { 5882 struct device *dev = rx_ring->dev; 5883 int orig_node = dev_to_node(dev); 5884 int ring_node = -1; 5885 int size; 5886 5887 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 5888 5889 if (rx_ring->q_vector) 5890 ring_node = rx_ring->q_vector->numa_node; 5891 5892 rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); 5893 if (!rx_ring->rx_buffer_info) 5894 rx_ring->rx_buffer_info = vzalloc(size); 5895 if (!rx_ring->rx_buffer_info) 5896 goto err; 5897 5898 u64_stats_init(&rx_ring->syncp); 5899 5900 /* Round up to nearest 4K */ 5901 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 5902 rx_ring->size = ALIGN(rx_ring->size, 4096); 5903 5904 set_dev_node(dev, ring_node); 5905 rx_ring->desc = dma_alloc_coherent(dev, 5906 rx_ring->size, 5907 &rx_ring->dma, 5908 GFP_KERNEL); 5909 set_dev_node(dev, orig_node); 5910 if (!rx_ring->desc) 5911 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 5912 &rx_ring->dma, GFP_KERNEL); 5913 if (!rx_ring->desc) 5914 goto err; 5915 5916 rx_ring->next_to_clean = 0; 5917 rx_ring->next_to_use = 0; 5918 5919 return 0; 5920 err: 5921 vfree(rx_ring->rx_buffer_info); 5922 rx_ring->rx_buffer_info = NULL; 5923 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); 5924 return -ENOMEM; 5925 } 5926 5927 /** 5928 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources 5929 * @adapter: board private structure 5930 * 5931 * If this function returns with an error, then it's possible one or 5932 * more of the rings is populated (while the rest are not). It is the 5933 * callers duty to clean those orphaned rings. 
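 * When FCoE is compiled in, the DDP resources are allocated here as well;
 * if that allocation fails, the Rx rings set up above are freed before
 * returning the error.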
5934 * 5935 * Return 0 on success, negative on failure 5936 **/ 5937 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 5938 { 5939 int i, err = 0; 5940 5941 for (i = 0; i < adapter->num_rx_queues; i++) { 5942 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); 5943 if (!err) 5944 continue; 5945 5946 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 5947 goto err_setup_rx; 5948 } 5949 5950 #ifdef IXGBE_FCOE 5951 err = ixgbe_setup_fcoe_ddp_resources(adapter); 5952 if (!err) 5953 #endif 5954 return 0; 5955 err_setup_rx: 5956 /* rewind the index freeing the rings as we go */ 5957 while (i--) 5958 ixgbe_free_rx_resources(adapter->rx_ring[i]); 5959 return err; 5960 } 5961 5962 /** 5963 * ixgbe_free_tx_resources - Free Tx Resources per Queue 5964 * @tx_ring: Tx descriptor ring for a specific queue 5965 * 5966 * Free all transmit software resources 5967 **/ 5968 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) 5969 { 5970 ixgbe_clean_tx_ring(tx_ring); 5971 5972 vfree(tx_ring->tx_buffer_info); 5973 tx_ring->tx_buffer_info = NULL; 5974 5975 /* if not set, then don't free */ 5976 if (!tx_ring->desc) 5977 return; 5978 5979 dma_free_coherent(tx_ring->dev, tx_ring->size, 5980 tx_ring->desc, tx_ring->dma); 5981 5982 tx_ring->desc = NULL; 5983 } 5984 5985 /** 5986 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues 5987 * @adapter: board private structure 5988 * 5989 * Free all transmit software resources 5990 **/ 5991 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) 5992 { 5993 int i; 5994 5995 for (i = 0; i < adapter->num_tx_queues; i++) 5996 if (adapter->tx_ring[i]->desc) 5997 ixgbe_free_tx_resources(adapter->tx_ring[i]); 5998 } 5999 6000 /** 6001 * ixgbe_free_rx_resources - Free Rx Resources 6002 * @rx_ring: ring to clean the resources from 6003 * 6004 * Free all receive software resources 6005 **/ 6006 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) 6007 { 6008 ixgbe_clean_rx_ring(rx_ring); 6009 6010 vfree(rx_ring->rx_buffer_info); 6011 rx_ring->rx_buffer_info = NULL; 6012 6013 /* if not set, then don't free */ 6014 if (!rx_ring->desc) 6015 return; 6016 6017 dma_free_coherent(rx_ring->dev, rx_ring->size, 6018 rx_ring->desc, rx_ring->dma); 6019 6020 rx_ring->desc = NULL; 6021 } 6022 6023 /** 6024 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues 6025 * @adapter: board private structure 6026 * 6027 * Free all receive software resources 6028 **/ 6029 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) 6030 { 6031 int i; 6032 6033 #ifdef IXGBE_FCOE 6034 ixgbe_free_fcoe_ddp_resources(adapter); 6035 6036 #endif 6037 for (i = 0; i < adapter->num_rx_queues; i++) 6038 if (adapter->rx_ring[i]->desc) 6039 ixgbe_free_rx_resources(adapter->rx_ring[i]); 6040 } 6041 6042 /** 6043 * ixgbe_change_mtu - Change the Maximum Transfer Unit 6044 * @netdev: network interface device structure 6045 * @new_mtu: new value for maximum frame size 6046 * 6047 * Returns 0 on success, negative on failure 6048 **/ 6049 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 6050 { 6051 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6052 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 6053 6054 /* MTU < 68 is an error and causes problems on some kernels */ 6055 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 6056 return -EINVAL; 6057 6058 /* 6059 * For 82599EB we cannot allow legacy VFs to enable their receive 6060 * paths when MTU greater than 1500 is configured. 
So display a 6061 * warning that legacy VFs will be disabled. 6062 */ 6063 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && 6064 (adapter->hw.mac.type == ixgbe_mac_82599EB) && 6065 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) 6066 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); 6067 6068 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 6069 6070 /* must set new MTU before calling down or up */ 6071 netdev->mtu = new_mtu; 6072 6073 if (netif_running(netdev)) 6074 ixgbe_reinit_locked(adapter); 6075 6076 return 0; 6077 } 6078 6079 /** 6080 * ixgbe_open - Called when a network interface is made active 6081 * @netdev: network interface device structure 6082 * 6083 * Returns 0 on success, negative value on failure 6084 * 6085 * The open entry point is called when a network interface is made 6086 * active by the system (IFF_UP). At this point all resources needed 6087 * for transmit and receive operations are allocated, the interrupt 6088 * handler is registered with the OS, the watchdog timer is started, 6089 * and the stack is notified that the interface is ready. 6090 **/ 6091 int ixgbe_open(struct net_device *netdev) 6092 { 6093 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6094 struct ixgbe_hw *hw = &adapter->hw; 6095 int err, queues; 6096 6097 /* disallow open during test */ 6098 if (test_bit(__IXGBE_TESTING, &adapter->state)) 6099 return -EBUSY; 6100 6101 netif_carrier_off(netdev); 6102 6103 /* allocate transmit descriptors */ 6104 err = ixgbe_setup_all_tx_resources(adapter); 6105 if (err) 6106 goto err_setup_tx; 6107 6108 /* allocate receive descriptors */ 6109 err = ixgbe_setup_all_rx_resources(adapter); 6110 if (err) 6111 goto err_setup_rx; 6112 6113 ixgbe_configure(adapter); 6114 6115 err = ixgbe_request_irq(adapter); 6116 if (err) 6117 goto err_req_irq; 6118 6119 /* Notify the stack of the actual queue counts. 
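 * When more than one Rx pool is active (for example with macvlan
 * offload), the Tx count reported is the per-pool queue count and the
 * Rx count is capped at IXGBE_MAX_L2A_QUEUES; otherwise the full
 * Tx/Rx queue counts are used.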
*/ 6120 if (adapter->num_rx_pools > 1) 6121 queues = adapter->num_rx_queues_per_pool; 6122 else 6123 queues = adapter->num_tx_queues; 6124 6125 err = netif_set_real_num_tx_queues(netdev, queues); 6126 if (err) 6127 goto err_set_queues; 6128 6129 if (adapter->num_rx_pools > 1 && 6130 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) 6131 queues = IXGBE_MAX_L2A_QUEUES; 6132 else 6133 queues = adapter->num_rx_queues; 6134 err = netif_set_real_num_rx_queues(netdev, queues); 6135 if (err) 6136 goto err_set_queues; 6137 6138 ixgbe_ptp_init(adapter); 6139 6140 ixgbe_up_complete(adapter); 6141 6142 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); 6143 udp_tunnel_get_rx_info(netdev); 6144 6145 return 0; 6146 6147 err_set_queues: 6148 ixgbe_free_irq(adapter); 6149 err_req_irq: 6150 ixgbe_free_all_rx_resources(adapter); 6151 if (hw->phy.ops.set_phy_power && !adapter->wol) 6152 hw->phy.ops.set_phy_power(&adapter->hw, false); 6153 err_setup_rx: 6154 ixgbe_free_all_tx_resources(adapter); 6155 err_setup_tx: 6156 ixgbe_reset(adapter); 6157 6158 return err; 6159 } 6160 6161 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) 6162 { 6163 ixgbe_ptp_suspend(adapter); 6164 6165 if (adapter->hw.phy.ops.enter_lplu) { 6166 adapter->hw.phy.reset_disable = true; 6167 ixgbe_down(adapter); 6168 adapter->hw.phy.ops.enter_lplu(&adapter->hw); 6169 adapter->hw.phy.reset_disable = false; 6170 } else { 6171 ixgbe_down(adapter); 6172 } 6173 6174 ixgbe_free_irq(adapter); 6175 6176 ixgbe_free_all_tx_resources(adapter); 6177 ixgbe_free_all_rx_resources(adapter); 6178 } 6179 6180 /** 6181 * ixgbe_close - Disables a network interface 6182 * @netdev: network interface device structure 6183 * 6184 * Returns 0, this is not allowed to fail 6185 * 6186 * The close entry point is called when an interface is de-activated 6187 * by the OS. The hardware is still under the drivers control, but 6188 * needs to be disabled. A global MAC reset is issued to stop the 6189 * hardware, and all transmit and receive resources are freed. 6190 **/ 6191 int ixgbe_close(struct net_device *netdev) 6192 { 6193 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6194 6195 ixgbe_ptp_stop(adapter); 6196 6197 ixgbe_close_suspend(adapter); 6198 6199 ixgbe_fdir_filter_exit(adapter); 6200 6201 ixgbe_release_hw_control(adapter); 6202 6203 return 0; 6204 } 6205 6206 #ifdef CONFIG_PM 6207 static int ixgbe_resume(struct pci_dev *pdev) 6208 { 6209 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 6210 struct net_device *netdev = adapter->netdev; 6211 u32 err; 6212 6213 adapter->hw.hw_addr = adapter->io_addr; 6214 pci_set_power_state(pdev, PCI_D0); 6215 pci_restore_state(pdev); 6216 /* 6217 * pci_restore_state clears dev->state_saved so call 6218 * pci_save_state to restore it. 
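 * The remainder of the resume path re-enables the device, clears
 * __IXGBE_DISABLED, resets the hardware, clears the Wake Up Status
 * register and rebuilds the interrupt scheme before reopening the
 * interface.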
6219 */ 6220 pci_save_state(pdev); 6221 6222 err = pci_enable_device_mem(pdev); 6223 if (err) { 6224 e_dev_err("Cannot enable PCI device from suspend\n"); 6225 return err; 6226 } 6227 smp_mb__before_atomic(); 6228 clear_bit(__IXGBE_DISABLED, &adapter->state); 6229 pci_set_master(pdev); 6230 6231 pci_wake_from_d3(pdev, false); 6232 6233 ixgbe_reset(adapter); 6234 6235 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 6236 6237 rtnl_lock(); 6238 err = ixgbe_init_interrupt_scheme(adapter); 6239 if (!err && netif_running(netdev)) 6240 err = ixgbe_open(netdev); 6241 6242 rtnl_unlock(); 6243 6244 if (err) 6245 return err; 6246 6247 netif_device_attach(netdev); 6248 6249 return 0; 6250 } 6251 #endif /* CONFIG_PM */ 6252 6253 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 6254 { 6255 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 6256 struct net_device *netdev = adapter->netdev; 6257 struct ixgbe_hw *hw = &adapter->hw; 6258 u32 ctrl, fctrl; 6259 u32 wufc = adapter->wol; 6260 #ifdef CONFIG_PM 6261 int retval = 0; 6262 #endif 6263 6264 netif_device_detach(netdev); 6265 6266 rtnl_lock(); 6267 if (netif_running(netdev)) 6268 ixgbe_close_suspend(adapter); 6269 rtnl_unlock(); 6270 6271 ixgbe_clear_interrupt_scheme(adapter); 6272 6273 #ifdef CONFIG_PM 6274 retval = pci_save_state(pdev); 6275 if (retval) 6276 return retval; 6277 6278 #endif 6279 if (hw->mac.ops.stop_link_on_d3) 6280 hw->mac.ops.stop_link_on_d3(hw); 6281 6282 if (wufc) { 6283 ixgbe_set_rx_mode(netdev); 6284 6285 /* enable the optics for 82599 SFP+ fiber as we can WoL */ 6286 if (hw->mac.ops.enable_tx_laser) 6287 hw->mac.ops.enable_tx_laser(hw); 6288 6289 /* turn on all-multi mode if wake on multicast is enabled */ 6290 if (wufc & IXGBE_WUFC_MC) { 6291 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6292 fctrl |= IXGBE_FCTRL_MPE; 6293 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 6294 } 6295 6296 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 6297 ctrl |= IXGBE_CTRL_GIO_DIS; 6298 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 6299 6300 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); 6301 } else { 6302 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 6303 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 6304 } 6305 6306 switch (hw->mac.type) { 6307 case ixgbe_mac_82598EB: 6308 pci_wake_from_d3(pdev, false); 6309 break; 6310 case ixgbe_mac_82599EB: 6311 case ixgbe_mac_X540: 6312 case ixgbe_mac_X550: 6313 case ixgbe_mac_X550EM_x: 6314 case ixgbe_mac_x550em_a: 6315 pci_wake_from_d3(pdev, !!wufc); 6316 break; 6317 default: 6318 break; 6319 } 6320 6321 *enable_wake = !!wufc; 6322 if (hw->phy.ops.set_phy_power && !*enable_wake) 6323 hw->phy.ops.set_phy_power(hw, false); 6324 6325 ixgbe_release_hw_control(adapter); 6326 6327 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 6328 pci_disable_device(pdev); 6329 6330 return 0; 6331 } 6332 6333 #ifdef CONFIG_PM 6334 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 6335 { 6336 int retval; 6337 bool wake; 6338 6339 retval = __ixgbe_shutdown(pdev, &wake); 6340 if (retval) 6341 return retval; 6342 6343 if (wake) { 6344 pci_prepare_to_sleep(pdev); 6345 } else { 6346 pci_wake_from_d3(pdev, false); 6347 pci_set_power_state(pdev, PCI_D3hot); 6348 } 6349 6350 return 0; 6351 } 6352 #endif /* CONFIG_PM */ 6353 6354 static void ixgbe_shutdown(struct pci_dev *pdev) 6355 { 6356 bool wake; 6357 6358 __ixgbe_shutdown(pdev, &wake); 6359 6360 if (system_state == SYSTEM_POWER_OFF) { 6361 pci_wake_from_d3(pdev, wake); 6362 pci_set_power_state(pdev, PCI_D3hot); 6363 } 6364 } 6365 6366 /** 6367 * ixgbe_update_stats - Update the board 
statistics counters. 6368 * @adapter: board private structure 6369 **/ 6370 void ixgbe_update_stats(struct ixgbe_adapter *adapter) 6371 { 6372 struct net_device *netdev = adapter->netdev; 6373 struct ixgbe_hw *hw = &adapter->hw; 6374 struct ixgbe_hw_stats *hwstats = &adapter->stats; 6375 u64 total_mpc = 0; 6376 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 6377 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; 6378 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; 6379 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; 6380 6381 if (test_bit(__IXGBE_DOWN, &adapter->state) || 6382 test_bit(__IXGBE_RESETTING, &adapter->state)) 6383 return; 6384 6385 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 6386 u64 rsc_count = 0; 6387 u64 rsc_flush = 0; 6388 for (i = 0; i < adapter->num_rx_queues; i++) { 6389 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; 6390 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; 6391 } 6392 adapter->rsc_total_count = rsc_count; 6393 adapter->rsc_total_flush = rsc_flush; 6394 } 6395 6396 for (i = 0; i < adapter->num_rx_queues; i++) { 6397 struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; 6398 non_eop_descs += rx_ring->rx_stats.non_eop_descs; 6399 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; 6400 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; 6401 hw_csum_rx_error += rx_ring->rx_stats.csum_err; 6402 bytes += rx_ring->stats.bytes; 6403 packets += rx_ring->stats.packets; 6404 } 6405 adapter->non_eop_descs = non_eop_descs; 6406 adapter->alloc_rx_page_failed = alloc_rx_page_failed; 6407 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; 6408 adapter->hw_csum_rx_error = hw_csum_rx_error; 6409 netdev->stats.rx_bytes = bytes; 6410 netdev->stats.rx_packets = packets; 6411 6412 bytes = 0; 6413 packets = 0; 6414 /* gather some stats to the adapter struct that are per queue */ 6415 for (i = 0; i < adapter->num_tx_queues; i++) { 6416 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; 6417 restart_queue += tx_ring->tx_stats.restart_queue; 6418 tx_busy += tx_ring->tx_stats.tx_busy; 6419 bytes += tx_ring->stats.bytes; 6420 packets += tx_ring->stats.packets; 6421 } 6422 adapter->restart_queue = restart_queue; 6423 adapter->tx_busy = tx_busy; 6424 netdev->stats.tx_bytes = bytes; 6425 netdev->stats.tx_packets = packets; 6426 6427 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 6428 6429 /* 8 register reads */ 6430 for (i = 0; i < 8; i++) { 6431 /* for packet buffers not used, the register should read 0 */ 6432 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 6433 missed_rx += mpc; 6434 hwstats->mpc[i] += mpc; 6435 total_mpc += hwstats->mpc[i]; 6436 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 6437 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 6438 switch (hw->mac.type) { 6439 case ixgbe_mac_82598EB: 6440 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 6441 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 6442 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 6443 hwstats->pxonrxc[i] += 6444 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 6445 break; 6446 case ixgbe_mac_82599EB: 6447 case ixgbe_mac_X540: 6448 case ixgbe_mac_X550: 6449 case ixgbe_mac_X550EM_x: 6450 case ixgbe_mac_x550em_a: 6451 hwstats->pxonrxc[i] += 6452 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 6453 break; 6454 default: 6455 break; 6456 } 6457 } 6458 6459 /*16 register reads */ 6460 for (i = 0; i < 16; i++) { 6461 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 6462 hwstats->qprc[i] += IXGBE_READ_REG(hw, 
IXGBE_QPRC(i)); 6463 if ((hw->mac.type == ixgbe_mac_82599EB) || 6464 (hw->mac.type == ixgbe_mac_X540) || 6465 (hw->mac.type == ixgbe_mac_X550) || 6466 (hw->mac.type == ixgbe_mac_X550EM_x) || 6467 (hw->mac.type == ixgbe_mac_x550em_a)) { 6468 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 6469 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ 6470 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 6471 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */ 6472 } 6473 } 6474 6475 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 6476 /* work around hardware counting issue */ 6477 hwstats->gprc -= missed_rx; 6478 6479 ixgbe_update_xoff_received(adapter); 6480 6481 /* 82598 hardware only has a 32 bit counter in the high register */ 6482 switch (hw->mac.type) { 6483 case ixgbe_mac_82598EB: 6484 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 6485 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 6486 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 6487 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 6488 break; 6489 case ixgbe_mac_X540: 6490 case ixgbe_mac_X550: 6491 case ixgbe_mac_X550EM_x: 6492 case ixgbe_mac_x550em_a: 6493 /* OS2BMC stats are X540 and later */ 6494 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); 6495 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); 6496 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); 6497 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); 6498 case ixgbe_mac_82599EB: 6499 for (i = 0; i < 16; i++) 6500 adapter->hw_rx_no_dma_resources += 6501 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 6502 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 6503 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 6504 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 6505 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ 6506 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 6507 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 6508 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 6509 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 6510 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 6511 #ifdef IXGBE_FCOE 6512 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 6513 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 6514 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 6515 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 6516 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 6517 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 6518 /* Add up per cpu counters for total ddp aloc fail */ 6519 if (adapter->fcoe.ddp_pool) { 6520 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 6521 struct ixgbe_fcoe_ddp_pool *ddp_pool; 6522 unsigned int cpu; 6523 u64 noddp = 0, noddp_ext_buff = 0; 6524 for_each_possible_cpu(cpu) { 6525 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 6526 noddp += ddp_pool->noddp; 6527 noddp_ext_buff += ddp_pool->noddp_ext_buff; 6528 } 6529 hwstats->fcoe_noddp = noddp; 6530 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; 6531 } 6532 #endif /* IXGBE_FCOE */ 6533 break; 6534 default: 6535 break; 6536 } 6537 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 6538 hwstats->bprc += bprc; 6539 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 6540 if (hw->mac.type == ixgbe_mac_82598EB) 6541 hwstats->mprc -= bprc; 6542 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 6543 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 6544 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 6545 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 6546 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 6547 
hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 6548 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 6549 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 6550 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 6551 hwstats->lxontxc += lxon; 6552 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 6553 hwstats->lxofftxc += lxoff; 6554 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 6555 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 6556 /* 6557 * 82598 errata - tx of flow control packets is included in tx counters 6558 */ 6559 xon_off_tot = lxon + lxoff; 6560 hwstats->gptc -= xon_off_tot; 6561 hwstats->mptc -= xon_off_tot; 6562 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); 6563 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 6564 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 6565 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 6566 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 6567 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 6568 hwstats->ptc64 -= xon_off_tot; 6569 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 6570 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 6571 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 6572 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 6573 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 6574 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 6575 6576 /* Fill out the OS statistics structure */ 6577 netdev->stats.multicast = hwstats->mprc; 6578 6579 /* Rx Errors */ 6580 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; 6581 netdev->stats.rx_dropped = 0; 6582 netdev->stats.rx_length_errors = hwstats->rlec; 6583 netdev->stats.rx_crc_errors = hwstats->crcerrs; 6584 netdev->stats.rx_missed_errors = total_mpc; 6585 } 6586 6587 /** 6588 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table 6589 * @adapter: pointer to the device adapter structure 6590 **/ 6591 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) 6592 { 6593 struct ixgbe_hw *hw = &adapter->hw; 6594 int i; 6595 6596 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) 6597 return; 6598 6599 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 6600 6601 /* if interface is down do nothing */ 6602 if (test_bit(__IXGBE_DOWN, &adapter->state)) 6603 return; 6604 6605 /* do nothing if we are not using signature filters */ 6606 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) 6607 return; 6608 6609 adapter->fdir_overflow++; 6610 6611 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 6612 for (i = 0; i < adapter->num_tx_queues; i++) 6613 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 6614 &(adapter->tx_ring[i]->state)); 6615 /* re-enable flow director interrupts */ 6616 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 6617 } else { 6618 e_err(probe, "failed to finish FDIR re-initialization, " 6619 "ignored adding FDIR ATR filters\n"); 6620 } 6621 } 6622 6623 /** 6624 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts 6625 * @adapter: pointer to the device adapter structure 6626 * 6627 * This function serves two purposes. First it strobes the interrupt lines 6628 * in order to make certain interrupts are occurring. Secondly it sets the 6629 * bits needed to check for TX hangs. As a result we should immediately 6630 * determine if a hang has occurred. 
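 * With MSI-X, one EICS bit is set for every q_vector that owns a Tx or
 * Rx ring (roughly eics |= BIT_ULL(i) for each active vector). With
 * legacy/MSI interrupts only the TCP timer and 'other' causes are
 * strobed, since hitting bits covered by EIAM would set EIMS as well.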
6631 */ 6632 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) 6633 { 6634 struct ixgbe_hw *hw = &adapter->hw; 6635 u64 eics = 0; 6636 int i; 6637 6638 /* If we're down, removing or resetting, just bail */ 6639 if (test_bit(__IXGBE_DOWN, &adapter->state) || 6640 test_bit(__IXGBE_REMOVING, &adapter->state) || 6641 test_bit(__IXGBE_RESETTING, &adapter->state)) 6642 return; 6643 6644 /* Force detection of hung controller */ 6645 if (netif_carrier_ok(adapter->netdev)) { 6646 for (i = 0; i < adapter->num_tx_queues; i++) 6647 set_check_for_tx_hang(adapter->tx_ring[i]); 6648 } 6649 6650 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 6651 /* 6652 * for legacy and MSI interrupts don't set any bits 6653 * that are enabled for EIAM, because this operation 6654 * would set *both* EIMS and EICS for any bit in EIAM 6655 */ 6656 IXGBE_WRITE_REG(hw, IXGBE_EICS, 6657 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 6658 } else { 6659 /* get one bit for every active tx/rx interrupt vector */ 6660 for (i = 0; i < adapter->num_q_vectors; i++) { 6661 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 6662 if (qv->rx.ring || qv->tx.ring) 6663 eics |= BIT_ULL(i); 6664 } 6665 } 6666 6667 /* Cause software interrupt to ensure rings are cleaned */ 6668 ixgbe_irq_rearm_queues(adapter, eics); 6669 } 6670 6671 /** 6672 * ixgbe_watchdog_update_link - update the link status 6673 * @adapter: pointer to the device adapter structure 6674 * @link_speed: pointer to a u32 to store the link_speed 6675 **/ 6676 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) 6677 { 6678 struct ixgbe_hw *hw = &adapter->hw; 6679 u32 link_speed = adapter->link_speed; 6680 bool link_up = adapter->link_up; 6681 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; 6682 6683 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) 6684 return; 6685 6686 if (hw->mac.ops.check_link) { 6687 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 6688 } else { 6689 /* always assume link is up, if no check link function */ 6690 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 6691 link_up = true; 6692 } 6693 6694 if (adapter->ixgbe_ieee_pfc) 6695 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); 6696 6697 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { 6698 hw->mac.ops.fc_enable(hw); 6699 ixgbe_set_rx_drop_en(adapter); 6700 } 6701 6702 if (link_up || 6703 time_after(jiffies, (adapter->link_check_timeout + 6704 IXGBE_TRY_LINK_TIMEOUT))) { 6705 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 6706 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); 6707 IXGBE_WRITE_FLUSH(hw); 6708 } 6709 6710 adapter->link_up = link_up; 6711 adapter->link_speed = link_speed; 6712 } 6713 6714 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) 6715 { 6716 #ifdef CONFIG_IXGBE_DCB 6717 struct net_device *netdev = adapter->netdev; 6718 struct dcb_app app = { 6719 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, 6720 .protocol = 0, 6721 }; 6722 u8 up = 0; 6723 6724 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) 6725 up = dcb_ieee_getapp_mask(netdev, &app); 6726 6727 adapter->default_up = (up > 1) ? 
(ffs(up) - 1) : 0; 6728 #endif 6729 } 6730 6731 /** 6732 * ixgbe_watchdog_link_is_up - update netif_carrier status and 6733 * print link up message 6734 * @adapter: pointer to the device adapter structure 6735 **/ 6736 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) 6737 { 6738 struct net_device *netdev = adapter->netdev; 6739 struct ixgbe_hw *hw = &adapter->hw; 6740 struct net_device *upper; 6741 struct list_head *iter; 6742 u32 link_speed = adapter->link_speed; 6743 const char *speed_str; 6744 bool flow_rx, flow_tx; 6745 6746 /* only continue if link was previously down */ 6747 if (netif_carrier_ok(netdev)) 6748 return; 6749 6750 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; 6751 6752 switch (hw->mac.type) { 6753 case ixgbe_mac_82598EB: { 6754 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6755 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 6756 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); 6757 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 6758 } 6759 break; 6760 case ixgbe_mac_X540: 6761 case ixgbe_mac_X550: 6762 case ixgbe_mac_X550EM_x: 6763 case ixgbe_mac_x550em_a: 6764 case ixgbe_mac_82599EB: { 6765 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 6766 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 6767 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); 6768 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); 6769 } 6770 break; 6771 default: 6772 flow_tx = false; 6773 flow_rx = false; 6774 break; 6775 } 6776 6777 adapter->last_rx_ptp_check = jiffies; 6778 6779 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 6780 ixgbe_ptp_start_cyclecounter(adapter); 6781 6782 switch (link_speed) { 6783 case IXGBE_LINK_SPEED_10GB_FULL: 6784 speed_str = "10 Gbps"; 6785 break; 6786 case IXGBE_LINK_SPEED_2_5GB_FULL: 6787 speed_str = "2.5 Gbps"; 6788 break; 6789 case IXGBE_LINK_SPEED_1GB_FULL: 6790 speed_str = "1 Gbps"; 6791 break; 6792 case IXGBE_LINK_SPEED_100_FULL: 6793 speed_str = "100 Mbps"; 6794 break; 6795 default: 6796 speed_str = "unknown speed"; 6797 break; 6798 } 6799 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, 6800 ((flow_rx && flow_tx) ? "RX/TX" : 6801 (flow_rx ? "RX" : 6802 (flow_tx ? 
"TX" : "None")))); 6803 6804 netif_carrier_on(netdev); 6805 ixgbe_check_vf_rate_limit(adapter); 6806 6807 /* enable transmits */ 6808 netif_tx_wake_all_queues(adapter->netdev); 6809 6810 /* enable any upper devices */ 6811 rtnl_lock(); 6812 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { 6813 if (netif_is_macvlan(upper)) { 6814 struct macvlan_dev *vlan = netdev_priv(upper); 6815 6816 if (vlan->fwd_priv) 6817 netif_tx_wake_all_queues(upper); 6818 } 6819 } 6820 rtnl_unlock(); 6821 6822 /* update the default user priority for VFs */ 6823 ixgbe_update_default_up(adapter); 6824 6825 /* ping all the active vfs to let them know link has changed */ 6826 ixgbe_ping_all_vfs(adapter); 6827 } 6828 6829 /** 6830 * ixgbe_watchdog_link_is_down - update netif_carrier status and 6831 * print link down message 6832 * @adapter: pointer to the adapter structure 6833 **/ 6834 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) 6835 { 6836 struct net_device *netdev = adapter->netdev; 6837 struct ixgbe_hw *hw = &adapter->hw; 6838 6839 adapter->link_up = false; 6840 adapter->link_speed = 0; 6841 6842 /* only continue if link was up previously */ 6843 if (!netif_carrier_ok(netdev)) 6844 return; 6845 6846 /* poll for SFP+ cable when link is down */ 6847 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) 6848 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 6849 6850 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 6851 ixgbe_ptp_start_cyclecounter(adapter); 6852 6853 e_info(drv, "NIC Link is Down\n"); 6854 netif_carrier_off(netdev); 6855 6856 /* ping all the active vfs to let them know link has changed */ 6857 ixgbe_ping_all_vfs(adapter); 6858 } 6859 6860 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) 6861 { 6862 int i; 6863 6864 for (i = 0; i < adapter->num_tx_queues; i++) { 6865 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; 6866 6867 if (tx_ring->next_to_use != tx_ring->next_to_clean) 6868 return true; 6869 } 6870 6871 return false; 6872 } 6873 6874 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) 6875 { 6876 struct ixgbe_hw *hw = &adapter->hw; 6877 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 6878 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); 6879 6880 int i, j; 6881 6882 if (!adapter->num_vfs) 6883 return false; 6884 6885 /* resetting the PF is only needed for MAC before X550 */ 6886 if (hw->mac.type >= ixgbe_mac_X550) 6887 return false; 6888 6889 for (i = 0; i < adapter->num_vfs; i++) { 6890 for (j = 0; j < q_per_pool; j++) { 6891 u32 h, t; 6892 6893 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); 6894 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); 6895 6896 if (h != t) 6897 return true; 6898 } 6899 } 6900 6901 return false; 6902 } 6903 6904 /** 6905 * ixgbe_watchdog_flush_tx - flush queues on link down 6906 * @adapter: pointer to the device adapter structure 6907 **/ 6908 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) 6909 { 6910 if (!netif_carrier_ok(adapter->netdev)) { 6911 if (ixgbe_ring_tx_pending(adapter) || 6912 ixgbe_vf_tx_pending(adapter)) { 6913 /* We've lost link, so the controller stops DMA, 6914 * but we've got queued Tx work that's never going 6915 * to get done, so reset controller to flush Tx. 6916 * (Do the reset outside of interrupt context). 
6917 */ 6918 e_warn(drv, "initiating reset to clear Tx work after link loss\n"); 6919 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 6920 } 6921 } 6922 } 6923 6924 #ifdef CONFIG_PCI_IOV 6925 static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, 6926 struct pci_dev *vfdev) 6927 { 6928 if (!pci_wait_for_pending_transaction(vfdev)) 6929 e_dev_warn("Issuing VFLR with pending transactions\n"); 6930 6931 e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); 6932 pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 6933 6934 msleep(100); 6935 } 6936 6937 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) 6938 { 6939 struct ixgbe_hw *hw = &adapter->hw; 6940 struct pci_dev *pdev = adapter->pdev; 6941 unsigned int vf; 6942 u32 gpc; 6943 6944 if (!(netif_carrier_ok(adapter->netdev))) 6945 return; 6946 6947 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); 6948 if (gpc) /* If incrementing then no need for the check below */ 6949 return; 6950 /* Check to see if a bad DMA write target from an errant or 6951 * malicious VF has caused a PCIe error. If so then we can 6952 * issue a VFLR to the offending VF(s) and then resume without 6953 * requesting a full slot reset. 6954 */ 6955 6956 if (!pdev) 6957 return; 6958 6959 /* check status reg for all VFs owned by this PF */ 6960 for (vf = 0; vf < adapter->num_vfs; ++vf) { 6961 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; 6962 u16 status_reg; 6963 6964 if (!vfdev) 6965 continue; 6966 pci_read_config_word(vfdev, PCI_STATUS, &status_reg); 6967 if (status_reg != IXGBE_FAILED_READ_CFG_WORD && 6968 status_reg & PCI_STATUS_REC_MASTER_ABORT) 6969 ixgbe_issue_vf_flr(adapter, vfdev); 6970 } 6971 } 6972 6973 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) 6974 { 6975 u32 ssvpc; 6976 6977 /* Do not perform spoof check for 82598 or if not in IOV mode */ 6978 if (adapter->hw.mac.type == ixgbe_mac_82598EB || 6979 adapter->num_vfs == 0) 6980 return; 6981 6982 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); 6983 6984 /* 6985 * ssvpc register is cleared on read, if zero then no 6986 * spoofed packets in the last interval. 
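 * A non-zero count only triggers the warning below; no corrective
 * action is taken here.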
6987 */ 6988 if (!ssvpc) 6989 return; 6990 6991 e_warn(drv, "%u Spoofed packets detected\n", ssvpc); 6992 } 6993 #else 6994 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) 6995 { 6996 } 6997 6998 static void 6999 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) 7000 { 7001 } 7002 #endif /* CONFIG_PCI_IOV */ 7003 7004 7005 /** 7006 * ixgbe_watchdog_subtask - check and bring link up 7007 * @adapter: pointer to the device adapter structure 7008 **/ 7009 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) 7010 { 7011 /* if interface is down, removing or resetting, do nothing */ 7012 if (test_bit(__IXGBE_DOWN, &adapter->state) || 7013 test_bit(__IXGBE_REMOVING, &adapter->state) || 7014 test_bit(__IXGBE_RESETTING, &adapter->state)) 7015 return; 7016 7017 ixgbe_watchdog_update_link(adapter); 7018 7019 if (adapter->link_up) 7020 ixgbe_watchdog_link_is_up(adapter); 7021 else 7022 ixgbe_watchdog_link_is_down(adapter); 7023 7024 ixgbe_check_for_bad_vf(adapter); 7025 ixgbe_spoof_check(adapter); 7026 ixgbe_update_stats(adapter); 7027 7028 ixgbe_watchdog_flush_tx(adapter); 7029 } 7030 7031 /** 7032 * ixgbe_sfp_detection_subtask - poll for SFP+ cable 7033 * @adapter: the ixgbe adapter structure 7034 **/ 7035 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) 7036 { 7037 struct ixgbe_hw *hw = &adapter->hw; 7038 s32 err; 7039 7040 /* not searching for SFP so there is nothing to do here */ 7041 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && 7042 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) 7043 return; 7044 7045 if (adapter->sfp_poll_time && 7046 time_after(adapter->sfp_poll_time, jiffies)) 7047 return; /* If not yet time to poll for SFP */ 7048 7049 /* someone else is in init, wait until next service event */ 7050 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 7051 return; 7052 7053 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; 7054 7055 err = hw->phy.ops.identify_sfp(hw); 7056 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) 7057 goto sfp_out; 7058 7059 if (err == IXGBE_ERR_SFP_NOT_PRESENT) { 7060 /* If no cable is present, then we need to reset 7061 * the next time we find a good cable. */ 7062 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; 7063 } 7064 7065 /* exit on error */ 7066 if (err) 7067 goto sfp_out; 7068 7069 /* exit if reset not needed */ 7070 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) 7071 goto sfp_out; 7072 7073 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; 7074 7075 /* 7076 * A module may be identified correctly, but the EEPROM may not have 7077 * support for that module. setup_sfp() will fail in that case, so 7078 * we should not allow that module to load. 
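 * On 82598 the PHY is reset instead of calling setup_sfp(). If an
 * unsupported module is detected on an already registered netdev, the
 * device is unregistered below so the driver can be reloaded once a
 * supported module is installed.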
7079 */ 7080 if (hw->mac.type == ixgbe_mac_82598EB) 7081 err = hw->phy.ops.reset(hw); 7082 else 7083 err = hw->mac.ops.setup_sfp(hw); 7084 7085 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) 7086 goto sfp_out; 7087 7088 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 7089 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); 7090 7091 sfp_out: 7092 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 7093 7094 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && 7095 (adapter->netdev->reg_state == NETREG_REGISTERED)) { 7096 e_dev_err("failed to initialize because an unsupported " 7097 "SFP+ module type was detected.\n"); 7098 e_dev_err("Reload the driver after installing a " 7099 "supported module.\n"); 7100 unregister_netdev(adapter->netdev); 7101 } 7102 } 7103 7104 /** 7105 * ixgbe_sfp_link_config_subtask - set up link SFP after module install 7106 * @adapter: the ixgbe adapter structure 7107 **/ 7108 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) 7109 { 7110 struct ixgbe_hw *hw = &adapter->hw; 7111 u32 speed; 7112 bool autoneg = false; 7113 7114 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) 7115 return; 7116 7117 /* someone else is in init, wait until next service event */ 7118 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 7119 return; 7120 7121 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 7122 7123 speed = hw->phy.autoneg_advertised; 7124 if ((!speed) && (hw->mac.ops.get_link_capabilities)) { 7125 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); 7126 7127 /* setup the highest link when no autoneg */ 7128 if (!autoneg) { 7129 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 7130 speed = IXGBE_LINK_SPEED_10GB_FULL; 7131 } 7132 } 7133 7134 if (hw->mac.ops.setup_link) 7135 hw->mac.ops.setup_link(hw, speed, true); 7136 7137 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 7138 adapter->link_check_timeout = jiffies; 7139 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 7140 } 7141 7142 /** 7143 * ixgbe_service_timer - Timer Call-back 7144 * @data: pointer to adapter cast into an unsigned long 7145 **/ 7146 static void ixgbe_service_timer(unsigned long data) 7147 { 7148 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 7149 unsigned long next_event_offset; 7150 7151 /* poll faster when waiting for link */ 7152 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) 7153 next_event_offset = HZ / 10; 7154 else 7155 next_event_offset = HZ * 2; 7156 7157 /* Reset the timer */ 7158 mod_timer(&adapter->service_timer, next_event_offset + jiffies); 7159 7160 ixgbe_service_event_schedule(adapter); 7161 } 7162 7163 static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) 7164 { 7165 struct ixgbe_hw *hw = &adapter->hw; 7166 u32 status; 7167 7168 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) 7169 return; 7170 7171 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; 7172 7173 if (!hw->phy.ops.handle_lasi) 7174 return; 7175 7176 status = hw->phy.ops.handle_lasi(&adapter->hw); 7177 if (status != IXGBE_ERR_OVERTEMP) 7178 return; 7179 7180 e_crit(drv, "%s\n", ixgbe_overheat_msg); 7181 } 7182 7183 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) 7184 { 7185 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) 7186 return; 7187 7188 /* If we're already down, removing or resetting, just bail */ 7189 if (test_bit(__IXGBE_DOWN, &adapter->state) || 7190 test_bit(__IXGBE_REMOVING, &adapter->state) || 7191 test_bit(__IXGBE_RESETTING, &adapter->state)) 7192 return; 7193 7194 ixgbe_dump(adapter); 7195 netdev_err(adapter->netdev, "Reset 
adapter\n"); 7196 adapter->tx_timeout_count++; 7197 7198 rtnl_lock(); 7199 ixgbe_reinit_locked(adapter); 7200 rtnl_unlock(); 7201 } 7202 7203 /** 7204 * ixgbe_service_task - manages and runs subtasks 7205 * @work: pointer to work_struct containing our data 7206 **/ 7207 static void ixgbe_service_task(struct work_struct *work) 7208 { 7209 struct ixgbe_adapter *adapter = container_of(work, 7210 struct ixgbe_adapter, 7211 service_task); 7212 if (ixgbe_removed(adapter->hw.hw_addr)) { 7213 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 7214 rtnl_lock(); 7215 ixgbe_down(adapter); 7216 rtnl_unlock(); 7217 } 7218 ixgbe_service_event_complete(adapter); 7219 return; 7220 } 7221 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { 7222 rtnl_lock(); 7223 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; 7224 udp_tunnel_get_rx_info(adapter->netdev); 7225 rtnl_unlock(); 7226 } 7227 ixgbe_reset_subtask(adapter); 7228 ixgbe_phy_interrupt_subtask(adapter); 7229 ixgbe_sfp_detection_subtask(adapter); 7230 ixgbe_sfp_link_config_subtask(adapter); 7231 ixgbe_check_overtemp_subtask(adapter); 7232 ixgbe_watchdog_subtask(adapter); 7233 ixgbe_fdir_reinit_subtask(adapter); 7234 ixgbe_check_hang_subtask(adapter); 7235 7236 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { 7237 ixgbe_ptp_overflow_check(adapter); 7238 ixgbe_ptp_rx_hang(adapter); 7239 } 7240 7241 ixgbe_service_event_complete(adapter); 7242 } 7243 7244 static int ixgbe_tso(struct ixgbe_ring *tx_ring, 7245 struct ixgbe_tx_buffer *first, 7246 u8 *hdr_len) 7247 { 7248 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 7249 struct sk_buff *skb = first->skb; 7250 union { 7251 struct iphdr *v4; 7252 struct ipv6hdr *v6; 7253 unsigned char *hdr; 7254 } ip; 7255 union { 7256 struct tcphdr *tcp; 7257 unsigned char *hdr; 7258 } l4; 7259 u32 paylen, l4_offset; 7260 int err; 7261 7262 if (skb->ip_summed != CHECKSUM_PARTIAL) 7263 return 0; 7264 7265 if (!skb_is_gso(skb)) 7266 return 0; 7267 7268 err = skb_cow_head(skb, 0); 7269 if (err < 0) 7270 return err; 7271 7272 ip.hdr = skb_network_header(skb); 7273 l4.hdr = skb_checksum_start(skb); 7274 7275 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 7276 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 7277 7278 /* initialize outer IP header fields */ 7279 if (ip.v4->version == 4) { 7280 /* IP header will have to cancel out any data that 7281 * is not a part of the outer IP header 7282 */ 7283 ip.v4->check = csum_fold(csum_add(lco_csum(skb), 7284 csum_unfold(l4.tcp->check))); 7285 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 7286 7287 ip.v4->tot_len = 0; 7288 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 7289 IXGBE_TX_FLAGS_CSUM | 7290 IXGBE_TX_FLAGS_IPV4; 7291 } else { 7292 ip.v6->payload_len = 0; 7293 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 7294 IXGBE_TX_FLAGS_CSUM; 7295 } 7296 7297 /* determine offset of inner transport header */ 7298 l4_offset = l4.hdr - skb->data; 7299 7300 /* compute length of segmentation header */ 7301 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 7302 7303 /* remove payload length from inner checksum */ 7304 paylen = skb->len - l4_offset; 7305 csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); 7306 7307 /* update gso size and bytecount with header size */ 7308 first->gso_segs = skb_shinfo(skb)->gso_segs; 7309 first->bytecount += (first->gso_segs - 1) * *hdr_len; 7310 7311 /* mss_l4len_id: use 0 as index for TSO */ 7312 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; 7313 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 7314 7315 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag 
*/ 7316 vlan_macip_lens = l4.hdr - ip.hdr; 7317 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; 7318 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 7319 7320 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 7321 mss_l4len_idx); 7322 7323 return 1; 7324 } 7325 7326 static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) 7327 { 7328 unsigned int offset = 0; 7329 7330 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); 7331 7332 return offset == skb_checksum_start_offset(skb); 7333 } 7334 7335 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, 7336 struct ixgbe_tx_buffer *first) 7337 { 7338 struct sk_buff *skb = first->skb; 7339 u32 vlan_macip_lens = 0; 7340 u32 type_tucmd = 0; 7341 7342 if (skb->ip_summed != CHECKSUM_PARTIAL) { 7343 csum_failed: 7344 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | 7345 IXGBE_TX_FLAGS_CC))) 7346 return; 7347 goto no_csum; 7348 } 7349 7350 switch (skb->csum_offset) { 7351 case offsetof(struct tcphdr, check): 7352 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 7353 /* fall through */ 7354 case offsetof(struct udphdr, check): 7355 break; 7356 case offsetof(struct sctphdr, checksum): 7357 /* validate that this is actually an SCTP request */ 7358 if (((first->protocol == htons(ETH_P_IP)) && 7359 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || 7360 ((first->protocol == htons(ETH_P_IPV6)) && 7361 ixgbe_ipv6_csum_is_sctp(skb))) { 7362 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; 7363 break; 7364 } 7365 /* fall through */ 7366 default: 7367 skb_checksum_help(skb); 7368 goto csum_failed; 7369 } 7370 7371 /* update TX checksum flag */ 7372 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 7373 vlan_macip_lens = skb_checksum_start_offset(skb) - 7374 skb_network_offset(skb); 7375 no_csum: 7376 /* vlan_macip_lens: MACLEN, VLAN tag */ 7377 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 7378 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 7379 7380 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); 7381 } 7382 7383 #define IXGBE_SET_FLAG(_input, _flag, _result) \ 7384 ((_flag <= _result) ? 
\ 7385 ((u32)(_input & _flag) * (_result / _flag)) : \ 7386 ((u32)(_input & _flag) / (_flag / _result))) 7387 7388 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 7389 { 7390 /* set type for advanced descriptor with frame checksum insertion */ 7391 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | 7392 IXGBE_ADVTXD_DCMD_DEXT | 7393 IXGBE_ADVTXD_DCMD_IFCS; 7394 7395 /* set HW vlan bit if vlan is present */ 7396 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, 7397 IXGBE_ADVTXD_DCMD_VLE); 7398 7399 /* set segmentation enable bits for TSO/FSO */ 7400 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, 7401 IXGBE_ADVTXD_DCMD_TSE); 7402 7403 /* set timestamp bit if present */ 7404 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, 7405 IXGBE_ADVTXD_MAC_TSTAMP); 7406 7407 /* insert frame checksum */ 7408 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); 7409 7410 return cmd_type; 7411 } 7412 7413 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, 7414 u32 tx_flags, unsigned int paylen) 7415 { 7416 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; 7417 7418 /* enable L4 checksum for TSO and TX checksum offload */ 7419 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 7420 IXGBE_TX_FLAGS_CSUM, 7421 IXGBE_ADVTXD_POPTS_TXSM); 7422 7423 /* enable IPv4 checksum for TSO */ 7424 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 7425 IXGBE_TX_FLAGS_IPV4, 7426 IXGBE_ADVTXD_POPTS_IXSM); 7427 7428 /* 7429 * Check Context must be set if Tx switch is enabled, which it 7430 * always is when virtual functions are running 7431 */ 7432 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 7433 IXGBE_TX_FLAGS_CC, 7434 IXGBE_ADVTXD_CC); 7435 7436 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 7437 } 7438 7439 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) 7440 { 7441 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 7442 7443 /* Herbert's original patch had: 7444 * smp_mb__after_netif_stop_queue(); 7445 * but since that doesn't exist yet, just open code it. 7446 */ 7447 smp_mb(); 7448 7449 /* We need to check again in case another CPU has just 7450 * made room available. 7451 */ 7452 if (likely(ixgbe_desc_unused(tx_ring) < size)) 7453 return -EBUSY; 7454 7455 /* A reprieve!
- use start_queue because it doesn't call schedule */ 7456 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 7457 ++tx_ring->tx_stats.restart_queue; 7458 return 0; 7459 } 7460 7461 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) 7462 { 7463 if (likely(ixgbe_desc_unused(tx_ring) >= size)) 7464 return 0; 7465 7466 return __ixgbe_maybe_stop_tx(tx_ring, size); 7467 } 7468 7469 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ 7470 IXGBE_TXD_CMD_RS) 7471 7472 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, 7473 struct ixgbe_tx_buffer *first, 7474 const u8 hdr_len) 7475 { 7476 struct sk_buff *skb = first->skb; 7477 struct ixgbe_tx_buffer *tx_buffer; 7478 union ixgbe_adv_tx_desc *tx_desc; 7479 struct skb_frag_struct *frag; 7480 dma_addr_t dma; 7481 unsigned int data_len, size; 7482 u32 tx_flags = first->tx_flags; 7483 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); 7484 u16 i = tx_ring->next_to_use; 7485 7486 tx_desc = IXGBE_TX_DESC(tx_ring, i); 7487 7488 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); 7489 7490 size = skb_headlen(skb); 7491 data_len = skb->data_len; 7492 7493 #ifdef IXGBE_FCOE 7494 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 7495 if (data_len < sizeof(struct fcoe_crc_eof)) { 7496 size -= sizeof(struct fcoe_crc_eof) - data_len; 7497 data_len = 0; 7498 } else { 7499 data_len -= sizeof(struct fcoe_crc_eof); 7500 } 7501 } 7502 7503 #endif 7504 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 7505 7506 tx_buffer = first; 7507 7508 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 7509 if (dma_mapping_error(tx_ring->dev, dma)) 7510 goto dma_error; 7511 7512 /* record length, and DMA address */ 7513 dma_unmap_len_set(tx_buffer, len, size); 7514 dma_unmap_addr_set(tx_buffer, dma, dma); 7515 7516 tx_desc->read.buffer_addr = cpu_to_le64(dma); 7517 7518 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { 7519 tx_desc->read.cmd_type_len = 7520 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); 7521 7522 i++; 7523 tx_desc++; 7524 if (i == tx_ring->count) { 7525 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 7526 i = 0; 7527 } 7528 tx_desc->read.olinfo_status = 0; 7529 7530 dma += IXGBE_MAX_DATA_PER_TXD; 7531 size -= IXGBE_MAX_DATA_PER_TXD; 7532 7533 tx_desc->read.buffer_addr = cpu_to_le64(dma); 7534 } 7535 7536 if (likely(!data_len)) 7537 break; 7538 7539 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 7540 7541 i++; 7542 tx_desc++; 7543 if (i == tx_ring->count) { 7544 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 7545 i = 0; 7546 } 7547 tx_desc->read.olinfo_status = 0; 7548 7549 #ifdef IXGBE_FCOE 7550 size = min_t(unsigned int, data_len, skb_frag_size(frag)); 7551 #else 7552 size = skb_frag_size(frag); 7553 #endif 7554 data_len -= size; 7555 7556 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 7557 DMA_TO_DEVICE); 7558 7559 tx_buffer = &tx_ring->tx_buffer_info[i]; 7560 } 7561 7562 /* write last descriptor with RS and EOP bits */ 7563 cmd_type |= size | IXGBE_TXD_CMD; 7564 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 7565 7566 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 7567 7568 /* set the timestamp */ 7569 first->time_stamp = jiffies; 7570 7571 /* 7572 * Force memory writes to complete before letting h/w know there 7573 * are new descriptors to fetch. (Only applicable for weak-ordered 7574 * memory model archs, such as IA-64). 7575 * 7576 * We also need this memory barrier to make certain all of the 7577 * status bits have been updated before next_to_watch is written. 
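	 * (The Tx cleanup path reads next_to_watch, and then the descriptor's
	 * DD status bit, to decide whether this packet has completed.)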
7578 */ 7579 wmb(); 7580 7581 /* set next_to_watch value indicating a packet is present */ 7582 first->next_to_watch = tx_desc; 7583 7584 i++; 7585 if (i == tx_ring->count) 7586 i = 0; 7587 7588 tx_ring->next_to_use = i; 7589 7590 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); 7591 7592 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { 7593 writel(i, tx_ring->tail); 7594 7595 /* we need this if more than one processor can write to our tail 7596 * at a time, it synchronizes IO on IA64/Altix systems 7597 */ 7598 mmiowb(); 7599 } 7600 7601 return; 7602 dma_error: 7603 dev_err(tx_ring->dev, "TX DMA map failed\n"); 7604 7605 /* clear dma mappings for failed tx_buffer_info map */ 7606 for (;;) { 7607 tx_buffer = &tx_ring->tx_buffer_info[i]; 7608 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); 7609 if (tx_buffer == first) 7610 break; 7611 if (i == 0) 7612 i = tx_ring->count; 7613 i--; 7614 } 7615 7616 tx_ring->next_to_use = i; 7617 } 7618 7619 static void ixgbe_atr(struct ixgbe_ring *ring, 7620 struct ixgbe_tx_buffer *first) 7621 { 7622 struct ixgbe_q_vector *q_vector = ring->q_vector; 7623 union ixgbe_atr_hash_dword input = { .dword = 0 }; 7624 union ixgbe_atr_hash_dword common = { .dword = 0 }; 7625 union { 7626 unsigned char *network; 7627 struct iphdr *ipv4; 7628 struct ipv6hdr *ipv6; 7629 } hdr; 7630 struct tcphdr *th; 7631 unsigned int hlen; 7632 struct sk_buff *skb; 7633 __be16 vlan_id; 7634 int l4_proto; 7635 7636 /* if ring doesn't have a interrupt vector, cannot perform ATR */ 7637 if (!q_vector) 7638 return; 7639 7640 /* do nothing if sampling is disabled */ 7641 if (!ring->atr_sample_rate) 7642 return; 7643 7644 ring->atr_count++; 7645 7646 /* currently only IPv4/IPv6 with TCP is supported */ 7647 if ((first->protocol != htons(ETH_P_IP)) && 7648 (first->protocol != htons(ETH_P_IPV6))) 7649 return; 7650 7651 /* snag network header to get L4 type and address */ 7652 skb = first->skb; 7653 hdr.network = skb_network_header(skb); 7654 if (skb->encapsulation && 7655 first->protocol == htons(ETH_P_IP) && 7656 hdr.ipv4->protocol != IPPROTO_UDP) { 7657 struct ixgbe_adapter *adapter = q_vector->adapter; 7658 7659 /* verify the port is recognized as VXLAN */ 7660 if (adapter->vxlan_port && 7661 udp_hdr(skb)->dest == adapter->vxlan_port) 7662 hdr.network = skb_inner_network_header(skb); 7663 7664 if (adapter->geneve_port && 7665 udp_hdr(skb)->dest == adapter->geneve_port) 7666 hdr.network = skb_inner_network_header(skb); 7667 } 7668 7669 /* Currently only IPv4/IPv6 with TCP is supported */ 7670 switch (hdr.ipv4->version) { 7671 case IPVERSION: 7672 /* access ihl as u8 to avoid unaligned access on ia64 */ 7673 hlen = (hdr.network[0] & 0x0F) << 2; 7674 l4_proto = hdr.ipv4->protocol; 7675 break; 7676 case 6: 7677 hlen = hdr.network - skb->data; 7678 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); 7679 hlen -= hdr.network - skb->data; 7680 break; 7681 default: 7682 return; 7683 } 7684 7685 if (l4_proto != IPPROTO_TCP) 7686 return; 7687 7688 th = (struct tcphdr *)(hdr.network + hlen); 7689 7690 /* skip this packet since the socket is closing */ 7691 if (th->fin) 7692 return; 7693 7694 /* sample on all syn packets or once every atr sample count */ 7695 if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) 7696 return; 7697 7698 /* reset sample count */ 7699 ring->atr_count = 0; 7700 7701 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); 7702 7703 /* 7704 * src and dst are inverted, think how the receiver sees them 7705 * 7706 * The input is broken into 
two sections, a non-compressed section 7707 * containing vm_pool, vlan_id, and flow_type. The rest of the data 7708 * is XORed together and stored in the compressed dword. 7709 */ 7710 input.formatted.vlan_id = vlan_id; 7711 7712 /* 7713 * since src port and flex bytes occupy the same word XOR them together 7714 * and write the value to source port portion of compressed dword 7715 */ 7716 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) 7717 common.port.src ^= th->dest ^ htons(ETH_P_8021Q); 7718 else 7719 common.port.src ^= th->dest ^ first->protocol; 7720 common.port.dst ^= th->source; 7721 7722 switch (hdr.ipv4->version) { 7723 case IPVERSION: 7724 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 7725 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; 7726 break; 7727 case 6: 7728 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; 7729 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ 7730 hdr.ipv6->saddr.s6_addr32[1] ^ 7731 hdr.ipv6->saddr.s6_addr32[2] ^ 7732 hdr.ipv6->saddr.s6_addr32[3] ^ 7733 hdr.ipv6->daddr.s6_addr32[0] ^ 7734 hdr.ipv6->daddr.s6_addr32[1] ^ 7735 hdr.ipv6->daddr.s6_addr32[2] ^ 7736 hdr.ipv6->daddr.s6_addr32[3]; 7737 break; 7738 default: 7739 break; 7740 } 7741 7742 if (hdr.network != skb_network_header(skb)) 7743 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; 7744 7745 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 7746 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, 7747 input, common, ring->queue_index); 7748 } 7749 7750 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, 7751 void *accel_priv, select_queue_fallback_t fallback) 7752 { 7753 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; 7754 #ifdef IXGBE_FCOE 7755 struct ixgbe_adapter *adapter; 7756 struct ixgbe_ring_feature *f; 7757 int txq; 7758 #endif 7759 7760 if (fwd_adapter) 7761 return skb->queue_mapping + fwd_adapter->tx_base_queue; 7762 7763 #ifdef IXGBE_FCOE 7764 7765 /* 7766 * only execute the code below if protocol is FCoE 7767 * or FIP and we have FCoE enabled on the adapter 7768 */ 7769 switch (vlan_get_protocol(skb)) { 7770 case htons(ETH_P_FCOE): 7771 case htons(ETH_P_FIP): 7772 adapter = netdev_priv(dev); 7773 7774 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 7775 break; 7776 default: 7777 return fallback(dev, skb); 7778 } 7779 7780 f = &adapter->ring_feature[RING_F_FCOE]; 7781 7782 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 7783 smp_processor_id(); 7784 7785 while (txq >= f->indices) 7786 txq -= f->indices; 7787 7788 return txq + f->offset; 7789 #else 7790 return fallback(dev, skb); 7791 #endif 7792 } 7793 7794 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 7795 struct ixgbe_adapter *adapter, 7796 struct ixgbe_ring *tx_ring) 7797 { 7798 struct ixgbe_tx_buffer *first; 7799 int tso; 7800 u32 tx_flags = 0; 7801 unsigned short f; 7802 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 7803 __be16 protocol = skb->protocol; 7804 u8 hdr_len = 0; 7805 7806 /* 7807 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 7808 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 7809 * + 2 desc gap to keep tail from touching head, 7810 * + 1 desc for context descriptor, 7811 * otherwise try next time 7812 */ 7813 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 7814 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 7815 7816 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { 7817 tx_ring->tx_stats.tx_busy++; 7818 return NETDEV_TX_BUSY; 7819 } 7820 7821 /* record the location of the first descriptor for this packet */ 7822 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 7823 first->skb = skb; 7824 first->bytecount = skb->len; 7825 first->gso_segs = 1; 7826 7827 /* if we have a HW VLAN tag being added default to the HW one */ 7828 if (skb_vlan_tag_present(skb)) { 7829 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; 7830 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; 7831 /* else if it is a SW VLAN check the next protocol and store the tag */ 7832 } else if (protocol == htons(ETH_P_8021Q)) { 7833 struct vlan_hdr *vhdr, _vhdr; 7834 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); 7835 if (!vhdr) 7836 goto out_drop; 7837 7838 tx_flags |= ntohs(vhdr->h_vlan_TCI) << 7839 IXGBE_TX_FLAGS_VLAN_SHIFT; 7840 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 7841 } 7842 protocol = vlan_get_protocol(skb); 7843 7844 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 7845 adapter->ptp_clock && 7846 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, 7847 &adapter->state)) { 7848 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 7849 tx_flags |= IXGBE_TX_FLAGS_TSTAMP; 7850 7851 /* schedule check for Tx timestamp */ 7852 adapter->ptp_tx_skb = skb_get(skb); 7853 adapter->ptp_tx_start = jiffies; 7854 schedule_work(&adapter->ptp_tx_work); 7855 } 7856 7857 skb_tx_timestamp(skb); 7858 7859 #ifdef CONFIG_PCI_IOV 7860 /* 7861 * Use the l2switch_enable flag - would be false if the DMA 7862 * Tx switch had been disabled. 7863 */ 7864 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7865 tx_flags |= IXGBE_TX_FLAGS_CC; 7866 7867 #endif 7868 /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. 
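	 * The priority is carried in the PCP field of the VLAN tag: an
	 * existing software VLAN header is rewritten in place below,
	 * otherwise hardware VLAN insertion is requested to carry it.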
*/ 7869 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 7870 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || 7871 (skb->priority != TC_PRIO_CONTROL))) { 7872 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; 7873 tx_flags |= (skb->priority & 0x7) << 7874 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 7875 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 7876 struct vlan_ethhdr *vhdr; 7877 7878 if (skb_cow_head(skb, 0)) 7879 goto out_drop; 7880 vhdr = (struct vlan_ethhdr *)skb->data; 7881 vhdr->h_vlan_TCI = htons(tx_flags >> 7882 IXGBE_TX_FLAGS_VLAN_SHIFT); 7883 } else { 7884 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; 7885 } 7886 } 7887 7888 /* record initial flags and protocol */ 7889 first->tx_flags = tx_flags; 7890 first->protocol = protocol; 7891 7892 #ifdef IXGBE_FCOE 7893 /* setup tx offload for FCoE */ 7894 if ((protocol == htons(ETH_P_FCOE)) && 7895 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { 7896 tso = ixgbe_fso(tx_ring, first, &hdr_len); 7897 if (tso < 0) 7898 goto out_drop; 7899 7900 goto xmit_fcoe; 7901 } 7902 7903 #endif /* IXGBE_FCOE */ 7904 tso = ixgbe_tso(tx_ring, first, &hdr_len); 7905 if (tso < 0) 7906 goto out_drop; 7907 else if (!tso) 7908 ixgbe_tx_csum(tx_ring, first); 7909 7910 /* add the ATR filter if ATR is on */ 7911 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) 7912 ixgbe_atr(tx_ring, first); 7913 7914 #ifdef IXGBE_FCOE 7915 xmit_fcoe: 7916 #endif /* IXGBE_FCOE */ 7917 ixgbe_tx_map(tx_ring, first, hdr_len); 7918 7919 return NETDEV_TX_OK; 7920 7921 out_drop: 7922 dev_kfree_skb_any(first->skb); 7923 first->skb = NULL; 7924 7925 return NETDEV_TX_OK; 7926 } 7927 7928 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, 7929 struct net_device *netdev, 7930 struct ixgbe_ring *ring) 7931 { 7932 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7933 struct ixgbe_ring *tx_ring; 7934 7935 /* 7936 * The minimum packet size for olinfo paylen is 17 so pad the skb 7937 * in order to meet this minimum size requirement. 7938 */ 7939 if (skb_put_padto(skb, 17)) 7940 return NETDEV_TX_OK; 7941 7942 tx_ring = ring ? 
ring : adapter->tx_ring[skb->queue_mapping]; 7943 7944 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); 7945 } 7946 7947 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 7948 struct net_device *netdev) 7949 { 7950 return __ixgbe_xmit_frame(skb, netdev, NULL); 7951 } 7952 7953 /** 7954 * ixgbe_set_mac - Change the Ethernet Address of the NIC 7955 * @netdev: network interface device structure 7956 * @p: pointer to an address structure 7957 * 7958 * Returns 0 on success, negative on failure 7959 **/ 7960 static int ixgbe_set_mac(struct net_device *netdev, void *p) 7961 { 7962 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7963 struct ixgbe_hw *hw = &adapter->hw; 7964 struct sockaddr *addr = p; 7965 7966 if (!is_valid_ether_addr(addr->sa_data)) 7967 return -EADDRNOTAVAIL; 7968 7969 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 7970 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 7971 7972 ixgbe_mac_set_default_filter(adapter); 7973 7974 return 0; 7975 } 7976 7977 static int 7978 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) 7979 { 7980 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7981 struct ixgbe_hw *hw = &adapter->hw; 7982 u16 value; 7983 int rc; 7984 7985 if (prtad != hw->phy.mdio.prtad) 7986 return -EINVAL; 7987 rc = hw->phy.ops.read_reg(hw, addr, devad, &value); 7988 if (!rc) 7989 rc = value; 7990 return rc; 7991 } 7992 7993 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, 7994 u16 addr, u16 value) 7995 { 7996 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7997 struct ixgbe_hw *hw = &adapter->hw; 7998 7999 if (prtad != hw->phy.mdio.prtad) 8000 return -EINVAL; 8001 return hw->phy.ops.write_reg(hw, addr, devad, value); 8002 } 8003 8004 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) 8005 { 8006 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8007 8008 switch (cmd) { 8009 case SIOCSHWTSTAMP: 8010 return ixgbe_ptp_set_ts_config(adapter, req); 8011 case SIOCGHWTSTAMP: 8012 return ixgbe_ptp_get_ts_config(adapter, req); 8013 default: 8014 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 8015 } 8016 } 8017 8018 /** 8019 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding 8020 * netdev->dev_addrs 8021 * @netdev: network interface device structure 8022 * 8023 * Returns non-zero on failure 8024 **/ 8025 static int ixgbe_add_sanmac_netdev(struct net_device *dev) 8026 { 8027 int err = 0; 8028 struct ixgbe_adapter *adapter = netdev_priv(dev); 8029 struct ixgbe_hw *hw = &adapter->hw; 8030 8031 if (is_valid_ether_addr(hw->mac.san_addr)) { 8032 rtnl_lock(); 8033 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); 8034 rtnl_unlock(); 8035 8036 /* update SAN MAC vmdq pool selection */ 8037 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 8038 } 8039 return err; 8040 } 8041 8042 /** 8043 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding 8044 * netdev->dev_addrs 8045 * @netdev: network interface device structure 8046 * 8047 * Returns non-zero on failure 8048 **/ 8049 static int ixgbe_del_sanmac_netdev(struct net_device *dev) 8050 { 8051 int err = 0; 8052 struct ixgbe_adapter *adapter = netdev_priv(dev); 8053 struct ixgbe_mac_info *mac = &adapter->hw.mac; 8054 8055 if (is_valid_ether_addr(mac->san_addr)) { 8056 rtnl_lock(); 8057 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); 8058 rtnl_unlock(); 8059 } 8060 return err; 8061 } 8062 8063 #ifdef CONFIG_NET_POLL_CONTROLLER 8064 /* 8065 * 
Polling 'interrupt' - used by things like netconsole to send skbs 8066 * without having to re-enable interrupts. It's not called while 8067 * the interrupt routine is executing. 8068 */ 8069 static void ixgbe_netpoll(struct net_device *netdev) 8070 { 8071 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8072 int i; 8073 8074 /* if interface is down do nothing */ 8075 if (test_bit(__IXGBE_DOWN, &adapter->state)) 8076 return; 8077 8078 /* loop through and schedule all active queues */ 8079 for (i = 0; i < adapter->num_q_vectors; i++) 8080 ixgbe_msix_clean_rings(0, adapter->q_vector[i]); 8081 } 8082 8083 #endif 8084 static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, 8085 struct rtnl_link_stats64 *stats) 8086 { 8087 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8088 int i; 8089 8090 rcu_read_lock(); 8091 for (i = 0; i < adapter->num_rx_queues; i++) { 8092 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); 8093 u64 bytes, packets; 8094 unsigned int start; 8095 8096 if (ring) { 8097 do { 8098 start = u64_stats_fetch_begin_irq(&ring->syncp); 8099 packets = ring->stats.packets; 8100 bytes = ring->stats.bytes; 8101 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 8102 stats->rx_packets += packets; 8103 stats->rx_bytes += bytes; 8104 } 8105 } 8106 8107 for (i = 0; i < adapter->num_tx_queues; i++) { 8108 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); 8109 u64 bytes, packets; 8110 unsigned int start; 8111 8112 if (ring) { 8113 do { 8114 start = u64_stats_fetch_begin_irq(&ring->syncp); 8115 packets = ring->stats.packets; 8116 bytes = ring->stats.bytes; 8117 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 8118 stats->tx_packets += packets; 8119 stats->tx_bytes += bytes; 8120 } 8121 } 8122 rcu_read_unlock(); 8123 /* following stats updated by ixgbe_watchdog_task() */ 8124 stats->multicast = netdev->stats.multicast; 8125 stats->rx_errors = netdev->stats.rx_errors; 8126 stats->rx_length_errors = netdev->stats.rx_length_errors; 8127 stats->rx_crc_errors = netdev->stats.rx_crc_errors; 8128 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 8129 return stats; 8130 } 8131 8132 #ifdef CONFIG_IXGBE_DCB 8133 /** 8134 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 8135 * @adapter: pointer to ixgbe_adapter 8136 * @tc: number of traffic classes currently enabled 8137 * 8138 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm 8139 * 802.1Q priority maps to a packet buffer that exists. 8140 */ 8141 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) 8142 { 8143 struct ixgbe_hw *hw = &adapter->hw; 8144 u32 reg, rsave; 8145 int i; 8146 8147 /* 82598 have a static priority to TC mapping that can not 8148 * be changed so no validation is needed. 
8149 */ 8150 if (hw->mac.type == ixgbe_mac_82598EB) 8151 return; 8152 8153 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); 8154 rsave = reg; 8155 8156 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 8157 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); 8158 8159 /* If up2tc is out of bounds default to zero */ 8160 if (up2tc > tc) 8161 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); 8162 } 8163 8164 if (reg != rsave) 8165 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); 8166 8167 return; 8168 } 8169 8170 /** 8171 * ixgbe_set_prio_tc_map - Configure netdev prio tc map 8172 * @adapter: Pointer to adapter struct 8173 * 8174 * Populate the netdev user priority to tc map 8175 */ 8176 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) 8177 { 8178 struct net_device *dev = adapter->netdev; 8179 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; 8180 struct ieee_ets *ets = adapter->ixgbe_ieee_ets; 8181 u8 prio; 8182 8183 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { 8184 u8 tc = 0; 8185 8186 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) 8187 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); 8188 else if (ets) 8189 tc = ets->prio_tc[prio]; 8190 8191 netdev_set_prio_tc_map(dev, prio, tc); 8192 } 8193 } 8194 8195 #endif /* CONFIG_IXGBE_DCB */ 8196 /** 8197 * ixgbe_setup_tc - configure net_device for multiple traffic classes 8198 * 8199 * @netdev: net device to configure 8200 * @tc: number of traffic classes to enable 8201 */ 8202 int ixgbe_setup_tc(struct net_device *dev, u8 tc) 8203 { 8204 struct ixgbe_adapter *adapter = netdev_priv(dev); 8205 struct ixgbe_hw *hw = &adapter->hw; 8206 bool pools; 8207 8208 /* Hardware supports up to 8 traffic classes */ 8209 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) 8210 return -EINVAL; 8211 8212 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) 8213 return -EINVAL; 8214 8215 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); 8216 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) 8217 return -EBUSY; 8218 8219 /* Hardware has to reinitialize queues and interrupts to 8220 * match packet buffer alignment. Unfortunately, the 8221 * hardware is not flexible enough to do this dynamically. 
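	 * The interface is therefore closed here if it is running, the
	 * interrupt scheme is rebuilt, and the interface is reopened at
	 * the end of this function.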
8222 */ 8223 if (netif_running(dev)) 8224 ixgbe_close(dev); 8225 else 8226 ixgbe_reset(adapter); 8227 8228 ixgbe_clear_interrupt_scheme(adapter); 8229 8230 #ifdef CONFIG_IXGBE_DCB 8231 if (tc) { 8232 netdev_set_num_tc(dev, tc); 8233 ixgbe_set_prio_tc_map(adapter); 8234 8235 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 8236 8237 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 8238 adapter->last_lfc_mode = adapter->hw.fc.requested_mode; 8239 adapter->hw.fc.requested_mode = ixgbe_fc_none; 8240 } 8241 } else { 8242 netdev_reset_tc(dev); 8243 8244 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 8245 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 8246 8247 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 8248 8249 adapter->temp_dcb_cfg.pfc_mode_enable = false; 8250 adapter->dcb_cfg.pfc_mode_enable = false; 8251 } 8252 8253 ixgbe_validate_rtr(adapter, tc); 8254 8255 #endif /* CONFIG_IXGBE_DCB */ 8256 ixgbe_init_interrupt_scheme(adapter); 8257 8258 if (netif_running(dev)) 8259 return ixgbe_open(dev); 8260 8261 return 0; 8262 } 8263 8264 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, 8265 struct tc_cls_u32_offload *cls) 8266 { 8267 u32 hdl = cls->knode.handle; 8268 u32 uhtid = TC_U32_USERHTID(cls->knode.handle); 8269 u32 loc = cls->knode.handle & 0xfffff; 8270 int err = 0, i, j; 8271 struct ixgbe_jump_table *jump = NULL; 8272 8273 if (loc > IXGBE_MAX_HW_ENTRIES) 8274 return -EINVAL; 8275 8276 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) 8277 return -EINVAL; 8278 8279 /* Clear this filter in the link data it is associated with */ 8280 if (uhtid != 0x800) { 8281 jump = adapter->jump_tables[uhtid]; 8282 if (!jump) 8283 return -EINVAL; 8284 if (!test_bit(loc - 1, jump->child_loc_map)) 8285 return -EINVAL; 8286 clear_bit(loc - 1, jump->child_loc_map); 8287 } 8288 8289 /* Check if the filter being deleted is a link */ 8290 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { 8291 jump = adapter->jump_tables[i]; 8292 if (jump && jump->link_hdl == hdl) { 8293 /* Delete filters in the hardware in the child hash 8294 * table associated with this link 8295 */ 8296 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { 8297 if (!test_bit(j, jump->child_loc_map)) 8298 continue; 8299 spin_lock(&adapter->fdir_perfect_lock); 8300 err = ixgbe_update_ethtool_fdir_entry(adapter, 8301 NULL, 8302 j + 1); 8303 spin_unlock(&adapter->fdir_perfect_lock); 8304 clear_bit(j, jump->child_loc_map); 8305 } 8306 /* Remove resources for this link */ 8307 kfree(jump->input); 8308 kfree(jump->mask); 8309 kfree(jump); 8310 adapter->jump_tables[i] = NULL; 8311 return err; 8312 } 8313 } 8314 8315 spin_lock(&adapter->fdir_perfect_lock); 8316 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); 8317 spin_unlock(&adapter->fdir_perfect_lock); 8318 return err; 8319 } 8320 8321 static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, 8322 __be16 protocol, 8323 struct tc_cls_u32_offload *cls) 8324 { 8325 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); 8326 8327 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 8328 return -EINVAL; 8329 8330 /* This ixgbe devices do not support hash tables at the moment 8331 * so abort when given hash tables. 
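	 * A non-zero divisor is what identifies a hash table request.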
8332 */ 8333 if (cls->hnode.divisor > 0) 8334 return -EINVAL; 8335 8336 set_bit(uhtid - 1, &adapter->tables); 8337 return 0; 8338 } 8339 8340 static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, 8341 struct tc_cls_u32_offload *cls) 8342 { 8343 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); 8344 8345 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 8346 return -EINVAL; 8347 8348 clear_bit(uhtid - 1, &adapter->tables); 8349 return 0; 8350 } 8351 8352 #ifdef CONFIG_NET_CLS_ACT 8353 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, 8354 u8 *queue, u64 *action) 8355 { 8356 unsigned int num_vfs = adapter->num_vfs, vf; 8357 struct net_device *upper; 8358 struct list_head *iter; 8359 8360 /* redirect to a SRIOV VF */ 8361 for (vf = 0; vf < num_vfs; ++vf) { 8362 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); 8363 if (upper->ifindex == ifindex) { 8364 if (adapter->num_rx_pools > 1) 8365 *queue = vf * 2; 8366 else 8367 *queue = vf * adapter->num_rx_queues_per_pool; 8368 8369 *action = vf + 1; 8370 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 8371 return 0; 8372 } 8373 } 8374 8375 /* redirect to a offloaded macvlan netdev */ 8376 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { 8377 if (netif_is_macvlan(upper)) { 8378 struct macvlan_dev *dfwd = netdev_priv(upper); 8379 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; 8380 8381 if (vadapter && vadapter->netdev->ifindex == ifindex) { 8382 *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; 8383 *action = *queue; 8384 return 0; 8385 } 8386 } 8387 } 8388 8389 return -EINVAL; 8390 } 8391 8392 static int parse_tc_actions(struct ixgbe_adapter *adapter, 8393 struct tcf_exts *exts, u64 *action, u8 *queue) 8394 { 8395 const struct tc_action *a; 8396 LIST_HEAD(actions); 8397 int err; 8398 8399 if (tc_no_actions(exts)) 8400 return -EINVAL; 8401 8402 tcf_exts_to_list(exts, &actions); 8403 list_for_each_entry(a, &actions, list) { 8404 8405 /* Drop action */ 8406 if (is_tcf_gact_shot(a)) { 8407 *action = IXGBE_FDIR_DROP_QUEUE; 8408 *queue = IXGBE_FDIR_DROP_QUEUE; 8409 return 0; 8410 } 8411 8412 /* Redirect to a VF or a offloaded macvlan */ 8413 if (is_tcf_mirred_redirect(a)) { 8414 int ifindex = tcf_mirred_ifindex(a); 8415 8416 err = handle_redirect_action(adapter, ifindex, queue, 8417 action); 8418 if (err == 0) 8419 return err; 8420 } 8421 } 8422 8423 return -EINVAL; 8424 } 8425 #else 8426 static int parse_tc_actions(struct ixgbe_adapter *adapter, 8427 struct tcf_exts *exts, u64 *action, u8 *queue) 8428 { 8429 return -EINVAL; 8430 } 8431 #endif /* CONFIG_NET_CLS_ACT */ 8432 8433 static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, 8434 union ixgbe_atr_input *mask, 8435 struct tc_cls_u32_offload *cls, 8436 struct ixgbe_mat_field *field_ptr, 8437 struct ixgbe_nexthdr *nexthdr) 8438 { 8439 int i, j, off; 8440 __be32 val, m; 8441 bool found_entry = false, found_jump_field = false; 8442 8443 for (i = 0; i < cls->knode.sel->nkeys; i++) { 8444 off = cls->knode.sel->keys[i].off; 8445 val = cls->knode.sel->keys[i].val; 8446 m = cls->knode.sel->keys[i].mask; 8447 8448 for (j = 0; field_ptr[j].val; j++) { 8449 if (field_ptr[j].off == off) { 8450 field_ptr[j].val(input, mask, val, m); 8451 input->filter.formatted.flow_type |= 8452 field_ptr[j].type; 8453 found_entry = true; 8454 break; 8455 } 8456 } 8457 if (nexthdr) { 8458 if (nexthdr->off == cls->knode.sel->keys[i].off && 8459 nexthdr->val == cls->knode.sel->keys[i].val && 8460 nexthdr->mask == cls->knode.sel->keys[i].mask) 8461 
found_jump_field = true; 8462 else 8463 continue; 8464 } 8465 } 8466 8467 if (nexthdr && !found_jump_field) 8468 return -EINVAL; 8469 8470 if (!found_entry) 8471 return 0; 8472 8473 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 8474 IXGBE_ATR_L4TYPE_MASK; 8475 8476 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 8477 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 8478 8479 return 0; 8480 } 8481 8482 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, 8483 __be16 protocol, 8484 struct tc_cls_u32_offload *cls) 8485 { 8486 u32 loc = cls->knode.handle & 0xfffff; 8487 struct ixgbe_hw *hw = &adapter->hw; 8488 struct ixgbe_mat_field *field_ptr; 8489 struct ixgbe_fdir_filter *input = NULL; 8490 union ixgbe_atr_input *mask = NULL; 8491 struct ixgbe_jump_table *jump = NULL; 8492 int i, err = -EINVAL; 8493 u8 queue; 8494 u32 uhtid, link_uhtid; 8495 8496 uhtid = TC_U32_USERHTID(cls->knode.handle); 8497 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); 8498 8499 /* At the moment cls_u32 jumps to network layer and skips past 8500 * L2 headers. The canonical method to match L2 frames is to use 8501 * negative values. However this is error prone at best but really 8502 * just broken because there is no way to "know" what sort of hdr 8503 * is in front of the network layer. Fix cls_u32 to support L2 8504 * headers when needed. 8505 */ 8506 if (protocol != htons(ETH_P_IP)) 8507 return err; 8508 8509 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { 8510 e_err(drv, "Location out of range\n"); 8511 return err; 8512 } 8513 8514 /* cls u32 is a graph starting at root node 0x800. The driver tracks 8515 * links and also the fields used to advance the parser across each 8516 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map 8517 * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h 8518 * To add support for new nodes update ixgbe_model.h parse structures 8519 * this function _should_ be generic try not to hardcode values here. 8520 */ 8521 if (uhtid == 0x800) { 8522 field_ptr = (adapter->jump_tables[0])->mat; 8523 } else { 8524 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 8525 return err; 8526 if (!adapter->jump_tables[uhtid]) 8527 return err; 8528 field_ptr = (adapter->jump_tables[uhtid])->mat; 8529 } 8530 8531 if (!field_ptr) 8532 return err; 8533 8534 /* At this point we know the field_ptr is valid and need to either 8535 * build cls_u32 link or attach filter. Because adding a link to 8536 * a handle that does not exist is invalid and the same for adding 8537 * rules to handles that don't exist. 8538 */ 8539 8540 if (link_uhtid) { 8541 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; 8542 8543 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) 8544 return err; 8545 8546 if (!test_bit(link_uhtid - 1, &adapter->tables)) 8547 return err; 8548 8549 /* Multiple filters as links to the same hash table are not 8550 * supported. To add a new filter with the same next header 8551 * but different match/jump conditions, create a new hash table 8552 * and link to it. 
8553 */ 8554 if (adapter->jump_tables[link_uhtid] && 8555 (adapter->jump_tables[link_uhtid])->link_hdl) { 8556 e_err(drv, "Link filter exists for link: %x\n", 8557 link_uhtid); 8558 return err; 8559 } 8560 8561 for (i = 0; nexthdr[i].jump; i++) { 8562 if (nexthdr[i].o != cls->knode.sel->offoff || 8563 nexthdr[i].s != cls->knode.sel->offshift || 8564 nexthdr[i].m != cls->knode.sel->offmask) 8565 return err; 8566 8567 jump = kzalloc(sizeof(*jump), GFP_KERNEL); 8568 if (!jump) 8569 return -ENOMEM; 8570 input = kzalloc(sizeof(*input), GFP_KERNEL); 8571 if (!input) { 8572 err = -ENOMEM; 8573 goto free_jump; 8574 } 8575 mask = kzalloc(sizeof(*mask), GFP_KERNEL); 8576 if (!mask) { 8577 err = -ENOMEM; 8578 goto free_input; 8579 } 8580 jump->input = input; 8581 jump->mask = mask; 8582 jump->link_hdl = cls->knode.handle; 8583 8584 err = ixgbe_clsu32_build_input(input, mask, cls, 8585 field_ptr, &nexthdr[i]); 8586 if (!err) { 8587 jump->mat = nexthdr[i].jump; 8588 adapter->jump_tables[link_uhtid] = jump; 8589 break; 8590 } 8591 } 8592 return 0; 8593 } 8594 8595 input = kzalloc(sizeof(*input), GFP_KERNEL); 8596 if (!input) 8597 return -ENOMEM; 8598 mask = kzalloc(sizeof(*mask), GFP_KERNEL); 8599 if (!mask) { 8600 err = -ENOMEM; 8601 goto free_input; 8602 } 8603 8604 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { 8605 if ((adapter->jump_tables[uhtid])->input) 8606 memcpy(input, (adapter->jump_tables[uhtid])->input, 8607 sizeof(*input)); 8608 if ((adapter->jump_tables[uhtid])->mask) 8609 memcpy(mask, (adapter->jump_tables[uhtid])->mask, 8610 sizeof(*mask)); 8611 8612 /* Lookup in all child hash tables if this location is already 8613 * filled with a filter 8614 */ 8615 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { 8616 struct ixgbe_jump_table *link = adapter->jump_tables[i]; 8617 8618 if (link && (test_bit(loc - 1, link->child_loc_map))) { 8619 e_err(drv, "Filter exists in location: %x\n", 8620 loc); 8621 err = -EINVAL; 8622 goto err_out; 8623 } 8624 } 8625 } 8626 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); 8627 if (err) 8628 goto err_out; 8629 8630 err = parse_tc_actions(adapter, cls->knode.exts, &input->action, 8631 &queue); 8632 if (err < 0) 8633 goto err_out; 8634 8635 input->sw_idx = loc; 8636 8637 spin_lock(&adapter->fdir_perfect_lock); 8638 8639 if (hlist_empty(&adapter->fdir_filter_list)) { 8640 memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); 8641 err = ixgbe_fdir_set_input_mask_82599(hw, mask); 8642 if (err) 8643 goto err_out_w_lock; 8644 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { 8645 err = -EINVAL; 8646 goto err_out_w_lock; 8647 } 8648 8649 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); 8650 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, 8651 input->sw_idx, queue); 8652 if (!err) 8653 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 8654 spin_unlock(&adapter->fdir_perfect_lock); 8655 8656 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) 8657 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); 8658 8659 kfree(mask); 8660 return err; 8661 err_out_w_lock: 8662 spin_unlock(&adapter->fdir_perfect_lock); 8663 err_out: 8664 kfree(mask); 8665 free_input: 8666 kfree(input); 8667 free_jump: 8668 kfree(jump); 8669 return err; 8670 } 8671 8672 static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 8673 struct tc_to_netdev *tc) 8674 { 8675 struct ixgbe_adapter *adapter = netdev_priv(dev); 8676 8677 if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && 8678 tc->type == 
TC_SETUP_CLSU32) { 8679 switch (tc->cls_u32->command) { 8680 case TC_CLSU32_NEW_KNODE: 8681 case TC_CLSU32_REPLACE_KNODE: 8682 return ixgbe_configure_clsu32(adapter, 8683 proto, tc->cls_u32); 8684 case TC_CLSU32_DELETE_KNODE: 8685 return ixgbe_delete_clsu32(adapter, tc->cls_u32); 8686 case TC_CLSU32_NEW_HNODE: 8687 case TC_CLSU32_REPLACE_HNODE: 8688 return ixgbe_configure_clsu32_add_hnode(adapter, proto, 8689 tc->cls_u32); 8690 case TC_CLSU32_DELETE_HNODE: 8691 return ixgbe_configure_clsu32_del_hnode(adapter, 8692 tc->cls_u32); 8693 default: 8694 return -EINVAL; 8695 } 8696 } 8697 8698 if (tc->type != TC_SETUP_MQPRIO) 8699 return -EINVAL; 8700 8701 return ixgbe_setup_tc(dev, tc->tc); 8702 } 8703 8704 #ifdef CONFIG_PCI_IOV 8705 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) 8706 { 8707 struct net_device *netdev = adapter->netdev; 8708 8709 rtnl_lock(); 8710 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); 8711 rtnl_unlock(); 8712 } 8713 8714 #endif 8715 void ixgbe_do_reset(struct net_device *netdev) 8716 { 8717 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8718 8719 if (netif_running(netdev)) 8720 ixgbe_reinit_locked(adapter); 8721 else 8722 ixgbe_reset(adapter); 8723 } 8724 8725 static netdev_features_t ixgbe_fix_features(struct net_device *netdev, 8726 netdev_features_t features) 8727 { 8728 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8729 8730 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ 8731 if (!(features & NETIF_F_RXCSUM)) 8732 features &= ~NETIF_F_LRO; 8733 8734 /* Turn off LRO if not RSC capable */ 8735 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 8736 features &= ~NETIF_F_LRO; 8737 8738 return features; 8739 } 8740 8741 static int ixgbe_set_features(struct net_device *netdev, 8742 netdev_features_t features) 8743 { 8744 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8745 netdev_features_t changed = netdev->features ^ features; 8746 bool need_reset = false; 8747 8748 /* Make sure RSC matches LRO, reset if change */ 8749 if (!(features & NETIF_F_LRO)) { 8750 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 8751 need_reset = true; 8752 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 8753 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && 8754 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 8755 if (adapter->rx_itr_setting == 1 || 8756 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 8757 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 8758 need_reset = true; 8759 } else if ((changed ^ features) & NETIF_F_LRO) { 8760 e_info(probe, "rx-usecs set too low, " 8761 "disabling RSC\n"); 8762 } 8763 } 8764 8765 /* 8766 * Check if Flow Director n-tuple support or hw_tc support was 8767 * enabled or disabled. If the state changed, we need to reset. 
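	 * Both n-tuple (ethtool) and hw_tc (cls_u32) filters are programmed
	 * through the same Flow Director perfect-filter hardware, which is
	 * why the two feature bits are handled together here.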
8768 */ 8769 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { 8770 /* turn off ATR, enable perfect filters and reset */ 8771 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 8772 need_reset = true; 8773 8774 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 8775 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 8776 } else { 8777 /* turn off perfect filters, enable ATR and reset */ 8778 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 8779 need_reset = true; 8780 8781 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 8782 8783 /* We cannot enable ATR if SR-IOV is enabled */ 8784 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || 8785 /* We cannot enable ATR if we have 2 or more tcs */ 8786 (netdev_get_num_tc(netdev) > 1) || 8787 /* We cannot enable ATR if RSS is disabled */ 8788 (adapter->ring_feature[RING_F_RSS].limit <= 1) || 8789 /* A sample rate of 0 indicates ATR disabled */ 8790 (!adapter->atr_sample_rate)) 8791 ; /* do nothing not supported */ 8792 else /* otherwise supported and set the flag */ 8793 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 8794 } 8795 8796 if (changed & NETIF_F_RXALL) 8797 need_reset = true; 8798 8799 netdev->features = features; 8800 8801 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { 8802 if (features & NETIF_F_RXCSUM) { 8803 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; 8804 } else { 8805 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; 8806 8807 ixgbe_clear_udp_tunnel_port(adapter, port_mask); 8808 } 8809 } 8810 8811 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { 8812 if (features & NETIF_F_RXCSUM) { 8813 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; 8814 } else { 8815 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; 8816 8817 ixgbe_clear_udp_tunnel_port(adapter, port_mask); 8818 } 8819 } 8820 8821 if (need_reset) 8822 ixgbe_do_reset(netdev); 8823 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | 8824 NETIF_F_HW_VLAN_CTAG_FILTER)) 8825 ixgbe_set_rx_mode(netdev); 8826 8827 return 0; 8828 } 8829 8830 /** 8831 * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports 8832 * @dev: The port's netdev 8833 * @ti: Tunnel endpoint information 8834 **/ 8835 static void ixgbe_add_udp_tunnel_port(struct net_device *dev, 8836 struct udp_tunnel_info *ti) 8837 { 8838 struct ixgbe_adapter *adapter = netdev_priv(dev); 8839 struct ixgbe_hw *hw = &adapter->hw; 8840 __be16 port = ti->port; 8841 u32 port_shift = 0; 8842 u32 reg; 8843 8844 if (ti->sa_family != AF_INET) 8845 return; 8846 8847 switch (ti->type) { 8848 case UDP_TUNNEL_TYPE_VXLAN: 8849 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8850 return; 8851 8852 if (adapter->vxlan_port == port) 8853 return; 8854 8855 if (adapter->vxlan_port) { 8856 netdev_info(dev, 8857 "VXLAN port %d set, not adding port %d\n", 8858 ntohs(adapter->vxlan_port), 8859 ntohs(port)); 8860 return; 8861 } 8862 8863 adapter->vxlan_port = port; 8864 break; 8865 case UDP_TUNNEL_TYPE_GENEVE: 8866 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) 8867 return; 8868 8869 if (adapter->geneve_port == port) 8870 return; 8871 8872 if (adapter->geneve_port) { 8873 netdev_info(dev, 8874 "GENEVE port %d set, not adding port %d\n", 8875 ntohs(adapter->geneve_port), 8876 ntohs(port)); 8877 return; 8878 } 8879 8880 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; 8881 adapter->geneve_port = port; 8882 break; 8883 default: 8884 return; 8885 } 8886 8887 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; 8888 IXGBE_WRITE_REG(hw, 
IXGBE_VXLANCTRL, reg); 8889 } 8890 8891 /** 8892 * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports 8893 * @dev: The port's netdev 8894 * @ti: Tunnel endpoint information 8895 **/ 8896 static void ixgbe_del_udp_tunnel_port(struct net_device *dev, 8897 struct udp_tunnel_info *ti) 8898 { 8899 struct ixgbe_adapter *adapter = netdev_priv(dev); 8900 u32 port_mask; 8901 8902 if (ti->type != UDP_TUNNEL_TYPE_VXLAN && 8903 ti->type != UDP_TUNNEL_TYPE_GENEVE) 8904 return; 8905 8906 if (ti->sa_family != AF_INET) 8907 return; 8908 8909 switch (ti->type) { 8910 case UDP_TUNNEL_TYPE_VXLAN: 8911 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8912 return; 8913 8914 if (adapter->vxlan_port != ti->port) { 8915 netdev_info(dev, "VXLAN port %d not found\n", 8916 ntohs(ti->port)); 8917 return; 8918 } 8919 8920 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; 8921 break; 8922 case UDP_TUNNEL_TYPE_GENEVE: 8923 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) 8924 return; 8925 8926 if (adapter->geneve_port != ti->port) { 8927 netdev_info(dev, "GENEVE port %d not found\n", 8928 ntohs(ti->port)); 8929 return; 8930 } 8931 8932 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; 8933 break; 8934 default: 8935 return; 8936 } 8937 8938 ixgbe_clear_udp_tunnel_port(adapter, port_mask); 8939 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; 8940 } 8941 8942 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 8943 struct net_device *dev, 8944 const unsigned char *addr, u16 vid, 8945 u16 flags) 8946 { 8947 /* guarantee we can provide a unique filter for the unicast address */ 8948 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { 8949 struct ixgbe_adapter *adapter = netdev_priv(dev); 8950 u16 pool = VMDQ_P(0); 8951 8952 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) 8953 return -ENOMEM; 8954 } 8955 8956 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); 8957 } 8958 8959 /** 8960 * ixgbe_configure_bridge_mode - set various bridge modes 8961 * @adapter - the private structure 8962 * @mode - requested bridge mode 8963 * 8964 * Configure some settings require for various bridge modes. 8965 **/ 8966 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, 8967 __u16 mode) 8968 { 8969 struct ixgbe_hw *hw = &adapter->hw; 8970 unsigned int p, num_pools; 8971 u32 vmdctl; 8972 8973 switch (mode) { 8974 case BRIDGE_MODE_VEPA: 8975 /* disable Tx loopback, rely on switch hairpin mode */ 8976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); 8977 8978 /* must enable Rx switching replication to allow multicast 8979 * packet reception on all VFs, and to enable source address 8980 * pruning. 8981 */ 8982 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 8983 vmdctl |= IXGBE_VT_CTL_REPLEN; 8984 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 8985 8986 /* enable Rx source address pruning. Note, this requires 8987 * replication to be enabled or else it does nothing. 
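	 * Pruning is enabled below for every VF pool and PF pool.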
8988 */ 8989 num_pools = adapter->num_vfs + adapter->num_rx_pools; 8990 for (p = 0; p < num_pools; p++) { 8991 if (hw->mac.ops.set_source_address_pruning) 8992 hw->mac.ops.set_source_address_pruning(hw, 8993 true, 8994 p); 8995 } 8996 break; 8997 case BRIDGE_MODE_VEB: 8998 /* enable Tx loopback for internal VF/PF communication */ 8999 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 9000 IXGBE_PFDTXGSWC_VT_LBEN); 9001 9002 /* disable Rx switching replication unless we have SR-IOV 9003 * virtual functions 9004 */ 9005 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 9006 if (!adapter->num_vfs) 9007 vmdctl &= ~IXGBE_VT_CTL_REPLEN; 9008 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 9009 9010 /* disable Rx source address pruning, since we don't expect to 9011 * be receiving external loopback of our transmitted frames. 9012 */ 9013 num_pools = adapter->num_vfs + adapter->num_rx_pools; 9014 for (p = 0; p < num_pools; p++) { 9015 if (hw->mac.ops.set_source_address_pruning) 9016 hw->mac.ops.set_source_address_pruning(hw, 9017 false, 9018 p); 9019 } 9020 break; 9021 default: 9022 return -EINVAL; 9023 } 9024 9025 adapter->bridge_mode = mode; 9026 9027 e_info(drv, "enabling bridge mode: %s\n", 9028 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 9029 9030 return 0; 9031 } 9032 9033 static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 9034 struct nlmsghdr *nlh, u16 flags) 9035 { 9036 struct ixgbe_adapter *adapter = netdev_priv(dev); 9037 struct nlattr *attr, *br_spec; 9038 int rem; 9039 9040 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 9041 return -EOPNOTSUPP; 9042 9043 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 9044 if (!br_spec) 9045 return -EINVAL; 9046 9047 nla_for_each_nested(attr, br_spec, rem) { 9048 int status; 9049 __u16 mode; 9050 9051 if (nla_type(attr) != IFLA_BRIDGE_MODE) 9052 continue; 9053 9054 if (nla_len(attr) < sizeof(mode)) 9055 return -EINVAL; 9056 9057 mode = nla_get_u16(attr); 9058 status = ixgbe_configure_bridge_mode(adapter, mode); 9059 if (status) 9060 return status; 9061 9062 break; 9063 } 9064 9065 return 0; 9066 } 9067 9068 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 9069 struct net_device *dev, 9070 u32 filter_mask, int nlflags) 9071 { 9072 struct ixgbe_adapter *adapter = netdev_priv(dev); 9073 9074 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 9075 return 0; 9076 9077 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, 9078 adapter->bridge_mode, 0, 0, nlflags, 9079 filter_mask, NULL); 9080 } 9081 9082 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) 9083 { 9084 struct ixgbe_fwd_adapter *fwd_adapter = NULL; 9085 struct ixgbe_adapter *adapter = netdev_priv(pdev); 9086 int used_pools = adapter->num_vfs + adapter->num_rx_pools; 9087 unsigned int limit; 9088 int pool, err; 9089 9090 /* Hardware has a limited number of available pools. Each VF, and the 9091 * PF require a pool. Check to ensure we don't attempt to use more 9092 * then the available number of pools. 
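	 * Each accelerated macvlan (dfwd station) added here claims one
	 * more pool on top of those already used by the PF and any VFs.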
9093 */ 9094 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) 9095 return ERR_PTR(-EINVAL); 9096 9097 #ifdef CONFIG_RPS 9098 if (vdev->num_rx_queues != vdev->num_tx_queues) { 9099 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", 9100 vdev->name); 9101 return ERR_PTR(-EINVAL); 9102 } 9103 #endif 9104 /* Check for hardware restriction on number of rx/tx queues */ 9105 if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || 9106 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { 9107 netdev_info(pdev, 9108 "%s: Supports RX/TX Queue counts 1,2, and 4\n", 9109 pdev->name); 9110 return ERR_PTR(-EINVAL); 9111 } 9112 9113 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 9114 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || 9115 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) 9116 return ERR_PTR(-EBUSY); 9117 9118 fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL); 9119 if (!fwd_adapter) 9120 return ERR_PTR(-ENOMEM); 9121 9122 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); 9123 adapter->num_rx_pools++; 9124 set_bit(pool, &adapter->fwd_bitmask); 9125 limit = find_last_bit(&adapter->fwd_bitmask, 32); 9126 9127 /* Enable VMDq flag so device will be set in VM mode */ 9128 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; 9129 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; 9130 adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; 9131 9132 /* Force reinit of ring allocation with VMDQ enabled */ 9133 err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); 9134 if (err) 9135 goto fwd_add_err; 9136 fwd_adapter->pool = pool; 9137 fwd_adapter->real_adapter = adapter; 9138 err = ixgbe_fwd_ring_up(vdev, fwd_adapter); 9139 if (err) 9140 goto fwd_add_err; 9141 netif_tx_start_all_queues(vdev); 9142 return fwd_adapter; 9143 fwd_add_err: 9144 /* unwind counter and free adapter struct */ 9145 netdev_info(pdev, 9146 "%s: dfwd hardware acceleration failed\n", vdev->name); 9147 clear_bit(pool, &adapter->fwd_bitmask); 9148 adapter->num_rx_pools--; 9149 kfree(fwd_adapter); 9150 return ERR_PTR(err); 9151 } 9152 9153 static void ixgbe_fwd_del(struct net_device *pdev, void *priv) 9154 { 9155 struct ixgbe_fwd_adapter *fwd_adapter = priv; 9156 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; 9157 unsigned int limit; 9158 9159 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); 9160 adapter->num_rx_pools--; 9161 9162 limit = find_last_bit(&adapter->fwd_bitmask, 32); 9163 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; 9164 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); 9165 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); 9166 netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", 9167 fwd_adapter->pool, adapter->num_rx_pools, 9168 fwd_adapter->rx_base_queue, 9169 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, 9170 adapter->fwd_bitmask); 9171 kfree(fwd_adapter); 9172 } 9173 9174 #define IXGBE_MAX_MAC_HDR_LEN 127 9175 #define IXGBE_MAX_NETWORK_HDR_LEN 511 9176 9177 static netdev_features_t 9178 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, 9179 netdev_features_t features) 9180 { 9181 unsigned int network_hdr_len, mac_hdr_len; 9182 9183 /* Make certain the headers can be described by a context descriptor */ 9184 mac_hdr_len = skb_network_header(skb) - skb->data; 9185 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) 9186 return features & ~(NETIF_F_HW_CSUM | 9187 NETIF_F_SCTP_CRC | 9188 NETIF_F_HW_VLAN_CTAG_TX | 9189 NETIF_F_TSO | 9190 NETIF_F_TSO6); 9191 9192 network_hdr_len = skb_checksum_start(skb) - 
			  skb_network_header(skb);
	if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open = ixgbe_open,
	.ndo_stop = ixgbe_close,
	.ndo_start_xmit = ixgbe_xmit_frame,
	.ndo_select_queue = ixgbe_select_queue,
	.ndo_set_rx_mode = ixgbe_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = ixgbe_set_mac,
	.ndo_change_mtu = ixgbe_change_mtu,
	.ndo_tx_timeout = ixgbe_tx_timeout,
	.ndo_set_tx_maxrate = ixgbe_tx_maxrate,
	.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl = ixgbe_ioctl,
	.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
	.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
	.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
	.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
	.ndo_get_stats64 = ixgbe_get_stats64,
	.ndo_setup_tc = __ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable = ixgbe_fcoe_enable,
	.ndo_fcoe_disable = ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
	.ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
	.ndo_set_features = ixgbe_set_features,
	.ndo_fix_features = ixgbe_fix_features,
	.ndo_fdb_add = ixgbe_ndo_fdb_add,
	.ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
	.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
	.ndo_dfwd_add_station = ixgbe_fwd_add,
	.ndo_dfwd_del_station = ixgbe_fwd_del,
	.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
	.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
	.ndo_features_check = ixgbe_features_check,
};

/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 **/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
	struct pci_dev *entry, *pdev = adapter->pdev;
	int physfns = 0;

	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these with
	 * the correct number of functions.
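	 * ixgbe_pcie_from_parent() is used below to identify those devices.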
9275 */ 9276 if (ixgbe_pcie_from_parent(&adapter->hw)) 9277 physfns = 4; 9278 9279 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { 9280 /* don't count virtual functions */ 9281 if (entry->is_virtfn) 9282 continue; 9283 9284 /* When the devices on the bus don't all match our device ID, 9285 * we can't reliably determine the correct number of 9286 * functions. This can occur if a function has been direct 9287 * attached to a virtual machine using VT-d, for example. In 9288 * this case, simply return -1 to indicate this. 9289 */ 9290 if ((entry->vendor != pdev->vendor) || 9291 (entry->device != pdev->device)) 9292 return -1; 9293 9294 physfns++; 9295 } 9296 9297 return physfns; 9298 } 9299 9300 /** 9301 * ixgbe_wol_supported - Check whether device supports WoL 9302 * @adapter: the adapter private structure 9303 * @device_id: the device ID 9304 * @subdev_id: the subsystem device ID 9305 * 9306 * This function is used by probe and ethtool to determine 9307 * which devices have WoL support 9308 * 9309 **/ 9310 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, 9311 u16 subdevice_id) 9312 { 9313 struct ixgbe_hw *hw = &adapter->hw; 9314 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; 9315 9316 /* WOL not supported on 82598 */ 9317 if (hw->mac.type == ixgbe_mac_82598EB) 9318 return false; 9319 9320 /* check eeprom to see if WOL is enabled for X540 and newer */ 9321 if (hw->mac.type >= ixgbe_mac_X540) { 9322 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 9323 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && 9324 (hw->bus.func == 0))) 9325 return true; 9326 } 9327 9328 /* WOL is determined based on device IDs for 82599 MACs */ 9329 switch (device_id) { 9330 case IXGBE_DEV_ID_82599_SFP: 9331 /* Only these subdevices could supports WOL */ 9332 switch (subdevice_id) { 9333 case IXGBE_SUBDEV_ID_82599_560FLR: 9334 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: 9335 case IXGBE_SUBDEV_ID_82599_SFP_WOL0: 9336 case IXGBE_SUBDEV_ID_82599_SFP_2OCP: 9337 /* only support first port */ 9338 if (hw->bus.func != 0) 9339 break; 9340 case IXGBE_SUBDEV_ID_82599_SP_560FLR: 9341 case IXGBE_SUBDEV_ID_82599_SFP: 9342 case IXGBE_SUBDEV_ID_82599_RNDC: 9343 case IXGBE_SUBDEV_ID_82599_ECNA_DP: 9344 case IXGBE_SUBDEV_ID_82599_SFP_1OCP: 9345 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: 9346 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: 9347 return true; 9348 } 9349 break; 9350 case IXGBE_DEV_ID_82599EN_SFP: 9351 /* Only these subdevices support WOL */ 9352 switch (subdevice_id) { 9353 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: 9354 return true; 9355 } 9356 break; 9357 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 9358 /* All except this subdevice support WOL */ 9359 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) 9360 return true; 9361 break; 9362 case IXGBE_DEV_ID_82599_KX4: 9363 return true; 9364 default: 9365 break; 9366 } 9367 9368 return false; 9369 } 9370 9371 /** 9372 * ixgbe_probe - Device Initialization Routine 9373 * @pdev: PCI device information struct 9374 * @ent: entry in ixgbe_pci_tbl 9375 * 9376 * Returns 0 on success, negative on failure 9377 * 9378 * ixgbe_probe initializes an adapter identified by a pci_dev structure. 9379 * The OS initialization, configuring of the adapter private structure, 9380 * and a hardware reset occur. 
 **/
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	int i, err, pci_using_dac, expected_gts;
	unsigned int indices = MAX_TX_QUEUES;
	u8 part_str[IXGBE_PBANUM_LENGTH];
	bool disable_dev = false;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_mem_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
		indices = 4 * MAX_TRAFFIC_CLASS;
#else
		indices = IXGBE_MAX_RSS_INDICES;
#endif
	}

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	/* Setup hw api */
	hw->mac.ops = *ii->mac_ops;
	hw->mac.type = ii->mac;
	hw->mvals = ii->mvals;

	/* EEPROM */
	hw->eeprom.ops = *ii->eeprom_ops;
	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_ioremap;
	}
	/* If EEPROM is valid (bit 8 = 1), use default, otherwise use bit bang */
	if (!(eec & BIT(8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	hw->phy.ops = *ii->phy_ops;
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
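	/* failures from here until register_netdev() unwind via err_sw_init */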
	if (err)
		goto err_sw_init;

	/* Make sure the SWFW semaphore is in a valid state */
	if (hw->mac.ops.init_swfw_sync)
		hw->mac.ops.init_swfw_sync(hw);

	/* Make it possible for the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		goto skip_sriov;
	/* Mailbox */
	ixgbe_init_mbx_params_pf(hw);
	hw->mbx.ops = ii->mbx_ops;
	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
	ixgbe_enable_sriov(adapter);
skip_sriov:

#endif
	netdev->features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM |
			   NETIF_F_HW_CSUM;

#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				    NETIF_F_GSO_GRE_CSUM | \
				    NETIF_F_GSO_IPXIP4 | \
				    NETIF_F_GSO_IPXIP6 | \
				    NETIF_F_GSO_UDP_TUNNEL | \
				    NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL |
			    IXGBE_GSO_PARTIAL_FEATURES;

	if (hw->mac.type >= ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CRC;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features |
			       NETIF_F_HW_VLAN_CTAG_FILTER |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_RXALL |
			       NETIF_F_HW_L2FW_DOFFLOAD;

	if (hw->mac.type >= ixgbe_mac_82599EB)
		netdev->hw_features |= NETIF_F_NTUPLE |
				       NETIF_F_HW_TC;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->hw_enc_features |= netdev->vlan_features;
	netdev->mpls_features |= NETIF_F_HW_CSUM;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
		netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
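	/* FCoE offload setup: the DDP ring limit below is sized from the
	 * number of online CPUs, capped at IXGBE_FCRETA_SIZE.
	 */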
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		unsigned int fcoe_l;

		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}

		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	eth_platform_get_mac_address(&adapter->pdev->dev,
				     adapter->hw.mac.perm_addr);

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	/* Set hw->mac.addr to permanent MAC address */
	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
	ixgbe_mac_set_default_filter(adapter);

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* WOL is not supported on all devices */
	adapter->wol = 0;
	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
					      pdev->subsystem_device);
	if (hw->wol_enabled)
		adapter->wol = IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	if (ixgbe_pcie_from_parent(hw))
		ixgbe_get_parent_bus_info(adapter);
	else
		hw->mac.ops.get_bus_info(hw);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
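	 * A negative function count from ixgbe_enumerate_functions() leaves
	 * expected_gts <= 0, which skips the minimum-link check below.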
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
		break;
	default:
		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
		break;
	}

	/* don't check link if we failed to enumerate functions */
	if (expected_gts > 0)
		ixgbe_check_minimum_link(adapter, expected_gts);

	err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
	if (err)
		strlcpy(part_str, "Unknown", sizeof(part_str));
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	e_dev_info("%pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, adapter);

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires driver version to be 0xFFFFFFFF
	 * since the OS does not support the feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */

	ixgbe_dbg_adapter_init(adapter);

	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
	if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw,
			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
			true);

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(adapter->io_addr);
	kfree(adapter->jump_tables[0]);
	kfree(adapter->mac_table);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
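/* err_pci_reg and err_dma share the final step below: the device is only
 * disabled here if no other path has done so already, which is tracked via
 * the __IXGBE_DISABLED bit.
 */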
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev;
	bool disable_dev;
	int i;

	/* if !adapter then we already cleaned up in probe */
	if (!adapter)
		return;

	netdev = adapter->netdev;
	ixgbe_dbg_adapter_exit(adapter);

	set_bit(__IXGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	ixgbe_disable_sriov(adapter);
#endif
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->io_addr);
	pci_release_mem_regions(pdev);

	e_dev_info("complete\n");

	for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
		if (adapter->jump_tables[i]) {
			kfree(adapter->jump_tables[i]->input);
			kfree(adapter->jump_tables[i]->mask);
		}
		kfree(adapter->jump_tables[i]);
	}

	kfree(adapter->mac_table);
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
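 * When SR-IOV is active, the handler first checks whether a VF caused the
 * error so that only the offending VF needs a function-level reset.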
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
	if (ixgbe_removed(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
			  "%8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X550:
			device_id = IXGBE_DEV_ID_X550_VF;
			break;
		case ixgbe_mac_X550EM_x:
			device_id = IXGBE_DEV_ID_X550EM_X_VF;
			break;
		case ixgbe_mac_x550em_a:
			device_id = IXGBE_DEV_ID_X550EM_A_VF;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR. Just clean up the AER in that case.
		 */
		if (vfdev) {
			ixgbe_issue_vf_flr(adapter, vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
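	 * (ixgbe_io_resume() below consumes one reference per call before
	 * resuming normal operation.)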
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbe_down(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		smp_mb__before_atomic();
		clear_bit(__IXGBE_DISABLED, &adapter->state);
		adapter->hw.hw_addr = adapter->io_addr;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}

static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
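 * It also creates the driver's single-threaded workqueue and the debugfs
 * root; both are torn down again if pci_register_driver() fails.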
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
	if (!ixgbe_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
		return -ENOMEM;
	}

	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();
	if (ixgbe_wq) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_wq = NULL;
	}
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */