// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <linux/numa.h>
#include <generated/utsrelease.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#endif
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] =
	"Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_x_fw]	= &ixgbe_x550em_x_fw_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
	[board_x550em_a_fw]	= &ixgbe_x550em_a_fw_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next = NULL,
	.priority = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");

DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
EXPORT_SYMBOL(ixgbe_xdp_locking_key);

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);

static const struct net_device_ops ixgbe_netdev_ops;

static bool netif_is_ixgbe(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
}

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	pcie_print_link_status(pdev);
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbe_wq, &adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr;
	u32 value;
	int i;

	reg_addr = READ_ONCE(hw->hw_addr);
	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;

	/* A register read of 0xFFFFFFFF can indicate the adapter has been
	 * removed, so perform several status register reads to determine if
	 * the adapter has been removed.
	 */
	for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
		value = readl(reg_addr + IXGBE_STATUS);
		if (value != IXGBE_FAILED_READ_REG)
			break;
		mdelay(3);
	}

	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
	else
		value = readl(reg_addr + reg);
	return value;
}

/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones against the status
 * register, which must also read as all ones before the adapter is treated
 * as removed. This function avoids reading from the hardware if a removal
 * was previously detected, in which case it returns IXGBE_FAILED_READ_REG
 * (all ones).
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	if (unlikely(hw->phy.nw_mng_if_sel &
		     IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
		struct ixgbe_adapter *adapter;
		int i;

		/* wait up to ~1 ms (200 * 5 us) for pending MAC register
		 * writes to drain before issuing the read
		 */
		for (i = 0; i < 200; ++i) {
			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
			if (likely(!value))
				goto writes_completed;
			if (value == IXGBE_FAILED_READ_REG) {
				ixgbe_remove_adapter(hw);
				return IXGBE_FAILED_READ_REG;
			}
			udelay(5);
		}

		adapter = hw->back;
		e_warn(hw, "register writes incomplete %08x\n", value);
	}

writes_completed:
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		value = ixgbe_check_remove(hw, reg);
	return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};

/*
 * ixgbe_regdump - register printout
routine 467 */ 468 static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) 469 { 470 int i; 471 char rname[16]; 472 u32 regs[64]; 473 474 switch (reginfo->ofs) { 475 case IXGBE_SRRCTL(0): 476 for (i = 0; i < 64; i++) 477 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); 478 break; 479 case IXGBE_DCA_RXCTRL(0): 480 for (i = 0; i < 64; i++) 481 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 482 break; 483 case IXGBE_RDLEN(0): 484 for (i = 0; i < 64; i++) 485 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); 486 break; 487 case IXGBE_RDH(0): 488 for (i = 0; i < 64; i++) 489 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); 490 break; 491 case IXGBE_RDT(0): 492 for (i = 0; i < 64; i++) 493 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); 494 break; 495 case IXGBE_RXDCTL(0): 496 for (i = 0; i < 64; i++) 497 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 498 break; 499 case IXGBE_RDBAL(0): 500 for (i = 0; i < 64; i++) 501 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); 502 break; 503 case IXGBE_RDBAH(0): 504 for (i = 0; i < 64; i++) 505 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); 506 break; 507 case IXGBE_TDBAL(0): 508 for (i = 0; i < 64; i++) 509 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); 510 break; 511 case IXGBE_TDBAH(0): 512 for (i = 0; i < 64; i++) 513 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); 514 break; 515 case IXGBE_TDLEN(0): 516 for (i = 0; i < 64; i++) 517 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); 518 break; 519 case IXGBE_TDH(0): 520 for (i = 0; i < 64; i++) 521 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); 522 break; 523 case IXGBE_TDT(0): 524 for (i = 0; i < 64; i++) 525 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); 526 break; 527 case IXGBE_TXDCTL(0): 528 for (i = 0; i < 64; i++) 529 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 530 break; 531 default: 532 pr_info("%-15s %08x\n", 533 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); 534 return; 535 } 536 537 i = 0; 538 while (i < 64) { 539 int j; 540 char buf[9 * 8 + 1]; 541 char *p = buf; 542 543 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7); 544 for (j = 0; j < 8; j++) 545 p += sprintf(p, " %08x", regs[i++]); 546 pr_err("%-15s%s\n", rname, buf); 547 } 548 549 } 550 551 static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n) 552 { 553 struct ixgbe_tx_buffer *tx_buffer; 554 555 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; 556 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", 557 n, ring->next_to_use, ring->next_to_clean, 558 (u64)dma_unmap_addr(tx_buffer, dma), 559 dma_unmap_len(tx_buffer, len), 560 tx_buffer->next_to_watch, 561 (u64)tx_buffer->time_stamp); 562 } 563 564 /* 565 * ixgbe_dump - Print registers, tx-rings and rx-rings 566 */ 567 static void ixgbe_dump(struct ixgbe_adapter *adapter) 568 { 569 struct net_device *netdev = adapter->netdev; 570 struct ixgbe_hw *hw = &adapter->hw; 571 struct ixgbe_reg_info *reginfo; 572 int n = 0; 573 struct ixgbe_ring *ring; 574 struct ixgbe_tx_buffer *tx_buffer; 575 union ixgbe_adv_tx_desc *tx_desc; 576 struct my_u0 { u64 a; u64 b; } *u0; 577 struct ixgbe_ring *rx_ring; 578 union ixgbe_adv_rx_desc *rx_desc; 579 struct ixgbe_rx_buffer *rx_buffer_info; 580 int i = 0; 581 582 if (!netif_msg_hw(adapter)) 583 return; 584 585 /* Print netdevice Info */ 586 if (netdev) { 587 dev_info(&adapter->pdev->dev, "Net device Info\n"); 588 pr_info("Device Name state " 589 "trans_start\n"); 590 pr_info("%-15s %016lX %016lX\n", 591 netdev->name, 592 netdev->state, 593 dev_trans_start(netdev)); 594 } 595 596 /* Print Registers */ 597 dev_info(&adapter->pdev->dev, 
"Register Dump\n"); 598 pr_info(" Register Name Value\n"); 599 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; 600 reginfo->name; reginfo++) { 601 ixgbe_regdump(hw, reginfo); 602 } 603 604 /* Print TX Ring Summary */ 605 if (!netdev || !netif_running(netdev)) 606 return; 607 608 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); 609 pr_info(" %s %s %s %s\n", 610 "Queue [NTU] [NTC] [bi(ntc)->dma ]", 611 "leng", "ntw", "timestamp"); 612 for (n = 0; n < adapter->num_tx_queues; n++) { 613 ring = adapter->tx_ring[n]; 614 ixgbe_print_buffer(ring, n); 615 } 616 617 for (n = 0; n < adapter->num_xdp_queues; n++) { 618 ring = adapter->xdp_ring[n]; 619 ixgbe_print_buffer(ring, n); 620 } 621 622 /* Print TX Rings */ 623 if (!netif_msg_tx_done(adapter)) 624 goto rx_ring_summary; 625 626 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); 627 628 /* Transmit Descriptor Formats 629 * 630 * 82598 Advanced Transmit Descriptor 631 * +--------------------------------------------------------------+ 632 * 0 | Buffer Address [63:0] | 633 * +--------------------------------------------------------------+ 634 * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | 635 * +--------------------------------------------------------------+ 636 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 637 * 638 * 82598 Advanced Transmit Descriptor (Write-Back Format) 639 * +--------------------------------------------------------------+ 640 * 0 | RSV [63:0] | 641 * +--------------------------------------------------------------+ 642 * 8 | RSV | STA | NXTSEQ | 643 * +--------------------------------------------------------------+ 644 * 63 36 35 32 31 0 645 * 646 * 82599+ Advanced Transmit Descriptor 647 * +--------------------------------------------------------------+ 648 * 0 | Buffer Address [63:0] | 649 * +--------------------------------------------------------------+ 650 * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | 651 * +--------------------------------------------------------------+ 652 * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 653 * 654 * 82599+ Advanced Transmit Descriptor (Write-Back Format) 655 * +--------------------------------------------------------------+ 656 * 0 | RSV [63:0] | 657 * +--------------------------------------------------------------+ 658 * 8 | RSV | STA | RSV | 659 * +--------------------------------------------------------------+ 660 * 63 36 35 32 31 0 661 */ 662 663 for (n = 0; n < adapter->num_tx_queues; n++) { 664 ring = adapter->tx_ring[n]; 665 pr_info("------------------------------------\n"); 666 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); 667 pr_info("------------------------------------\n"); 668 pr_info("%s%s %s %s %s %s\n", 669 "T [desc] [address 63:0 ] ", 670 "[PlPOIdStDDt Ln] [bi->dma ] ", 671 "leng", "ntw", "timestamp", "bi->skb"); 672 673 for (i = 0; ring->desc && (i < ring->count); i++) { 674 tx_desc = IXGBE_TX_DESC(ring, i); 675 tx_buffer = &ring->tx_buffer_info[i]; 676 u0 = (struct my_u0 *)tx_desc; 677 if (dma_unmap_len(tx_buffer, len) > 0) { 678 const char *ring_desc; 679 680 if (i == ring->next_to_use && 681 i == ring->next_to_clean) 682 ring_desc = " NTC/U"; 683 else if (i == ring->next_to_use) 684 ring_desc = " NTU"; 685 else if (i == ring->next_to_clean) 686 ring_desc = " NTC"; 687 else 688 ring_desc = ""; 689 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s", 690 i, 691 le64_to_cpu((__force __le64)u0->a), 692 le64_to_cpu((__force __le64)u0->b), 693 (u64)dma_unmap_addr(tx_buffer, dma), 694 
dma_unmap_len(tx_buffer, len), 695 tx_buffer->next_to_watch, 696 (u64)tx_buffer->time_stamp, 697 tx_buffer->skb, 698 ring_desc); 699 700 if (netif_msg_pktdata(adapter) && 701 tx_buffer->skb) 702 print_hex_dump(KERN_INFO, "", 703 DUMP_PREFIX_ADDRESS, 16, 1, 704 tx_buffer->skb->data, 705 dma_unmap_len(tx_buffer, len), 706 true); 707 } 708 } 709 } 710 711 /* Print RX Rings Summary */ 712 rx_ring_summary: 713 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); 714 pr_info("Queue [NTU] [NTC]\n"); 715 for (n = 0; n < adapter->num_rx_queues; n++) { 716 rx_ring = adapter->rx_ring[n]; 717 pr_info("%5d %5X %5X\n", 718 n, rx_ring->next_to_use, rx_ring->next_to_clean); 719 } 720 721 /* Print RX Rings */ 722 if (!netif_msg_rx_status(adapter)) 723 return; 724 725 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); 726 727 /* Receive Descriptor Formats 728 * 729 * 82598 Advanced Receive Descriptor (Read) Format 730 * 63 1 0 731 * +-----------------------------------------------------+ 732 * 0 | Packet Buffer Address [63:1] |A0/NSE| 733 * +----------------------------------------------+------+ 734 * 8 | Header Buffer Address [63:1] | DD | 735 * +-----------------------------------------------------+ 736 * 737 * 738 * 82598 Advanced Receive Descriptor (Write-Back) Format 739 * 740 * 63 48 47 32 31 30 21 20 16 15 4 3 0 741 * +------------------------------------------------------+ 742 * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | 743 * | Packet | IP | | | | Type | Type | 744 * | Checksum | Ident | | | | | | 745 * +------------------------------------------------------+ 746 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 747 * +------------------------------------------------------+ 748 * 63 48 47 32 31 20 19 0 749 * 750 * 82599+ Advanced Receive Descriptor (Read) Format 751 * 63 1 0 752 * +-----------------------------------------------------+ 753 * 0 | Packet Buffer Address [63:1] |A0/NSE| 754 * +----------------------------------------------+------+ 755 * 8 | Header Buffer Address [63:1] | DD | 756 * +-----------------------------------------------------+ 757 * 758 * 759 * 82599+ Advanced Receive Descriptor (Write-Back) Format 760 * 761 * 63 48 47 32 31 30 21 20 17 16 4 3 0 762 * +------------------------------------------------------+ 763 * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | 764 * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | 765 * |/ Flow Dir Flt ID | | | | | | 766 * +------------------------------------------------------+ 767 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | 768 * +------------------------------------------------------+ 769 * 63 48 47 32 31 20 19 0 770 */ 771 772 for (n = 0; n < adapter->num_rx_queues; n++) { 773 rx_ring = adapter->rx_ring[n]; 774 pr_info("------------------------------------\n"); 775 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); 776 pr_info("------------------------------------\n"); 777 pr_info("%s%s%s\n", 778 "R [desc] [ PktBuf A0] ", 779 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", 780 "<-- Adv Rx Read format"); 781 pr_info("%s%s%s\n", 782 "RWB[desc] [PcsmIpSHl PtRs] ", 783 "[vl er S cks ln] ---------------- [bi->skb ] ", 784 "<-- Adv Rx Write-Back format"); 785 786 for (i = 0; i < rx_ring->count; i++) { 787 const char *ring_desc; 788 789 if (i == rx_ring->next_to_use) 790 ring_desc = " NTU"; 791 else if (i == rx_ring->next_to_clean) 792 ring_desc = " NTC"; 793 else 794 ring_desc = ""; 795 796 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 797 rx_desc = IXGBE_RX_DESC(rx_ring, i); 798 u0 = (struct my_u0 
*)rx_desc; 799 if (rx_desc->wb.upper.length) { 800 /* Descriptor Done */ 801 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", 802 i, 803 le64_to_cpu((__force __le64)u0->a), 804 le64_to_cpu((__force __le64)u0->b), 805 rx_buffer_info->skb, 806 ring_desc); 807 } else { 808 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n", 809 i, 810 le64_to_cpu((__force __le64)u0->a), 811 le64_to_cpu((__force __le64)u0->b), 812 (u64)rx_buffer_info->dma, 813 rx_buffer_info->skb, 814 ring_desc); 815 816 if (netif_msg_pktdata(adapter) && 817 rx_buffer_info->dma) { 818 print_hex_dump(KERN_INFO, "", 819 DUMP_PREFIX_ADDRESS, 16, 1, 820 page_address(rx_buffer_info->page) + 821 rx_buffer_info->page_offset, 822 ixgbe_rx_bufsz(rx_ring), true); 823 } 824 } 825 } 826 } 827 } 828 829 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 830 { 831 u32 ctrl_ext; 832 833 /* Let firmware take over control of h/w */ 834 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 835 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 836 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 837 } 838 839 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) 840 { 841 u32 ctrl_ext; 842 843 /* Let firmware know the driver has taken over */ 844 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 845 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 846 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 847 } 848 849 /** 850 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 851 * @adapter: pointer to adapter struct 852 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 853 * @queue: queue to map the corresponding interrupt to 854 * @msix_vector: the vector to map to the corresponding queue 855 * 856 */ 857 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, 858 u8 queue, u8 msix_vector) 859 { 860 u32 ivar, index; 861 struct ixgbe_hw *hw = &adapter->hw; 862 switch (hw->mac.type) { 863 case ixgbe_mac_82598EB: 864 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 865 if (direction == -1) 866 direction = 0; 867 index = (((direction * 64) + queue) >> 2) & 0x1F; 868 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 869 ivar &= ~(0xFF << (8 * (queue & 0x3))); 870 ivar |= (msix_vector << (8 * (queue & 0x3))); 871 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 872 break; 873 case ixgbe_mac_82599EB: 874 case ixgbe_mac_X540: 875 case ixgbe_mac_X550: 876 case ixgbe_mac_X550EM_x: 877 case ixgbe_mac_x550em_a: 878 if (direction == -1) { 879 /* other causes */ 880 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 881 index = ((queue & 1) * 8); 882 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); 883 ivar &= ~(0xFF << index); 884 ivar |= (msix_vector << index); 885 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); 886 break; 887 } else { 888 /* tx or rx causes */ 889 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 890 index = ((16 * (queue & 1)) + (8 * direction)); 891 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 892 ivar &= ~(0xFF << index); 893 ivar |= (msix_vector << index); 894 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); 895 break; 896 } 897 default: 898 break; 899 } 900 } 901 902 void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, 903 u64 qmask) 904 { 905 u32 mask; 906 907 switch (adapter->hw.mac.type) { 908 case ixgbe_mac_82598EB: 909 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 910 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 911 break; 912 case ixgbe_mac_82599EB: 913 case ixgbe_mac_X540: 914 case ixgbe_mac_X550: 915 case ixgbe_mac_X550EM_x: 916 case ixgbe_mac_x550em_a: 917 mask = (qmask & 
0xFFFFFFFF); 918 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 919 mask = (qmask >> 32); 920 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 921 break; 922 default: 923 break; 924 } 925 } 926 927 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) 928 { 929 struct ixgbe_hw *hw = &adapter->hw; 930 struct ixgbe_hw_stats *hwstats = &adapter->stats; 931 int i; 932 u32 data; 933 934 if ((hw->fc.current_mode != ixgbe_fc_full) && 935 (hw->fc.current_mode != ixgbe_fc_rx_pause)) 936 return; 937 938 switch (hw->mac.type) { 939 case ixgbe_mac_82598EB: 940 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 941 break; 942 default: 943 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 944 } 945 hwstats->lxoffrxc += data; 946 947 /* refill credits (no tx hang) if we received xoff */ 948 if (!data) 949 return; 950 951 for (i = 0; i < adapter->num_tx_queues; i++) 952 clear_bit(__IXGBE_HANG_CHECK_ARMED, 953 &adapter->tx_ring[i]->state); 954 955 for (i = 0; i < adapter->num_xdp_queues; i++) 956 clear_bit(__IXGBE_HANG_CHECK_ARMED, 957 &adapter->xdp_ring[i]->state); 958 } 959 960 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) 961 { 962 struct ixgbe_hw *hw = &adapter->hw; 963 struct ixgbe_hw_stats *hwstats = &adapter->stats; 964 u32 xoff[8] = {0}; 965 u8 tc; 966 int i; 967 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; 968 969 if (adapter->ixgbe_ieee_pfc) 970 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); 971 972 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { 973 ixgbe_update_xoff_rx_lfc(adapter); 974 return; 975 } 976 977 /* update stats for each tc, only valid with PFC enabled */ 978 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { 979 u32 pxoffrxc; 980 981 switch (hw->mac.type) { 982 case ixgbe_mac_82598EB: 983 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 984 break; 985 default: 986 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 987 } 988 hwstats->pxoffrxc[i] += pxoffrxc; 989 /* Get the TC for given UP */ 990 tc = netdev_get_prio_tc_map(adapter->netdev, i); 991 xoff[tc] += pxoffrxc; 992 } 993 994 /* disarm tx queues that have received xoff frames */ 995 for (i = 0; i < adapter->num_tx_queues; i++) { 996 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; 997 998 tc = tx_ring->dcb_tc; 999 if (xoff[tc]) 1000 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); 1001 } 1002 1003 for (i = 0; i < adapter->num_xdp_queues; i++) { 1004 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; 1005 1006 tc = xdp_ring->dcb_tc; 1007 if (xoff[tc]) 1008 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); 1009 } 1010 } 1011 1012 static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) 1013 { 1014 return ring->stats.packets; 1015 } 1016 1017 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) 1018 { 1019 unsigned int head, tail; 1020 1021 head = ring->next_to_clean; 1022 tail = ring->next_to_use; 1023 1024 return ((head <= tail) ? tail : tail + ring->count) - head; 1025 } 1026 1027 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) 1028 { 1029 u32 tx_done = ixgbe_get_tx_completed(tx_ring); 1030 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 1031 u32 tx_pending = ixgbe_get_tx_pending(tx_ring); 1032 1033 clear_check_for_tx_hang(tx_ring); 1034 1035 /* 1036 * Check for a hung queue, but be thorough. This verifies 1037 * that a transmit has been completed since the previous 1038 * check AND there is at least one packet pending. The 1039 * ARMED bit is set to indicate a potential hang. 
The 1040 * bit is cleared if a pause frame is received to remove 1041 * false hang detection due to PFC or 802.3x frames. By 1042 * requiring this to fail twice we avoid races with 1043 * pfc clearing the ARMED bit and conditions where we 1044 * run the check_tx_hang logic with a transmit completion 1045 * pending but without time to complete it yet. 1046 */ 1047 if (tx_done_old == tx_done && tx_pending) 1048 /* make sure it is true for two checks in a row */ 1049 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, 1050 &tx_ring->state); 1051 /* update completed stats and continue */ 1052 tx_ring->tx_stats.tx_done_old = tx_done; 1053 /* reset the countdown */ 1054 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); 1055 1056 return false; 1057 } 1058 1059 /** 1060 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout 1061 * @adapter: driver private struct 1062 **/ 1063 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) 1064 { 1065 1066 /* Do the reset outside of interrupt context */ 1067 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 1068 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 1069 e_warn(drv, "initiating reset due to tx timeout\n"); 1070 ixgbe_service_event_schedule(adapter); 1071 } 1072 } 1073 1074 /** 1075 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate 1076 * @netdev: network interface device structure 1077 * @queue_index: Tx queue to set 1078 * @maxrate: desired maximum transmit bitrate 1079 **/ 1080 static int ixgbe_tx_maxrate(struct net_device *netdev, 1081 int queue_index, u32 maxrate) 1082 { 1083 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1084 struct ixgbe_hw *hw = &adapter->hw; 1085 u32 bcnrc_val = ixgbe_link_mbps(adapter); 1086 1087 if (!maxrate) 1088 return 0; 1089 1090 /* Calculate the rate factor values to set */ 1091 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; 1092 bcnrc_val /= maxrate; 1093 1094 /* clear everything but the rate factor */ 1095 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | 1096 IXGBE_RTTBCNRC_RF_DEC_MASK; 1097 1098 /* enable the rate scheduler */ 1099 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; 1100 1101 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); 1102 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 1103 1104 return 0; 1105 } 1106 1107 /** 1108 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 1109 * @q_vector: structure containing interrupt and ring information 1110 * @tx_ring: tx ring to clean 1111 * @napi_budget: Used to determine if we are in netpoll 1112 **/ 1113 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, 1114 struct ixgbe_ring *tx_ring, int napi_budget) 1115 { 1116 struct ixgbe_adapter *adapter = q_vector->adapter; 1117 struct ixgbe_tx_buffer *tx_buffer; 1118 union ixgbe_adv_tx_desc *tx_desc; 1119 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; 1120 unsigned int budget = q_vector->tx.work_limit; 1121 unsigned int i = tx_ring->next_to_clean; 1122 1123 if (test_bit(__IXGBE_DOWN, &adapter->state)) 1124 return true; 1125 1126 tx_buffer = &tx_ring->tx_buffer_info[i]; 1127 tx_desc = IXGBE_TX_DESC(tx_ring, i); 1128 i -= tx_ring->count; 1129 1130 do { 1131 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 1132 1133 /* if next_to_watch is not set then there is no work pending */ 1134 if (!eop_desc) 1135 break; 1136 1137 /* prevent any other reads prior to eop_desc */ 1138 smp_rmb(); 1139 1140 /* if DD is not set pending work has not been completed */ 1141 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 1142 break; 1143 1144 /* 
clear next_to_watch to prevent false hangs */ 1145 tx_buffer->next_to_watch = NULL; 1146 1147 /* update the statistics for this packet */ 1148 total_bytes += tx_buffer->bytecount; 1149 total_packets += tx_buffer->gso_segs; 1150 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) 1151 total_ipsec++; 1152 1153 /* free the skb */ 1154 if (ring_is_xdp(tx_ring)) 1155 xdp_return_frame(tx_buffer->xdpf); 1156 else 1157 napi_consume_skb(tx_buffer->skb, napi_budget); 1158 1159 /* unmap skb header data */ 1160 dma_unmap_single(tx_ring->dev, 1161 dma_unmap_addr(tx_buffer, dma), 1162 dma_unmap_len(tx_buffer, len), 1163 DMA_TO_DEVICE); 1164 1165 /* clear tx_buffer data */ 1166 dma_unmap_len_set(tx_buffer, len, 0); 1167 1168 /* unmap remaining buffers */ 1169 while (tx_desc != eop_desc) { 1170 tx_buffer++; 1171 tx_desc++; 1172 i++; 1173 if (unlikely(!i)) { 1174 i -= tx_ring->count; 1175 tx_buffer = tx_ring->tx_buffer_info; 1176 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 1177 } 1178 1179 /* unmap any remaining paged data */ 1180 if (dma_unmap_len(tx_buffer, len)) { 1181 dma_unmap_page(tx_ring->dev, 1182 dma_unmap_addr(tx_buffer, dma), 1183 dma_unmap_len(tx_buffer, len), 1184 DMA_TO_DEVICE); 1185 dma_unmap_len_set(tx_buffer, len, 0); 1186 } 1187 } 1188 1189 /* move us one more past the eop_desc for start of next pkt */ 1190 tx_buffer++; 1191 tx_desc++; 1192 i++; 1193 if (unlikely(!i)) { 1194 i -= tx_ring->count; 1195 tx_buffer = tx_ring->tx_buffer_info; 1196 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 1197 } 1198 1199 /* issue prefetch for next Tx descriptor */ 1200 prefetch(tx_desc); 1201 1202 /* update budget accounting */ 1203 budget--; 1204 } while (likely(budget)); 1205 1206 i += tx_ring->count; 1207 tx_ring->next_to_clean = i; 1208 u64_stats_update_begin(&tx_ring->syncp); 1209 tx_ring->stats.bytes += total_bytes; 1210 tx_ring->stats.packets += total_packets; 1211 u64_stats_update_end(&tx_ring->syncp); 1212 q_vector->tx.total_bytes += total_bytes; 1213 q_vector->tx.total_packets += total_packets; 1214 adapter->tx_ipsec += total_ipsec; 1215 1216 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { 1217 /* schedule immediate reset if we believe we hung */ 1218 struct ixgbe_hw *hw = &adapter->hw; 1219 e_err(drv, "Detected Tx Unit Hang %s\n" 1220 " Tx Queue <%d>\n" 1221 " TDH, TDT <%x>, <%x>\n" 1222 " next_to_use <%x>\n" 1223 " next_to_clean <%x>\n" 1224 "tx_buffer_info[next_to_clean]\n" 1225 " time_stamp <%lx>\n" 1226 " jiffies <%lx>\n", 1227 ring_is_xdp(tx_ring) ? 
"(XDP)" : "", 1228 tx_ring->queue_index, 1229 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), 1230 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), 1231 tx_ring->next_to_use, i, 1232 tx_ring->tx_buffer_info[i].time_stamp, jiffies); 1233 1234 if (!ring_is_xdp(tx_ring)) 1235 netif_stop_subqueue(tx_ring->netdev, 1236 tx_ring->queue_index); 1237 1238 e_info(probe, 1239 "tx hang %d detected on queue %d, resetting adapter\n", 1240 adapter->tx_timeout_count + 1, tx_ring->queue_index); 1241 1242 /* schedule immediate reset if we believe we hung */ 1243 ixgbe_tx_timeout_reset(adapter); 1244 1245 /* the adapter is about to reset, no point in enabling stuff */ 1246 return true; 1247 } 1248 1249 if (ring_is_xdp(tx_ring)) 1250 return !!budget; 1251 1252 netdev_tx_completed_queue(txring_txq(tx_ring), 1253 total_packets, total_bytes); 1254 1255 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 1256 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 1257 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 1258 /* Make sure that anybody stopping the queue after this 1259 * sees the new next_to_clean. 1260 */ 1261 smp_mb(); 1262 if (__netif_subqueue_stopped(tx_ring->netdev, 1263 tx_ring->queue_index) 1264 && !test_bit(__IXGBE_DOWN, &adapter->state)) { 1265 netif_wake_subqueue(tx_ring->netdev, 1266 tx_ring->queue_index); 1267 ++tx_ring->tx_stats.restart_queue; 1268 } 1269 } 1270 1271 return !!budget; 1272 } 1273 1274 #ifdef CONFIG_IXGBE_DCA 1275 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 1276 struct ixgbe_ring *tx_ring, 1277 int cpu) 1278 { 1279 struct ixgbe_hw *hw = &adapter->hw; 1280 u32 txctrl = 0; 1281 u16 reg_offset; 1282 1283 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1284 txctrl = dca3_get_tag(tx_ring->dev, cpu); 1285 1286 switch (hw->mac.type) { 1287 case ixgbe_mac_82598EB: 1288 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); 1289 break; 1290 case ixgbe_mac_82599EB: 1291 case ixgbe_mac_X540: 1292 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); 1293 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; 1294 break; 1295 default: 1296 /* for unknown hardware do not write register */ 1297 return; 1298 } 1299 1300 /* 1301 * We can enable relaxed ordering for reads, but not writes when 1302 * DCA is enabled. This is due to a known issue in some chipsets 1303 * which will cause the DCA tag to be cleared. 1304 */ 1305 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | 1306 IXGBE_DCA_TXCTRL_DATA_RRO_EN | 1307 IXGBE_DCA_TXCTRL_DESC_DCA_EN; 1308 1309 IXGBE_WRITE_REG(hw, reg_offset, txctrl); 1310 } 1311 1312 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 1313 struct ixgbe_ring *rx_ring, 1314 int cpu) 1315 { 1316 struct ixgbe_hw *hw = &adapter->hw; 1317 u32 rxctrl = 0; 1318 u8 reg_idx = rx_ring->reg_idx; 1319 1320 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1321 rxctrl = dca3_get_tag(rx_ring->dev, cpu); 1322 1323 switch (hw->mac.type) { 1324 case ixgbe_mac_82599EB: 1325 case ixgbe_mac_X540: 1326 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; 1327 break; 1328 default: 1329 break; 1330 } 1331 1332 /* 1333 * We can enable relaxed ordering for reads, but not writes when 1334 * DCA is enabled. This is due to a known issue in some chipsets 1335 * which will cause the DCA tag to be cleared. 
1336 */ 1337 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | 1338 IXGBE_DCA_RXCTRL_DATA_DCA_EN | 1339 IXGBE_DCA_RXCTRL_DESC_DCA_EN; 1340 1341 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); 1342 } 1343 1344 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) 1345 { 1346 struct ixgbe_adapter *adapter = q_vector->adapter; 1347 struct ixgbe_ring *ring; 1348 int cpu = get_cpu(); 1349 1350 if (q_vector->cpu == cpu) 1351 goto out_no_update; 1352 1353 ixgbe_for_each_ring(ring, q_vector->tx) 1354 ixgbe_update_tx_dca(adapter, ring, cpu); 1355 1356 ixgbe_for_each_ring(ring, q_vector->rx) 1357 ixgbe_update_rx_dca(adapter, ring, cpu); 1358 1359 q_vector->cpu = cpu; 1360 out_no_update: 1361 put_cpu(); 1362 } 1363 1364 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) 1365 { 1366 int i; 1367 1368 /* always use CB2 mode, difference is masked in the CB driver */ 1369 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1370 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1371 IXGBE_DCA_CTRL_DCA_MODE_CB2); 1372 else 1373 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1374 IXGBE_DCA_CTRL_DCA_DISABLE); 1375 1376 for (i = 0; i < adapter->num_q_vectors; i++) { 1377 adapter->q_vector[i]->cpu = -1; 1378 ixgbe_update_dca(adapter->q_vector[i]); 1379 } 1380 } 1381 1382 static int __ixgbe_notify_dca(struct device *dev, void *data) 1383 { 1384 struct ixgbe_adapter *adapter = dev_get_drvdata(dev); 1385 unsigned long event = *(unsigned long *)data; 1386 1387 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) 1388 return 0; 1389 1390 switch (event) { 1391 case DCA_PROVIDER_ADD: 1392 /* if we're already enabled, don't do it again */ 1393 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1394 break; 1395 if (dca_add_requester(dev) == 0) { 1396 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 1397 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1398 IXGBE_DCA_CTRL_DCA_MODE_CB2); 1399 break; 1400 } 1401 fallthrough; /* DCA is disabled. */ 1402 case DCA_PROVIDER_REMOVE: 1403 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 1404 dca_remove_requester(dev); 1405 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 1406 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1407 IXGBE_DCA_CTRL_DCA_DISABLE); 1408 } 1409 break; 1410 } 1411 1412 return 0; 1413 } 1414 1415 #endif /* CONFIG_IXGBE_DCA */ 1416 1417 #define IXGBE_RSS_L4_TYPES_MASK \ 1418 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ 1419 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ 1420 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ 1421 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) 1422 1423 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, 1424 union ixgbe_adv_rx_desc *rx_desc, 1425 struct sk_buff *skb) 1426 { 1427 u16 rss_type; 1428 1429 if (!(ring->netdev->features & NETIF_F_RXHASH)) 1430 return; 1431 1432 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & 1433 IXGBE_RXDADV_RSSTYPE_MASK; 1434 1435 if (!rss_type) 1436 return; 1437 1438 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 1439 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
1440 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); 1441 } 1442 1443 #ifdef IXGBE_FCOE 1444 /** 1445 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type 1446 * @ring: structure containing ring specific data 1447 * @rx_desc: advanced rx descriptor 1448 * 1449 * Returns : true if it is FCoE pkt 1450 */ 1451 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, 1452 union ixgbe_adv_rx_desc *rx_desc) 1453 { 1454 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1455 1456 return test_bit(__IXGBE_RX_FCOE, &ring->state) && 1457 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == 1458 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << 1459 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); 1460 } 1461 1462 #endif /* IXGBE_FCOE */ 1463 /** 1464 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum 1465 * @ring: structure containing ring specific data 1466 * @rx_desc: current Rx descriptor being processed 1467 * @skb: skb currently being received and modified 1468 **/ 1469 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, 1470 union ixgbe_adv_rx_desc *rx_desc, 1471 struct sk_buff *skb) 1472 { 1473 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1474 bool encap_pkt = false; 1475 1476 skb_checksum_none_assert(skb); 1477 1478 /* Rx csum disabled */ 1479 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 1480 return; 1481 1482 /* check for VXLAN and Geneve packets */ 1483 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { 1484 encap_pkt = true; 1485 skb->encapsulation = 1; 1486 } 1487 1488 /* if IP and error */ 1489 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && 1490 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { 1491 ring->rx_stats.csum_err++; 1492 return; 1493 } 1494 1495 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) 1496 return; 1497 1498 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { 1499 /* 1500 * 82599 errata, UDP frames with a 0 checksum can be marked as 1501 * checksum errors. 1502 */ 1503 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && 1504 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) 1505 return; 1506 1507 ring->rx_stats.csum_err++; 1508 return; 1509 } 1510 1511 /* It must be a TCP or UDP packet with a valid checksum */ 1512 skb->ip_summed = CHECKSUM_UNNECESSARY; 1513 if (encap_pkt) { 1514 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) 1515 return; 1516 1517 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { 1518 skb->ip_summed = CHECKSUM_NONE; 1519 return; 1520 } 1521 /* If we checked the outer header let the stack know */ 1522 skb->csum_level = 1; 1523 } 1524 } 1525 1526 static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) 1527 { 1528 return ring_uses_build_skb(rx_ring) ? 
IXGBE_SKB_PAD : 0; 1529 } 1530 1531 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, 1532 struct ixgbe_rx_buffer *bi) 1533 { 1534 struct page *page = bi->page; 1535 dma_addr_t dma; 1536 1537 /* since we are recycling buffers we should seldom need to alloc */ 1538 if (likely(page)) 1539 return true; 1540 1541 /* alloc new page for storage */ 1542 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); 1543 if (unlikely(!page)) { 1544 rx_ring->rx_stats.alloc_rx_page_failed++; 1545 return false; 1546 } 1547 1548 /* map page for use */ 1549 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 1550 ixgbe_rx_pg_size(rx_ring), 1551 DMA_FROM_DEVICE, 1552 IXGBE_RX_DMA_ATTR); 1553 1554 /* 1555 * if mapping failed free memory back to system since 1556 * there isn't much point in holding memory we can't use 1557 */ 1558 if (dma_mapping_error(rx_ring->dev, dma)) { 1559 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); 1560 1561 rx_ring->rx_stats.alloc_rx_page_failed++; 1562 return false; 1563 } 1564 1565 bi->dma = dma; 1566 bi->page = page; 1567 bi->page_offset = rx_ring->rx_offset; 1568 page_ref_add(page, USHRT_MAX - 1); 1569 bi->pagecnt_bias = USHRT_MAX; 1570 rx_ring->rx_stats.alloc_rx_page++; 1571 1572 return true; 1573 } 1574 1575 /** 1576 * ixgbe_alloc_rx_buffers - Replace used receive buffers 1577 * @rx_ring: ring to place buffers on 1578 * @cleaned_count: number of buffers to replace 1579 **/ 1580 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) 1581 { 1582 union ixgbe_adv_rx_desc *rx_desc; 1583 struct ixgbe_rx_buffer *bi; 1584 u16 i = rx_ring->next_to_use; 1585 u16 bufsz; 1586 1587 /* nothing to do */ 1588 if (!cleaned_count) 1589 return; 1590 1591 rx_desc = IXGBE_RX_DESC(rx_ring, i); 1592 bi = &rx_ring->rx_buffer_info[i]; 1593 i -= rx_ring->count; 1594 1595 bufsz = ixgbe_rx_bufsz(rx_ring); 1596 1597 do { 1598 if (!ixgbe_alloc_mapped_page(rx_ring, bi)) 1599 break; 1600 1601 /* sync the buffer for use by the device */ 1602 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 1603 bi->page_offset, bufsz, 1604 DMA_FROM_DEVICE); 1605 1606 /* 1607 * Refresh the desc even if buffer_addrs didn't change 1608 * because each write-back erases this info. 1609 */ 1610 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 1611 1612 rx_desc++; 1613 bi++; 1614 i++; 1615 if (unlikely(!i)) { 1616 rx_desc = IXGBE_RX_DESC(rx_ring, 0); 1617 bi = rx_ring->rx_buffer_info; 1618 i -= rx_ring->count; 1619 } 1620 1621 /* clear the length for the next_to_use descriptor */ 1622 rx_desc->wb.upper.length = 0; 1623 1624 cleaned_count--; 1625 } while (cleaned_count); 1626 1627 i += rx_ring->count; 1628 1629 if (rx_ring->next_to_use != i) { 1630 rx_ring->next_to_use = i; 1631 1632 /* update next to alloc since we have filled the ring */ 1633 rx_ring->next_to_alloc = i; 1634 1635 /* Force memory writes to complete before letting h/w 1636 * know there are new descriptors to fetch. (Only 1637 * applicable for weak-ordered memory model archs, 1638 * such as IA-64). 
1639 */ 1640 wmb(); 1641 writel(i, rx_ring->tail); 1642 } 1643 } 1644 1645 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, 1646 struct sk_buff *skb) 1647 { 1648 u16 hdr_len = skb_headlen(skb); 1649 1650 /* set gso_size to avoid messing up TCP MSS */ 1651 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), 1652 IXGBE_CB(skb)->append_cnt); 1653 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 1654 } 1655 1656 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, 1657 struct sk_buff *skb) 1658 { 1659 /* if append_cnt is 0 then frame is not RSC */ 1660 if (!IXGBE_CB(skb)->append_cnt) 1661 return; 1662 1663 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; 1664 rx_ring->rx_stats.rsc_flush++; 1665 1666 ixgbe_set_rsc_gso_size(rx_ring, skb); 1667 1668 /* gso_size is computed using append_cnt so always clear it last */ 1669 IXGBE_CB(skb)->append_cnt = 0; 1670 } 1671 1672 /** 1673 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor 1674 * @rx_ring: rx descriptor ring packet is being transacted on 1675 * @rx_desc: pointer to the EOP Rx descriptor 1676 * @skb: pointer to current skb being populated 1677 * 1678 * This function checks the ring, descriptor, and packet information in 1679 * order to populate the hash, checksum, VLAN, timestamp, protocol, and 1680 * other fields within the skb. 1681 **/ 1682 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, 1683 union ixgbe_adv_rx_desc *rx_desc, 1684 struct sk_buff *skb) 1685 { 1686 struct net_device *dev = rx_ring->netdev; 1687 u32 flags = rx_ring->q_vector->adapter->flags; 1688 1689 ixgbe_update_rsc_stats(rx_ring, skb); 1690 1691 ixgbe_rx_hash(rx_ring, rx_desc, skb); 1692 1693 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1694 1695 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) 1696 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 1697 1698 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1699 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1700 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1701 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1702 } 1703 1704 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) 1705 ixgbe_ipsec_rx(rx_ring, rx_desc, skb); 1706 1707 /* record Rx queue, or update MACVLAN statistics */ 1708 if (netif_is_ixgbe(dev)) 1709 skb_record_rx_queue(skb, rx_ring->queue_index); 1710 else 1711 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, 1712 false); 1713 1714 skb->protocol = eth_type_trans(skb, dev); 1715 } 1716 1717 void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, 1718 struct sk_buff *skb) 1719 { 1720 napi_gro_receive(&q_vector->napi, skb); 1721 } 1722 1723 /** 1724 * ixgbe_is_non_eop - process handling of non-EOP buffers 1725 * @rx_ring: Rx ring being processed 1726 * @rx_desc: Rx descriptor for current buffer 1727 * @skb: Current socket buffer containing buffer in progress 1728 * 1729 * This function updates next to clean. If the buffer is an EOP buffer 1730 * this function exits returning false, otherwise it will place the 1731 * sk_buff in the next buffer to be chained and return true indicating 1732 * that this is in fact a non-EOP buffer. 1733 **/ 1734 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, 1735 union ixgbe_adv_rx_desc *rx_desc, 1736 struct sk_buff *skb) 1737 { 1738 u32 ntc = rx_ring->next_to_clean + 1; 1739 1740 /* fetch, update, and store next to clean */ 1741 ntc = (ntc < rx_ring->count) ? 
ntc : 0; 1742 rx_ring->next_to_clean = ntc; 1743 1744 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); 1745 1746 /* update RSC append count if present */ 1747 if (ring_is_rsc_enabled(rx_ring)) { 1748 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & 1749 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); 1750 1751 if (unlikely(rsc_enabled)) { 1752 u32 rsc_cnt = le32_to_cpu(rsc_enabled); 1753 1754 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; 1755 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; 1756 1757 /* update ntc based on RSC value */ 1758 ntc = le32_to_cpu(rx_desc->wb.upper.status_error); 1759 ntc &= IXGBE_RXDADV_NEXTP_MASK; 1760 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; 1761 } 1762 } 1763 1764 /* if we are the last buffer then there is nothing else to do */ 1765 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) 1766 return false; 1767 1768 /* place skb in next buffer to be received */ 1769 rx_ring->rx_buffer_info[ntc].skb = skb; 1770 rx_ring->rx_stats.non_eop_descs++; 1771 1772 return true; 1773 } 1774 1775 /** 1776 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail 1777 * @rx_ring: rx descriptor ring packet is being transacted on 1778 * @skb: pointer to current skb being adjusted 1779 * 1780 * This function is an ixgbe specific version of __pskb_pull_tail. The 1781 * main difference between this version and the original function is that 1782 * this function can make several assumptions about the state of things 1783 * that allow for significant optimizations versus the standard function. 1784 * As a result we can do things like drop a frag and maintain an accurate 1785 * truesize for the skb. 1786 */ 1787 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, 1788 struct sk_buff *skb) 1789 { 1790 skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 1791 unsigned char *va; 1792 unsigned int pull_len; 1793 1794 /* 1795 * it is valid to use page_address instead of kmap since we are 1796 * working with pages allocated out of the lomem pool per 1797 * alloc_page(GFP_ATOMIC) 1798 */ 1799 va = skb_frag_address(frag); 1800 1801 /* 1802 * we need the header to contain the greater of either ETH_HLEN or 1803 * 60 bytes if the skb->len is less than 60 for skb_pad. 1804 */ 1805 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE); 1806 1807 /* align pull length to size of long to optimize memcpy performance */ 1808 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); 1809 1810 /* update all of the pointers */ 1811 skb_frag_size_sub(frag, pull_len); 1812 skb_frag_off_add(frag, pull_len); 1813 skb->data_len -= pull_len; 1814 skb->tail += pull_len; 1815 } 1816 1817 /** 1818 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB 1819 * @rx_ring: rx descriptor ring packet is being transacted on 1820 * @skb: pointer to current skb being updated 1821 * 1822 * This function provides a basic DMA sync up for the first fragment of an 1823 * skb. The reason for doing this is that the first fragment cannot be 1824 * unmapped until we have reached the end of packet descriptor for a buffer 1825 * chain. 
1826 */
1827 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1828 struct sk_buff *skb)
1829 {
1830 if (ring_uses_build_skb(rx_ring)) {
1831 unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
1832 unsigned long offset = (unsigned long)(skb->data) & mask;
1833
1834 dma_sync_single_range_for_cpu(rx_ring->dev,
1835 IXGBE_CB(skb)->dma,
1836 offset,
1837 skb_headlen(skb),
1838 DMA_FROM_DEVICE);
1839 } else {
1840 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1841
1842 dma_sync_single_range_for_cpu(rx_ring->dev,
1843 IXGBE_CB(skb)->dma,
1844 skb_frag_off(frag),
1845 skb_frag_size(frag),
1846 DMA_FROM_DEVICE);
1847 }
1848
1849 /* If the page was released, just unmap it. */
1850 if (unlikely(IXGBE_CB(skb)->page_released)) {
1851 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1852 ixgbe_rx_pg_size(rx_ring),
1853 DMA_FROM_DEVICE,
1854 IXGBE_RX_DMA_ATTR);
1855 }
1856 }
1857
1858 /**
1859 * ixgbe_cleanup_headers - Correct corrupted or empty headers
1860 * @rx_ring: rx descriptor ring packet is being transacted on
1861 * @rx_desc: pointer to the EOP Rx descriptor
1862 * @skb: pointer to current skb being fixed
1863 *
1864 * Check if the skb is valid; in the XDP case it will be an error pointer.
1865 * Return true in this case to abort processing and advance to the next
1866 * descriptor.
1867 *
1868 * Check for corrupted packet headers caused by senders on the local L2
1869 * embedded NIC switch not setting up their Tx Descriptors right. These
1870 * should be very rare.
1871 *
1872 * Also address the case where we are pulling data in on pages only
1873 * and as such no data is present in the skb header.
1874 *
1875 * In addition, if the skb is not at least 60 bytes we need to pad it so that
1876 * it is large enough to qualify as a valid Ethernet frame.
1877 *
1878 * Returns true if an error was encountered and skb was freed.
1879 **/
1880 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1881 union ixgbe_adv_rx_desc *rx_desc,
1882 struct sk_buff *skb)
1883 {
1884 struct net_device *netdev = rx_ring->netdev;
1885
1886 /* XDP packets use error pointer so abort at this point */
1887 if (IS_ERR(skb))
1888 return true;
1889
1890 /* Verify netdev is present, and that packet does not have any
1891 * errors that would be unacceptable to the netdev.
1892 */ 1893 if (!netdev || 1894 (unlikely(ixgbe_test_staterr(rx_desc, 1895 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && 1896 !(netdev->features & NETIF_F_RXALL)))) { 1897 dev_kfree_skb_any(skb); 1898 return true; 1899 } 1900 1901 /* place header in linear portion of buffer */ 1902 if (!skb_headlen(skb)) 1903 ixgbe_pull_tail(rx_ring, skb); 1904 1905 #ifdef IXGBE_FCOE 1906 /* do not attempt to pad FCoE Frames as this will disrupt DDP */ 1907 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) 1908 return false; 1909 1910 #endif 1911 /* if eth_skb_pad returns an error the skb was freed */ 1912 if (eth_skb_pad(skb)) 1913 return true; 1914 1915 return false; 1916 } 1917 1918 /** 1919 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring 1920 * @rx_ring: rx descriptor ring to store buffers on 1921 * @old_buff: donor buffer to have page reused 1922 * 1923 * Synchronizes page for reuse by the adapter 1924 **/ 1925 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, 1926 struct ixgbe_rx_buffer *old_buff) 1927 { 1928 struct ixgbe_rx_buffer *new_buff; 1929 u16 nta = rx_ring->next_to_alloc; 1930 1931 new_buff = &rx_ring->rx_buffer_info[nta]; 1932 1933 /* update, and store next to alloc */ 1934 nta++; 1935 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 1936 1937 /* Transfer page from old buffer to new buffer. 1938 * Move each member individually to avoid possible store 1939 * forwarding stalls and unnecessary copy of skb. 1940 */ 1941 new_buff->dma = old_buff->dma; 1942 new_buff->page = old_buff->page; 1943 new_buff->page_offset = old_buff->page_offset; 1944 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1945 } 1946 1947 static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer, 1948 int rx_buffer_pgcnt) 1949 { 1950 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1951 struct page *page = rx_buffer->page; 1952 1953 /* avoid re-using remote and pfmemalloc pages */ 1954 if (!dev_page_is_reusable(page)) 1955 return false; 1956 1957 #if (PAGE_SIZE < 8192) 1958 /* if we are only owner of page we can reuse it */ 1959 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 1960 return false; 1961 #else 1962 /* The last offset is a bit aggressive in that we assume the 1963 * worst case of FCoE being enabled and using a 3K buffer. 1964 * However this should have minimal impact as the 1K extra is 1965 * still less than one buffer in size. 1966 */ 1967 #define IXGBE_LAST_OFFSET \ 1968 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) 1969 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) 1970 return false; 1971 #endif 1972 1973 /* If we have drained the page fragment pool we need to update 1974 * the pagecnt_bias and page count so that we fully restock the 1975 * number of references the driver holds. 1976 */ 1977 if (unlikely(pagecnt_bias == 1)) { 1978 page_ref_add(page, USHRT_MAX - 1); 1979 rx_buffer->pagecnt_bias = USHRT_MAX; 1980 } 1981 1982 return true; 1983 } 1984 1985 /** 1986 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff 1987 * @rx_ring: rx descriptor ring to transact packets on 1988 * @rx_buffer: buffer containing page to add 1989 * @skb: sk_buff to place the data into 1990 * @size: size of data in rx_buffer 1991 * 1992 * This function will add the data contained in rx_buffer->page to the skb. 1993 * This is done either through a direct copy if the data in the buffer is 1994 * less than the skb header size, otherwise it will just attach the page as 1995 * a frag to the skb. 
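 *
 * For example, with 4K pages and the default 2K buffers (so truesize is
 * half the page), handing the other half back to the ring is just a flip
 * between the two halves:
 *
 *	rx_buffer->page_offset ^= truesize;
 *
 * whereas on larger page sizes the offset only ever moves forward.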
1996 *
1997 * The function will then update the page offset if necessary so that the
1998 * buffer can be reused by the adapter.
1999 **/
2000 static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2001 struct ixgbe_rx_buffer *rx_buffer,
2002 struct sk_buff *skb,
2003 unsigned int size)
2004 {
2005 #if (PAGE_SIZE < 8192)
2006 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2007 #else
2008 unsigned int truesize = rx_ring->rx_offset ?
2009 SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
2010 SKB_DATA_ALIGN(size);
2011 #endif
2012 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2013 rx_buffer->page_offset, size, truesize);
2014 #if (PAGE_SIZE < 8192)
2015 rx_buffer->page_offset ^= truesize;
2016 #else
2017 rx_buffer->page_offset += truesize;
2018 #endif
2019 }
2020
2021 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2022 union ixgbe_adv_rx_desc *rx_desc,
2023 struct sk_buff **skb,
2024 const unsigned int size,
2025 int *rx_buffer_pgcnt)
2026 {
2027 struct ixgbe_rx_buffer *rx_buffer;
2028
2029 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2030 *rx_buffer_pgcnt =
2031 #if (PAGE_SIZE < 8192)
2032 page_count(rx_buffer->page);
2033 #else
2034 0;
2035 #endif
2036 prefetchw(rx_buffer->page);
2037 *skb = rx_buffer->skb;
2038
2039 /* Delay unmapping of the first packet. It carries the header
2040 * information, HW may still access the header after the writeback.
2041 * Only unmap it when EOP is reached
2042 */
2043 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2044 if (!*skb)
2045 goto skip_sync;
2046 } else {
2047 if (*skb)
2048 ixgbe_dma_sync_frag(rx_ring, *skb);
2049 }
2050
2051 /* we are reusing so sync this buffer for CPU use */
2052 dma_sync_single_range_for_cpu(rx_ring->dev,
2053 rx_buffer->dma,
2054 rx_buffer->page_offset,
2055 size,
2056 DMA_FROM_DEVICE);
2057 skip_sync:
2058 rx_buffer->pagecnt_bias--;
2059
2060 return rx_buffer;
2061 }
2062
2063 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2064 struct ixgbe_rx_buffer *rx_buffer,
2065 struct sk_buff *skb,
2066 int rx_buffer_pgcnt)
2067 {
2068 if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2069 /* hand second half of page back to the ring */
2070 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2071 } else {
2072 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2073 /* the page has been released from the ring */
2074 IXGBE_CB(skb)->page_released = true;
2075 } else {
2076 /* we are not reusing the buffer so unmap it */
2077 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2078 ixgbe_rx_pg_size(rx_ring),
2079 DMA_FROM_DEVICE,
2080 IXGBE_RX_DMA_ATTR);
2081 }
2082 __page_frag_cache_drain(rx_buffer->page,
2083 rx_buffer->pagecnt_bias);
2084 }
2085
2086 /* clear contents of rx_buffer */
2087 rx_buffer->page = NULL;
2088 rx_buffer->skb = NULL;
2089 }
2090
2091 static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2092 struct ixgbe_rx_buffer *rx_buffer,
2093 struct xdp_buff *xdp,
2094 union ixgbe_adv_rx_desc *rx_desc)
2095 {
2096 unsigned int size = xdp->data_end - xdp->data;
2097 #if (PAGE_SIZE < 8192)
2098 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2099 #else
2100 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2101 xdp->data_hard_start);
2102 #endif
2103 struct sk_buff *skb;
2104
2105 /* prefetch first cache line of first page */
2106 net_prefetch(xdp->data);
2107
2108 /* Note, we get here by enabling legacy-rx via:
2109 *
2110 * ethtool --set-priv-flags <dev> legacy-rx on
2111 *
2112 * In this mode, we currently get 0 extra XDP headroom as
2113 * opposed to having legacy-rx off, where we process XDP
2114 * packets going to stack via ixgbe_build_skb(). The latter
2115 * provides us currently with 192 bytes of headroom.
2116 *
2117 * For ixgbe_construct_skb() mode it means that the
2118 * xdp->data_meta will always point to xdp->data, since
2119 * the helper cannot expand the head. Should this ever
2120 * change in the future for legacy-rx mode on, then let's also
2121 * add xdp->data_meta handling here.
2122 */
2123
2124 /* allocate a skb to store the frags */
2125 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2126 if (unlikely(!skb))
2127 return NULL;
2128
2129 if (size > IXGBE_RX_HDR_SIZE) {
2130 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2131 IXGBE_CB(skb)->dma = rx_buffer->dma;
2132
2133 skb_add_rx_frag(skb, 0, rx_buffer->page,
2134 xdp->data - page_address(rx_buffer->page),
2135 size, truesize);
2136 #if (PAGE_SIZE < 8192)
2137 rx_buffer->page_offset ^= truesize;
2138 #else
2139 rx_buffer->page_offset += truesize;
2140 #endif
2141 } else {
2142 memcpy(__skb_put(skb, size),
2143 xdp->data, ALIGN(size, sizeof(long)));
2144 rx_buffer->pagecnt_bias++;
2145 }
2146
2147 return skb;
2148 }
2149
2150 static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2151 struct ixgbe_rx_buffer *rx_buffer,
2152 struct xdp_buff *xdp,
2153 union ixgbe_adv_rx_desc *rx_desc)
2154 {
2155 unsigned int metasize = xdp->data - xdp->data_meta;
2156 #if (PAGE_SIZE < 8192)
2157 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2158 #else
2159 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2160 SKB_DATA_ALIGN(xdp->data_end -
2161 xdp->data_hard_start);
2162 #endif
2163 struct sk_buff *skb;
2164
2165 /* Prefetch first cache line of first page. If xdp->data_meta
2166 * is unused, this points exactly to xdp->data, otherwise we
2167 * likely have a consumer accessing first few bytes of meta
2168 * data, and then actual data.
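 *
 * For instance, an XDP program that reserved metadata space with
 * bpf_xdp_adjust_meta() leaves xdp->data_meta below xdp->data; the
 * resulting metasize is then handed to the stack through
 * skb_metadata_set() further down.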
2169 */ 2170 net_prefetch(xdp->data_meta); 2171 2172 /* build an skb to around the page buffer */ 2173 skb = napi_build_skb(xdp->data_hard_start, truesize); 2174 if (unlikely(!skb)) 2175 return NULL; 2176 2177 /* update pointers within the skb to store the data */ 2178 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2179 __skb_put(skb, xdp->data_end - xdp->data); 2180 if (metasize) 2181 skb_metadata_set(skb, metasize); 2182 2183 /* record DMA address if this is the start of a chain of buffers */ 2184 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) 2185 IXGBE_CB(skb)->dma = rx_buffer->dma; 2186 2187 /* update buffer offset */ 2188 #if (PAGE_SIZE < 8192) 2189 rx_buffer->page_offset ^= truesize; 2190 #else 2191 rx_buffer->page_offset += truesize; 2192 #endif 2193 2194 return skb; 2195 } 2196 2197 static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, 2198 struct ixgbe_ring *rx_ring, 2199 struct xdp_buff *xdp) 2200 { 2201 int err, result = IXGBE_XDP_PASS; 2202 struct bpf_prog *xdp_prog; 2203 struct ixgbe_ring *ring; 2204 struct xdp_frame *xdpf; 2205 u32 act; 2206 2207 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 2208 2209 if (!xdp_prog) 2210 goto xdp_out; 2211 2212 prefetchw(xdp->data_hard_start); /* xdp_frame write */ 2213 2214 act = bpf_prog_run_xdp(xdp_prog, xdp); 2215 switch (act) { 2216 case XDP_PASS: 2217 break; 2218 case XDP_TX: 2219 xdpf = xdp_convert_buff_to_frame(xdp); 2220 if (unlikely(!xdpf)) 2221 goto out_failure; 2222 ring = ixgbe_determine_xdp_ring(adapter); 2223 if (static_branch_unlikely(&ixgbe_xdp_locking_key)) 2224 spin_lock(&ring->tx_lock); 2225 result = ixgbe_xmit_xdp_ring(ring, xdpf); 2226 if (static_branch_unlikely(&ixgbe_xdp_locking_key)) 2227 spin_unlock(&ring->tx_lock); 2228 if (result == IXGBE_XDP_CONSUMED) 2229 goto out_failure; 2230 break; 2231 case XDP_REDIRECT: 2232 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); 2233 if (err) 2234 goto out_failure; 2235 result = IXGBE_XDP_REDIR; 2236 break; 2237 default: 2238 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); 2239 fallthrough; 2240 case XDP_ABORTED: 2241 out_failure: 2242 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 2243 fallthrough; /* handle aborts by dropping packet */ 2244 case XDP_DROP: 2245 result = IXGBE_XDP_CONSUMED; 2246 break; 2247 } 2248 xdp_out: 2249 return ERR_PTR(-result); 2250 } 2251 2252 static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring, 2253 unsigned int size) 2254 { 2255 unsigned int truesize; 2256 2257 #if (PAGE_SIZE < 8192) 2258 truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ 2259 #else 2260 truesize = rx_ring->rx_offset ? 
2261 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
2262 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2263 SKB_DATA_ALIGN(size);
2264 #endif
2265 return truesize;
2266 }
2267
2268 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2269 struct ixgbe_rx_buffer *rx_buffer,
2270 unsigned int size)
2271 {
2272 unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
2273 #if (PAGE_SIZE < 8192)
2274 rx_buffer->page_offset ^= truesize;
2275 #else
2276 rx_buffer->page_offset += truesize;
2277 #endif
2278 }
2279
2280 /**
2281 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2282 * @q_vector: structure containing interrupt and ring information
2283 * @rx_ring: rx descriptor ring to transact packets on
2284 * @budget: Total limit on number of packets to process
2285 *
2286 * This function provides a "bounce buffer" approach to Rx interrupt
2287 * processing. The advantage to this is that on systems that have
2288 * expensive overhead for IOMMU access this provides a means of avoiding
2289 * it by maintaining the mapping of the page to the system.
2290 *
2291 * Returns amount of work completed
2292 **/
2293 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2294 struct ixgbe_ring *rx_ring,
2295 const int budget)
2296 {
2297 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
2298 struct ixgbe_adapter *adapter = q_vector->adapter;
2299 #ifdef IXGBE_FCOE
2300 int ddp_bytes;
2301 unsigned int mss = 0;
2302 #endif /* IXGBE_FCOE */
2303 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2304 unsigned int offset = rx_ring->rx_offset;
2305 unsigned int xdp_xmit = 0;
2306 struct xdp_buff xdp;
2307
2308 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
2309 #if (PAGE_SIZE < 8192)
2310 frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
2311 #endif
2312 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2313
2314 while (likely(total_rx_packets < budget)) {
2315 union ixgbe_adv_rx_desc *rx_desc;
2316 struct ixgbe_rx_buffer *rx_buffer;
2317 struct sk_buff *skb;
2318 int rx_buffer_pgcnt;
2319 unsigned int size;
2320
2321 /* return some buffers to hardware, one at a time is too slow */
2322 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2323 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2324 cleaned_count = 0;
2325 }
2326
2327 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2328 size = le16_to_cpu(rx_desc->wb.upper.length);
2329 if (!size)
2330 break;
2331
2332 /* This memory barrier is needed to keep us from reading
2333 * any other fields out of the rx_desc until we know the
2334 * descriptor has been written back
2335 */
2336 dma_rmb();
2337
2338 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
2339
2340 /* retrieve a buffer from the ring */
2341 if (!skb) {
2342 unsigned char *hard_start;
2343
2344 hard_start = page_address(rx_buffer->page) +
2345 rx_buffer->page_offset - offset;
2346 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
2347 #if (PAGE_SIZE > 4096)
2348 /* At larger PAGE_SIZE, frame_sz depends on the frame length */
2349 xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
2350 #endif
2351 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2352 }
2353
2354 if (IS_ERR(skb)) {
2355 unsigned int xdp_res = -PTR_ERR(skb);
2356
2357 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2358 xdp_xmit |= xdp_res;
2359 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2360 } else {
2361 rx_buffer->pagecnt_bias++;
2362 }
2363 total_rx_packets++;
2364 total_rx_bytes += size;
2365 } else if (skb) {
2366
ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); 2367 } else if (ring_uses_build_skb(rx_ring)) { 2368 skb = ixgbe_build_skb(rx_ring, rx_buffer, 2369 &xdp, rx_desc); 2370 } else { 2371 skb = ixgbe_construct_skb(rx_ring, rx_buffer, 2372 &xdp, rx_desc); 2373 } 2374 2375 /* exit if we failed to retrieve a buffer */ 2376 if (!skb) { 2377 rx_ring->rx_stats.alloc_rx_buff_failed++; 2378 rx_buffer->pagecnt_bias++; 2379 break; 2380 } 2381 2382 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); 2383 cleaned_count++; 2384 2385 /* place incomplete frames back on ring for completion */ 2386 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) 2387 continue; 2388 2389 /* verify the packet layout is correct */ 2390 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) 2391 continue; 2392 2393 /* probably a little skewed due to removing CRC */ 2394 total_rx_bytes += skb->len; 2395 2396 /* populate checksum, timestamp, VLAN, and protocol */ 2397 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); 2398 2399 #ifdef IXGBE_FCOE 2400 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 2401 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { 2402 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); 2403 /* include DDPed FCoE data */ 2404 if (ddp_bytes > 0) { 2405 if (!mss) { 2406 mss = rx_ring->netdev->mtu - 2407 sizeof(struct fcoe_hdr) - 2408 sizeof(struct fc_frame_header) - 2409 sizeof(struct fcoe_crc_eof); 2410 if (mss > 512) 2411 mss &= ~511; 2412 } 2413 total_rx_bytes += ddp_bytes; 2414 total_rx_packets += DIV_ROUND_UP(ddp_bytes, 2415 mss); 2416 } 2417 if (!ddp_bytes) { 2418 dev_kfree_skb_any(skb); 2419 continue; 2420 } 2421 } 2422 2423 #endif /* IXGBE_FCOE */ 2424 ixgbe_rx_skb(q_vector, skb); 2425 2426 /* update budget accounting */ 2427 total_rx_packets++; 2428 } 2429 2430 if (xdp_xmit & IXGBE_XDP_REDIR) 2431 xdp_do_flush_map(); 2432 2433 if (xdp_xmit & IXGBE_XDP_TX) { 2434 struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); 2435 2436 ixgbe_xdp_ring_update_tail_locked(ring); 2437 } 2438 2439 u64_stats_update_begin(&rx_ring->syncp); 2440 rx_ring->stats.packets += total_rx_packets; 2441 rx_ring->stats.bytes += total_rx_bytes; 2442 u64_stats_update_end(&rx_ring->syncp); 2443 q_vector->rx.total_packets += total_rx_packets; 2444 q_vector->rx.total_bytes += total_rx_bytes; 2445 2446 return total_rx_packets; 2447 } 2448 2449 /** 2450 * ixgbe_configure_msix - Configure MSI-X hardware 2451 * @adapter: board private structure 2452 * 2453 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X 2454 * interrupts. 2455 **/ 2456 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 2457 { 2458 struct ixgbe_q_vector *q_vector; 2459 int v_idx; 2460 u32 mask; 2461 2462 /* Populate MSIX to EITR Select */ 2463 if (adapter->num_vfs > 32) { 2464 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; 2465 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); 2466 } 2467 2468 /* 2469 * Populate the IVAR table and set the ITR values to the 2470 * corresponding register. 
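 *
 * For example, the q_vector that owns Rx ring 3 and Tx ring 3 steers both
 * of that ring's interrupt causes to its own MSI-X vector via
 * ixgbe_set_ivar() and then loads its ITR through ixgbe_write_eitr().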
2471 */ 2472 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 2473 struct ixgbe_ring *ring; 2474 q_vector = adapter->q_vector[v_idx]; 2475 2476 ixgbe_for_each_ring(ring, q_vector->rx) 2477 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); 2478 2479 ixgbe_for_each_ring(ring, q_vector->tx) 2480 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); 2481 2482 ixgbe_write_eitr(q_vector); 2483 } 2484 2485 switch (adapter->hw.mac.type) { 2486 case ixgbe_mac_82598EB: 2487 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 2488 v_idx); 2489 break; 2490 case ixgbe_mac_82599EB: 2491 case ixgbe_mac_X540: 2492 case ixgbe_mac_X550: 2493 case ixgbe_mac_X550EM_x: 2494 case ixgbe_mac_x550em_a: 2495 ixgbe_set_ivar(adapter, -1, 1, v_idx); 2496 break; 2497 default: 2498 break; 2499 } 2500 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 2501 2502 /* set up to autoclear timer, and the vectors */ 2503 mask = IXGBE_EIMS_ENABLE_MASK; 2504 mask &= ~(IXGBE_EIMS_OTHER | 2505 IXGBE_EIMS_MAILBOX | 2506 IXGBE_EIMS_LSC); 2507 2508 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 2509 } 2510 2511 /** 2512 * ixgbe_update_itr - update the dynamic ITR value based on statistics 2513 * @q_vector: structure containing interrupt and ring information 2514 * @ring_container: structure containing ring performance data 2515 * 2516 * Stores a new ITR value based on packets and byte 2517 * counts during the last interrupt. The advantage of per interrupt 2518 * computation is faster updates and more accurate ITR for the current 2519 * traffic pattern. Constants in this function were computed 2520 * based on theoretical maximum wire speed and thresholds were set based 2521 * on testing data as well as attempting to minimize response time 2522 * while increasing bulk throughput. 2523 **/ 2524 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, 2525 struct ixgbe_ring_container *ring_container) 2526 { 2527 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS | 2528 IXGBE_ITR_ADAPTIVE_LATENCY; 2529 unsigned int avg_wire_size, packets, bytes; 2530 unsigned long next_update = jiffies; 2531 2532 /* If we don't have any rings just leave ourselves set for maximum 2533 * possible latency so we take ourselves out of the equation. 2534 */ 2535 if (!ring_container->ring) 2536 return; 2537 2538 /* If we didn't update within up to 1 - 2 jiffies we can assume 2539 * that either packets are coming in so slow there hasn't been 2540 * any work, or that there is so much work that NAPI is dealing 2541 * with interrupt moderation and we don't need to do anything. 2542 */ 2543 if (time_after(next_update, ring_container->next_update)) 2544 goto clear_counts; 2545 2546 packets = ring_container->total_packets; 2547 2548 /* We have no packets to actually measure against. This means 2549 * either one of the other queues on this vector is active or 2550 * we are a Tx queue doing TSO with too high of an interrupt rate. 2551 * 2552 * When this occurs just tick up our delay by the minimum value 2553 * and hope that this extra delay will prevent us from being called 2554 * without any work on our queue. 
2555 */
2556 if (!packets) {
2557 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2558 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2559 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2560 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2561 goto clear_counts;
2562 }
2563
2564 bytes = ring_container->total_bytes;
2565
2566 /* If packets are less than 4 or bytes are less than 9000 assume
2567 * insufficient data to use bulk rate limiting approach. We are
2568 * likely latency driven.
2569 */
2570 if (packets < 4 && bytes < 9000) {
2571 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2572 goto adjust_by_size;
2573 }
2574
2575 /* Between 4 and 48 we can assume that our current interrupt delay
2576 * is only slightly too low. As such we should increase it by a small
2577 * fixed amount.
2578 */
2579 if (packets < 48) {
2580 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2581 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2582 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2583 goto clear_counts;
2584 }
2585
2586 /* Between 48 and 96 is our "goldilocks" zone where we are working
2587 * out "just right". Just report that our current ITR is good for us.
2588 */
2589 if (packets < 96) {
2590 itr = q_vector->itr >> 2;
2591 goto clear_counts;
2592 }
2593
2594 /* If packet count is 96 or greater we are likely looking at a slight
2595 * overrun of the delay we want. Try halving our delay to see if that
2596 * will cut the number of packets in half per interrupt.
2597 */
2598 if (packets < 256) {
2599 itr = q_vector->itr >> 3;
2600 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2601 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2602 goto clear_counts;
2603 }
2604
2605 /* The paths below assume we are dealing with a bulk ITR since number
2606 * of packets is 256 or greater. We are just going to have to compute
2607 * a value and try to bring the count under control, though for smaller
2608 * packet sizes there isn't much we can do as NAPI polling will likely
2609 * be kicking in sooner rather than later.
2610 */
2611 itr = IXGBE_ITR_ADAPTIVE_BULK;
2612
2613 adjust_by_size:
2614 /* If packet counts are 256 or greater we can assume we have a gross
2615 * overestimation of what the rate should be. Instead of trying to fine
2616 * tune it just use the formula below to try and dial in an exact value
2617 * given the current packet size of the frame.
2618 */
2619 avg_wire_size = bytes / packets;
2620
2621 /* The following is a crude approximation of:
2622 * wmem_default / (size + overhead) = desired_pkts_per_int
2623 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
2624 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
2625 *
2626 * Assuming wmem_default is 212992 and overhead is 640 bytes per
2627 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
2628 * formula down to
2629 *
2630 * (170 * (size + 24)) / (size + 640) = ITR
2631 *
2632 * We first do some math on the packet size and then finally bitshift
2633 * by 8 after rounding up. We also have to account for PCIe link speed
2634 * difference as ITR scales based on this.
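 *
 * As a rough worked example (assuming IXGBE_ITR_ADAPTIVE_MIN_INC is 2):
 * an average frame of 1500 bytes falls into the fourth bucket below,
 * giving 1500 * 5 + 22420 = 29920; at 10G the final step is then
 * DIV_ROUND_UP(29920, 2 * 256) * 2 = 118, just under the 8K ints/sec
 * plateau value of 126 that the 32256 cap works out to.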
2635 */ 2636 if (avg_wire_size <= 60) { 2637 /* Start at 50k ints/sec */ 2638 avg_wire_size = 5120; 2639 } else if (avg_wire_size <= 316) { 2640 /* 50K ints/sec to 16K ints/sec */ 2641 avg_wire_size *= 40; 2642 avg_wire_size += 2720; 2643 } else if (avg_wire_size <= 1084) { 2644 /* 16K ints/sec to 9.2K ints/sec */ 2645 avg_wire_size *= 15; 2646 avg_wire_size += 11452; 2647 } else if (avg_wire_size < 1968) { 2648 /* 9.2K ints/sec to 8K ints/sec */ 2649 avg_wire_size *= 5; 2650 avg_wire_size += 22420; 2651 } else { 2652 /* plateau at a limit of 8K ints/sec */ 2653 avg_wire_size = 32256; 2654 } 2655 2656 /* If we are in low latency mode half our delay which doubles the rate 2657 * to somewhere between 100K to 16K ints/sec 2658 */ 2659 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY) 2660 avg_wire_size >>= 1; 2661 2662 /* Resultant value is 256 times larger than it needs to be. This 2663 * gives us room to adjust the value as needed to either increase 2664 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. 2665 * 2666 * Use addition as we have already recorded the new latency flag 2667 * for the ITR value. 2668 */ 2669 switch (q_vector->adapter->link_speed) { 2670 case IXGBE_LINK_SPEED_10GB_FULL: 2671 case IXGBE_LINK_SPEED_100_FULL: 2672 default: 2673 itr += DIV_ROUND_UP(avg_wire_size, 2674 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) * 2675 IXGBE_ITR_ADAPTIVE_MIN_INC; 2676 break; 2677 case IXGBE_LINK_SPEED_2_5GB_FULL: 2678 case IXGBE_LINK_SPEED_1GB_FULL: 2679 case IXGBE_LINK_SPEED_10_FULL: 2680 if (avg_wire_size > 8064) 2681 avg_wire_size = 8064; 2682 itr += DIV_ROUND_UP(avg_wire_size, 2683 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * 2684 IXGBE_ITR_ADAPTIVE_MIN_INC; 2685 break; 2686 } 2687 2688 clear_counts: 2689 /* write back value */ 2690 ring_container->itr = itr; 2691 2692 /* next update should occur within next jiffy */ 2693 ring_container->next_update = next_update + 1; 2694 2695 ring_container->total_bytes = 0; 2696 ring_container->total_packets = 0; 2697 } 2698 2699 /** 2700 * ixgbe_write_eitr - write EITR register in hardware specific way 2701 * @q_vector: structure containing interrupt and ring information 2702 * 2703 * This function is made to be called by ethtool and by the driver 2704 * when it needs to update EITR registers at runtime. Hardware 2705 * specific quirks/differences are taken care of here. 
2706 */ 2707 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) 2708 { 2709 struct ixgbe_adapter *adapter = q_vector->adapter; 2710 struct ixgbe_hw *hw = &adapter->hw; 2711 int v_idx = q_vector->v_idx; 2712 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 2713 2714 switch (adapter->hw.mac.type) { 2715 case ixgbe_mac_82598EB: 2716 /* must write high and low 16 bits to reset counter */ 2717 itr_reg |= (itr_reg << 16); 2718 break; 2719 case ixgbe_mac_82599EB: 2720 case ixgbe_mac_X540: 2721 case ixgbe_mac_X550: 2722 case ixgbe_mac_X550EM_x: 2723 case ixgbe_mac_x550em_a: 2724 /* 2725 * set the WDIS bit to not clear the timer bits and cause an 2726 * immediate assertion of the interrupt 2727 */ 2728 itr_reg |= IXGBE_EITR_CNT_WDIS; 2729 break; 2730 default: 2731 break; 2732 } 2733 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 2734 } 2735 2736 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) 2737 { 2738 u32 new_itr; 2739 2740 ixgbe_update_itr(q_vector, &q_vector->tx); 2741 ixgbe_update_itr(q_vector, &q_vector->rx); 2742 2743 /* use the smallest value of new ITR delay calculations */ 2744 new_itr = min(q_vector->rx.itr, q_vector->tx.itr); 2745 2746 /* Clear latency flag if set, shift into correct position */ 2747 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY; 2748 new_itr <<= 2; 2749 2750 if (new_itr != q_vector->itr) { 2751 /* save the algorithm value here */ 2752 q_vector->itr = new_itr; 2753 2754 ixgbe_write_eitr(q_vector); 2755 } 2756 } 2757 2758 /** 2759 * ixgbe_check_overtemp_subtask - check for over temperature 2760 * @adapter: pointer to adapter 2761 **/ 2762 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) 2763 { 2764 struct ixgbe_hw *hw = &adapter->hw; 2765 u32 eicr = adapter->interrupt_event; 2766 s32 rc; 2767 2768 if (test_bit(__IXGBE_DOWN, &adapter->state)) 2769 return; 2770 2771 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) 2772 return; 2773 2774 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; 2775 2776 switch (hw->device_id) { 2777 case IXGBE_DEV_ID_82599_T3_LOM: 2778 /* 2779 * Since the warning interrupt is for both ports 2780 * we don't have to check if: 2781 * - This interrupt wasn't for our port. 
2782 * - We may have missed the interrupt so always have to 2783 * check if we got a LSC 2784 */ 2785 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && 2786 !(eicr & IXGBE_EICR_LSC)) 2787 return; 2788 2789 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { 2790 u32 speed; 2791 bool link_up = false; 2792 2793 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2794 2795 if (link_up) 2796 return; 2797 } 2798 2799 /* Check if this is not due to overtemp */ 2800 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) 2801 return; 2802 2803 break; 2804 case IXGBE_DEV_ID_X550EM_A_1G_T: 2805 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 2806 rc = hw->phy.ops.check_overtemp(hw); 2807 if (rc != IXGBE_ERR_OVERTEMP) 2808 return; 2809 break; 2810 default: 2811 if (adapter->hw.mac.type >= ixgbe_mac_X540) 2812 return; 2813 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) 2814 return; 2815 break; 2816 } 2817 e_crit(drv, "%s\n", ixgbe_overheat_msg); 2818 2819 adapter->interrupt_event = 0; 2820 } 2821 2822 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 2823 { 2824 struct ixgbe_hw *hw = &adapter->hw; 2825 2826 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 2827 (eicr & IXGBE_EICR_GPI_SDP1(hw))) { 2828 e_crit(probe, "Fan has stopped, replace the adapter\n"); 2829 /* write to clear the interrupt */ 2830 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); 2831 } 2832 } 2833 2834 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) 2835 { 2836 struct ixgbe_hw *hw = &adapter->hw; 2837 2838 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) 2839 return; 2840 2841 switch (adapter->hw.mac.type) { 2842 case ixgbe_mac_82599EB: 2843 /* 2844 * Need to check link state so complete overtemp check 2845 * on service task 2846 */ 2847 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || 2848 (eicr & IXGBE_EICR_LSC)) && 2849 (!test_bit(__IXGBE_DOWN, &adapter->state))) { 2850 adapter->interrupt_event = eicr; 2851 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; 2852 ixgbe_service_event_schedule(adapter); 2853 return; 2854 } 2855 return; 2856 case ixgbe_mac_x550em_a: 2857 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { 2858 adapter->interrupt_event = eicr; 2859 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; 2860 ixgbe_service_event_schedule(adapter); 2861 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 2862 IXGBE_EICR_GPI_SDP0_X550EM_a); 2863 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, 2864 IXGBE_EICR_GPI_SDP0_X550EM_a); 2865 } 2866 return; 2867 case ixgbe_mac_X550: 2868 case ixgbe_mac_X540: 2869 if (!(eicr & IXGBE_EICR_TS)) 2870 return; 2871 break; 2872 default: 2873 return; 2874 } 2875 2876 e_crit(drv, "%s\n", ixgbe_overheat_msg); 2877 } 2878 2879 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 2880 { 2881 switch (hw->mac.type) { 2882 case ixgbe_mac_82598EB: 2883 if (hw->phy.type == ixgbe_phy_nl) 2884 return true; 2885 return false; 2886 case ixgbe_mac_82599EB: 2887 case ixgbe_mac_X550EM_x: 2888 case ixgbe_mac_x550em_a: 2889 switch (hw->mac.ops.get_media_type(hw)) { 2890 case ixgbe_media_type_fiber: 2891 case ixgbe_media_type_fiber_qsfp: 2892 return true; 2893 default: 2894 return false; 2895 } 2896 default: 2897 return false; 2898 } 2899 } 2900 2901 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) 2902 { 2903 struct ixgbe_hw *hw = &adapter->hw; 2904 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); 2905 2906 if (!ixgbe_is_sfp(hw)) 2907 return; 2908 2909 /* Later MAC's use different SDP */ 2910 if (hw->mac.type >= ixgbe_mac_X540) 2911 eicr_mask = 
IXGBE_EICR_GPI_SDP0_X540; 2912 2913 if (eicr & eicr_mask) { 2914 /* Clear the interrupt */ 2915 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 2916 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2917 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; 2918 adapter->sfp_poll_time = 0; 2919 ixgbe_service_event_schedule(adapter); 2920 } 2921 } 2922 2923 if (adapter->hw.mac.type == ixgbe_mac_82599EB && 2924 (eicr & IXGBE_EICR_GPI_SDP1(hw))) { 2925 /* Clear the interrupt */ 2926 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); 2927 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2928 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 2929 ixgbe_service_event_schedule(adapter); 2930 } 2931 } 2932 } 2933 2934 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) 2935 { 2936 struct ixgbe_hw *hw = &adapter->hw; 2937 2938 adapter->lsc_int++; 2939 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2940 adapter->link_check_timeout = jiffies; 2941 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2942 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 2943 IXGBE_WRITE_FLUSH(hw); 2944 ixgbe_service_event_schedule(adapter); 2945 } 2946 } 2947 2948 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, 2949 u64 qmask) 2950 { 2951 u32 mask; 2952 struct ixgbe_hw *hw = &adapter->hw; 2953 2954 switch (hw->mac.type) { 2955 case ixgbe_mac_82598EB: 2956 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 2957 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 2958 break; 2959 case ixgbe_mac_82599EB: 2960 case ixgbe_mac_X540: 2961 case ixgbe_mac_X550: 2962 case ixgbe_mac_X550EM_x: 2963 case ixgbe_mac_x550em_a: 2964 mask = (qmask & 0xFFFFFFFF); 2965 if (mask) 2966 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 2967 mask = (qmask >> 32); 2968 if (mask) 2969 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 2970 break; 2971 default: 2972 break; 2973 } 2974 /* skip the flush */ 2975 } 2976 2977 /** 2978 * ixgbe_irq_enable - Enable default interrupt generation settings 2979 * @adapter: board private structure 2980 * @queues: enable irqs for queues 2981 * @flush: flush register write 2982 **/ 2983 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, 2984 bool flush) 2985 { 2986 struct ixgbe_hw *hw = &adapter->hw; 2987 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 2988 2989 /* don't reenable LSC while waiting for link */ 2990 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) 2991 mask &= ~IXGBE_EIMS_LSC; 2992 2993 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 2994 switch (adapter->hw.mac.type) { 2995 case ixgbe_mac_82599EB: 2996 mask |= IXGBE_EIMS_GPI_SDP0(hw); 2997 break; 2998 case ixgbe_mac_X540: 2999 case ixgbe_mac_X550: 3000 case ixgbe_mac_X550EM_x: 3001 case ixgbe_mac_x550em_a: 3002 mask |= IXGBE_EIMS_TS; 3003 break; 3004 default: 3005 break; 3006 } 3007 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 3008 mask |= IXGBE_EIMS_GPI_SDP1(hw); 3009 switch (adapter->hw.mac.type) { 3010 case ixgbe_mac_82599EB: 3011 mask |= IXGBE_EIMS_GPI_SDP1(hw); 3012 mask |= IXGBE_EIMS_GPI_SDP2(hw); 3013 fallthrough; 3014 case ixgbe_mac_X540: 3015 case ixgbe_mac_X550: 3016 case ixgbe_mac_X550EM_x: 3017 case ixgbe_mac_x550em_a: 3018 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || 3019 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || 3020 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) 3021 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); 3022 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) 3023 mask |= IXGBE_EICR_GPI_SDP0_X540; 3024 mask |= IXGBE_EIMS_ECC; 3025 mask |= 
IXGBE_EIMS_MAILBOX; 3026 break; 3027 default: 3028 break; 3029 } 3030 3031 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && 3032 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) 3033 mask |= IXGBE_EIMS_FLOW_DIR; 3034 3035 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 3036 if (queues) 3037 ixgbe_irq_enable_queues(adapter, ~0); 3038 if (flush) 3039 IXGBE_WRITE_FLUSH(&adapter->hw); 3040 } 3041 3042 static irqreturn_t ixgbe_msix_other(int irq, void *data) 3043 { 3044 struct ixgbe_adapter *adapter = data; 3045 struct ixgbe_hw *hw = &adapter->hw; 3046 u32 eicr; 3047 3048 /* 3049 * Workaround for Silicon errata. Use clear-by-write instead 3050 * of clear-by-read. Reading with EICS will return the 3051 * interrupt causes without clearing, which later be done 3052 * with the write to EICR. 3053 */ 3054 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 3055 3056 /* The lower 16bits of the EICR register are for the queue interrupts 3057 * which should be masked here in order to not accidentally clear them if 3058 * the bits are high when ixgbe_msix_other is called. There is a race 3059 * condition otherwise which results in possible performance loss 3060 * especially if the ixgbe_msix_other interrupt is triggering 3061 * consistently (as it would when PPS is turned on for the X540 device) 3062 */ 3063 eicr &= 0xFFFF0000; 3064 3065 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3066 3067 if (eicr & IXGBE_EICR_LSC) 3068 ixgbe_check_lsc(adapter); 3069 3070 if (eicr & IXGBE_EICR_MAILBOX) 3071 ixgbe_msg_task(adapter); 3072 3073 switch (hw->mac.type) { 3074 case ixgbe_mac_82599EB: 3075 case ixgbe_mac_X540: 3076 case ixgbe_mac_X550: 3077 case ixgbe_mac_X550EM_x: 3078 case ixgbe_mac_x550em_a: 3079 if (hw->phy.type == ixgbe_phy_x550em_ext_t && 3080 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 3081 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; 3082 ixgbe_service_event_schedule(adapter); 3083 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3084 IXGBE_EICR_GPI_SDP0_X540); 3085 } 3086 if (eicr & IXGBE_EICR_ECC) { 3087 e_info(link, "Received ECC Err, initiating reset\n"); 3088 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 3089 ixgbe_service_event_schedule(adapter); 3090 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 3091 } 3092 /* Handle Flow Director Full threshold interrupt */ 3093 if (eicr & IXGBE_EICR_FLOW_DIR) { 3094 int reinit_count = 0; 3095 int i; 3096 for (i = 0; i < adapter->num_tx_queues; i++) { 3097 struct ixgbe_ring *ring = adapter->tx_ring[i]; 3098 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, 3099 &ring->state)) 3100 reinit_count++; 3101 } 3102 if (reinit_count) { 3103 /* no more flow director interrupts until after init */ 3104 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); 3105 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 3106 ixgbe_service_event_schedule(adapter); 3107 } 3108 } 3109 ixgbe_check_sfp_event(adapter, eicr); 3110 ixgbe_check_overtemp_event(adapter, eicr); 3111 break; 3112 default: 3113 break; 3114 } 3115 3116 ixgbe_check_fan_failure(adapter, eicr); 3117 3118 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 3119 ixgbe_ptp_check_pps_event(adapter); 3120 3121 /* re-enable the original interrupt state, no lsc, no queues */ 3122 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3123 ixgbe_irq_enable(adapter, false, false); 3124 3125 return IRQ_HANDLED; 3126 } 3127 3128 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) 3129 { 3130 struct ixgbe_q_vector *q_vector = data; 3131 3132 /* EIAM disabled interrupts (on this vector) for us */ 3133 3134 if (q_vector->rx.ring || q_vector->tx.ring) 
3135 napi_schedule_irqoff(&q_vector->napi); 3136 3137 return IRQ_HANDLED; 3138 } 3139 3140 /** 3141 * ixgbe_poll - NAPI Rx polling callback 3142 * @napi: structure for representing this polling device 3143 * @budget: how many packets driver is allowed to clean 3144 * 3145 * This function is used for legacy and MSI, NAPI mode 3146 **/ 3147 int ixgbe_poll(struct napi_struct *napi, int budget) 3148 { 3149 struct ixgbe_q_vector *q_vector = 3150 container_of(napi, struct ixgbe_q_vector, napi); 3151 struct ixgbe_adapter *adapter = q_vector->adapter; 3152 struct ixgbe_ring *ring; 3153 int per_ring_budget, work_done = 0; 3154 bool clean_complete = true; 3155 3156 #ifdef CONFIG_IXGBE_DCA 3157 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 3158 ixgbe_update_dca(q_vector); 3159 #endif 3160 3161 ixgbe_for_each_ring(ring, q_vector->tx) { 3162 bool wd = ring->xsk_pool ? 3163 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) : 3164 ixgbe_clean_tx_irq(q_vector, ring, budget); 3165 3166 if (!wd) 3167 clean_complete = false; 3168 } 3169 3170 /* Exit if we are called by netpoll */ 3171 if (budget <= 0) 3172 return budget; 3173 3174 /* attempt to distribute budget to each queue fairly, but don't allow 3175 * the budget to go below 1 because we'll exit polling */ 3176 if (q_vector->rx.count > 1) 3177 per_ring_budget = max(budget/q_vector->rx.count, 1); 3178 else 3179 per_ring_budget = budget; 3180 3181 ixgbe_for_each_ring(ring, q_vector->rx) { 3182 int cleaned = ring->xsk_pool ? 3183 ixgbe_clean_rx_irq_zc(q_vector, ring, 3184 per_ring_budget) : 3185 ixgbe_clean_rx_irq(q_vector, ring, 3186 per_ring_budget); 3187 3188 work_done += cleaned; 3189 if (cleaned >= per_ring_budget) 3190 clean_complete = false; 3191 } 3192 3193 /* If all work not completed, return budget and keep polling */ 3194 if (!clean_complete) 3195 return budget; 3196 3197 /* all work done, exit the polling mode */ 3198 if (likely(napi_complete_done(napi, work_done))) { 3199 if (adapter->rx_itr_setting & 1) 3200 ixgbe_set_itr(q_vector); 3201 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3202 ixgbe_irq_enable_queues(adapter, 3203 BIT_ULL(q_vector->v_idx)); 3204 } 3205 3206 return min(work_done, budget - 1); 3207 } 3208 3209 /** 3210 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts 3211 * @adapter: board private structure 3212 * 3213 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests 3214 * interrupts from the kernel. 
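 *
 * The resulting vectors are named after the netdev and their role, so a
 * combined queue pair typically shows up as, e.g., "eth0-TxRx-0", while
 * the final "other causes" vector is requested under the bare netdev
 * name; the exact strings depend on the interface name and queue layout.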
3215 **/ 3216 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) 3217 { 3218 struct net_device *netdev = adapter->netdev; 3219 unsigned int ri = 0, ti = 0; 3220 int vector, err; 3221 3222 for (vector = 0; vector < adapter->num_q_vectors; vector++) { 3223 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 3224 struct msix_entry *entry = &adapter->msix_entries[vector]; 3225 3226 if (q_vector->tx.ring && q_vector->rx.ring) { 3227 snprintf(q_vector->name, sizeof(q_vector->name), 3228 "%s-TxRx-%u", netdev->name, ri++); 3229 ti++; 3230 } else if (q_vector->rx.ring) { 3231 snprintf(q_vector->name, sizeof(q_vector->name), 3232 "%s-rx-%u", netdev->name, ri++); 3233 } else if (q_vector->tx.ring) { 3234 snprintf(q_vector->name, sizeof(q_vector->name), 3235 "%s-tx-%u", netdev->name, ti++); 3236 } else { 3237 /* skip this unused q_vector */ 3238 continue; 3239 } 3240 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, 3241 q_vector->name, q_vector); 3242 if (err) { 3243 e_err(probe, "request_irq failed for MSIX interrupt " 3244 "Error: %d\n", err); 3245 goto free_queue_irqs; 3246 } 3247 /* If Flow Director is enabled, set interrupt affinity */ 3248 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 3249 /* assign the mask for this irq */ 3250 irq_update_affinity_hint(entry->vector, 3251 &q_vector->affinity_mask); 3252 } 3253 } 3254 3255 err = request_irq(adapter->msix_entries[vector].vector, 3256 ixgbe_msix_other, 0, netdev->name, adapter); 3257 if (err) { 3258 e_err(probe, "request_irq for msix_other failed: %d\n", err); 3259 goto free_queue_irqs; 3260 } 3261 3262 return 0; 3263 3264 free_queue_irqs: 3265 while (vector) { 3266 vector--; 3267 irq_update_affinity_hint(adapter->msix_entries[vector].vector, 3268 NULL); 3269 free_irq(adapter->msix_entries[vector].vector, 3270 adapter->q_vector[vector]); 3271 } 3272 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 3273 pci_disable_msix(adapter->pdev); 3274 kfree(adapter->msix_entries); 3275 adapter->msix_entries = NULL; 3276 return err; 3277 } 3278 3279 /** 3280 * ixgbe_intr - legacy mode Interrupt Handler 3281 * @irq: interrupt number 3282 * @data: pointer to a network interface device structure 3283 **/ 3284 static irqreturn_t ixgbe_intr(int irq, void *data) 3285 { 3286 struct ixgbe_adapter *adapter = data; 3287 struct ixgbe_hw *hw = &adapter->hw; 3288 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 3289 u32 eicr; 3290 3291 /* 3292 * Workaround for silicon errata #26 on 82598. Mask the interrupt 3293 * before the read of EICR. 3294 */ 3295 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 3296 3297 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read 3298 * therefore no explicit interrupt disable is necessary */ 3299 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3300 if (!eicr) { 3301 /* 3302 * shared interrupt alert! 3303 * make sure interrupts are enabled because the read will 3304 * have disabled interrupts due to EIAM 3305 * finish the workaround of silicon errata on 82598. Unmask 3306 * the interrupt that we masked before the EICR read. 
3307 */ 3308 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3309 ixgbe_irq_enable(adapter, true, true); 3310 return IRQ_NONE; /* Not our interrupt */ 3311 } 3312 3313 if (eicr & IXGBE_EICR_LSC) 3314 ixgbe_check_lsc(adapter); 3315 3316 switch (hw->mac.type) { 3317 case ixgbe_mac_82599EB: 3318 ixgbe_check_sfp_event(adapter, eicr); 3319 fallthrough; 3320 case ixgbe_mac_X540: 3321 case ixgbe_mac_X550: 3322 case ixgbe_mac_X550EM_x: 3323 case ixgbe_mac_x550em_a: 3324 if (eicr & IXGBE_EICR_ECC) { 3325 e_info(link, "Received ECC Err, initiating reset\n"); 3326 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 3327 ixgbe_service_event_schedule(adapter); 3328 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 3329 } 3330 ixgbe_check_overtemp_event(adapter, eicr); 3331 break; 3332 default: 3333 break; 3334 } 3335 3336 ixgbe_check_fan_failure(adapter, eicr); 3337 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 3338 ixgbe_ptp_check_pps_event(adapter); 3339 3340 /* would disable interrupts here but EIAM disabled it */ 3341 napi_schedule_irqoff(&q_vector->napi); 3342 3343 /* 3344 * re-enable link(maybe) and non-queue interrupts, no flush. 3345 * ixgbe_poll will re-enable the queue interrupts 3346 */ 3347 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3348 ixgbe_irq_enable(adapter, false, false); 3349 3350 return IRQ_HANDLED; 3351 } 3352 3353 /** 3354 * ixgbe_request_irq - initialize interrupts 3355 * @adapter: board private structure 3356 * 3357 * Attempts to configure interrupts using the best available 3358 * capabilities of the hardware and kernel. 3359 **/ 3360 static int ixgbe_request_irq(struct ixgbe_adapter *adapter) 3361 { 3362 struct net_device *netdev = adapter->netdev; 3363 int err; 3364 3365 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 3366 err = ixgbe_request_msix_irqs(adapter); 3367 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) 3368 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, 3369 netdev->name, adapter); 3370 else 3371 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, 3372 netdev->name, adapter); 3373 3374 if (err) 3375 e_err(probe, "request_irq failed, Error %d\n", err); 3376 3377 return err; 3378 } 3379 3380 static void ixgbe_free_irq(struct ixgbe_adapter *adapter) 3381 { 3382 int vector; 3383 3384 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 3385 free_irq(adapter->pdev->irq, adapter); 3386 return; 3387 } 3388 3389 if (!adapter->msix_entries) 3390 return; 3391 3392 for (vector = 0; vector < adapter->num_q_vectors; vector++) { 3393 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 3394 struct msix_entry *entry = &adapter->msix_entries[vector]; 3395 3396 /* free only the irqs that were actually requested */ 3397 if (!q_vector->rx.ring && !q_vector->tx.ring) 3398 continue; 3399 3400 /* clear the affinity_mask in the IRQ descriptor */ 3401 irq_update_affinity_hint(entry->vector, NULL); 3402 3403 free_irq(entry->vector, q_vector); 3404 } 3405 3406 free_irq(adapter->msix_entries[vector].vector, adapter); 3407 } 3408 3409 /** 3410 * ixgbe_irq_disable - Mask off interrupt generation on the NIC 3411 * @adapter: board private structure 3412 **/ 3413 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 3414 { 3415 switch (adapter->hw.mac.type) { 3416 case ixgbe_mac_82598EB: 3417 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 3418 break; 3419 case ixgbe_mac_82599EB: 3420 case ixgbe_mac_X540: 3421 case ixgbe_mac_X550: 3422 case ixgbe_mac_X550EM_x: 3423 case ixgbe_mac_x550em_a: 3424 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3425 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3427 break; 3428 default: 3429 break; 3430 } 3431 IXGBE_WRITE_FLUSH(&adapter->hw); 3432 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3433 int vector; 3434 3435 for (vector = 0; vector < adapter->num_q_vectors; vector++) 3436 synchronize_irq(adapter->msix_entries[vector].vector); 3437 3438 synchronize_irq(adapter->msix_entries[vector++].vector); 3439 } else { 3440 synchronize_irq(adapter->pdev->irq); 3441 } 3442 } 3443 3444 /** 3445 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts 3446 * @adapter: board private structure 3447 * 3448 **/ 3449 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) 3450 { 3451 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 3452 3453 ixgbe_write_eitr(q_vector); 3454 3455 ixgbe_set_ivar(adapter, 0, 0, 0); 3456 ixgbe_set_ivar(adapter, 1, 0, 0); 3457 3458 e_info(hw, "Legacy interrupt IVAR setup done\n"); 3459 } 3460 3461 /** 3462 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset 3463 * @adapter: board private structure 3464 * @ring: structure containing ring specific data 3465 * 3466 * Configure the Tx descriptor ring after a reset. 3467 **/ 3468 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, 3469 struct ixgbe_ring *ring) 3470 { 3471 struct ixgbe_hw *hw = &adapter->hw; 3472 u64 tdba = ring->dma; 3473 int wait_loop = 10; 3474 u32 txdctl = IXGBE_TXDCTL_ENABLE; 3475 u8 reg_idx = ring->reg_idx; 3476 3477 ring->xsk_pool = NULL; 3478 if (ring_is_xdp(ring)) 3479 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); 3480 3481 /* disable queue to avoid issues while updating state */ 3482 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); 3483 IXGBE_WRITE_FLUSH(hw); 3484 3485 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), 3486 (tdba & DMA_BIT_MASK(32))); 3487 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); 3488 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), 3489 ring->count * sizeof(union ixgbe_adv_tx_desc)); 3490 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); 3491 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); 3492 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); 3493 3494 /* 3495 * set WTHRESH to encourage burst writeback, it should not be set 3496 * higher than 1 when: 3497 * - ITR is 0 as it could cause false TX hangs 3498 * - ITR is set to > 100k int/sec and BQL is enabled 3499 * 3500 * In order to avoid issues WTHRESH + PTHRESH should always be equal 3501 * to or less than the number of on chip descriptors, which is 3502 * currently 40. 
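 *
 * With the values chosen below (PTHRESH = 32, HTHRESH = 1 and WTHRESH of
 * either 1 or 8) the worst case is WTHRESH + PTHRESH = 8 + 32 = 40,
 * exactly at that limit.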
3503 */ 3504 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) 3505 txdctl |= 1u << 16; /* WTHRESH = 1 */ 3506 else 3507 txdctl |= 8u << 16; /* WTHRESH = 8 */ 3508 3509 /* 3510 * Setting PTHRESH to 32 both improves performance 3511 * and avoids a TX hang with DFP enabled 3512 */ 3513 txdctl |= (1u << 8) | /* HTHRESH = 1 */ 3514 32; /* PTHRESH = 32 */ 3515 3516 /* reinitialize flowdirector state */ 3517 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 3518 ring->atr_sample_rate = adapter->atr_sample_rate; 3519 ring->atr_count = 0; 3520 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); 3521 } else { 3522 ring->atr_sample_rate = 0; 3523 } 3524 3525 /* initialize XPS */ 3526 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { 3527 struct ixgbe_q_vector *q_vector = ring->q_vector; 3528 3529 if (q_vector) 3530 netif_set_xps_queue(ring->netdev, 3531 &q_vector->affinity_mask, 3532 ring->queue_index); 3533 } 3534 3535 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); 3536 3537 /* reinitialize tx_buffer_info */ 3538 memset(ring->tx_buffer_info, 0, 3539 sizeof(struct ixgbe_tx_buffer) * ring->count); 3540 3541 /* enable queue */ 3542 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); 3543 3544 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 3545 if (hw->mac.type == ixgbe_mac_82598EB && 3546 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 3547 return; 3548 3549 /* poll to verify queue is enabled */ 3550 do { 3551 usleep_range(1000, 2000); 3552 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 3553 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); 3554 if (!wait_loop) 3555 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); 3556 } 3557 3558 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) 3559 { 3560 struct ixgbe_hw *hw = &adapter->hw; 3561 u32 rttdcs, mtqc; 3562 u8 tcs = adapter->hw_tcs; 3563 3564 if (hw->mac.type == ixgbe_mac_82598EB) 3565 return; 3566 3567 /* disable the arbiter while setting MTQC */ 3568 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 3569 rttdcs |= IXGBE_RTTDCS_ARBDIS; 3570 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3571 3572 /* set transmit pool layout */ 3573 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3574 mtqc = IXGBE_MTQC_VT_ENA; 3575 if (tcs > 4) 3576 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; 3577 else if (tcs > 1) 3578 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; 3579 else if (adapter->ring_feature[RING_F_VMDQ].mask == 3580 IXGBE_82599_VMDQ_4Q_MASK) 3581 mtqc |= IXGBE_MTQC_32VF; 3582 else 3583 mtqc |= IXGBE_MTQC_64VF; 3584 } else { 3585 if (tcs > 4) { 3586 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; 3587 } else if (tcs > 1) { 3588 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; 3589 } else { 3590 u8 max_txq = adapter->num_tx_queues + 3591 adapter->num_xdp_queues; 3592 if (max_txq > 63) 3593 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; 3594 else 3595 mtqc = IXGBE_MTQC_64Q_1PB; 3596 } 3597 } 3598 3599 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); 3600 3601 /* Enable Security TX Buffer IFG for multiple pb */ 3602 if (tcs) { 3603 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 3604 sectx |= IXGBE_SECTX_DCB; 3605 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); 3606 } 3607 3608 /* re-enable the arbiter */ 3609 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 3610 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 3611 } 3612 3613 /** 3614 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset 3615 * @adapter: board private structure 3616 * 3617 * Configure the Tx unit of the MAC after a reset. 
3618 **/ 3619 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 3620 { 3621 struct ixgbe_hw *hw = &adapter->hw; 3622 u32 dmatxctl; 3623 u32 i; 3624 3625 ixgbe_setup_mtqc(adapter); 3626 3627 if (hw->mac.type != ixgbe_mac_82598EB) { 3628 /* DMATXCTL.EN must be before Tx queues are enabled */ 3629 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 3630 dmatxctl |= IXGBE_DMATXCTL_TE; 3631 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 3632 } 3633 3634 /* Setup the HW Tx Head and Tail descriptor pointers */ 3635 for (i = 0; i < adapter->num_tx_queues; i++) 3636 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); 3637 for (i = 0; i < adapter->num_xdp_queues; i++) 3638 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); 3639 } 3640 3641 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, 3642 struct ixgbe_ring *ring) 3643 { 3644 struct ixgbe_hw *hw = &adapter->hw; 3645 u8 reg_idx = ring->reg_idx; 3646 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); 3647 3648 srrctl |= IXGBE_SRRCTL_DROP_EN; 3649 3650 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); 3651 } 3652 3653 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, 3654 struct ixgbe_ring *ring) 3655 { 3656 struct ixgbe_hw *hw = &adapter->hw; 3657 u8 reg_idx = ring->reg_idx; 3658 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); 3659 3660 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 3661 3662 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); 3663 } 3664 3665 #ifdef CONFIG_IXGBE_DCB 3666 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) 3667 #else 3668 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) 3669 #endif 3670 { 3671 int i; 3672 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; 3673 3674 if (adapter->ixgbe_ieee_pfc) 3675 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); 3676 3677 /* 3678 * We should set the drop enable bit if: 3679 * SR-IOV is enabled 3680 * or 3681 * Number of Rx queues > 1 and flow control is disabled 3682 * 3683 * This allows us to avoid head of line blocking for security 3684 * and performance reasons. 3685 */ 3686 if (adapter->num_vfs || (adapter->num_rx_queues > 1 && 3687 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { 3688 for (i = 0; i < adapter->num_rx_queues; i++) 3689 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); 3690 } else { 3691 for (i = 0; i < adapter->num_rx_queues; i++) 3692 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); 3693 } 3694 } 3695 3696 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 3697 3698 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 3699 struct ixgbe_ring *rx_ring) 3700 { 3701 struct ixgbe_hw *hw = &adapter->hw; 3702 u32 srrctl; 3703 u8 reg_idx = rx_ring->reg_idx; 3704 3705 if (hw->mac.type == ixgbe_mac_82598EB) { 3706 u16 mask = adapter->ring_feature[RING_F_RSS].mask; 3707 3708 /* 3709 * if VMDq is not active we must program one srrctl register 3710 * per RSS queue since we have enabled RDRXCTL.MVMEN 3711 */ 3712 reg_idx &= mask; 3713 } 3714 3715 /* configure header buffer length, needed for RSC */ 3716 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; 3717 3718 /* configure the packet buffer length */ 3719 if (rx_ring->xsk_pool) { 3720 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool); 3721 3722 /* If the MAC support setting RXDCTL.RLPML, the 3723 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and 3724 * RXDCTL.RLPML is set to the actual UMEM buffer 3725 * size. If not, then we are stuck with a 1k buffer 3726 * size resolution. 
In this case frames larger than 3727 * the UMEM buffer size viewed in a 1k resolution will 3728 * be dropped. 3729 */ 3730 if (hw->mac.type != ixgbe_mac_82599EB) 3731 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 3732 else 3733 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 3734 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { 3735 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 3736 } else { 3737 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 3738 } 3739 3740 /* configure descriptor type */ 3741 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 3742 3743 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); 3744 } 3745 3746 /** 3747 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries 3748 * @adapter: device handle 3749 * 3750 * - 82598/82599/X540: 128 3751 * - X550(non-SRIOV mode): 512 3752 * - X550(SRIOV mode): 64 3753 */ 3754 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) 3755 { 3756 if (adapter->hw.mac.type < ixgbe_mac_X550) 3757 return 128; 3758 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 3759 return 64; 3760 else 3761 return 512; 3762 } 3763 3764 /** 3765 * ixgbe_store_key - Write the RSS key to HW 3766 * @adapter: device handle 3767 * 3768 * Write the RSS key stored in adapter.rss_key to HW. 3769 */ 3770 void ixgbe_store_key(struct ixgbe_adapter *adapter) 3771 { 3772 struct ixgbe_hw *hw = &adapter->hw; 3773 int i; 3774 3775 for (i = 0; i < 10; i++) 3776 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); 3777 } 3778 3779 /** 3780 * ixgbe_init_rss_key - Initialize adapter RSS key 3781 * @adapter: device handle 3782 * 3783 * Allocates and initializes the RSS key if it is not allocated. 3784 **/ 3785 static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter) 3786 { 3787 u32 *rss_key; 3788 3789 if (!adapter->rss_key) { 3790 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); 3791 if (unlikely(!rss_key)) 3792 return -ENOMEM; 3793 3794 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE); 3795 adapter->rss_key = rss_key; 3796 } 3797 3798 return 0; 3799 } 3800 3801 /** 3802 * ixgbe_store_reta - Write the RETA table to HW 3803 * @adapter: device handle 3804 * 3805 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 3806 */ 3807 void ixgbe_store_reta(struct ixgbe_adapter *adapter) 3808 { 3809 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3810 struct ixgbe_hw *hw = &adapter->hw; 3811 u32 reta = 0; 3812 u32 indices_multi; 3813 u8 *indir_tbl = adapter->rss_indir_tbl; 3814 3815 /* Fill out the redirection table as follows: 3816 * - 82598: 8 bit wide entries containing pair of 4 bit RSS 3817 * indices. 3818 * - 82599/X540: 8 bit wide entries containing 4 bit RSS index 3819 * - X550: 8 bit wide entries containing 6 bit RSS index 3820 */ 3821 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 3822 indices_multi = 0x11; 3823 else 3824 indices_multi = 0x1; 3825 3826 /* Write redirection table to HW */ 3827 for (i = 0; i < reta_entries; i++) { 3828 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; 3829 if ((i & 3) == 3) { 3830 if (i < 128) 3831 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3832 else 3833 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 3834 reta); 3835 reta = 0; 3836 } 3837 } 3838 } 3839 3840 /** 3841 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) 3842 * @adapter: device handle 3843 * 3844 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
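 *
 * As in ixgbe_store_reta(), four 8-bit entries are packed into each 32-bit
 * register; here every completed register is written once per Rx pool
 * through the PFVFRETA registers.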
3845 */ 3846 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) 3847 { 3848 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3849 struct ixgbe_hw *hw = &adapter->hw; 3850 u32 vfreta = 0; 3851 3852 /* Write redirection table to HW */ 3853 for (i = 0; i < reta_entries; i++) { 3854 u16 pool = adapter->num_rx_pools; 3855 3856 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; 3857 if ((i & 3) != 3) 3858 continue; 3859 3860 while (pool--) 3861 IXGBE_WRITE_REG(hw, 3862 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)), 3863 vfreta); 3864 vfreta = 0; 3865 } 3866 } 3867 3868 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) 3869 { 3870 u32 i, j; 3871 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); 3872 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 3873 3874 /* Program table for at least 4 queues w/ SR-IOV so that VFs can 3875 * make full use of any rings they may have. We will use the 3876 * PSRTYPE register to control how many rings we use within the PF. 3877 */ 3878 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) 3879 rss_i = 4; 3880 3881 /* Fill out hash function seeds */ 3882 ixgbe_store_key(adapter); 3883 3884 /* Fill out redirection table */ 3885 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); 3886 3887 for (i = 0, j = 0; i < reta_entries; i++, j++) { 3888 if (j == rss_i) 3889 j = 0; 3890 3891 adapter->rss_indir_tbl[i] = j; 3892 } 3893 3894 ixgbe_store_reta(adapter); 3895 } 3896 3897 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) 3898 { 3899 struct ixgbe_hw *hw = &adapter->hw; 3900 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 3901 int i, j; 3902 3903 /* Fill out hash function seeds */ 3904 for (i = 0; i < 10; i++) { 3905 u16 pool = adapter->num_rx_pools; 3906 3907 while (pool--) 3908 IXGBE_WRITE_REG(hw, 3909 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)), 3910 *(adapter->rss_key + i)); 3911 } 3912 3913 /* Fill out the redirection table */ 3914 for (i = 0, j = 0; i < 64; i++, j++) { 3915 if (j == rss_i) 3916 j = 0; 3917 3918 adapter->rss_indir_tbl[i] = j; 3919 } 3920 3921 ixgbe_store_vfreta(adapter); 3922 } 3923 3924 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 3925 { 3926 struct ixgbe_hw *hw = &adapter->hw; 3927 u32 mrqc = 0, rss_field = 0, vfmrqc = 0; 3928 u32 rxcsum; 3929 3930 /* Disable indicating checksum in descriptor, enables RSS hash */ 3931 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3932 rxcsum |= IXGBE_RXCSUM_PCSD; 3933 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3934 3935 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3936 if (adapter->ring_feature[RING_F_RSS].mask) 3937 mrqc = IXGBE_MRQC_RSSEN; 3938 } else { 3939 u8 tcs = adapter->hw_tcs; 3940 3941 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3942 if (tcs > 4) 3943 mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ 3944 else if (tcs > 1) 3945 mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ 3946 else if (adapter->ring_feature[RING_F_VMDQ].mask == 3947 IXGBE_82599_VMDQ_4Q_MASK) 3948 mrqc = IXGBE_MRQC_VMDQRSS32EN; 3949 else 3950 mrqc = IXGBE_MRQC_VMDQRSS64EN; 3951 3952 /* Enable L3/L4 for Tx Switched packets only for X550, 3953 * older devices do not support this feature 3954 */ 3955 if (hw->mac.type >= ixgbe_mac_X550) 3956 mrqc |= IXGBE_MRQC_L3L4TXSWEN; 3957 } else { 3958 if (tcs > 4) 3959 mrqc = IXGBE_MRQC_RTRSS8TCEN; 3960 else if (tcs > 1) 3961 mrqc = IXGBE_MRQC_RTRSS4TCEN; 3962 else 3963 mrqc = IXGBE_MRQC_RSSEN; 3964 } 3965 } 3966 3967 /* Perform hash on these packet types */ 3968 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | 3969 
IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 3970 IXGBE_MRQC_RSS_FIELD_IPV6 | 3971 IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 3972 3973 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 3974 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 3975 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) 3976 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 3977 3978 if ((hw->mac.type >= ixgbe_mac_X550) && 3979 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { 3980 u16 pool = adapter->num_rx_pools; 3981 3982 /* Enable VF RSS mode */ 3983 mrqc |= IXGBE_MRQC_MULTIPLE_RSS; 3984 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3985 3986 /* Setup RSS through the VF registers */ 3987 ixgbe_setup_vfreta(adapter); 3988 vfmrqc = IXGBE_MRQC_RSSEN; 3989 vfmrqc |= rss_field; 3990 3991 while (pool--) 3992 IXGBE_WRITE_REG(hw, 3993 IXGBE_PFVFMRQC(VMDQ_P(pool)), 3994 vfmrqc); 3995 } else { 3996 ixgbe_setup_reta(adapter); 3997 mrqc |= rss_field; 3998 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3999 } 4000 } 4001 4002 /** 4003 * ixgbe_configure_rscctl - enable RSC for the indicated ring 4004 * @adapter: address of board private structure 4005 * @ring: structure containing ring specific data 4006 **/ 4007 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 4008 struct ixgbe_ring *ring) 4009 { 4010 struct ixgbe_hw *hw = &adapter->hw; 4011 u32 rscctrl; 4012 u8 reg_idx = ring->reg_idx; 4013 4014 if (!ring_is_rsc_enabled(ring)) 4015 return; 4016 4017 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); 4018 rscctrl |= IXGBE_RSCCTL_RSCEN; 4019 /* 4020 * we must limit the number of descriptors so that the 4021 * total size of max desc * buf_len is not greater 4022 * than 65536 4023 */ 4024 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 4025 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); 4026 } 4027 4028 #define IXGBE_MAX_RX_DESC_POLL 10 4029 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 4030 struct ixgbe_ring *ring) 4031 { 4032 struct ixgbe_hw *hw = &adapter->hw; 4033 int wait_loop = IXGBE_MAX_RX_DESC_POLL; 4034 u32 rxdctl; 4035 u8 reg_idx = ring->reg_idx; 4036 4037 if (ixgbe_removed(hw->hw_addr)) 4038 return; 4039 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 4040 if (hw->mac.type == ixgbe_mac_82598EB && 4041 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 4042 return; 4043 4044 do { 4045 usleep_range(1000, 2000); 4046 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 4047 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); 4048 4049 if (!wait_loop) { 4050 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " 4051 "the polling period\n", reg_idx); 4052 } 4053 } 4054 4055 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 4056 struct ixgbe_ring *ring) 4057 { 4058 struct ixgbe_hw *hw = &adapter->hw; 4059 union ixgbe_adv_rx_desc *rx_desc; 4060 u64 rdba = ring->dma; 4061 u32 rxdctl; 4062 u8 reg_idx = ring->reg_idx; 4063 4064 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 4065 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); 4066 if (ring->xsk_pool) { 4067 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 4068 MEM_TYPE_XSK_BUFF_POOL, 4069 NULL)); 4070 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); 4071 } else { 4072 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 4073 MEM_TYPE_PAGE_SHARED, NULL)); 4074 } 4075 4076 /* disable queue to avoid use of these values while updating state */ 4077 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 4078 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 4079 4080 /* write value back with RXDCTL.ENABLE bit cleared */ 4081 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), 
rxdctl); 4082 IXGBE_WRITE_FLUSH(hw); 4083 4084 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); 4085 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); 4086 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), 4087 ring->count * sizeof(union ixgbe_adv_rx_desc)); 4088 /* Force flushing of IXGBE_RDLEN to prevent MDD */ 4089 IXGBE_WRITE_FLUSH(hw); 4090 4091 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); 4092 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); 4093 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); 4094 4095 ixgbe_configure_srrctl(adapter, ring); 4096 ixgbe_configure_rscctl(adapter, ring); 4097 4098 if (hw->mac.type == ixgbe_mac_82598EB) { 4099 /* 4100 * enable cache line friendly hardware writes: 4101 * PTHRESH=32 descriptors (half the internal cache), 4102 * this also removes ugly rx_no_buffer_count increment 4103 * HTHRESH=4 descriptors (to minimize latency on fetch) 4104 * WTHRESH=8 burst writeback up to two cache lines 4105 */ 4106 rxdctl &= ~0x3FFFFF; 4107 rxdctl |= 0x080420; 4108 #if (PAGE_SIZE < 8192) 4109 /* RXDCTL.RLPML does not work on 82599 */ 4110 } else if (hw->mac.type != ixgbe_mac_82599EB) { 4111 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | 4112 IXGBE_RXDCTL_RLPML_EN); 4113 4114 /* Limit the maximum frame size so we don't overrun the skb. 4115 * This can happen in SRIOV mode when the MTU of the VF is 4116 * higher than the MTU of the PF. 4117 */ 4118 if (ring_uses_build_skb(ring) && 4119 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) 4120 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | 4121 IXGBE_RXDCTL_RLPML_EN; 4122 #endif 4123 } 4124 4125 ring->rx_offset = ixgbe_rx_offset(ring); 4126 4127 if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { 4128 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); 4129 4130 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | 4131 IXGBE_RXDCTL_RLPML_EN); 4132 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN; 4133 4134 ring->rx_buf_len = xsk_buf_len; 4135 } 4136 4137 /* initialize rx_buffer_info */ 4138 memset(ring->rx_buffer_info, 0, 4139 sizeof(struct ixgbe_rx_buffer) * ring->count); 4140 4141 /* initialize Rx descriptor 0 */ 4142 rx_desc = IXGBE_RX_DESC(ring, 0); 4143 rx_desc->wb.upper.length = 0; 4144 4145 /* enable receive descriptor ring */ 4146 rxdctl |= IXGBE_RXDCTL_ENABLE; 4147 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 4148 4149 ixgbe_rx_desc_queue_enable(adapter, ring); 4150 if (ring->xsk_pool) 4151 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring)); 4152 else 4153 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); 4154 } 4155 4156 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) 4157 { 4158 struct ixgbe_hw *hw = &adapter->hw; 4159 int rss_i = adapter->ring_feature[RING_F_RSS].indices; 4160 u16 pool = adapter->num_rx_pools; 4161 4162 /* PSRTYPE must be initialized in non 82598 adapters */ 4163 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 4164 IXGBE_PSRTYPE_UDPHDR | 4165 IXGBE_PSRTYPE_IPV4HDR | 4166 IXGBE_PSRTYPE_L2HDR | 4167 IXGBE_PSRTYPE_IPV6HDR; 4168 4169 if (hw->mac.type == ixgbe_mac_82598EB) 4170 return; 4171 4172 if (rss_i > 3) 4173 psrtype |= 2u << 29; 4174 else if (rss_i > 1) 4175 psrtype |= 1u << 29; 4176 4177 while (pool--) 4178 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); 4179 } 4180 4181 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) 4182 { 4183 struct ixgbe_hw *hw = &adapter->hw; 4184 u16 pool = adapter->num_rx_pools; 4185 u32 reg_offset, vf_shift, vmolr; 4186 u32 gcr_ext, vmdctl; 4187 int i; 4188 4189 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 
4190 return; 4191 4192 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 4193 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; 4194 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; 4195 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; 4196 vmdctl |= IXGBE_VT_CTL_REPLEN; 4197 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); 4198 4199 /* accept untagged packets until a vlan tag is 4200 * specifically set for the VMDQ queue/pool 4201 */ 4202 vmolr = IXGBE_VMOLR_AUPE; 4203 while (pool--) 4204 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr); 4205 4206 vf_shift = VMDQ_P(0) % 32; 4207 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; 4208 4209 /* Enable only the PF's pool for Tx/Rx */ 4210 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); 4211 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); 4212 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); 4213 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); 4214 if (adapter->bridge_mode == BRIDGE_MODE_VEB) 4215 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 4216 4217 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ 4218 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); 4219 4220 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 4221 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; 4222 4223 /* 4224 * Set up VF register offsets for selected VT Mode, 4225 * i.e. 32 or 64 VFs for SR-IOV 4226 */ 4227 switch (adapter->ring_feature[RING_F_VMDQ].mask) { 4228 case IXGBE_82599_VMDQ_8Q_MASK: 4229 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; 4230 break; 4231 case IXGBE_82599_VMDQ_4Q_MASK: 4232 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; 4233 break; 4234 default: 4235 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; 4236 break; 4237 } 4238 4239 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 4240 4241 for (i = 0; i < adapter->num_vfs; i++) { 4242 /* configure spoof checking */ 4243 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, 4244 adapter->vfinfo[i].spoofchk_enabled); 4245 4246 /* Enable/Disable RSS query feature */ 4247 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, 4248 adapter->vfinfo[i].rss_query_enabled); 4249 } 4250 } 4251 4252 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) 4253 { 4254 struct ixgbe_hw *hw = &adapter->hw; 4255 struct net_device *netdev = adapter->netdev; 4256 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 4257 struct ixgbe_ring *rx_ring; 4258 int i; 4259 u32 mhadd, hlreg0; 4260 4261 #ifdef IXGBE_FCOE 4262 /* adjust max frame to be able to do baby jumbo for FCoE */ 4263 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 4264 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) 4265 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; 4266 4267 #endif /* IXGBE_FCOE */ 4268 4269 /* adjust max frame to be at least the size of a standard frame */ 4270 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 4271 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); 4272 4273 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 4274 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 4275 mhadd &= ~IXGBE_MHADD_MFS_MASK; 4276 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; 4277 4278 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 4279 } 4280 4281 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 4282 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ 4283 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 4284 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 4285 4286 /* 4287 * Setup the HW Rx Head and Tail Descriptor Pointers and 4288 * the Base and Length of the Rx Descriptor Ring 4289 */ 4290 for (i = 0; i < adapter->num_rx_queues; i++) { 4291 rx_ring = 
adapter->rx_ring[i]; 4292 4293 clear_ring_rsc_enabled(rx_ring); 4294 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 4295 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); 4296 4297 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 4298 set_ring_rsc_enabled(rx_ring); 4299 4300 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) 4301 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 4302 4303 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) 4304 continue; 4305 4306 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); 4307 4308 #if (PAGE_SIZE < 8192) 4309 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 4310 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 4311 4312 if (IXGBE_2K_TOO_SMALL_WITH_PADDING || 4313 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) 4314 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); 4315 #endif 4316 } 4317 } 4318 4319 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) 4320 { 4321 struct ixgbe_hw *hw = &adapter->hw; 4322 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 4323 4324 switch (hw->mac.type) { 4325 case ixgbe_mac_82598EB: 4326 /* 4327 * For VMDq support of different descriptor types or 4328 * buffer sizes through the use of multiple SRRCTL 4329 * registers, RDRXCTL.MVMEN must be set to 1 4330 * 4331 * also, the manual doesn't mention it clearly but DCA hints 4332 * will only use queue 0's tags unless this bit is set. Side 4333 * effects of setting this bit are only that SRRCTL must be 4334 * fully programmed [0..15] 4335 */ 4336 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 4337 break; 4338 case ixgbe_mac_X550: 4339 case ixgbe_mac_X550EM_x: 4340 case ixgbe_mac_x550em_a: 4341 if (adapter->num_vfs) 4342 rdrxctl |= IXGBE_RDRXCTL_PSP; 4343 fallthrough; 4344 case ixgbe_mac_82599EB: 4345 case ixgbe_mac_X540: 4346 /* Disable RSC for ACK packets */ 4347 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 4348 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 4349 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 4350 /* hardware requires some bits to be set by default */ 4351 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); 4352 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 4353 break; 4354 default: 4355 /* We should do nothing since we don't know this hardware */ 4356 return; 4357 } 4358 4359 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 4360 } 4361 4362 /** 4363 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 4364 * @adapter: board private structure 4365 * 4366 * Configure the Rx unit of the MAC after a reset. 
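 *
 * Receives are disabled while PSRTYPE, RDRXCTL, RSC and MRQC are set up;
 * ixgbe_set_rx_buffer_len() must run before the per-ring configuration, and
 * RXCTRL.RXEN is only restored once every ring has been programmed.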
4367 **/ 4368 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 4369 { 4370 struct ixgbe_hw *hw = &adapter->hw; 4371 int i; 4372 u32 rxctrl, rfctl; 4373 4374 /* disable receives while setting up the descriptors */ 4375 hw->mac.ops.disable_rx(hw); 4376 4377 ixgbe_setup_psrtype(adapter); 4378 ixgbe_setup_rdrxctl(adapter); 4379 4380 /* RSC Setup */ 4381 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); 4382 rfctl &= ~IXGBE_RFCTL_RSC_DIS; 4383 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) 4384 rfctl |= IXGBE_RFCTL_RSC_DIS; 4385 4386 /* disable NFS filtering */ 4387 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); 4388 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); 4389 4390 /* Program registers for the distribution of queues */ 4391 ixgbe_setup_mrqc(adapter); 4392 4393 /* set_rx_buffer_len must be called before ring initialization */ 4394 ixgbe_set_rx_buffer_len(adapter); 4395 4396 /* 4397 * Setup the HW Rx Head and Tail Descriptor Pointers and 4398 * the Base and Length of the Rx Descriptor Ring 4399 */ 4400 for (i = 0; i < adapter->num_rx_queues; i++) 4401 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); 4402 4403 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4404 /* disable drop enable for 82598 parts */ 4405 if (hw->mac.type == ixgbe_mac_82598EB) 4406 rxctrl |= IXGBE_RXCTRL_DMBYPS; 4407 4408 /* enable all receives */ 4409 rxctrl |= IXGBE_RXCTRL_RXEN; 4410 hw->mac.ops.enable_rx_dma(hw, rxctrl); 4411 } 4412 4413 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, 4414 __be16 proto, u16 vid) 4415 { 4416 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4417 struct ixgbe_hw *hw = &adapter->hw; 4418 4419 /* add VID to filter table */ 4420 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 4421 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); 4422 4423 set_bit(vid, adapter->active_vlans); 4424 4425 return 0; 4426 } 4427 4428 static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) 4429 { 4430 u32 vlvf; 4431 int idx; 4432 4433 /* short cut the special case */ 4434 if (vlan == 0) 4435 return 0; 4436 4437 /* Search for the vlan id in the VLVF entries */ 4438 for (idx = IXGBE_VLVF_ENTRIES; --idx;) { 4439 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); 4440 if ((vlvf & VLAN_VID_MASK) == vlan) 4441 break; 4442 } 4443 4444 return idx; 4445 } 4446 4447 void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) 4448 { 4449 struct ixgbe_hw *hw = &adapter->hw; 4450 u32 bits, word; 4451 int idx; 4452 4453 idx = ixgbe_find_vlvf_entry(hw, vid); 4454 if (!idx) 4455 return; 4456 4457 /* See if any other pools are set for this VLAN filter 4458 * entry other than the PF. 4459 */ 4460 word = idx * 2 + (VMDQ_P(0) / 32); 4461 bits = ~BIT(VMDQ_P(0) % 32); 4462 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); 4463 4464 /* Disable the filter so this falls into the default pool. 
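	 * The entry is cleared only when no pool other than the PF still has
	 * a bit set in the VLVFB pair; while VLAN promiscuous mode is active
	 * the PF's own VLVFB bit is left in place and is cleaned up later
	 * when VLAN promiscuous mode is disabled.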
*/ 4465 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { 4466 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 4467 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); 4468 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); 4469 } 4470 } 4471 4472 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, 4473 __be16 proto, u16 vid) 4474 { 4475 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4476 struct ixgbe_hw *hw = &adapter->hw; 4477 4478 /* remove VID from filter table */ 4479 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 4480 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); 4481 4482 clear_bit(vid, adapter->active_vlans); 4483 4484 return 0; 4485 } 4486 4487 /** 4488 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping 4489 * @adapter: driver data 4490 */ 4491 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) 4492 { 4493 struct ixgbe_hw *hw = &adapter->hw; 4494 u32 vlnctrl; 4495 int i, j; 4496 4497 switch (hw->mac.type) { 4498 case ixgbe_mac_82598EB: 4499 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4500 vlnctrl &= ~IXGBE_VLNCTRL_VME; 4501 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4502 break; 4503 case ixgbe_mac_82599EB: 4504 case ixgbe_mac_X540: 4505 case ixgbe_mac_X550: 4506 case ixgbe_mac_X550EM_x: 4507 case ixgbe_mac_x550em_a: 4508 for (i = 0; i < adapter->num_rx_queues; i++) { 4509 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4510 4511 if (!netif_is_ixgbe(ring->netdev)) 4512 continue; 4513 4514 j = ring->reg_idx; 4515 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 4516 vlnctrl &= ~IXGBE_RXDCTL_VME; 4517 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 4518 } 4519 break; 4520 default: 4521 break; 4522 } 4523 } 4524 4525 /** 4526 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping 4527 * @adapter: driver data 4528 */ 4529 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) 4530 { 4531 struct ixgbe_hw *hw = &adapter->hw; 4532 u32 vlnctrl; 4533 int i, j; 4534 4535 switch (hw->mac.type) { 4536 case ixgbe_mac_82598EB: 4537 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4538 vlnctrl |= IXGBE_VLNCTRL_VME; 4539 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4540 break; 4541 case ixgbe_mac_82599EB: 4542 case ixgbe_mac_X540: 4543 case ixgbe_mac_X550: 4544 case ixgbe_mac_X550EM_x: 4545 case ixgbe_mac_x550em_a: 4546 for (i = 0; i < adapter->num_rx_queues; i++) { 4547 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4548 4549 if (!netif_is_ixgbe(ring->netdev)) 4550 continue; 4551 4552 j = ring->reg_idx; 4553 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 4554 vlnctrl |= IXGBE_RXDCTL_VME; 4555 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 4556 } 4557 break; 4558 default: 4559 break; 4560 } 4561 } 4562 4563 static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) 4564 { 4565 struct ixgbe_hw *hw = &adapter->hw; 4566 u32 vlnctrl, i; 4567 4568 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4569 4570 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { 4571 /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ 4572 vlnctrl |= IXGBE_VLNCTRL_VFE; 4573 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4574 } else { 4575 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 4576 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4577 return; 4578 } 4579 4580 /* Nothing to do for 82598 */ 4581 if (hw->mac.type == ixgbe_mac_82598EB) 4582 return; 4583 4584 /* We are already in VLAN promisc, nothing to do */ 4585 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) 4586 return; 4587 4588 /* Set flag so we don't redo unnecessary work */ 4589 adapter->flags2 |= 
IXGBE_FLAG2_VLAN_PROMISC; 4590 4591 /* Add PF to all active pools */ 4592 for (i = IXGBE_VLVF_ENTRIES; --i;) { 4593 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); 4594 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); 4595 4596 vlvfb |= BIT(VMDQ_P(0) % 32); 4597 IXGBE_WRITE_REG(hw, reg_offset, vlvfb); 4598 } 4599 4600 /* Set all bits in the VLAN filter table array */ 4601 for (i = hw->mac.vft_size; i--;) 4602 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); 4603 } 4604 4605 #define VFTA_BLOCK_SIZE 8 4606 static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) 4607 { 4608 struct ixgbe_hw *hw = &adapter->hw; 4609 u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; 4610 u32 vid_start = vfta_offset * 32; 4611 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); 4612 u32 i, vid, word, bits; 4613 4614 for (i = IXGBE_VLVF_ENTRIES; --i;) { 4615 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); 4616 4617 /* pull VLAN ID from VLVF */ 4618 vid = vlvf & VLAN_VID_MASK; 4619 4620 /* only concern outselves with a certain range */ 4621 if (vid < vid_start || vid >= vid_end) 4622 continue; 4623 4624 if (vlvf) { 4625 /* record VLAN ID in VFTA */ 4626 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); 4627 4628 /* if PF is part of this then continue */ 4629 if (test_bit(vid, adapter->active_vlans)) 4630 continue; 4631 } 4632 4633 /* remove PF from the pool */ 4634 word = i * 2 + VMDQ_P(0) / 32; 4635 bits = ~BIT(VMDQ_P(0) % 32); 4636 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); 4637 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); 4638 } 4639 4640 /* extract values from active_vlans and write back to VFTA */ 4641 for (i = VFTA_BLOCK_SIZE; i--;) { 4642 vid = (vfta_offset + i) * 32; 4643 word = vid / BITS_PER_LONG; 4644 bits = vid % BITS_PER_LONG; 4645 4646 vfta[i] |= adapter->active_vlans[word] >> bits; 4647 4648 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); 4649 } 4650 } 4651 4652 static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) 4653 { 4654 struct ixgbe_hw *hw = &adapter->hw; 4655 u32 vlnctrl, i; 4656 4657 /* Set VLAN filtering to enabled */ 4658 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4659 vlnctrl |= IXGBE_VLNCTRL_VFE; 4660 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4661 4662 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || 4663 hw->mac.type == ixgbe_mac_82598EB) 4664 return; 4665 4666 /* We are not in VLAN promisc, nothing to do */ 4667 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) 4668 return; 4669 4670 /* Set flag so we don't redo unnecessary work */ 4671 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; 4672 4673 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) 4674 ixgbe_scrub_vfta(adapter, i); 4675 } 4676 4677 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 4678 { 4679 u16 vid = 1; 4680 4681 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); 4682 4683 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) 4684 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 4685 } 4686 4687 /** 4688 * ixgbe_write_mc_addr_list - write multicast addresses to MTA 4689 * @netdev: network interface device structure 4690 * 4691 * Writes multicast address list to the MTA hash table. 
4692 * Returns: -ENOMEM on failure 4693 * 0 on no addresses written 4694 * X on writing X addresses to MTA 4695 **/ 4696 static int ixgbe_write_mc_addr_list(struct net_device *netdev) 4697 { 4698 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4699 struct ixgbe_hw *hw = &adapter->hw; 4700 4701 if (!netif_running(netdev)) 4702 return 0; 4703 4704 if (hw->mac.ops.update_mc_addr_list) 4705 hw->mac.ops.update_mc_addr_list(hw, netdev); 4706 else 4707 return -ENOMEM; 4708 4709 #ifdef CONFIG_PCI_IOV 4710 ixgbe_restore_vf_multicasts(adapter); 4711 #endif 4712 4713 return netdev_mc_count(netdev); 4714 } 4715 4716 #ifdef CONFIG_PCI_IOV 4717 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) 4718 { 4719 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4720 struct ixgbe_hw *hw = &adapter->hw; 4721 int i; 4722 4723 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4724 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; 4725 4726 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) 4727 hw->mac.ops.set_rar(hw, i, 4728 mac_table->addr, 4729 mac_table->pool, 4730 IXGBE_RAH_AV); 4731 else 4732 hw->mac.ops.clear_rar(hw, i); 4733 } 4734 } 4735 4736 #endif 4737 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) 4738 { 4739 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4740 struct ixgbe_hw *hw = &adapter->hw; 4741 int i; 4742 4743 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4744 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) 4745 continue; 4746 4747 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; 4748 4749 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) 4750 hw->mac.ops.set_rar(hw, i, 4751 mac_table->addr, 4752 mac_table->pool, 4753 IXGBE_RAH_AV); 4754 else 4755 hw->mac.ops.clear_rar(hw, i); 4756 } 4757 } 4758 4759 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) 4760 { 4761 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4762 struct ixgbe_hw *hw = &adapter->hw; 4763 int i; 4764 4765 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4766 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; 4767 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; 4768 } 4769 4770 ixgbe_sync_mac_table(adapter); 4771 } 4772 4773 static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) 4774 { 4775 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4776 struct ixgbe_hw *hw = &adapter->hw; 4777 int i, count = 0; 4778 4779 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4780 /* do not count default RAR as available */ 4781 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) 4782 continue; 4783 4784 /* only count unused and addresses that belong to us */ 4785 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { 4786 if (mac_table->pool != pool) 4787 continue; 4788 } 4789 4790 count++; 4791 } 4792 4793 return count; 4794 } 4795 4796 /* this function destroys the first RAR entry */ 4797 static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) 4798 { 4799 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4800 struct ixgbe_hw *hw = &adapter->hw; 4801 4802 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); 4803 mac_table->pool = VMDQ_P(0); 4804 4805 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; 4806 4807 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, 4808 IXGBE_RAH_AV); 4809 } 4810 4811 int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, 4812 const u8 *addr, u16 pool) 4813 { 4814 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4815 struct 
ixgbe_hw *hw = &adapter->hw; 4816 int i; 4817 4818 if (is_zero_ether_addr(addr)) 4819 return -EINVAL; 4820 4821 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4822 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) 4823 continue; 4824 4825 ether_addr_copy(mac_table->addr, addr); 4826 mac_table->pool = pool; 4827 4828 mac_table->state |= IXGBE_MAC_STATE_MODIFIED | 4829 IXGBE_MAC_STATE_IN_USE; 4830 4831 ixgbe_sync_mac_table(adapter); 4832 4833 return i; 4834 } 4835 4836 return -ENOMEM; 4837 } 4838 4839 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, 4840 const u8 *addr, u16 pool) 4841 { 4842 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; 4843 struct ixgbe_hw *hw = &adapter->hw; 4844 int i; 4845 4846 if (is_zero_ether_addr(addr)) 4847 return -EINVAL; 4848 4849 /* search table for addr, if found clear IN_USE flag and sync */ 4850 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { 4851 /* we can only delete an entry if it is in use */ 4852 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) 4853 continue; 4854 /* we only care about entries that belong to the given pool */ 4855 if (mac_table->pool != pool) 4856 continue; 4857 /* we only care about a specific MAC address */ 4858 if (!ether_addr_equal(addr, mac_table->addr)) 4859 continue; 4860 4861 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; 4862 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; 4863 4864 ixgbe_sync_mac_table(adapter); 4865 4866 return 0; 4867 } 4868 4869 return -ENOMEM; 4870 } 4871 4872 static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) 4873 { 4874 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4875 int ret; 4876 4877 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); 4878 4879 return min_t(int, ret, 0); 4880 } 4881 4882 static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) 4883 { 4884 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4885 4886 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); 4887 4888 return 0; 4889 } 4890 4891 /** 4892 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 4893 * @netdev: network interface device structure 4894 * 4895 * The set_rx_method entry point is called whenever the unicast/multicast 4896 * address list or the network interface flags are updated. This routine is 4897 * responsible for configuring the hardware for proper unicast, multicast and 4898 * promiscuous mode. 
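 *
 * On 82599 and later the PF pool's VMOLR register is updated alongside FCTRL
 * so that the multicast/unicast promiscuous decisions made here also apply to
 * the default VMDq pool.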
4899 **/ 4900 void ixgbe_set_rx_mode(struct net_device *netdev) 4901 { 4902 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4903 struct ixgbe_hw *hw = &adapter->hw; 4904 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; 4905 netdev_features_t features = netdev->features; 4906 int count; 4907 4908 /* Check for Promiscuous and All Multicast modes */ 4909 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4910 4911 /* set all bits that we expect to always be set */ 4912 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ 4913 fctrl |= IXGBE_FCTRL_BAM; 4914 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 4915 fctrl |= IXGBE_FCTRL_PMCF; 4916 4917 /* clear the bits we are changing the status of */ 4918 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4919 if (netdev->flags & IFF_PROMISC) { 4920 hw->addr_ctrl.user_set_promisc = true; 4921 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4922 vmolr |= IXGBE_VMOLR_MPE; 4923 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4924 } else { 4925 if (netdev->flags & IFF_ALLMULTI) { 4926 fctrl |= IXGBE_FCTRL_MPE; 4927 vmolr |= IXGBE_VMOLR_MPE; 4928 } 4929 hw->addr_ctrl.user_set_promisc = false; 4930 } 4931 4932 /* 4933 * Write addresses to available RAR registers, if there is not 4934 * sufficient space to store all the addresses then enable 4935 * unicast promiscuous mode 4936 */ 4937 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { 4938 fctrl |= IXGBE_FCTRL_UPE; 4939 vmolr |= IXGBE_VMOLR_ROPE; 4940 } 4941 4942 /* Write addresses to the MTA, if the attempt fails 4943 * then we should just turn on promiscuous mode so 4944 * that we can at least receive multicast traffic 4945 */ 4946 count = ixgbe_write_mc_addr_list(netdev); 4947 if (count < 0) { 4948 fctrl |= IXGBE_FCTRL_MPE; 4949 vmolr |= IXGBE_VMOLR_MPE; 4950 } else if (count) { 4951 vmolr |= IXGBE_VMOLR_ROMPE; 4952 } 4953 4954 if (hw->mac.type != ixgbe_mac_82598EB) { 4955 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & 4956 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | 4957 IXGBE_VMOLR_ROPE); 4958 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); 4959 } 4960 4961 /* This is useful for sniffing bad packets. 
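	 * When NETIF_F_RXALL is set, SBP, BAM and PMCF are enabled and DPF is
	 * cleared below so that bad frames, all broadcasts and MAC control
	 * frames (including pause frames) are passed up to the stack.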
*/ 4962 if (features & NETIF_F_RXALL) { 4963 /* UPE and MPE will be handled by normal PROMISC logic 4964 * in e1000e_set_rx_mode */ 4965 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ 4966 IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ 4967 IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ 4968 4969 fctrl &= ~(IXGBE_FCTRL_DPF); 4970 /* NOTE: VLAN filtering is disabled by setting PROMISC */ 4971 } 4972 4973 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4974 4975 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4976 ixgbe_vlan_strip_enable(adapter); 4977 else 4978 ixgbe_vlan_strip_disable(adapter); 4979 4980 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 4981 ixgbe_vlan_promisc_disable(adapter); 4982 else 4983 ixgbe_vlan_promisc_enable(adapter); 4984 } 4985 4986 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 4987 { 4988 int q_idx; 4989 4990 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) 4991 napi_enable(&adapter->q_vector[q_idx]->napi); 4992 } 4993 4994 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) 4995 { 4996 int q_idx; 4997 4998 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) 4999 napi_disable(&adapter->q_vector[q_idx]->napi); 5000 } 5001 5002 static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table) 5003 { 5004 struct ixgbe_adapter *adapter = netdev_priv(dev); 5005 struct ixgbe_hw *hw = &adapter->hw; 5006 struct udp_tunnel_info ti; 5007 5008 udp_tunnel_nic_get_port(dev, table, 0, &ti); 5009 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) 5010 adapter->vxlan_port = ti.port; 5011 else 5012 adapter->geneve_port = ti.port; 5013 5014 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 5015 ntohs(adapter->vxlan_port) | 5016 ntohs(adapter->geneve_port) << 5017 IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT); 5018 return 0; 5019 } 5020 5021 static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = { 5022 .sync_table = ixgbe_udp_tunnel_sync, 5023 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, 5024 .tables = { 5025 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 5026 }, 5027 }; 5028 5029 static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = { 5030 .sync_table = ixgbe_udp_tunnel_sync, 5031 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, 5032 .tables = { 5033 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 5034 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 5035 }, 5036 }; 5037 5038 #ifdef CONFIG_IXGBE_DCB 5039 /** 5040 * ixgbe_configure_dcb - Configure DCB hardware 5041 * @adapter: ixgbe adapter struct 5042 * 5043 * This is called by the driver on open to configure the DCB hardware. 5044 * This is also called by the gennetlink interface when reconfiguring 5045 * the DCB state. 
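 *
 * Two configuration paths exist: CEE DCBX programs the hardware from
 * adapter->dcb_cfg via the TC credit calculation, while IEEE mode uses the
 * cached ETS and PFC configuration instead.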
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
		if (hw->mac.type == ixgbe_mac_82598EB)
			netif_set_gso_max_size(adapter->netdev, 65536);
		return;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		netif_set_gso_max_size(adapter->netdev, 32768);

#ifdef IXGBE_FCOE
	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

	/* reconfigure the hardware */
	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
					       DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
					       DCB_RX_CONFIG);
		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
	} else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
		ixgbe_dcb_hw_ets(&adapter->hw,
				 adapter->ixgbe_ieee_ets,
				 max_frame);
		ixgbe_dcb_hw_pfc_config(&adapter->hw,
					adapter->ixgbe_ieee_pfc->pfc_en,
					adapter->ixgbe_ieee_ets->prio_tc);
	}

	/* Enable RSS Hash per TC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 msb = 0;
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;

		while (rss_i) {
			msb++;
			rss_i >>= 1;
		}

		/* write msb to all 8 TCs in one write */
		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
	}
}
#endif

/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20

/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;

#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
	    (pb == ixgbe_fcoe_get_tc(adapter)))
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

	/* Calculate delay value for device */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		dv_id = IXGBE_DV_X540(link, tc);
		break;
	default:
		dv_id = IXGBE_DV(link, tc);
		break;
	}

	/* Loopback switch introduces additional latency */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		dv_id += IXGBE_B2BT(tc);

	/* Delay value is calculated in bit times, convert to KB */
	kb = IXGBE_BT2KB(dv_id);
	rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case warn the user
	 * and do the best we can.
	 */
	if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) cannot provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
		marker = tc + 1;
	}

	return marker;
}

/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int tc;
	u32 dv_id;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
	    (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

	/* Calculate delay value for device */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		dv_id = IXGBE_LOW_DV_X540(tc);
		break;
	default:
		dv_id = IXGBE_LOW_DV(tc);
		break;
	}

	/* Delay value is calculated in bit times, convert to KB */
	return IXGBE_BT2KB(dv_id);
}

/*
 * ixgbe_pbthresh_setup - calculate and set up high and low water marks
 */
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int num_tc = adapter->hw_tcs;
	int i;

	if (!num_tc)
		num_tc = 1;

	for (i = 0; i < num_tc; i++) {
		hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
		hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);

		/* Low water marks must not be larger than high water marks */
		if (hw->fc.low_water[i] > hw->fc.high_water[i])
			hw->fc.low_water[i] = 0;
	}

	for (; i < MAX_TRAFFIC_CLASS; i++)
		hw->fc.high_water[i] = 0;
}

static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int hdrm;
	u8 tc = adapter->hw_tcs;

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		hdrm = 32 << adapter->fdir_pballoc;
	else
		hdrm = 0;

	hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
	ixgbe_pbthresh_setup(adapter);
}

static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *filter;
	u8 queue;

	spin_lock(&adapter->fdir_perfect_lock);

	if (!hlist_empty(&adapter->fdir_filter_list))
		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);

	hlist_for_each_entry_safe(filter, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
			queue = IXGBE_FDIR_DROP_QUEUE;
		} else {
			u32 ring = ethtool_get_flow_spec_ring(filter->action);
			u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);

			if (!vf && (ring >= adapter->num_rx_queues)) {
				e_err(drv, "FDIR restore failed without VF, ring: %u\n",
				      ring);
				continue;
			} else if (vf &&
				   ((vf > adapter->num_vfs) ||
				     ring >= adapter->num_rx_queues_per_pool)) {
				e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
				      vf, ring);
				continue;
			}

			/* Map the ring onto the absolute queue
index */ 5277 if (!vf) 5278 queue = adapter->rx_ring[ring]->reg_idx; 5279 else 5280 queue = ((vf - 1) * 5281 adapter->num_rx_queues_per_pool) + ring; 5282 } 5283 5284 ixgbe_fdir_write_perfect_filter_82599(hw, 5285 &filter->filter, filter->sw_idx, queue); 5286 } 5287 5288 spin_unlock(&adapter->fdir_perfect_lock); 5289 } 5290 5291 /** 5292 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 5293 * @rx_ring: ring to free buffers from 5294 **/ 5295 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) 5296 { 5297 u16 i = rx_ring->next_to_clean; 5298 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; 5299 5300 if (rx_ring->xsk_pool) { 5301 ixgbe_xsk_clean_rx_ring(rx_ring); 5302 goto skip_free; 5303 } 5304 5305 /* Free all the Rx ring sk_buffs */ 5306 while (i != rx_ring->next_to_alloc) { 5307 if (rx_buffer->skb) { 5308 struct sk_buff *skb = rx_buffer->skb; 5309 if (IXGBE_CB(skb)->page_released) 5310 dma_unmap_page_attrs(rx_ring->dev, 5311 IXGBE_CB(skb)->dma, 5312 ixgbe_rx_pg_size(rx_ring), 5313 DMA_FROM_DEVICE, 5314 IXGBE_RX_DMA_ATTR); 5315 dev_kfree_skb(skb); 5316 } 5317 5318 /* Invalidate cache lines that may have been written to by 5319 * device so that we avoid corrupting memory. 5320 */ 5321 dma_sync_single_range_for_cpu(rx_ring->dev, 5322 rx_buffer->dma, 5323 rx_buffer->page_offset, 5324 ixgbe_rx_bufsz(rx_ring), 5325 DMA_FROM_DEVICE); 5326 5327 /* free resources associated with mapping */ 5328 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 5329 ixgbe_rx_pg_size(rx_ring), 5330 DMA_FROM_DEVICE, 5331 IXGBE_RX_DMA_ATTR); 5332 __page_frag_cache_drain(rx_buffer->page, 5333 rx_buffer->pagecnt_bias); 5334 5335 i++; 5336 rx_buffer++; 5337 if (i == rx_ring->count) { 5338 i = 0; 5339 rx_buffer = rx_ring->rx_buffer_info; 5340 } 5341 } 5342 5343 skip_free: 5344 rx_ring->next_to_alloc = 0; 5345 rx_ring->next_to_clean = 0; 5346 rx_ring->next_to_use = 0; 5347 } 5348 5349 static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, 5350 struct ixgbe_fwd_adapter *accel) 5351 { 5352 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 5353 int num_tc = netdev_get_num_tc(adapter->netdev); 5354 struct net_device *vdev = accel->netdev; 5355 int i, baseq, err; 5356 5357 baseq = accel->pool * adapter->num_rx_queues_per_pool; 5358 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", 5359 accel->pool, adapter->num_rx_pools, 5360 baseq, baseq + adapter->num_rx_queues_per_pool); 5361 5362 accel->rx_base_queue = baseq; 5363 accel->tx_base_queue = baseq; 5364 5365 /* record configuration for macvlan interface in vdev */ 5366 for (i = 0; i < num_tc; i++) 5367 netdev_bind_sb_channel_queue(adapter->netdev, vdev, 5368 i, rss_i, baseq + (rss_i * i)); 5369 5370 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) 5371 adapter->rx_ring[baseq + i]->netdev = vdev; 5372 5373 /* Guarantee all rings are updated before we update the 5374 * MAC address filter. 5375 */ 5376 wmb(); 5377 5378 /* ixgbe_add_mac_filter will return an index if it succeeds, so we 5379 * need to only treat it as an error value if it is negative. 
5380 */ 5381 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, 5382 VMDQ_P(accel->pool)); 5383 if (err >= 0) 5384 return 0; 5385 5386 /* if we cannot add the MAC rule then disable the offload */ 5387 macvlan_release_l2fw_offload(vdev); 5388 5389 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) 5390 adapter->rx_ring[baseq + i]->netdev = NULL; 5391 5392 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n"); 5393 5394 /* unbind the queues and drop the subordinate channel config */ 5395 netdev_unbind_sb_channel(adapter->netdev, vdev); 5396 netdev_set_sb_channel(vdev, 0); 5397 5398 clear_bit(accel->pool, adapter->fwd_bitmask); 5399 kfree(accel); 5400 5401 return err; 5402 } 5403 5404 static int ixgbe_macvlan_up(struct net_device *vdev, 5405 struct netdev_nested_priv *priv) 5406 { 5407 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; 5408 struct ixgbe_fwd_adapter *accel; 5409 5410 if (!netif_is_macvlan(vdev)) 5411 return 0; 5412 5413 accel = macvlan_accel_priv(vdev); 5414 if (!accel) 5415 return 0; 5416 5417 ixgbe_fwd_ring_up(adapter, accel); 5418 5419 return 0; 5420 } 5421 5422 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) 5423 { 5424 struct netdev_nested_priv priv = { 5425 .data = (void *)adapter, 5426 }; 5427 5428 netdev_walk_all_upper_dev_rcu(adapter->netdev, 5429 ixgbe_macvlan_up, &priv); 5430 } 5431 5432 static void ixgbe_configure(struct ixgbe_adapter *adapter) 5433 { 5434 struct ixgbe_hw *hw = &adapter->hw; 5435 5436 ixgbe_configure_pb(adapter); 5437 #ifdef CONFIG_IXGBE_DCB 5438 ixgbe_configure_dcb(adapter); 5439 #endif 5440 /* 5441 * We must restore virtualization before VLANs or else 5442 * the VLVF registers will not be populated 5443 */ 5444 ixgbe_configure_virtualization(adapter); 5445 5446 ixgbe_set_rx_mode(adapter->netdev); 5447 ixgbe_restore_vlan(adapter); 5448 ixgbe_ipsec_restore(adapter); 5449 5450 switch (hw->mac.type) { 5451 case ixgbe_mac_82599EB: 5452 case ixgbe_mac_X540: 5453 hw->mac.ops.disable_rx_buff(hw); 5454 break; 5455 default: 5456 break; 5457 } 5458 5459 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 5460 ixgbe_init_fdir_signature_82599(&adapter->hw, 5461 adapter->fdir_pballoc); 5462 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 5463 ixgbe_init_fdir_perfect_82599(&adapter->hw, 5464 adapter->fdir_pballoc); 5465 ixgbe_fdir_filter_restore(adapter); 5466 } 5467 5468 switch (hw->mac.type) { 5469 case ixgbe_mac_82599EB: 5470 case ixgbe_mac_X540: 5471 hw->mac.ops.enable_rx_buff(hw); 5472 break; 5473 default: 5474 break; 5475 } 5476 5477 #ifdef CONFIG_IXGBE_DCA 5478 /* configure DCA */ 5479 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) 5480 ixgbe_setup_dca(adapter); 5481 #endif /* CONFIG_IXGBE_DCA */ 5482 5483 #ifdef IXGBE_FCOE 5484 /* configure FCoE L2 filters, redirection table, and Rx control */ 5485 ixgbe_configure_fcoe(adapter); 5486 5487 #endif /* IXGBE_FCOE */ 5488 ixgbe_configure_tx(adapter); 5489 ixgbe_configure_rx(adapter); 5490 ixgbe_configure_dfwd(adapter); 5491 } 5492 5493 /** 5494 * ixgbe_sfp_link_config - set up SFP+ link 5495 * @adapter: pointer to private adapter struct 5496 **/ 5497 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) 5498 { 5499 /* 5500 * We are assuming the worst case scenario here, and that 5501 * is that an SFP was inserted/removed after the reset 5502 * but before SFP detection was enabled. 
As such the best 5503 * solution is to just start searching as soon as we start 5504 */ 5505 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 5506 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 5507 5508 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; 5509 adapter->sfp_poll_time = 0; 5510 } 5511 5512 /** 5513 * ixgbe_non_sfp_link_config - set up non-SFP+ link 5514 * @hw: pointer to private hardware struct 5515 * 5516 * Returns 0 on success, negative on failure 5517 **/ 5518 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) 5519 { 5520 u32 speed; 5521 bool autoneg, link_up = false; 5522 int ret = IXGBE_ERR_LINK_SETUP; 5523 5524 if (hw->mac.ops.check_link) 5525 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); 5526 5527 if (ret) 5528 return ret; 5529 5530 speed = hw->phy.autoneg_advertised; 5531 if (!speed && hw->mac.ops.get_link_capabilities) { 5532 ret = hw->mac.ops.get_link_capabilities(hw, &speed, 5533 &autoneg); 5534 /* remove NBASE-T speeds from default autonegotiation 5535 * to accommodate broken network switches in the field 5536 * which cannot cope with advertised NBASE-T speeds 5537 */ 5538 speed &= ~(IXGBE_LINK_SPEED_5GB_FULL | 5539 IXGBE_LINK_SPEED_2_5GB_FULL); 5540 } 5541 5542 if (ret) 5543 return ret; 5544 5545 if (hw->mac.ops.setup_link) 5546 ret = hw->mac.ops.setup_link(hw, speed, link_up); 5547 5548 return ret; 5549 } 5550 5551 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) 5552 { 5553 struct ixgbe_hw *hw = &adapter->hw; 5554 u32 gpie = 0; 5555 5556 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 5557 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 5558 IXGBE_GPIE_OCD; 5559 gpie |= IXGBE_GPIE_EIAME; 5560 /* 5561 * use EIAM to auto-mask when MSI-X interrupt is asserted 5562 * this saves a register write for every interrupt 5563 */ 5564 switch (hw->mac.type) { 5565 case ixgbe_mac_82598EB: 5566 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5567 break; 5568 case ixgbe_mac_82599EB: 5569 case ixgbe_mac_X540: 5570 case ixgbe_mac_X550: 5571 case ixgbe_mac_X550EM_x: 5572 case ixgbe_mac_x550em_a: 5573 default: 5574 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5575 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5576 break; 5577 } 5578 } else { 5579 /* legacy interrupts, use EIAM to auto-mask when reading EICR, 5580 * specifically only auto mask tx and rx interrupts */ 5581 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5582 } 5583 5584 /* XXX: to interrupt immediately for EICS writes, enable this */ 5585 /* gpie |= IXGBE_GPIE_EIMEN; */ 5586 5587 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 5588 gpie &= ~IXGBE_GPIE_VTMODE_MASK; 5589 5590 switch (adapter->ring_feature[RING_F_VMDQ].mask) { 5591 case IXGBE_82599_VMDQ_8Q_MASK: 5592 gpie |= IXGBE_GPIE_VTMODE_16; 5593 break; 5594 case IXGBE_82599_VMDQ_4Q_MASK: 5595 gpie |= IXGBE_GPIE_VTMODE_32; 5596 break; 5597 default: 5598 gpie |= IXGBE_GPIE_VTMODE_64; 5599 break; 5600 } 5601 } 5602 5603 /* Enable Thermal over heat sensor interrupt */ 5604 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 5605 switch (adapter->hw.mac.type) { 5606 case ixgbe_mac_82599EB: 5607 gpie |= IXGBE_SDP0_GPIEN_8259X; 5608 break; 5609 default: 5610 break; 5611 } 5612 } 5613 5614 /* Enable fan failure interrupt */ 5615 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 5616 gpie |= IXGBE_SDP1_GPIEN(hw); 5617 5618 switch (hw->mac.type) { 5619 case ixgbe_mac_82599EB: 5620 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; 5621 break; 5622 case ixgbe_mac_X550EM_x: 5623 case ixgbe_mac_x550em_a: 
5624 gpie |= IXGBE_SDP0_GPIEN_X540; 5625 break; 5626 default: 5627 break; 5628 } 5629 5630 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 5631 } 5632 5633 static void ixgbe_up_complete(struct ixgbe_adapter *adapter) 5634 { 5635 struct ixgbe_hw *hw = &adapter->hw; 5636 int err; 5637 u32 ctrl_ext; 5638 5639 ixgbe_get_hw_control(adapter); 5640 ixgbe_setup_gpie(adapter); 5641 5642 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 5643 ixgbe_configure_msix(adapter); 5644 else 5645 ixgbe_configure_msi_and_legacy(adapter); 5646 5647 /* enable the optics for 82599 SFP+ fiber */ 5648 if (hw->mac.ops.enable_tx_laser) 5649 hw->mac.ops.enable_tx_laser(hw); 5650 5651 if (hw->phy.ops.set_phy_power) 5652 hw->phy.ops.set_phy_power(hw, true); 5653 5654 smp_mb__before_atomic(); 5655 clear_bit(__IXGBE_DOWN, &adapter->state); 5656 ixgbe_napi_enable_all(adapter); 5657 5658 if (ixgbe_is_sfp(hw)) { 5659 ixgbe_sfp_link_config(adapter); 5660 } else { 5661 err = ixgbe_non_sfp_link_config(hw); 5662 if (err) 5663 e_err(probe, "link_config FAILED %d\n", err); 5664 } 5665 5666 /* clear any pending interrupts, may auto mask */ 5667 IXGBE_READ_REG(hw, IXGBE_EICR); 5668 ixgbe_irq_enable(adapter, true, true); 5669 5670 /* 5671 * If this adapter has a fan, check to see if we had a failure 5672 * before we enabled the interrupt. 5673 */ 5674 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 5675 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 5676 if (esdp & IXGBE_ESDP_SDP1) 5677 e_crit(drv, "Fan has stopped, replace the adapter\n"); 5678 } 5679 5680 /* bring the link up in the watchdog, this could race with our first 5681 * link up interrupt but shouldn't be a problem */ 5682 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 5683 adapter->link_check_timeout = jiffies; 5684 mod_timer(&adapter->service_timer, jiffies); 5685 5686 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 5687 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5688 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 5689 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5690 } 5691 5692 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) 5693 { 5694 /* put off any impending NetWatchDogTimeout */ 5695 netif_trans_update(adapter->netdev); 5696 5697 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 5698 usleep_range(1000, 2000); 5699 if (adapter->hw.phy.type == ixgbe_phy_fw) 5700 ixgbe_watchdog_link_is_down(adapter); 5701 ixgbe_down(adapter); 5702 /* 5703 * If SR-IOV enabled then wait a bit before bringing the adapter 5704 * back up to give the VFs time to respond to the reset. The 5705 * two second wait is based upon the watchdog timer cycle in 5706 * the VF driver. 5707 */ 5708 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 5709 msleep(2000); 5710 ixgbe_up(adapter); 5711 clear_bit(__IXGBE_RESETTING, &adapter->state); 5712 } 5713 5714 void ixgbe_up(struct ixgbe_adapter *adapter) 5715 { 5716 /* hardware has been reset, we need to reload some things */ 5717 ixgbe_configure(adapter); 5718 5719 ixgbe_up_complete(adapter); 5720 } 5721 5722 static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter) 5723 { 5724 u16 devctl2; 5725 5726 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2); 5727 5728 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) { 5729 case IXGBE_PCIDEVCTRL2_17_34s: 5730 case IXGBE_PCIDEVCTRL2_4_8s: 5731 /* For now we cap the upper limit on delay to 2 seconds 5732 * as we end up going up to 34 seconds of delay in worst 5733 * case timeout value. 
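	 * (Those two longer ranges therefore fall through to the 1-2 s case
	 * below and return the same capped 2 s value.)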
5734 */ 5735 case IXGBE_PCIDEVCTRL2_1_2s: 5736 return 2000000ul; /* 2.0 s */ 5737 case IXGBE_PCIDEVCTRL2_260_520ms: 5738 return 520000ul; /* 520 ms */ 5739 case IXGBE_PCIDEVCTRL2_65_130ms: 5740 return 130000ul; /* 130 ms */ 5741 case IXGBE_PCIDEVCTRL2_16_32ms: 5742 return 32000ul; /* 32 ms */ 5743 case IXGBE_PCIDEVCTRL2_1_2ms: 5744 return 2000ul; /* 2 ms */ 5745 case IXGBE_PCIDEVCTRL2_50_100us: 5746 return 100ul; /* 100 us */ 5747 case IXGBE_PCIDEVCTRL2_16_32ms_def: 5748 return 32000ul; /* 32 ms */ 5749 default: 5750 break; 5751 } 5752 5753 /* We shouldn't need to hit this path, but just in case default as 5754 * though completion timeout is not supported and support 32ms. 5755 */ 5756 return 32000ul; 5757 } 5758 5759 void ixgbe_disable_rx(struct ixgbe_adapter *adapter) 5760 { 5761 unsigned long wait_delay, delay_interval; 5762 struct ixgbe_hw *hw = &adapter->hw; 5763 int i, wait_loop; 5764 u32 rxdctl; 5765 5766 /* disable receives */ 5767 hw->mac.ops.disable_rx(hw); 5768 5769 if (ixgbe_removed(hw->hw_addr)) 5770 return; 5771 5772 /* disable all enabled Rx queues */ 5773 for (i = 0; i < adapter->num_rx_queues; i++) { 5774 struct ixgbe_ring *ring = adapter->rx_ring[i]; 5775 u8 reg_idx = ring->reg_idx; 5776 5777 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 5778 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 5779 rxdctl |= IXGBE_RXDCTL_SWFLSH; 5780 5781 /* write value back with RXDCTL.ENABLE bit cleared */ 5782 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 5783 } 5784 5785 /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ 5786 if (hw->mac.type == ixgbe_mac_82598EB && 5787 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 5788 return; 5789 5790 /* Determine our minimum delay interval. We will increase this value 5791 * with each subsequent test. This way if the device returns quickly 5792 * we should spend as little time as possible waiting, however as 5793 * the time increases we will wait for larger periods of time. 5794 * 5795 * The trick here is that we increase the interval using the 5796 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result 5797 * of that wait is that it totals up to 100x whatever interval we 5798 * choose. Since our minimum wait is 100us we can just divide the 5799 * total timeout by 100 to get our minimum delay interval. 5800 */ 5801 delay_interval = ixgbe_get_completion_timeout(adapter) / 100; 5802 5803 wait_loop = IXGBE_MAX_RX_DESC_POLL; 5804 wait_delay = delay_interval; 5805 5806 while (wait_loop--) { 5807 usleep_range(wait_delay, wait_delay + 10); 5808 wait_delay += delay_interval * 2; 5809 rxdctl = 0; 5810 5811 /* OR together the reading of all the active RXDCTL registers, 5812 * and then test the result. We need the disable to complete 5813 * before we start freeing the memory and invalidating the 5814 * DMA mappings. 
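	 *
	 * Worked example of the pattern described above, assuming the usual
	 * IXGBE_MAX_RX_DESC_POLL of 10 polls: with the capped 2 s completion
	 * timeout, delay_interval is 2000000 / 100 = 20000 us, so the
	 * successive sleeps are roughly 20 ms, 60 ms, 100 ms, ... 380 ms,
	 * which sum to 100 * 20 ms = 2 s.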
5815 */ 5816 for (i = 0; i < adapter->num_rx_queues; i++) { 5817 struct ixgbe_ring *ring = adapter->rx_ring[i]; 5818 u8 reg_idx = ring->reg_idx; 5819 5820 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 5821 } 5822 5823 if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) 5824 return; 5825 } 5826 5827 e_err(drv, 5828 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); 5829 } 5830 5831 void ixgbe_disable_tx(struct ixgbe_adapter *adapter) 5832 { 5833 unsigned long wait_delay, delay_interval; 5834 struct ixgbe_hw *hw = &adapter->hw; 5835 int i, wait_loop; 5836 u32 txdctl; 5837 5838 if (ixgbe_removed(hw->hw_addr)) 5839 return; 5840 5841 /* disable all enabled Tx queues */ 5842 for (i = 0; i < adapter->num_tx_queues; i++) { 5843 struct ixgbe_ring *ring = adapter->tx_ring[i]; 5844 u8 reg_idx = ring->reg_idx; 5845 5846 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 5847 } 5848 5849 /* disable all enabled XDP Tx queues */ 5850 for (i = 0; i < adapter->num_xdp_queues; i++) { 5851 struct ixgbe_ring *ring = adapter->xdp_ring[i]; 5852 u8 reg_idx = ring->reg_idx; 5853 5854 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 5855 } 5856 5857 /* If the link is not up there shouldn't be much in the way of 5858 * pending transactions. Those that are left will be flushed out 5859 * when the reset logic goes through the flush sequence to clean out 5860 * the pending Tx transactions. 5861 */ 5862 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 5863 goto dma_engine_disable; 5864 5865 /* Determine our minimum delay interval. We will increase this value 5866 * with each subsequent test. This way if the device returns quickly 5867 * we should spend as little time as possible waiting, however as 5868 * the time increases we will wait for larger periods of time. 5869 * 5870 * The trick here is that we increase the interval using the 5871 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result 5872 * of that wait is that it totals up to 100x whatever interval we 5873 * choose. Since our minimum wait is 100us we can just divide the 5874 * total timeout by 100 to get our minimum delay interval. 5875 */ 5876 delay_interval = ixgbe_get_completion_timeout(adapter) / 100; 5877 5878 wait_loop = IXGBE_MAX_RX_DESC_POLL; 5879 wait_delay = delay_interval; 5880 5881 while (wait_loop--) { 5882 usleep_range(wait_delay, wait_delay + 10); 5883 wait_delay += delay_interval * 2; 5884 txdctl = 0; 5885 5886 /* OR together the reading of all the active TXDCTL registers, 5887 * and then test the result. We need the disable to complete 5888 * before we start freeing the memory and invalidating the 5889 * DMA mappings. 
5890 */ 5891 for (i = 0; i < adapter->num_tx_queues; i++) { 5892 struct ixgbe_ring *ring = adapter->tx_ring[i]; 5893 u8 reg_idx = ring->reg_idx; 5894 5895 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 5896 } 5897 for (i = 0; i < adapter->num_xdp_queues; i++) { 5898 struct ixgbe_ring *ring = adapter->xdp_ring[i]; 5899 u8 reg_idx = ring->reg_idx; 5900 5901 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 5902 } 5903 5904 if (!(txdctl & IXGBE_TXDCTL_ENABLE)) 5905 goto dma_engine_disable; 5906 } 5907 5908 e_err(drv, 5909 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); 5910 5911 dma_engine_disable: 5912 /* Disable the Tx DMA engine on 82599 and later MAC */ 5913 switch (hw->mac.type) { 5914 case ixgbe_mac_82599EB: 5915 case ixgbe_mac_X540: 5916 case ixgbe_mac_X550: 5917 case ixgbe_mac_X550EM_x: 5918 case ixgbe_mac_x550em_a: 5919 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 5920 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 5921 ~IXGBE_DMATXCTL_TE)); 5922 fallthrough; 5923 default: 5924 break; 5925 } 5926 } 5927 5928 void ixgbe_reset(struct ixgbe_adapter *adapter) 5929 { 5930 struct ixgbe_hw *hw = &adapter->hw; 5931 struct net_device *netdev = adapter->netdev; 5932 int err; 5933 5934 if (ixgbe_removed(hw->hw_addr)) 5935 return; 5936 /* lock SFP init bit to prevent race conditions with the watchdog */ 5937 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 5938 usleep_range(1000, 2000); 5939 5940 /* clear all SFP and link config related flags while holding SFP_INIT */ 5941 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | 5942 IXGBE_FLAG2_SFP_NEEDS_RESET); 5943 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 5944 5945 err = hw->mac.ops.init_hw(hw); 5946 switch (err) { 5947 case 0: 5948 case IXGBE_ERR_SFP_NOT_PRESENT: 5949 case IXGBE_ERR_SFP_NOT_SUPPORTED: 5950 break; 5951 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 5952 e_dev_err("master disable timed out\n"); 5953 break; 5954 case IXGBE_ERR_EEPROM_VERSION: 5955 /* We are running on a pre-production device, log a warning */ 5956 e_dev_warn("This device is a pre-production adapter/LOM. " 5957 "Please be aware there may be issues associated with " 5958 "your hardware. 
If you are experiencing problems " 5959 "please contact your Intel or hardware " 5960 "representative who provided you with this " 5961 "hardware.\n"); 5962 break; 5963 default: 5964 e_dev_err("Hardware Error: %d\n", err); 5965 } 5966 5967 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 5968 5969 /* flush entries out of MAC table */ 5970 ixgbe_flush_sw_mac_table(adapter); 5971 __dev_uc_unsync(netdev, NULL); 5972 5973 /* do not flush user set addresses */ 5974 ixgbe_mac_set_default_filter(adapter); 5975 5976 /* update SAN MAC vmdq pool selection */ 5977 if (hw->mac.san_mac_rar_index) 5978 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 5979 5980 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 5981 ixgbe_ptp_reset(adapter); 5982 5983 if (hw->phy.ops.set_phy_power) { 5984 if (!netif_running(adapter->netdev) && !adapter->wol) 5985 hw->phy.ops.set_phy_power(hw, false); 5986 else 5987 hw->phy.ops.set_phy_power(hw, true); 5988 } 5989 } 5990 5991 /** 5992 * ixgbe_clean_tx_ring - Free Tx Buffers 5993 * @tx_ring: ring to be cleaned 5994 **/ 5995 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) 5996 { 5997 u16 i = tx_ring->next_to_clean; 5998 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 5999 6000 if (tx_ring->xsk_pool) { 6001 ixgbe_xsk_clean_tx_ring(tx_ring); 6002 goto out; 6003 } 6004 6005 while (i != tx_ring->next_to_use) { 6006 union ixgbe_adv_tx_desc *eop_desc, *tx_desc; 6007 6008 /* Free all the Tx ring sk_buffs */ 6009 if (ring_is_xdp(tx_ring)) 6010 xdp_return_frame(tx_buffer->xdpf); 6011 else 6012 dev_kfree_skb_any(tx_buffer->skb); 6013 6014 /* unmap skb header data */ 6015 dma_unmap_single(tx_ring->dev, 6016 dma_unmap_addr(tx_buffer, dma), 6017 dma_unmap_len(tx_buffer, len), 6018 DMA_TO_DEVICE); 6019 6020 /* check for eop_desc to determine the end of the packet */ 6021 eop_desc = tx_buffer->next_to_watch; 6022 tx_desc = IXGBE_TX_DESC(tx_ring, i); 6023 6024 /* unmap remaining buffers */ 6025 while (tx_desc != eop_desc) { 6026 tx_buffer++; 6027 tx_desc++; 6028 i++; 6029 if (unlikely(i == tx_ring->count)) { 6030 i = 0; 6031 tx_buffer = tx_ring->tx_buffer_info; 6032 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 6033 } 6034 6035 /* unmap any remaining paged data */ 6036 if (dma_unmap_len(tx_buffer, len)) 6037 dma_unmap_page(tx_ring->dev, 6038 dma_unmap_addr(tx_buffer, dma), 6039 dma_unmap_len(tx_buffer, len), 6040 DMA_TO_DEVICE); 6041 } 6042 6043 /* move us one more past the eop_desc for start of next pkt */ 6044 tx_buffer++; 6045 i++; 6046 if (unlikely(i == tx_ring->count)) { 6047 i = 0; 6048 tx_buffer = tx_ring->tx_buffer_info; 6049 } 6050 } 6051 6052 /* reset BQL for queue */ 6053 if (!ring_is_xdp(tx_ring)) 6054 netdev_tx_reset_queue(txring_txq(tx_ring)); 6055 6056 out: 6057 /* reset next_to_use and next_to_clean */ 6058 tx_ring->next_to_use = 0; 6059 tx_ring->next_to_clean = 0; 6060 } 6061 6062 /** 6063 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues 6064 * @adapter: board private structure 6065 **/ 6066 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) 6067 { 6068 int i; 6069 6070 for (i = 0; i < adapter->num_rx_queues; i++) 6071 ixgbe_clean_rx_ring(adapter->rx_ring[i]); 6072 } 6073 6074 /** 6075 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues 6076 * @adapter: board private structure 6077 **/ 6078 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) 6079 { 6080 int i; 6081 6082 for (i = 0; i < adapter->num_tx_queues; i++) 6083 ixgbe_clean_tx_ring(adapter->tx_ring[i]); 6084 for (i = 0; i < adapter->num_xdp_queues; 
i++) 6085 ixgbe_clean_tx_ring(adapter->xdp_ring[i]); 6086 } 6087 6088 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) 6089 { 6090 struct hlist_node *node2; 6091 struct ixgbe_fdir_filter *filter; 6092 6093 spin_lock(&adapter->fdir_perfect_lock); 6094 6095 hlist_for_each_entry_safe(filter, node2, 6096 &adapter->fdir_filter_list, fdir_node) { 6097 hlist_del(&filter->fdir_node); 6098 kfree(filter); 6099 } 6100 adapter->fdir_filter_count = 0; 6101 6102 spin_unlock(&adapter->fdir_perfect_lock); 6103 } 6104 6105 void ixgbe_down(struct ixgbe_adapter *adapter) 6106 { 6107 struct net_device *netdev = adapter->netdev; 6108 struct ixgbe_hw *hw = &adapter->hw; 6109 int i; 6110 6111 /* signal that we are down to the interrupt handler */ 6112 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) 6113 return; /* do nothing if already down */ 6114 6115 /* Shut off incoming Tx traffic */ 6116 netif_tx_stop_all_queues(netdev); 6117 6118 /* call carrier off first to avoid false dev_watchdog timeouts */ 6119 netif_carrier_off(netdev); 6120 netif_tx_disable(netdev); 6121 6122 /* Disable Rx */ 6123 ixgbe_disable_rx(adapter); 6124 6125 /* synchronize_rcu() needed for pending XDP buffers to drain */ 6126 if (adapter->xdp_ring[0]) 6127 synchronize_rcu(); 6128 6129 ixgbe_irq_disable(adapter); 6130 6131 ixgbe_napi_disable_all(adapter); 6132 6133 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 6134 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 6135 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 6136 6137 del_timer_sync(&adapter->service_timer); 6138 6139 if (adapter->num_vfs) { 6140 /* Clear EITR Select mapping */ 6141 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); 6142 6143 /* Mark all the VFs as inactive */ 6144 for (i = 0 ; i < adapter->num_vfs; i++) 6145 adapter->vfinfo[i].clear_to_send = false; 6146 6147 /* ping all the active vfs to let them know we are going down */ 6148 ixgbe_ping_all_vfs(adapter); 6149 6150 /* Disable all VFTE/VFRE TX/RX */ 6151 ixgbe_disable_tx_rx(adapter); 6152 } 6153 6154 /* disable transmits in the hardware now that interrupts are off */ 6155 ixgbe_disable_tx(adapter); 6156 6157 if (!pci_channel_offline(adapter->pdev)) 6158 ixgbe_reset(adapter); 6159 6160 /* power down the optics for 82599 SFP+ fiber */ 6161 if (hw->mac.ops.disable_tx_laser) 6162 hw->mac.ops.disable_tx_laser(hw); 6163 6164 ixgbe_clean_all_tx_rings(adapter); 6165 ixgbe_clean_all_rx_rings(adapter); 6166 } 6167 6168 /** 6169 * ixgbe_set_eee_capable - helper function to determine EEE support on X550 6170 * @adapter: board private structure 6171 */ 6172 static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) 6173 { 6174 struct ixgbe_hw *hw = &adapter->hw; 6175 6176 switch (hw->device_id) { 6177 case IXGBE_DEV_ID_X550EM_A_1G_T: 6178 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 6179 if (!hw->phy.eee_speeds_supported) 6180 break; 6181 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; 6182 if (!hw->phy.eee_speeds_advertised) 6183 break; 6184 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; 6185 break; 6186 default: 6187 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; 6188 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; 6189 break; 6190 } 6191 } 6192 6193 /** 6194 * ixgbe_tx_timeout - Respond to a Tx Hang 6195 * @netdev: network interface device structure 6196 * @txqueue: queue number that timed out 6197 **/ 6198 static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) 6199 { 6200 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6201 6202 /* Do the reset outside of interrupt 
context */ 6203 ixgbe_tx_timeout_reset(adapter); 6204 } 6205 6206 #ifdef CONFIG_IXGBE_DCB 6207 static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) 6208 { 6209 struct ixgbe_hw *hw = &adapter->hw; 6210 struct tc_configuration *tc; 6211 int j; 6212 6213 switch (hw->mac.type) { 6214 case ixgbe_mac_82598EB: 6215 case ixgbe_mac_82599EB: 6216 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; 6217 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; 6218 break; 6219 case ixgbe_mac_X540: 6220 case ixgbe_mac_X550: 6221 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; 6222 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; 6223 break; 6224 case ixgbe_mac_X550EM_x: 6225 case ixgbe_mac_x550em_a: 6226 default: 6227 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; 6228 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; 6229 break; 6230 } 6231 6232 /* Configure DCB traffic classes */ 6233 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { 6234 tc = &adapter->dcb_cfg.tc_config[j]; 6235 tc->path[DCB_TX_CONFIG].bwg_id = 0; 6236 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); 6237 tc->path[DCB_RX_CONFIG].bwg_id = 0; 6238 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); 6239 tc->dcb_pfc = pfc_disabled; 6240 } 6241 6242 /* Initialize default user to priority mapping, UPx->TC0 */ 6243 tc = &adapter->dcb_cfg.tc_config[0]; 6244 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 6245 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 6246 6247 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; 6248 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 6249 adapter->dcb_cfg.pfc_mode_enable = false; 6250 adapter->dcb_set_bitmap = 0x00; 6251 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) 6252 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; 6253 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 6254 sizeof(adapter->temp_dcb_cfg)); 6255 } 6256 #endif 6257 6258 /** 6259 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) 6260 * @adapter: board private structure to initialize 6261 * @ii: pointer to ixgbe_info for device 6262 * 6263 * ixgbe_sw_init initializes the Adapter private data structure. 6264 * Fields are initialized based on PCI device information and 6265 * OS network device settings (MTU size). 
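 *
 * Returns 0 on success, negative on failure (for example -ENOMEM when one
 * of the lookup tables cannot be allocated, or -EIO when EEPROM
 * initialization fails).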
6266 **/ 6267 static int ixgbe_sw_init(struct ixgbe_adapter *adapter, 6268 const struct ixgbe_info *ii) 6269 { 6270 struct ixgbe_hw *hw = &adapter->hw; 6271 struct pci_dev *pdev = adapter->pdev; 6272 unsigned int rss, fdir; 6273 u32 fwsm; 6274 int i; 6275 6276 /* PCI config space info */ 6277 6278 hw->vendor_id = pdev->vendor; 6279 hw->device_id = pdev->device; 6280 hw->revision_id = pdev->revision; 6281 hw->subsystem_vendor_id = pdev->subsystem_vendor; 6282 hw->subsystem_device_id = pdev->subsystem_device; 6283 6284 /* get_invariants needs the device IDs */ 6285 ii->get_invariants(hw); 6286 6287 /* Set common capability flags and settings */ 6288 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); 6289 adapter->ring_feature[RING_F_RSS].limit = rss; 6290 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 6291 adapter->max_q_vectors = MAX_Q_VECTORS_82599; 6292 adapter->atr_sample_rate = 20; 6293 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); 6294 adapter->ring_feature[RING_F_FDIR].limit = fdir; 6295 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; 6296 adapter->ring_feature[RING_F_VMDQ].limit = 1; 6297 #ifdef CONFIG_IXGBE_DCA 6298 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; 6299 #endif 6300 #ifdef CONFIG_IXGBE_DCB 6301 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; 6302 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 6303 #endif 6304 #ifdef IXGBE_FCOE 6305 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 6306 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 6307 #ifdef CONFIG_IXGBE_DCB 6308 /* Default traffic class to use for FCoE */ 6309 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 6310 #endif /* CONFIG_IXGBE_DCB */ 6311 #endif /* IXGBE_FCOE */ 6312 6313 /* initialize static ixgbe jump table entries */ 6314 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), 6315 GFP_KERNEL); 6316 if (!adapter->jump_tables[0]) 6317 return -ENOMEM; 6318 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; 6319 6320 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) 6321 adapter->jump_tables[i] = NULL; 6322 6323 adapter->mac_table = kcalloc(hw->mac.num_rar_entries, 6324 sizeof(struct ixgbe_mac_addr), 6325 GFP_KERNEL); 6326 if (!adapter->mac_table) 6327 return -ENOMEM; 6328 6329 if (ixgbe_init_rss_key(adapter)) 6330 return -ENOMEM; 6331 6332 adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL); 6333 if (!adapter->af_xdp_zc_qps) 6334 return -ENOMEM; 6335 6336 /* Set MAC specific capability flags and exceptions */ 6337 switch (hw->mac.type) { 6338 case ixgbe_mac_82598EB: 6339 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; 6340 6341 if (hw->device_id == IXGBE_DEV_ID_82598AT) 6342 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 6343 6344 adapter->max_q_vectors = MAX_Q_VECTORS_82598; 6345 adapter->ring_feature[RING_F_FDIR].limit = 0; 6346 adapter->atr_sample_rate = 0; 6347 adapter->fdir_pballoc = 0; 6348 #ifdef IXGBE_FCOE 6349 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 6350 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 6351 #ifdef CONFIG_IXGBE_DCB 6352 adapter->fcoe.up = 0; 6353 #endif /* IXGBE_DCB */ 6354 #endif /* IXGBE_FCOE */ 6355 break; 6356 case ixgbe_mac_82599EB: 6357 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 6358 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 6359 break; 6360 case ixgbe_mac_X540: 6361 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); 6362 if (fwsm & IXGBE_FWSM_TS_ENABLED) 6363 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 6364 break; 6365 case ixgbe_mac_x550em_a: 6366 switch (hw->device_id) { 6367 case IXGBE_DEV_ID_X550EM_A_1G_T: 6368 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 
6369 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 6370 break; 6371 default: 6372 break; 6373 } 6374 fallthrough; 6375 case ixgbe_mac_X550EM_x: 6376 #ifdef CONFIG_IXGBE_DCB 6377 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; 6378 #endif 6379 #ifdef IXGBE_FCOE 6380 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 6381 #ifdef CONFIG_IXGBE_DCB 6382 adapter->fcoe.up = 0; 6383 #endif /* IXGBE_DCB */ 6384 #endif /* IXGBE_FCOE */ 6385 fallthrough; 6386 case ixgbe_mac_X550: 6387 if (hw->mac.type == ixgbe_mac_X550) 6388 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 6389 #ifdef CONFIG_IXGBE_DCA 6390 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; 6391 #endif 6392 break; 6393 default: 6394 break; 6395 } 6396 6397 #ifdef IXGBE_FCOE 6398 /* FCoE support exists, always init the FCoE lock */ 6399 spin_lock_init(&adapter->fcoe.lock); 6400 6401 #endif 6402 /* n-tuple support exists, always init our spinlock */ 6403 spin_lock_init(&adapter->fdir_perfect_lock); 6404 6405 #ifdef CONFIG_IXGBE_DCB 6406 ixgbe_init_dcb(adapter); 6407 #endif 6408 ixgbe_init_ipsec_offload(adapter); 6409 6410 /* default flow control settings */ 6411 hw->fc.requested_mode = ixgbe_fc_full; 6412 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ 6413 ixgbe_pbthresh_setup(adapter); 6414 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 6415 hw->fc.send_xon = true; 6416 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); 6417 6418 #ifdef CONFIG_PCI_IOV 6419 if (max_vfs > 0) 6420 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); 6421 6422 /* assign number of SR-IOV VFs */ 6423 if (hw->mac.type != ixgbe_mac_82598EB) { 6424 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { 6425 max_vfs = 0; 6426 e_dev_warn("max_vfs parameter out of range. 
Not assigning any SR-IOV VFs\n"); 6427 } 6428 } 6429 #endif /* CONFIG_PCI_IOV */ 6430 6431 /* enable itr by default in dynamic mode */ 6432 adapter->rx_itr_setting = 1; 6433 adapter->tx_itr_setting = 1; 6434 6435 /* set default ring sizes */ 6436 adapter->tx_ring_count = IXGBE_DEFAULT_TXD; 6437 adapter->rx_ring_count = IXGBE_DEFAULT_RXD; 6438 6439 /* set default work limits */ 6440 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; 6441 6442 /* initialize eeprom parameters */ 6443 if (ixgbe_init_eeprom_params_generic(hw)) { 6444 e_dev_err("EEPROM initialization failed\n"); 6445 return -EIO; 6446 } 6447 6448 /* PF holds first pool slot */ 6449 set_bit(0, adapter->fwd_bitmask); 6450 set_bit(__IXGBE_DOWN, &adapter->state); 6451 6452 return 0; 6453 } 6454 6455 /** 6456 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 6457 * @tx_ring: tx descriptor ring (for a specific queue) to setup 6458 * 6459 * Return 0 on success, negative on failure 6460 **/ 6461 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) 6462 { 6463 struct device *dev = tx_ring->dev; 6464 int orig_node = dev_to_node(dev); 6465 int ring_node = NUMA_NO_NODE; 6466 int size; 6467 6468 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 6469 6470 if (tx_ring->q_vector) 6471 ring_node = tx_ring->q_vector->numa_node; 6472 6473 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); 6474 if (!tx_ring->tx_buffer_info) 6475 tx_ring->tx_buffer_info = vmalloc(size); 6476 if (!tx_ring->tx_buffer_info) 6477 goto err; 6478 6479 /* round up to nearest 4K */ 6480 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 6481 tx_ring->size = ALIGN(tx_ring->size, 4096); 6482 6483 set_dev_node(dev, ring_node); 6484 tx_ring->desc = dma_alloc_coherent(dev, 6485 tx_ring->size, 6486 &tx_ring->dma, 6487 GFP_KERNEL); 6488 set_dev_node(dev, orig_node); 6489 if (!tx_ring->desc) 6490 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 6491 &tx_ring->dma, GFP_KERNEL); 6492 if (!tx_ring->desc) 6493 goto err; 6494 6495 tx_ring->next_to_use = 0; 6496 tx_ring->next_to_clean = 0; 6497 return 0; 6498 6499 err: 6500 vfree(tx_ring->tx_buffer_info); 6501 tx_ring->tx_buffer_info = NULL; 6502 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); 6503 return -ENOMEM; 6504 } 6505 6506 /** 6507 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources 6508 * @adapter: board private structure 6509 * 6510 * If this function returns with an error, then it's possible one or 6511 * more of the rings is populated (while the rest are not). It is the 6512 * callers duty to clean those orphaned rings. 
6513 * 6514 * Return 0 on success, negative on failure 6515 **/ 6516 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) 6517 { 6518 int i, j = 0, err = 0; 6519 6520 for (i = 0; i < adapter->num_tx_queues; i++) { 6521 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); 6522 if (!err) 6523 continue; 6524 6525 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 6526 goto err_setup_tx; 6527 } 6528 for (j = 0; j < adapter->num_xdp_queues; j++) { 6529 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]); 6530 if (!err) 6531 continue; 6532 6533 e_err(probe, "Allocation for Tx Queue %u failed\n", j); 6534 goto err_setup_tx; 6535 } 6536 6537 return 0; 6538 err_setup_tx: 6539 /* rewind the index freeing the rings as we go */ 6540 while (j--) 6541 ixgbe_free_tx_resources(adapter->xdp_ring[j]); 6542 while (i--) 6543 ixgbe_free_tx_resources(adapter->tx_ring[i]); 6544 return err; 6545 } 6546 6547 static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring) 6548 { 6549 struct ixgbe_q_vector *q_vector = rx_ring->q_vector; 6550 6551 return q_vector ? q_vector->napi.napi_id : 0; 6552 } 6553 6554 /** 6555 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 6556 * @adapter: pointer to ixgbe_adapter 6557 * @rx_ring: rx descriptor ring (for a specific queue) to setup 6558 * 6559 * Returns 0 on success, negative on failure 6560 **/ 6561 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 6562 struct ixgbe_ring *rx_ring) 6563 { 6564 struct device *dev = rx_ring->dev; 6565 int orig_node = dev_to_node(dev); 6566 int ring_node = NUMA_NO_NODE; 6567 int size; 6568 6569 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 6570 6571 if (rx_ring->q_vector) 6572 ring_node = rx_ring->q_vector->numa_node; 6573 6574 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); 6575 if (!rx_ring->rx_buffer_info) 6576 rx_ring->rx_buffer_info = vmalloc(size); 6577 if (!rx_ring->rx_buffer_info) 6578 goto err; 6579 6580 /* Round up to nearest 4K */ 6581 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 6582 rx_ring->size = ALIGN(rx_ring->size, 4096); 6583 6584 set_dev_node(dev, ring_node); 6585 rx_ring->desc = dma_alloc_coherent(dev, 6586 rx_ring->size, 6587 &rx_ring->dma, 6588 GFP_KERNEL); 6589 set_dev_node(dev, orig_node); 6590 if (!rx_ring->desc) 6591 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 6592 &rx_ring->dma, GFP_KERNEL); 6593 if (!rx_ring->desc) 6594 goto err; 6595 6596 rx_ring->next_to_clean = 0; 6597 rx_ring->next_to_use = 0; 6598 6599 /* XDP RX-queue info */ 6600 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, 6601 rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) 6602 goto err; 6603 6604 rx_ring->xdp_prog = adapter->xdp_prog; 6605 6606 return 0; 6607 err: 6608 vfree(rx_ring->rx_buffer_info); 6609 rx_ring->rx_buffer_info = NULL; 6610 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); 6611 return -ENOMEM; 6612 } 6613 6614 /** 6615 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources 6616 * @adapter: board private structure 6617 * 6618 * If this function returns with an error, then it's possible one or 6619 * more of the rings is populated (while the rest are not). It is the 6620 * callers duty to clean those orphaned rings. 
6621 * 6622 * Return 0 on success, negative on failure 6623 **/ 6624 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 6625 { 6626 int i, err = 0; 6627 6628 for (i = 0; i < adapter->num_rx_queues; i++) { 6629 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); 6630 if (!err) 6631 continue; 6632 6633 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 6634 goto err_setup_rx; 6635 } 6636 6637 #ifdef IXGBE_FCOE 6638 err = ixgbe_setup_fcoe_ddp_resources(adapter); 6639 if (!err) 6640 #endif 6641 return 0; 6642 err_setup_rx: 6643 /* rewind the index freeing the rings as we go */ 6644 while (i--) 6645 ixgbe_free_rx_resources(adapter->rx_ring[i]); 6646 return err; 6647 } 6648 6649 /** 6650 * ixgbe_free_tx_resources - Free Tx Resources per Queue 6651 * @tx_ring: Tx descriptor ring for a specific queue 6652 * 6653 * Free all transmit software resources 6654 **/ 6655 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) 6656 { 6657 ixgbe_clean_tx_ring(tx_ring); 6658 6659 vfree(tx_ring->tx_buffer_info); 6660 tx_ring->tx_buffer_info = NULL; 6661 6662 /* if not set, then don't free */ 6663 if (!tx_ring->desc) 6664 return; 6665 6666 dma_free_coherent(tx_ring->dev, tx_ring->size, 6667 tx_ring->desc, tx_ring->dma); 6668 6669 tx_ring->desc = NULL; 6670 } 6671 6672 /** 6673 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues 6674 * @adapter: board private structure 6675 * 6676 * Free all transmit software resources 6677 **/ 6678 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) 6679 { 6680 int i; 6681 6682 for (i = 0; i < adapter->num_tx_queues; i++) 6683 if (adapter->tx_ring[i]->desc) 6684 ixgbe_free_tx_resources(adapter->tx_ring[i]); 6685 for (i = 0; i < adapter->num_xdp_queues; i++) 6686 if (adapter->xdp_ring[i]->desc) 6687 ixgbe_free_tx_resources(adapter->xdp_ring[i]); 6688 } 6689 6690 /** 6691 * ixgbe_free_rx_resources - Free Rx Resources 6692 * @rx_ring: ring to clean the resources from 6693 * 6694 * Free all receive software resources 6695 **/ 6696 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) 6697 { 6698 ixgbe_clean_rx_ring(rx_ring); 6699 6700 rx_ring->xdp_prog = NULL; 6701 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 6702 vfree(rx_ring->rx_buffer_info); 6703 rx_ring->rx_buffer_info = NULL; 6704 6705 /* if not set, then don't free */ 6706 if (!rx_ring->desc) 6707 return; 6708 6709 dma_free_coherent(rx_ring->dev, rx_ring->size, 6710 rx_ring->desc, rx_ring->dma); 6711 6712 rx_ring->desc = NULL; 6713 } 6714 6715 /** 6716 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues 6717 * @adapter: board private structure 6718 * 6719 * Free all receive software resources 6720 **/ 6721 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) 6722 { 6723 int i; 6724 6725 #ifdef IXGBE_FCOE 6726 ixgbe_free_fcoe_ddp_resources(adapter); 6727 6728 #endif 6729 for (i = 0; i < adapter->num_rx_queues; i++) 6730 if (adapter->rx_ring[i]->desc) 6731 ixgbe_free_rx_resources(adapter->rx_ring[i]); 6732 } 6733 6734 /** 6735 * ixgbe_change_mtu - Change the Maximum Transfer Unit 6736 * @netdev: network interface device structure 6737 * @new_mtu: new value for maximum frame size 6738 * 6739 * Returns 0 on success, negative on failure 6740 **/ 6741 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 6742 { 6743 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6744 6745 if (adapter->xdp_prog) { 6746 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 6747 VLAN_HLEN; 6748 int i; 6749 6750 for (i = 0; i < 
adapter->num_rx_queues; i++) { 6751 struct ixgbe_ring *ring = adapter->rx_ring[i]; 6752 6753 if (new_frame_size > ixgbe_rx_bufsz(ring)) { 6754 e_warn(probe, "Requested MTU size is not supported with XDP\n"); 6755 return -EINVAL; 6756 } 6757 } 6758 } 6759 6760 /* 6761 * For 82599EB we cannot allow legacy VFs to enable their receive 6762 * paths when MTU greater than 1500 is configured. So display a 6763 * warning that legacy VFs will be disabled. 6764 */ 6765 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && 6766 (adapter->hw.mac.type == ixgbe_mac_82599EB) && 6767 (new_mtu > ETH_DATA_LEN)) 6768 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); 6769 6770 netdev_dbg(netdev, "changing MTU from %d to %d\n", 6771 netdev->mtu, new_mtu); 6772 6773 /* must set new MTU before calling down or up */ 6774 netdev->mtu = new_mtu; 6775 6776 if (netif_running(netdev)) 6777 ixgbe_reinit_locked(adapter); 6778 6779 return 0; 6780 } 6781 6782 /** 6783 * ixgbe_open - Called when a network interface is made active 6784 * @netdev: network interface device structure 6785 * 6786 * Returns 0 on success, negative value on failure 6787 * 6788 * The open entry point is called when a network interface is made 6789 * active by the system (IFF_UP). At this point all resources needed 6790 * for transmit and receive operations are allocated, the interrupt 6791 * handler is registered with the OS, the watchdog timer is started, 6792 * and the stack is notified that the interface is ready. 6793 **/ 6794 int ixgbe_open(struct net_device *netdev) 6795 { 6796 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6797 struct ixgbe_hw *hw = &adapter->hw; 6798 int err, queues; 6799 6800 /* disallow open during test */ 6801 if (test_bit(__IXGBE_TESTING, &adapter->state)) 6802 return -EBUSY; 6803 6804 netif_carrier_off(netdev); 6805 6806 /* allocate transmit descriptors */ 6807 err = ixgbe_setup_all_tx_resources(adapter); 6808 if (err) 6809 goto err_setup_tx; 6810 6811 /* allocate receive descriptors */ 6812 err = ixgbe_setup_all_rx_resources(adapter); 6813 if (err) 6814 goto err_setup_rx; 6815 6816 ixgbe_configure(adapter); 6817 6818 err = ixgbe_request_irq(adapter); 6819 if (err) 6820 goto err_req_irq; 6821 6822 /* Notify the stack of the actual queue counts. 
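	 * If either netif_set_real_num_tx_queues() or
	 * netif_set_real_num_rx_queues() fails, the open path unwinds
	 * through err_set_queues below.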
*/ 6823 queues = adapter->num_tx_queues; 6824 err = netif_set_real_num_tx_queues(netdev, queues); 6825 if (err) 6826 goto err_set_queues; 6827 6828 queues = adapter->num_rx_queues; 6829 err = netif_set_real_num_rx_queues(netdev, queues); 6830 if (err) 6831 goto err_set_queues; 6832 6833 ixgbe_ptp_init(adapter); 6834 6835 ixgbe_up_complete(adapter); 6836 6837 udp_tunnel_nic_reset_ntf(netdev); 6838 6839 return 0; 6840 6841 err_set_queues: 6842 ixgbe_free_irq(adapter); 6843 err_req_irq: 6844 ixgbe_free_all_rx_resources(adapter); 6845 if (hw->phy.ops.set_phy_power && !adapter->wol) 6846 hw->phy.ops.set_phy_power(&adapter->hw, false); 6847 err_setup_rx: 6848 ixgbe_free_all_tx_resources(adapter); 6849 err_setup_tx: 6850 ixgbe_reset(adapter); 6851 6852 return err; 6853 } 6854 6855 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) 6856 { 6857 ixgbe_ptp_suspend(adapter); 6858 6859 if (adapter->hw.phy.ops.enter_lplu) { 6860 adapter->hw.phy.reset_disable = true; 6861 ixgbe_down(adapter); 6862 adapter->hw.phy.ops.enter_lplu(&adapter->hw); 6863 adapter->hw.phy.reset_disable = false; 6864 } else { 6865 ixgbe_down(adapter); 6866 } 6867 6868 ixgbe_free_irq(adapter); 6869 6870 ixgbe_free_all_tx_resources(adapter); 6871 ixgbe_free_all_rx_resources(adapter); 6872 } 6873 6874 /** 6875 * ixgbe_close - Disables a network interface 6876 * @netdev: network interface device structure 6877 * 6878 * Returns 0, this is not allowed to fail 6879 * 6880 * The close entry point is called when an interface is de-activated 6881 * by the OS. The hardware is still under the drivers control, but 6882 * needs to be disabled. A global MAC reset is issued to stop the 6883 * hardware, and all transmit and receive resources are freed. 6884 **/ 6885 int ixgbe_close(struct net_device *netdev) 6886 { 6887 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6888 6889 ixgbe_ptp_stop(adapter); 6890 6891 if (netif_device_present(netdev)) 6892 ixgbe_close_suspend(adapter); 6893 6894 ixgbe_fdir_filter_exit(adapter); 6895 6896 ixgbe_release_hw_control(adapter); 6897 6898 return 0; 6899 } 6900 6901 static int __maybe_unused ixgbe_resume(struct device *dev_d) 6902 { 6903 struct pci_dev *pdev = to_pci_dev(dev_d); 6904 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 6905 struct net_device *netdev = adapter->netdev; 6906 u32 err; 6907 6908 adapter->hw.hw_addr = adapter->io_addr; 6909 6910 err = pci_enable_device_mem(pdev); 6911 if (err) { 6912 e_dev_err("Cannot enable PCI device from suspend\n"); 6913 return err; 6914 } 6915 smp_mb__before_atomic(); 6916 clear_bit(__IXGBE_DISABLED, &adapter->state); 6917 pci_set_master(pdev); 6918 6919 device_wakeup_disable(dev_d); 6920 6921 ixgbe_reset(adapter); 6922 6923 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 6924 6925 rtnl_lock(); 6926 err = ixgbe_init_interrupt_scheme(adapter); 6927 if (!err && netif_running(netdev)) 6928 err = ixgbe_open(netdev); 6929 6930 6931 if (!err) 6932 netif_device_attach(netdev); 6933 rtnl_unlock(); 6934 6935 return err; 6936 } 6937 6938 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 6939 { 6940 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 6941 struct net_device *netdev = adapter->netdev; 6942 struct ixgbe_hw *hw = &adapter->hw; 6943 u32 ctrl; 6944 u32 wufc = adapter->wol; 6945 6946 rtnl_lock(); 6947 netif_device_detach(netdev); 6948 6949 if (netif_running(netdev)) 6950 ixgbe_close_suspend(adapter); 6951 6952 ixgbe_clear_interrupt_scheme(adapter); 6953 rtnl_unlock(); 6954 6955 if (hw->mac.ops.stop_link_on_d3) 6956 
hw->mac.ops.stop_link_on_d3(hw); 6957 6958 if (wufc) { 6959 u32 fctrl; 6960 6961 ixgbe_set_rx_mode(netdev); 6962 6963 /* enable the optics for 82599 SFP+ fiber as we can WoL */ 6964 if (hw->mac.ops.enable_tx_laser) 6965 hw->mac.ops.enable_tx_laser(hw); 6966 6967 /* enable the reception of multicast packets */ 6968 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6969 fctrl |= IXGBE_FCTRL_MPE; 6970 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 6971 6972 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 6973 ctrl |= IXGBE_CTRL_GIO_DIS; 6974 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 6975 6976 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); 6977 } else { 6978 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 6979 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 6980 } 6981 6982 switch (hw->mac.type) { 6983 case ixgbe_mac_82598EB: 6984 pci_wake_from_d3(pdev, false); 6985 break; 6986 case ixgbe_mac_82599EB: 6987 case ixgbe_mac_X540: 6988 case ixgbe_mac_X550: 6989 case ixgbe_mac_X550EM_x: 6990 case ixgbe_mac_x550em_a: 6991 pci_wake_from_d3(pdev, !!wufc); 6992 break; 6993 default: 6994 break; 6995 } 6996 6997 *enable_wake = !!wufc; 6998 if (hw->phy.ops.set_phy_power && !*enable_wake) 6999 hw->phy.ops.set_phy_power(hw, false); 7000 7001 ixgbe_release_hw_control(adapter); 7002 7003 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 7004 pci_disable_device(pdev); 7005 7006 return 0; 7007 } 7008 7009 static int __maybe_unused ixgbe_suspend(struct device *dev_d) 7010 { 7011 struct pci_dev *pdev = to_pci_dev(dev_d); 7012 int retval; 7013 bool wake; 7014 7015 retval = __ixgbe_shutdown(pdev, &wake); 7016 7017 device_set_wakeup_enable(dev_d, wake); 7018 7019 return retval; 7020 } 7021 7022 static void ixgbe_shutdown(struct pci_dev *pdev) 7023 { 7024 bool wake; 7025 7026 __ixgbe_shutdown(pdev, &wake); 7027 7028 if (system_state == SYSTEM_POWER_OFF) { 7029 pci_wake_from_d3(pdev, wake); 7030 pci_set_power_state(pdev, PCI_D3hot); 7031 } 7032 } 7033 7034 /** 7035 * ixgbe_update_stats - Update the board statistics counters. 
7036 * @adapter: board private structure 7037 **/ 7038 void ixgbe_update_stats(struct ixgbe_adapter *adapter) 7039 { 7040 struct net_device *netdev = adapter->netdev; 7041 struct ixgbe_hw *hw = &adapter->hw; 7042 struct ixgbe_hw_stats *hwstats = &adapter->stats; 7043 u64 total_mpc = 0; 7044 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 7045 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; 7046 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; 7047 u64 alloc_rx_page = 0; 7048 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; 7049 7050 if (test_bit(__IXGBE_DOWN, &adapter->state) || 7051 test_bit(__IXGBE_RESETTING, &adapter->state)) 7052 return; 7053 7054 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 7055 u64 rsc_count = 0; 7056 u64 rsc_flush = 0; 7057 for (i = 0; i < adapter->num_rx_queues; i++) { 7058 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; 7059 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; 7060 } 7061 adapter->rsc_total_count = rsc_count; 7062 adapter->rsc_total_flush = rsc_flush; 7063 } 7064 7065 for (i = 0; i < adapter->num_rx_queues; i++) { 7066 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); 7067 7068 if (!rx_ring) 7069 continue; 7070 non_eop_descs += rx_ring->rx_stats.non_eop_descs; 7071 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; 7072 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; 7073 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; 7074 hw_csum_rx_error += rx_ring->rx_stats.csum_err; 7075 bytes += rx_ring->stats.bytes; 7076 packets += rx_ring->stats.packets; 7077 } 7078 adapter->non_eop_descs = non_eop_descs; 7079 adapter->alloc_rx_page = alloc_rx_page; 7080 adapter->alloc_rx_page_failed = alloc_rx_page_failed; 7081 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; 7082 adapter->hw_csum_rx_error = hw_csum_rx_error; 7083 netdev->stats.rx_bytes = bytes; 7084 netdev->stats.rx_packets = packets; 7085 7086 bytes = 0; 7087 packets = 0; 7088 /* gather some stats to the adapter struct that are per queue */ 7089 for (i = 0; i < adapter->num_tx_queues; i++) { 7090 struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); 7091 7092 if (!tx_ring) 7093 continue; 7094 restart_queue += tx_ring->tx_stats.restart_queue; 7095 tx_busy += tx_ring->tx_stats.tx_busy; 7096 bytes += tx_ring->stats.bytes; 7097 packets += tx_ring->stats.packets; 7098 } 7099 for (i = 0; i < adapter->num_xdp_queues; i++) { 7100 struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); 7101 7102 if (!xdp_ring) 7103 continue; 7104 restart_queue += xdp_ring->tx_stats.restart_queue; 7105 tx_busy += xdp_ring->tx_stats.tx_busy; 7106 bytes += xdp_ring->stats.bytes; 7107 packets += xdp_ring->stats.packets; 7108 } 7109 adapter->restart_queue = restart_queue; 7110 adapter->tx_busy = tx_busy; 7111 netdev->stats.tx_bytes = bytes; 7112 netdev->stats.tx_packets = packets; 7113 7114 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 7115 7116 /* 8 register reads */ 7117 for (i = 0; i < 8; i++) { 7118 /* for packet buffers not used, the register should read 0 */ 7119 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 7120 missed_rx += mpc; 7121 hwstats->mpc[i] += mpc; 7122 total_mpc += hwstats->mpc[i]; 7123 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 7124 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 7125 switch (hw->mac.type) { 7126 case ixgbe_mac_82598EB: 7127 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 7128 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 7129 hwstats->qbrc[i] += 
IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 7130 hwstats->pxonrxc[i] += 7131 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 7132 break; 7133 case ixgbe_mac_82599EB: 7134 case ixgbe_mac_X540: 7135 case ixgbe_mac_X550: 7136 case ixgbe_mac_X550EM_x: 7137 case ixgbe_mac_x550em_a: 7138 hwstats->pxonrxc[i] += 7139 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 7140 break; 7141 default: 7142 break; 7143 } 7144 } 7145 7146 /*16 register reads */ 7147 for (i = 0; i < 16; i++) { 7148 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 7149 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 7150 if ((hw->mac.type == ixgbe_mac_82599EB) || 7151 (hw->mac.type == ixgbe_mac_X540) || 7152 (hw->mac.type == ixgbe_mac_X550) || 7153 (hw->mac.type == ixgbe_mac_X550EM_x) || 7154 (hw->mac.type == ixgbe_mac_x550em_a)) { 7155 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 7156 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ 7157 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 7158 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */ 7159 } 7160 } 7161 7162 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 7163 /* work around hardware counting issue */ 7164 hwstats->gprc -= missed_rx; 7165 7166 ixgbe_update_xoff_received(adapter); 7167 7168 /* 82598 hardware only has a 32 bit counter in the high register */ 7169 switch (hw->mac.type) { 7170 case ixgbe_mac_82598EB: 7171 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 7172 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 7173 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 7174 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 7175 break; 7176 case ixgbe_mac_X540: 7177 case ixgbe_mac_X550: 7178 case ixgbe_mac_X550EM_x: 7179 case ixgbe_mac_x550em_a: 7180 /* OS2BMC stats are X540 and later */ 7181 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); 7182 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); 7183 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); 7184 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); 7185 fallthrough; 7186 case ixgbe_mac_82599EB: 7187 for (i = 0; i < 16; i++) 7188 adapter->hw_rx_no_dma_resources += 7189 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 7190 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 7191 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 7192 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 7193 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ 7194 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 7195 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 7196 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 7197 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 7198 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 7199 #ifdef IXGBE_FCOE 7200 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 7201 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 7202 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 7203 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 7204 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 7205 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 7206 /* Add up per cpu counters for total ddp aloc fail */ 7207 if (adapter->fcoe.ddp_pool) { 7208 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 7209 struct ixgbe_fcoe_ddp_pool *ddp_pool; 7210 unsigned int cpu; 7211 u64 noddp = 0, noddp_ext_buff = 0; 7212 for_each_possible_cpu(cpu) { 7213 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); 7214 noddp += ddp_pool->noddp; 7215 noddp_ext_buff += ddp_pool->noddp_ext_buff; 7216 } 7217 hwstats->fcoe_noddp = noddp; 7218 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; 7219 } 
7220 #endif /* IXGBE_FCOE */ 7221 break; 7222 default: 7223 break; 7224 } 7225 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 7226 hwstats->bprc += bprc; 7227 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 7228 if (hw->mac.type == ixgbe_mac_82598EB) 7229 hwstats->mprc -= bprc; 7230 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 7231 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 7232 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 7233 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 7234 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 7235 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 7236 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 7237 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 7238 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 7239 hwstats->lxontxc += lxon; 7240 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 7241 hwstats->lxofftxc += lxoff; 7242 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 7243 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 7244 /* 7245 * 82598 errata - tx of flow control packets is included in tx counters 7246 */ 7247 xon_off_tot = lxon + lxoff; 7248 hwstats->gptc -= xon_off_tot; 7249 hwstats->mptc -= xon_off_tot; 7250 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); 7251 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 7252 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 7253 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 7254 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 7255 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 7256 hwstats->ptc64 -= xon_off_tot; 7257 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 7258 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 7259 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 7260 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 7261 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 7262 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 7263 7264 /* Fill out the OS statistics structure */ 7265 netdev->stats.multicast = hwstats->mprc; 7266 7267 /* Rx Errors */ 7268 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; 7269 netdev->stats.rx_dropped = 0; 7270 netdev->stats.rx_length_errors = hwstats->rlec; 7271 netdev->stats.rx_crc_errors = hwstats->crcerrs; 7272 netdev->stats.rx_missed_errors = total_mpc; 7273 } 7274 7275 /** 7276 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table 7277 * @adapter: pointer to the device adapter structure 7278 **/ 7279 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) 7280 { 7281 struct ixgbe_hw *hw = &adapter->hw; 7282 int i; 7283 7284 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) 7285 return; 7286 7287 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 7288 7289 /* if interface is down do nothing */ 7290 if (test_bit(__IXGBE_DOWN, &adapter->state)) 7291 return; 7292 7293 /* do nothing if we are not using signature filters */ 7294 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) 7295 return; 7296 7297 adapter->fdir_overflow++; 7298 7299 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 7300 for (i = 0; i < adapter->num_tx_queues; i++) 7301 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 7302 &(adapter->tx_ring[i]->state)); 7303 for (i = 0; i < adapter->num_xdp_queues; i++) 7304 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 7305 &adapter->xdp_ring[i]->state); 7306 /* re-enable flow director interrupts */ 7307 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); 7308 } else { 7309 e_err(probe, "failed to finish FDIR re-initialization, " 7310 "ignored adding FDIR ATR filters\n"); 7311 } 
7312 } 7313 7314 /** 7315 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts 7316 * @adapter: pointer to the device adapter structure 7317 * 7318 * This function serves two purposes. First it strobes the interrupt lines 7319 * in order to make certain interrupts are occurring. Secondly it sets the 7320 * bits needed to check for TX hangs. As a result we should immediately 7321 * determine if a hang has occurred. 7322 */ 7323 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) 7324 { 7325 struct ixgbe_hw *hw = &adapter->hw; 7326 u64 eics = 0; 7327 int i; 7328 7329 /* If we're down, removing or resetting, just bail */ 7330 if (test_bit(__IXGBE_DOWN, &adapter->state) || 7331 test_bit(__IXGBE_REMOVING, &adapter->state) || 7332 test_bit(__IXGBE_RESETTING, &adapter->state)) 7333 return; 7334 7335 /* Force detection of hung controller */ 7336 if (netif_carrier_ok(adapter->netdev)) { 7337 for (i = 0; i < adapter->num_tx_queues; i++) 7338 set_check_for_tx_hang(adapter->tx_ring[i]); 7339 for (i = 0; i < adapter->num_xdp_queues; i++) 7340 set_check_for_tx_hang(adapter->xdp_ring[i]); 7341 } 7342 7343 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 7344 /* 7345 * for legacy and MSI interrupts don't set any bits 7346 * that are enabled for EIAM, because this operation 7347 * would set *both* EIMS and EICS for any bit in EIAM 7348 */ 7349 IXGBE_WRITE_REG(hw, IXGBE_EICS, 7350 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 7351 } else { 7352 /* get one bit for every active tx/rx interrupt vector */ 7353 for (i = 0; i < adapter->num_q_vectors; i++) { 7354 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 7355 if (qv->rx.ring || qv->tx.ring) 7356 eics |= BIT_ULL(i); 7357 } 7358 } 7359 7360 /* Cause software interrupt to ensure rings are cleaned */ 7361 ixgbe_irq_rearm_queues(adapter, eics); 7362 } 7363 7364 /** 7365 * ixgbe_watchdog_update_link - update the link status 7366 * @adapter: pointer to the device adapter structure 7367 **/ 7368 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) 7369 { 7370 struct ixgbe_hw *hw = &adapter->hw; 7371 u32 link_speed = adapter->link_speed; 7372 bool link_up = adapter->link_up; 7373 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; 7374 7375 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) 7376 return; 7377 7378 if (hw->mac.ops.check_link) { 7379 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 7380 } else { 7381 /* always assume link is up, if no check link function */ 7382 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 7383 link_up = true; 7384 } 7385 7386 if (adapter->ixgbe_ieee_pfc) 7387 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); 7388 7389 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { 7390 hw->mac.ops.fc_enable(hw); 7391 ixgbe_set_rx_drop_en(adapter); 7392 } 7393 7394 if (link_up || 7395 time_after(jiffies, (adapter->link_check_timeout + 7396 IXGBE_TRY_LINK_TIMEOUT))) { 7397 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 7398 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); 7399 IXGBE_WRITE_FLUSH(hw); 7400 } 7401 7402 adapter->link_up = link_up; 7403 adapter->link_speed = link_speed; 7404 } 7405 7406 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) 7407 { 7408 #ifdef CONFIG_IXGBE_DCB 7409 struct net_device *netdev = adapter->netdev; 7410 struct dcb_app app = { 7411 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, 7412 .protocol = 0, 7413 }; 7414 u8 up = 0; 7415 7416 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) 7417 up = dcb_ieee_getapp_mask(netdev, &app); 
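/* Use the lowest user priority set in the IEEE APP bitmap as the default:
 * dcb_ieee_getapp_mask() returns a bitmask of priorities for the APP entry
 * and ffs() is 1-based, hence the "- 1"; fall back to priority 0 when no
 * higher priority is advertised.
 */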
7418 7419 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; 7420 #endif 7421 } 7422 7423 /** 7424 * ixgbe_watchdog_link_is_up - update netif_carrier status and 7425 * print link up message 7426 * @adapter: pointer to the device adapter structure 7427 **/ 7428 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) 7429 { 7430 struct net_device *netdev = adapter->netdev; 7431 struct ixgbe_hw *hw = &adapter->hw; 7432 u32 link_speed = adapter->link_speed; 7433 const char *speed_str; 7434 bool flow_rx, flow_tx; 7435 7436 /* only continue if link was previously down */ 7437 if (netif_carrier_ok(netdev)) 7438 return; 7439 7440 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; 7441 7442 switch (hw->mac.type) { 7443 case ixgbe_mac_82598EB: { 7444 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 7445 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 7446 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); 7447 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 7448 } 7449 break; 7450 case ixgbe_mac_X540: 7451 case ixgbe_mac_X550: 7452 case ixgbe_mac_X550EM_x: 7453 case ixgbe_mac_x550em_a: 7454 case ixgbe_mac_82599EB: { 7455 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 7456 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 7457 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); 7458 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); 7459 } 7460 break; 7461 default: 7462 flow_tx = false; 7463 flow_rx = false; 7464 break; 7465 } 7466 7467 adapter->last_rx_ptp_check = jiffies; 7468 7469 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 7470 ixgbe_ptp_start_cyclecounter(adapter); 7471 7472 switch (link_speed) { 7473 case IXGBE_LINK_SPEED_10GB_FULL: 7474 speed_str = "10 Gbps"; 7475 break; 7476 case IXGBE_LINK_SPEED_5GB_FULL: 7477 speed_str = "5 Gbps"; 7478 break; 7479 case IXGBE_LINK_SPEED_2_5GB_FULL: 7480 speed_str = "2.5 Gbps"; 7481 break; 7482 case IXGBE_LINK_SPEED_1GB_FULL: 7483 speed_str = "1 Gbps"; 7484 break; 7485 case IXGBE_LINK_SPEED_100_FULL: 7486 speed_str = "100 Mbps"; 7487 break; 7488 case IXGBE_LINK_SPEED_10_FULL: 7489 speed_str = "10 Mbps"; 7490 break; 7491 default: 7492 speed_str = "unknown speed"; 7493 break; 7494 } 7495 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, 7496 ((flow_rx && flow_tx) ? "RX/TX" : 7497 (flow_rx ? "RX" : 7498 (flow_tx ? 
"TX" : "None")))); 7499 7500 netif_carrier_on(netdev); 7501 ixgbe_check_vf_rate_limit(adapter); 7502 7503 /* enable transmits */ 7504 netif_tx_wake_all_queues(adapter->netdev); 7505 7506 /* update the default user priority for VFs */ 7507 ixgbe_update_default_up(adapter); 7508 7509 /* ping all the active vfs to let them know link has changed */ 7510 ixgbe_ping_all_vfs(adapter); 7511 } 7512 7513 /** 7514 * ixgbe_watchdog_link_is_down - update netif_carrier status and 7515 * print link down message 7516 * @adapter: pointer to the adapter structure 7517 **/ 7518 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) 7519 { 7520 struct net_device *netdev = adapter->netdev; 7521 struct ixgbe_hw *hw = &adapter->hw; 7522 7523 adapter->link_up = false; 7524 adapter->link_speed = 0; 7525 7526 /* only continue if link was up previously */ 7527 if (!netif_carrier_ok(netdev)) 7528 return; 7529 7530 /* poll for SFP+ cable when link is down */ 7531 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) 7532 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 7533 7534 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) 7535 ixgbe_ptp_start_cyclecounter(adapter); 7536 7537 e_info(drv, "NIC Link is Down\n"); 7538 netif_carrier_off(netdev); 7539 7540 /* ping all the active vfs to let them know link has changed */ 7541 ixgbe_ping_all_vfs(adapter); 7542 } 7543 7544 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) 7545 { 7546 int i; 7547 7548 for (i = 0; i < adapter->num_tx_queues; i++) { 7549 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; 7550 7551 if (tx_ring->next_to_use != tx_ring->next_to_clean) 7552 return true; 7553 } 7554 7555 for (i = 0; i < adapter->num_xdp_queues; i++) { 7556 struct ixgbe_ring *ring = adapter->xdp_ring[i]; 7557 7558 if (ring->next_to_use != ring->next_to_clean) 7559 return true; 7560 } 7561 7562 return false; 7563 } 7564 7565 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) 7566 { 7567 struct ixgbe_hw *hw = &adapter->hw; 7568 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 7569 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); 7570 7571 int i, j; 7572 7573 if (!adapter->num_vfs) 7574 return false; 7575 7576 /* resetting the PF is only needed for MAC before X550 */ 7577 if (hw->mac.type >= ixgbe_mac_X550) 7578 return false; 7579 7580 for (i = 0; i < adapter->num_vfs; i++) { 7581 for (j = 0; j < q_per_pool; j++) { 7582 u32 h, t; 7583 7584 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); 7585 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); 7586 7587 if (h != t) 7588 return true; 7589 } 7590 } 7591 7592 return false; 7593 } 7594 7595 /** 7596 * ixgbe_watchdog_flush_tx - flush queues on link down 7597 * @adapter: pointer to the device adapter structure 7598 **/ 7599 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) 7600 { 7601 if (!netif_carrier_ok(adapter->netdev)) { 7602 if (ixgbe_ring_tx_pending(adapter) || 7603 ixgbe_vf_tx_pending(adapter)) { 7604 /* We've lost link, so the controller stops DMA, 7605 * but we've got queued Tx work that's never going 7606 * to get done, so reset controller to flush Tx. 7607 * (Do the reset outside of interrupt context). 
7608 */ 7609 e_warn(drv, "initiating reset to clear Tx work after link loss\n"); 7610 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); 7611 } 7612 } 7613 } 7614 7615 #ifdef CONFIG_PCI_IOV 7616 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) 7617 { 7618 struct ixgbe_hw *hw = &adapter->hw; 7619 struct pci_dev *pdev = adapter->pdev; 7620 unsigned int vf; 7621 u32 gpc; 7622 7623 if (!(netif_carrier_ok(adapter->netdev))) 7624 return; 7625 7626 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); 7627 if (gpc) /* If incrementing then no need for the check below */ 7628 return; 7629 /* Check to see if a bad DMA write target from an errant or 7630 * malicious VF has caused a PCIe error. If so then we can 7631 * issue a VFLR to the offending VF(s) and then resume without 7632 * requesting a full slot reset. 7633 */ 7634 7635 if (!pdev) 7636 return; 7637 7638 /* check status reg for all VFs owned by this PF */ 7639 for (vf = 0; vf < adapter->num_vfs; ++vf) { 7640 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; 7641 u16 status_reg; 7642 7643 if (!vfdev) 7644 continue; 7645 pci_read_config_word(vfdev, PCI_STATUS, &status_reg); 7646 if (status_reg != IXGBE_FAILED_READ_CFG_WORD && 7647 status_reg & PCI_STATUS_REC_MASTER_ABORT) 7648 pcie_flr(vfdev); 7649 } 7650 } 7651 7652 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) 7653 { 7654 u32 ssvpc; 7655 7656 /* Do not perform spoof check for 82598 or if not in IOV mode */ 7657 if (adapter->hw.mac.type == ixgbe_mac_82598EB || 7658 adapter->num_vfs == 0) 7659 return; 7660 7661 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); 7662 7663 /* 7664 * ssvpc register is cleared on read, if zero then no 7665 * spoofed packets in the last interval. 7666 */ 7667 if (!ssvpc) 7668 return; 7669 7670 e_warn(drv, "%u Spoofed packets detected\n", ssvpc); 7671 } 7672 #else 7673 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) 7674 { 7675 } 7676 7677 static void 7678 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) 7679 { 7680 } 7681 #endif /* CONFIG_PCI_IOV */ 7682 7683 7684 /** 7685 * ixgbe_watchdog_subtask - check and bring link up 7686 * @adapter: pointer to the device adapter structure 7687 **/ 7688 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) 7689 { 7690 /* if interface is down, removing or resetting, do nothing */ 7691 if (test_bit(__IXGBE_DOWN, &adapter->state) || 7692 test_bit(__IXGBE_REMOVING, &adapter->state) || 7693 test_bit(__IXGBE_RESETTING, &adapter->state)) 7694 return; 7695 7696 ixgbe_watchdog_update_link(adapter); 7697 7698 if (adapter->link_up) 7699 ixgbe_watchdog_link_is_up(adapter); 7700 else 7701 ixgbe_watchdog_link_is_down(adapter); 7702 7703 ixgbe_check_for_bad_vf(adapter); 7704 ixgbe_spoof_check(adapter); 7705 ixgbe_update_stats(adapter); 7706 7707 ixgbe_watchdog_flush_tx(adapter); 7708 } 7709 7710 /** 7711 * ixgbe_sfp_detection_subtask - poll for SFP+ cable 7712 * @adapter: the ixgbe adapter structure 7713 **/ 7714 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) 7715 { 7716 struct ixgbe_hw *hw = &adapter->hw; 7717 s32 err; 7718 7719 /* not searching for SFP so there is nothing to do here */ 7720 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && 7721 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) 7722 return; 7723 7724 if (adapter->sfp_poll_time && 7725 time_after(adapter->sfp_poll_time, jiffies)) 7726 return; /* If not yet time to poll for SFP */ 7727 7728 /* someone else is in init, wait until next service event */ 7729 if 
(test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 7730 return; 7731 7732 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; 7733 7734 err = hw->phy.ops.identify_sfp(hw); 7735 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) 7736 goto sfp_out; 7737 7738 if (err == IXGBE_ERR_SFP_NOT_PRESENT) { 7739 /* If no cable is present, then we need to reset 7740 * the next time we find a good cable. */ 7741 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; 7742 } 7743 7744 /* exit on error */ 7745 if (err) 7746 goto sfp_out; 7747 7748 /* exit if reset not needed */ 7749 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) 7750 goto sfp_out; 7751 7752 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; 7753 7754 /* 7755 * A module may be identified correctly, but the EEPROM may not have 7756 * support for that module. setup_sfp() will fail in that case, so 7757 * we should not allow that module to load. 7758 */ 7759 if (hw->mac.type == ixgbe_mac_82598EB) 7760 err = hw->phy.ops.reset(hw); 7761 else 7762 err = hw->mac.ops.setup_sfp(hw); 7763 7764 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) 7765 goto sfp_out; 7766 7767 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 7768 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); 7769 7770 sfp_out: 7771 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 7772 7773 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && 7774 (adapter->netdev->reg_state == NETREG_REGISTERED)) { 7775 e_dev_err("failed to initialize because an unsupported " 7776 "SFP+ module type was detected.\n"); 7777 e_dev_err("Reload the driver after installing a " 7778 "supported module.\n"); 7779 unregister_netdev(adapter->netdev); 7780 } 7781 } 7782 7783 /** 7784 * ixgbe_sfp_link_config_subtask - set up link SFP after module install 7785 * @adapter: the ixgbe adapter structure 7786 **/ 7787 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) 7788 { 7789 struct ixgbe_hw *hw = &adapter->hw; 7790 u32 cap_speed; 7791 u32 speed; 7792 bool autoneg = false; 7793 7794 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) 7795 return; 7796 7797 /* someone else is in init, wait until next service event */ 7798 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 7799 return; 7800 7801 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 7802 7803 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); 7804 7805 /* advertise highest capable link speed */ 7806 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL)) 7807 speed = IXGBE_LINK_SPEED_10GB_FULL; 7808 else 7809 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL | 7810 IXGBE_LINK_SPEED_1GB_FULL); 7811 7812 if (hw->mac.ops.setup_link) 7813 hw->mac.ops.setup_link(hw, speed, true); 7814 7815 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 7816 adapter->link_check_timeout = jiffies; 7817 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); 7818 } 7819 7820 /** 7821 * ixgbe_service_timer - Timer Call-back 7822 * @t: pointer to timer_list structure 7823 **/ 7824 static void ixgbe_service_timer(struct timer_list *t) 7825 { 7826 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer); 7827 unsigned long next_event_offset; 7828 7829 /* poll faster when waiting for link */ 7830 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) 7831 next_event_offset = HZ / 10; 7832 else 7833 next_event_offset = HZ * 2; 7834 7835 /* Reset the timer */ 7836 mod_timer(&adapter->service_timer, next_event_offset + jiffies); 7837 7838 ixgbe_service_event_schedule(adapter); 7839 } 7840 7841 static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter 
*adapter) 7842 { 7843 struct ixgbe_hw *hw = &adapter->hw; 7844 u32 status; 7845 7846 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) 7847 return; 7848 7849 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; 7850 7851 if (!hw->phy.ops.handle_lasi) 7852 return; 7853 7854 status = hw->phy.ops.handle_lasi(&adapter->hw); 7855 if (status != IXGBE_ERR_OVERTEMP) 7856 return; 7857 7858 e_crit(drv, "%s\n", ixgbe_overheat_msg); 7859 } 7860 7861 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) 7862 { 7863 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) 7864 return; 7865 7866 rtnl_lock(); 7867 /* If we're already down, removing or resetting, just bail */ 7868 if (test_bit(__IXGBE_DOWN, &adapter->state) || 7869 test_bit(__IXGBE_REMOVING, &adapter->state) || 7870 test_bit(__IXGBE_RESETTING, &adapter->state)) { 7871 rtnl_unlock(); 7872 return; 7873 } 7874 7875 ixgbe_dump(adapter); 7876 netdev_err(adapter->netdev, "Reset adapter\n"); 7877 adapter->tx_timeout_count++; 7878 7879 ixgbe_reinit_locked(adapter); 7880 rtnl_unlock(); 7881 } 7882 7883 /** 7884 * ixgbe_check_fw_error - Check firmware for errors 7885 * @adapter: the adapter private structure 7886 * 7887 * Check firmware errors in register FWSM 7888 */ 7889 static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) 7890 { 7891 struct ixgbe_hw *hw = &adapter->hw; 7892 u32 fwsm; 7893 7894 /* read fwsm.ext_err_ind register and log errors */ 7895 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); 7896 7897 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || 7898 !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) 7899 e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", 7900 fwsm); 7901 7902 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { 7903 e_dev_err("Firmware recovery mode detected. Limiting functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 7904 return true; 7905 } 7906 7907 return false; 7908 } 7909 7910 /** 7911 * ixgbe_service_task - manages and runs subtasks 7912 * @work: pointer to work_struct containing our data 7913 **/ 7914 static void ixgbe_service_task(struct work_struct *work) 7915 { 7916 struct ixgbe_adapter *adapter = container_of(work, 7917 struct ixgbe_adapter, 7918 service_task); 7919 if (ixgbe_removed(adapter->hw.hw_addr)) { 7920 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 7921 rtnl_lock(); 7922 ixgbe_down(adapter); 7923 rtnl_unlock(); 7924 } 7925 ixgbe_service_event_complete(adapter); 7926 return; 7927 } 7928 if (ixgbe_check_fw_error(adapter)) { 7929 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 7930 unregister_netdev(adapter->netdev); 7931 ixgbe_service_event_complete(adapter); 7932 return; 7933 } 7934 ixgbe_reset_subtask(adapter); 7935 ixgbe_phy_interrupt_subtask(adapter); 7936 ixgbe_sfp_detection_subtask(adapter); 7937 ixgbe_sfp_link_config_subtask(adapter); 7938 ixgbe_check_overtemp_subtask(adapter); 7939 ixgbe_watchdog_subtask(adapter); 7940 ixgbe_fdir_reinit_subtask(adapter); 7941 ixgbe_check_hang_subtask(adapter); 7942 7943 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { 7944 ixgbe_ptp_overflow_check(adapter); 7945 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) 7946 ixgbe_ptp_rx_hang(adapter); 7947 ixgbe_ptp_tx_hang(adapter); 7948 } 7949 7950 ixgbe_service_event_complete(adapter); 7951 } 7952 7953 static int ixgbe_tso(struct ixgbe_ring *tx_ring, 7954 struct ixgbe_tx_buffer *first, 7955 u8 *hdr_len, 7956 struct ixgbe_ipsec_tx_data *itd) 7957 { 7958 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 7959 struct sk_buff *skb = first->skb; 7960 union { 7961 struct iphdr *v4; 7962 struct ipv6hdr *v6; 7963 unsigned char *hdr; 7964 } ip; 7965 union { 7966 struct tcphdr *tcp; 7967 struct udphdr *udp; 7968 unsigned char *hdr; 7969 } l4; 7970 u32 paylen, l4_offset; 7971 u32 fceof_saidx = 0; 7972 int err; 7973 7974 if (skb->ip_summed != CHECKSUM_PARTIAL) 7975 return 0; 7976 7977 if (!skb_is_gso(skb)) 7978 return 0; 7979 7980 err = skb_cow_head(skb, 0); 7981 if (err < 0) 7982 return err; 7983 7984 if (eth_p_mpls(first->protocol)) 7985 ip.hdr = skb_inner_network_header(skb); 7986 else 7987 ip.hdr = skb_network_header(skb); 7988 l4.hdr = skb_checksum_start(skb); 7989 7990 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 7991 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? 7992 IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP; 7993 7994 /* initialize outer IP header fields */ 7995 if (ip.v4->version == 4) { 7996 unsigned char *csum_start = skb_checksum_start(skb); 7997 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 7998 int len = csum_start - trans_start; 7999 8000 /* IP header will have to cancel out any data that 8001 * is not a part of the outer IP header, so set to 8002 * a reverse csum if needed, else init check to 0. 8003 */ 8004 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
8005 csum_fold(csum_partial(trans_start, 8006 len, 0)) : 0; 8007 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 8008 8009 ip.v4->tot_len = 0; 8010 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 8011 IXGBE_TX_FLAGS_CSUM | 8012 IXGBE_TX_FLAGS_IPV4; 8013 } else { 8014 ip.v6->payload_len = 0; 8015 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 8016 IXGBE_TX_FLAGS_CSUM; 8017 } 8018 8019 /* determine offset of inner transport header */ 8020 l4_offset = l4.hdr - skb->data; 8021 8022 /* remove payload length from inner checksum */ 8023 paylen = skb->len - l4_offset; 8024 8025 if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) { 8026 /* compute length of segmentation header */ 8027 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 8028 csum_replace_by_diff(&l4.tcp->check, 8029 (__force __wsum)htonl(paylen)); 8030 } else { 8031 /* compute length of segmentation header */ 8032 *hdr_len = sizeof(*l4.udp) + l4_offset; 8033 csum_replace_by_diff(&l4.udp->check, 8034 (__force __wsum)htonl(paylen)); 8035 } 8036 8037 /* update gso size and bytecount with header size */ 8038 first->gso_segs = skb_shinfo(skb)->gso_segs; 8039 first->bytecount += (first->gso_segs - 1) * *hdr_len; 8040 8041 /* mss_l4len_id: use 0 as index for TSO */ 8042 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; 8043 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 8044 8045 fceof_saidx |= itd->sa_idx; 8046 type_tucmd |= itd->flags | itd->trailer_len; 8047 8048 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 8049 vlan_macip_lens = l4.hdr - ip.hdr; 8050 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; 8051 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 8052 8053 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 8054 mss_l4len_idx); 8055 8056 return 1; 8057 } 8058 8059 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, 8060 struct ixgbe_tx_buffer *first, 8061 struct ixgbe_ipsec_tx_data *itd) 8062 { 8063 struct sk_buff *skb = first->skb; 8064 u32 vlan_macip_lens = 0; 8065 u32 fceof_saidx = 0; 8066 u32 type_tucmd = 0; 8067 8068 if (skb->ip_summed != CHECKSUM_PARTIAL) { 8069 csum_failed: 8070 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | 8071 IXGBE_TX_FLAGS_CC))) 8072 return; 8073 goto no_csum; 8074 } 8075 8076 switch (skb->csum_offset) { 8077 case offsetof(struct tcphdr, check): 8078 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 8079 fallthrough; 8080 case offsetof(struct udphdr, check): 8081 break; 8082 case offsetof(struct sctphdr, checksum): 8083 /* validate that this is actually an SCTP request */ 8084 if (skb_csum_is_sctp(skb)) { 8085 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; 8086 break; 8087 } 8088 fallthrough; 8089 default: 8090 skb_checksum_help(skb); 8091 goto csum_failed; 8092 } 8093 8094 /* update TX checksum flag */ 8095 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 8096 vlan_macip_lens = skb_checksum_start_offset(skb) - 8097 skb_network_offset(skb); 8098 no_csum: 8099 /* vlan_macip_lens: MACLEN, VLAN tag */ 8100 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 8101 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 8102 8103 fceof_saidx |= itd->sa_idx; 8104 type_tucmd |= itd->flags | itd->trailer_len; 8105 8106 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0); 8107 } 8108 8109 #define IXGBE_SET_FLAG(_input, _flag, _result) \ 8110 ((_flag <= _result) ? 
\ 8111 ((u32)(_input & _flag) * (_result / _flag)) : \ 8112 ((u32)(_input & _flag) / (_flag / _result))) 8113 8114 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 8115 { 8116 /* set type for advanced descriptor with frame checksum insertion */ 8117 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | 8118 IXGBE_ADVTXD_DCMD_DEXT | 8119 IXGBE_ADVTXD_DCMD_IFCS; 8120 8121 /* set HW vlan bit if vlan is present */ 8122 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, 8123 IXGBE_ADVTXD_DCMD_VLE); 8124 8125 /* set segmentation enable bits for TSO/FSO */ 8126 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, 8127 IXGBE_ADVTXD_DCMD_TSE); 8128 8129 /* set timestamp bit if present */ 8130 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, 8131 IXGBE_ADVTXD_MAC_TSTAMP); 8132 8133 /* insert frame checksum */ 8134 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); 8135 8136 return cmd_type; 8137 } 8138 8139 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, 8140 u32 tx_flags, unsigned int paylen) 8141 { 8142 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; 8143 8144 /* enable L4 checksum for TSO and TX checksum offload */ 8145 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 8146 IXGBE_TX_FLAGS_CSUM, 8147 IXGBE_ADVTXD_POPTS_TXSM); 8148 8149 /* enable IPv4 checksum for TSO */ 8150 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 8151 IXGBE_TX_FLAGS_IPV4, 8152 IXGBE_ADVTXD_POPTS_IXSM); 8153 8154 /* enable IPsec */ 8155 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 8156 IXGBE_TX_FLAGS_IPSEC, 8157 IXGBE_ADVTXD_POPTS_IPSEC); 8158 8159 /* 8160 * Check Context must be set if Tx switch is enabled, which it 8161 * always is for case where virtual functions are running 8162 */ 8163 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 8164 IXGBE_TX_FLAGS_CC, 8165 IXGBE_ADVTXD_CC); 8166 8167 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 8168 } 8169 8170 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) 8171 { 8172 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 8173 8174 /* Herbert's original patch had: 8175 * smp_mb__after_netif_stop_queue(); 8176 * but since that doesn't exist yet, just open code it. 8177 */ 8178 smp_mb(); 8179 8180 /* We need to check again in a case another CPU has just 8181 * made room available. 8182 */ 8183 if (likely(ixgbe_desc_unused(tx_ring) < size)) 8184 return -EBUSY; 8185 8186 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 8187 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 8188 ++tx_ring->tx_stats.restart_queue; 8189 return 0; 8190 } 8191 8192 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) 8193 { 8194 if (likely(ixgbe_desc_unused(tx_ring) >= size)) 8195 return 0; 8196 8197 return __ixgbe_maybe_stop_tx(tx_ring, size); 8198 } 8199 8200 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, 8201 struct ixgbe_tx_buffer *first, 8202 const u8 hdr_len) 8203 { 8204 struct sk_buff *skb = first->skb; 8205 struct ixgbe_tx_buffer *tx_buffer; 8206 union ixgbe_adv_tx_desc *tx_desc; 8207 skb_frag_t *frag; 8208 dma_addr_t dma; 8209 unsigned int data_len, size; 8210 u32 tx_flags = first->tx_flags; 8211 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); 8212 u16 i = tx_ring->next_to_use; 8213 8214 tx_desc = IXGBE_TX_DESC(tx_ring, i); 8215 8216 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); 8217 8218 size = skb_headlen(skb); 8219 data_len = skb->data_len; 8220 8221 #ifdef IXGBE_FCOE 8222 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 8223 if (data_len < sizeof(struct fcoe_crc_eof)) { 8224 size -= sizeof(struct fcoe_crc_eof) - data_len; 8225 data_len = 0; 8226 } else { 8227 data_len -= sizeof(struct fcoe_crc_eof); 8228 } 8229 } 8230 8231 #endif 8232 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 8233 8234 tx_buffer = first; 8235 8236 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 8237 if (dma_mapping_error(tx_ring->dev, dma)) 8238 goto dma_error; 8239 8240 /* record length, and DMA address */ 8241 dma_unmap_len_set(tx_buffer, len, size); 8242 dma_unmap_addr_set(tx_buffer, dma, dma); 8243 8244 tx_desc->read.buffer_addr = cpu_to_le64(dma); 8245 8246 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { 8247 tx_desc->read.cmd_type_len = 8248 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); 8249 8250 i++; 8251 tx_desc++; 8252 if (i == tx_ring->count) { 8253 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 8254 i = 0; 8255 } 8256 tx_desc->read.olinfo_status = 0; 8257 8258 dma += IXGBE_MAX_DATA_PER_TXD; 8259 size -= IXGBE_MAX_DATA_PER_TXD; 8260 8261 tx_desc->read.buffer_addr = cpu_to_le64(dma); 8262 } 8263 8264 if (likely(!data_len)) 8265 break; 8266 8267 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 8268 8269 i++; 8270 tx_desc++; 8271 if (i == tx_ring->count) { 8272 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 8273 i = 0; 8274 } 8275 tx_desc->read.olinfo_status = 0; 8276 8277 #ifdef IXGBE_FCOE 8278 size = min_t(unsigned int, data_len, skb_frag_size(frag)); 8279 #else 8280 size = skb_frag_size(frag); 8281 #endif 8282 data_len -= size; 8283 8284 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 8285 DMA_TO_DEVICE); 8286 8287 tx_buffer = &tx_ring->tx_buffer_info[i]; 8288 } 8289 8290 /* write last descriptor with RS and EOP bits */ 8291 cmd_type |= size | IXGBE_TXD_CMD; 8292 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 8293 8294 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 8295 8296 /* set the timestamp */ 8297 first->time_stamp = jiffies; 8298 8299 skb_tx_timestamp(skb); 8300 8301 /* 8302 * Force memory writes to complete before letting h/w know there 8303 * are new descriptors to fetch. (Only applicable for weak-ordered 8304 * memory model archs, such as IA-64). 8305 * 8306 * We also need this memory barrier to make certain all of the 8307 * status bits have been updated before next_to_watch is written. 
8308 */ 8309 wmb(); 8310 8311 /* set next_to_watch value indicating a packet is present */ 8312 first->next_to_watch = tx_desc; 8313 8314 i++; 8315 if (i == tx_ring->count) 8316 i = 0; 8317 8318 tx_ring->next_to_use = i; 8319 8320 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); 8321 8322 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 8323 writel(i, tx_ring->tail); 8324 } 8325 8326 return 0; 8327 dma_error: 8328 dev_err(tx_ring->dev, "TX DMA map failed\n"); 8329 8330 /* clear dma mappings for failed tx_buffer_info map */ 8331 for (;;) { 8332 tx_buffer = &tx_ring->tx_buffer_info[i]; 8333 if (dma_unmap_len(tx_buffer, len)) 8334 dma_unmap_page(tx_ring->dev, 8335 dma_unmap_addr(tx_buffer, dma), 8336 dma_unmap_len(tx_buffer, len), 8337 DMA_TO_DEVICE); 8338 dma_unmap_len_set(tx_buffer, len, 0); 8339 if (tx_buffer == first) 8340 break; 8341 if (i == 0) 8342 i += tx_ring->count; 8343 i--; 8344 } 8345 8346 dev_kfree_skb_any(first->skb); 8347 first->skb = NULL; 8348 8349 tx_ring->next_to_use = i; 8350 8351 return -1; 8352 } 8353 8354 static void ixgbe_atr(struct ixgbe_ring *ring, 8355 struct ixgbe_tx_buffer *first) 8356 { 8357 struct ixgbe_q_vector *q_vector = ring->q_vector; 8358 union ixgbe_atr_hash_dword input = { .dword = 0 }; 8359 union ixgbe_atr_hash_dword common = { .dword = 0 }; 8360 union { 8361 unsigned char *network; 8362 struct iphdr *ipv4; 8363 struct ipv6hdr *ipv6; 8364 } hdr; 8365 struct tcphdr *th; 8366 unsigned int hlen; 8367 struct sk_buff *skb; 8368 __be16 vlan_id; 8369 int l4_proto; 8370 8371 /* if ring doesn't have a interrupt vector, cannot perform ATR */ 8372 if (!q_vector) 8373 return; 8374 8375 /* do nothing if sampling is disabled */ 8376 if (!ring->atr_sample_rate) 8377 return; 8378 8379 ring->atr_count++; 8380 8381 /* currently only IPv4/IPv6 with TCP is supported */ 8382 if ((first->protocol != htons(ETH_P_IP)) && 8383 (first->protocol != htons(ETH_P_IPV6))) 8384 return; 8385 8386 /* snag network header to get L4 type and address */ 8387 skb = first->skb; 8388 hdr.network = skb_network_header(skb); 8389 if (unlikely(hdr.network <= skb->data)) 8390 return; 8391 if (skb->encapsulation && 8392 first->protocol == htons(ETH_P_IP) && 8393 hdr.ipv4->protocol == IPPROTO_UDP) { 8394 struct ixgbe_adapter *adapter = q_vector->adapter; 8395 8396 if (unlikely(skb_tail_pointer(skb) < hdr.network + 8397 VXLAN_HEADROOM)) 8398 return; 8399 8400 /* verify the port is recognized as VXLAN */ 8401 if (adapter->vxlan_port && 8402 udp_hdr(skb)->dest == adapter->vxlan_port) 8403 hdr.network = skb_inner_network_header(skb); 8404 8405 if (adapter->geneve_port && 8406 udp_hdr(skb)->dest == adapter->geneve_port) 8407 hdr.network = skb_inner_network_header(skb); 8408 } 8409 8410 /* Make sure we have at least [minimum IPv4 header + TCP] 8411 * or [IPv6 header] bytes 8412 */ 8413 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40)) 8414 return; 8415 8416 /* Currently only IPv4/IPv6 with TCP is supported */ 8417 switch (hdr.ipv4->version) { 8418 case IPVERSION: 8419 /* access ihl as u8 to avoid unaligned access on ia64 */ 8420 hlen = (hdr.network[0] & 0x0F) << 2; 8421 l4_proto = hdr.ipv4->protocol; 8422 break; 8423 case 6: 8424 hlen = hdr.network - skb->data; 8425 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); 8426 hlen -= hdr.network - skb->data; 8427 break; 8428 default: 8429 return; 8430 } 8431 8432 if (l4_proto != IPPROTO_TCP) 8433 return; 8434 8435 if (unlikely(skb_tail_pointer(skb) < hdr.network + 8436 hlen + sizeof(struct tcphdr))) 8437 return; 8438 8439 th = 
(struct tcphdr *)(hdr.network + hlen); 8440 8441 /* skip this packet since the socket is closing */ 8442 if (th->fin) 8443 return; 8444 8445 /* sample on all syn packets or once every atr sample count */ 8446 if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) 8447 return; 8448 8449 /* reset sample count */ 8450 ring->atr_count = 0; 8451 8452 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); 8453 8454 /* 8455 * src and dst are inverted, think how the receiver sees them 8456 * 8457 * The input is broken into two sections, a non-compressed section 8458 * containing vm_pool, vlan_id, and flow_type. The rest of the data 8459 * is XORed together and stored in the compressed dword. 8460 */ 8461 input.formatted.vlan_id = vlan_id; 8462 8463 /* 8464 * since src port and flex bytes occupy the same word XOR them together 8465 * and write the value to source port portion of compressed dword 8466 */ 8467 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) 8468 common.port.src ^= th->dest ^ htons(ETH_P_8021Q); 8469 else 8470 common.port.src ^= th->dest ^ first->protocol; 8471 common.port.dst ^= th->source; 8472 8473 switch (hdr.ipv4->version) { 8474 case IPVERSION: 8475 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 8476 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; 8477 break; 8478 case 6: 8479 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; 8480 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ 8481 hdr.ipv6->saddr.s6_addr32[1] ^ 8482 hdr.ipv6->saddr.s6_addr32[2] ^ 8483 hdr.ipv6->saddr.s6_addr32[3] ^ 8484 hdr.ipv6->daddr.s6_addr32[0] ^ 8485 hdr.ipv6->daddr.s6_addr32[1] ^ 8486 hdr.ipv6->daddr.s6_addr32[2] ^ 8487 hdr.ipv6->daddr.s6_addr32[3]; 8488 break; 8489 default: 8490 break; 8491 } 8492 8493 if (hdr.network != skb_network_header(skb)) 8494 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; 8495 8496 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 8497 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, 8498 input, common, ring->queue_index); 8499 } 8500 8501 #ifdef IXGBE_FCOE 8502 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, 8503 struct net_device *sb_dev) 8504 { 8505 struct ixgbe_adapter *adapter; 8506 struct ixgbe_ring_feature *f; 8507 int txq; 8508 8509 if (sb_dev) { 8510 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 8511 struct net_device *vdev = sb_dev; 8512 8513 txq = vdev->tc_to_txq[tc].offset; 8514 txq += reciprocal_scale(skb_get_hash(skb), 8515 vdev->tc_to_txq[tc].count); 8516 8517 return txq; 8518 } 8519 8520 /* 8521 * only execute the code below if protocol is FCoE 8522 * or FIP and we have FCoE enabled on the adapter 8523 */ 8524 switch (vlan_get_protocol(skb)) { 8525 case htons(ETH_P_FCOE): 8526 case htons(ETH_P_FIP): 8527 adapter = netdev_priv(dev); 8528 8529 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 8530 break; 8531 fallthrough; 8532 default: 8533 return netdev_pick_tx(dev, skb, sb_dev); 8534 } 8535 8536 f = &adapter->ring_feature[RING_F_FCOE]; 8537 8538 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 8539 smp_processor_id(); 8540 8541 while (txq >= f->indices) 8542 txq -= f->indices; 8543 8544 return txq + f->offset; 8545 } 8546 8547 #endif 8548 int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, 8549 struct xdp_frame *xdpf) 8550 { 8551 struct ixgbe_tx_buffer *tx_buffer; 8552 union ixgbe_adv_tx_desc *tx_desc; 8553 u32 len, cmd_type; 8554 dma_addr_t dma; 8555 u16 i; 8556 8557 len = xdpf->len; 8558 8559 if (unlikely(!ixgbe_desc_unused(ring))) 8560 return IXGBE_XDP_CONSUMED; 8561 8562 dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); 8563 if (dma_mapping_error(ring->dev, dma)) 8564 return IXGBE_XDP_CONSUMED; 8565 8566 /* record the location of the first descriptor for this packet */ 8567 tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; 8568 tx_buffer->bytecount = len; 8569 tx_buffer->gso_segs = 1; 8570 tx_buffer->protocol = 0; 8571 8572 i = ring->next_to_use; 8573 tx_desc = IXGBE_TX_DESC(ring, i); 8574 8575 dma_unmap_len_set(tx_buffer, len, len); 8576 dma_unmap_addr_set(tx_buffer, dma, dma); 8577 tx_buffer->xdpf = xdpf; 8578 8579 tx_desc->read.buffer_addr = cpu_to_le64(dma); 8580 8581 /* put descriptor type bits */ 8582 cmd_type = IXGBE_ADVTXD_DTYP_DATA | 8583 IXGBE_ADVTXD_DCMD_DEXT | 8584 IXGBE_ADVTXD_DCMD_IFCS; 8585 cmd_type |= len | IXGBE_TXD_CMD; 8586 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 8587 tx_desc->read.olinfo_status = 8588 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); 8589 8590 /* Avoid any potential race with xdp_xmit and cleanup */ 8591 smp_wmb(); 8592 8593 /* set next_to_watch value indicating a packet is present */ 8594 i++; 8595 if (i == ring->count) 8596 i = 0; 8597 8598 tx_buffer->next_to_watch = tx_desc; 8599 ring->next_to_use = i; 8600 8601 return IXGBE_XDP_TX; 8602 } 8603 8604 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 8605 struct ixgbe_adapter *adapter, 8606 struct ixgbe_ring *tx_ring) 8607 { 8608 struct ixgbe_tx_buffer *first; 8609 int tso; 8610 u32 tx_flags = 0; 8611 unsigned short f; 8612 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 8613 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 }; 8614 __be16 protocol = skb->protocol; 8615 u8 hdr_len = 0; 8616 8617 /* 8618 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 8619 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 8620 * + 2 desc gap to keep tail from touching head, 8621 * + 1 desc for context descriptor, 8622 * otherwise try next time 8623 */ 8624 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 8625 count += TXD_USE_COUNT(skb_frag_size( 8626 &skb_shinfo(skb)->frags[f])); 8627 8628 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { 8629 tx_ring->tx_stats.tx_busy++; 8630 return NETDEV_TX_BUSY; 8631 } 8632 8633 /* record the location of the first descriptor for this packet */ 8634 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 8635 first->skb = skb; 8636 first->bytecount = skb->len; 8637 first->gso_segs = 1; 8638 8639 /* if we have a HW VLAN tag being added default to the HW one */ 8640 if (skb_vlan_tag_present(skb)) { 8641 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; 8642 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; 8643 /* else if it is a SW VLAN check the next protocol and store the tag */ 8644 } else if (protocol == htons(ETH_P_8021Q)) { 8645 struct vlan_hdr *vhdr, _vhdr; 8646 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); 8647 if (!vhdr) 8648 goto out_drop; 8649 8650 tx_flags |= ntohs(vhdr->h_vlan_TCI) << 8651 IXGBE_TX_FLAGS_VLAN_SHIFT; 8652 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; 8653 } 8654 protocol = 
vlan_get_protocol(skb); 8655 8656 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 8657 adapter->ptp_clock) { 8658 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && 8659 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, 8660 &adapter->state)) { 8661 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8662 tx_flags |= IXGBE_TX_FLAGS_TSTAMP; 8663 8664 /* schedule check for Tx timestamp */ 8665 adapter->ptp_tx_skb = skb_get(skb); 8666 adapter->ptp_tx_start = jiffies; 8667 schedule_work(&adapter->ptp_tx_work); 8668 } else { 8669 adapter->tx_hwtstamp_skipped++; 8670 } 8671 } 8672 8673 #ifdef CONFIG_PCI_IOV 8674 /* 8675 * Use the l2switch_enable flag - would be false if the DMA 8676 * Tx switch had been disabled. 8677 */ 8678 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 8679 tx_flags |= IXGBE_TX_FLAGS_CC; 8680 8681 #endif 8682 /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ 8683 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 8684 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || 8685 (skb->priority != TC_PRIO_CONTROL))) { 8686 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; 8687 tx_flags |= (skb->priority & 0x7) << 8688 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; 8689 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { 8690 struct vlan_ethhdr *vhdr; 8691 8692 if (skb_cow_head(skb, 0)) 8693 goto out_drop; 8694 vhdr = (struct vlan_ethhdr *)skb->data; 8695 vhdr->h_vlan_TCI = htons(tx_flags >> 8696 IXGBE_TX_FLAGS_VLAN_SHIFT); 8697 } else { 8698 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; 8699 } 8700 } 8701 8702 /* record initial flags and protocol */ 8703 first->tx_flags = tx_flags; 8704 first->protocol = protocol; 8705 8706 #ifdef IXGBE_FCOE 8707 /* setup tx offload for FCoE */ 8708 if ((protocol == htons(ETH_P_FCOE)) && 8709 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { 8710 tso = ixgbe_fso(tx_ring, first, &hdr_len); 8711 if (tso < 0) 8712 goto out_drop; 8713 8714 goto xmit_fcoe; 8715 } 8716 8717 #endif /* IXGBE_FCOE */ 8718 8719 #ifdef CONFIG_IXGBE_IPSEC 8720 if (xfrm_offload(skb) && 8721 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) 8722 goto out_drop; 8723 #endif 8724 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx); 8725 if (tso < 0) 8726 goto out_drop; 8727 else if (!tso) 8728 ixgbe_tx_csum(tx_ring, first, &ipsec_tx); 8729 8730 /* add the ATR filter if ATR is on */ 8731 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) 8732 ixgbe_atr(tx_ring, first); 8733 8734 #ifdef IXGBE_FCOE 8735 xmit_fcoe: 8736 #endif /* IXGBE_FCOE */ 8737 if (ixgbe_tx_map(tx_ring, first, hdr_len)) 8738 goto cleanup_tx_timestamp; 8739 8740 return NETDEV_TX_OK; 8741 8742 out_drop: 8743 dev_kfree_skb_any(first->skb); 8744 first->skb = NULL; 8745 cleanup_tx_timestamp: 8746 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { 8747 dev_kfree_skb_any(adapter->ptp_tx_skb); 8748 adapter->ptp_tx_skb = NULL; 8749 cancel_work_sync(&adapter->ptp_tx_work); 8750 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); 8751 } 8752 8753 return NETDEV_TX_OK; 8754 } 8755 8756 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, 8757 struct net_device *netdev, 8758 struct ixgbe_ring *ring) 8759 { 8760 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8761 struct ixgbe_ring *tx_ring; 8762 8763 /* 8764 * The minimum packet size for olinfo paylen is 17 so pad the skb 8765 * in order to meet this minimum size requirement. 8766 */ 8767 if (skb_put_padto(skb, 17)) 8768 return NETDEV_TX_OK; 8769 8770 tx_ring = ring ? 
ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; 8771 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) 8772 return NETDEV_TX_BUSY; 8773 8774 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); 8775 } 8776 8777 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 8778 struct net_device *netdev) 8779 { 8780 return __ixgbe_xmit_frame(skb, netdev, NULL); 8781 } 8782 8783 /** 8784 * ixgbe_set_mac - Change the Ethernet Address of the NIC 8785 * @netdev: network interface device structure 8786 * @p: pointer to an address structure 8787 * 8788 * Returns 0 on success, negative on failure 8789 **/ 8790 static int ixgbe_set_mac(struct net_device *netdev, void *p) 8791 { 8792 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8793 struct ixgbe_hw *hw = &adapter->hw; 8794 struct sockaddr *addr = p; 8795 8796 if (!is_valid_ether_addr(addr->sa_data)) 8797 return -EADDRNOTAVAIL; 8798 8799 eth_hw_addr_set(netdev, addr->sa_data); 8800 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 8801 8802 ixgbe_mac_set_default_filter(adapter); 8803 8804 return 0; 8805 } 8806 8807 static int 8808 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) 8809 { 8810 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8811 struct ixgbe_hw *hw = &adapter->hw; 8812 u16 value; 8813 int rc; 8814 8815 if (adapter->mii_bus) { 8816 int regnum = addr; 8817 8818 if (devad != MDIO_DEVAD_NONE) 8819 regnum |= (devad << 16) | MII_ADDR_C45; 8820 8821 return mdiobus_read(adapter->mii_bus, prtad, regnum); 8822 } 8823 8824 if (prtad != hw->phy.mdio.prtad) 8825 return -EINVAL; 8826 rc = hw->phy.ops.read_reg(hw, addr, devad, &value); 8827 if (!rc) 8828 rc = value; 8829 return rc; 8830 } 8831 8832 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, 8833 u16 addr, u16 value) 8834 { 8835 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8836 struct ixgbe_hw *hw = &adapter->hw; 8837 8838 if (adapter->mii_bus) { 8839 int regnum = addr; 8840 8841 if (devad != MDIO_DEVAD_NONE) 8842 regnum |= (devad << 16) | MII_ADDR_C45; 8843 8844 return mdiobus_write(adapter->mii_bus, prtad, regnum, value); 8845 } 8846 8847 if (prtad != hw->phy.mdio.prtad) 8848 return -EINVAL; 8849 return hw->phy.ops.write_reg(hw, addr, devad, value); 8850 } 8851 8852 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) 8853 { 8854 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8855 8856 switch (cmd) { 8857 case SIOCSHWTSTAMP: 8858 return ixgbe_ptp_set_ts_config(adapter, req); 8859 case SIOCGHWTSTAMP: 8860 return ixgbe_ptp_get_ts_config(adapter, req); 8861 case SIOCGMIIPHY: 8862 if (!adapter->hw.phy.ops.read_reg) 8863 return -EOPNOTSUPP; 8864 fallthrough; 8865 default: 8866 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 8867 } 8868 } 8869 8870 /** 8871 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding 8872 * netdev->dev_addrs 8873 * @dev: network interface device structure 8874 * 8875 * Returns non-zero on failure 8876 **/ 8877 static int ixgbe_add_sanmac_netdev(struct net_device *dev) 8878 { 8879 int err = 0; 8880 struct ixgbe_adapter *adapter = netdev_priv(dev); 8881 struct ixgbe_hw *hw = &adapter->hw; 8882 8883 if (is_valid_ether_addr(hw->mac.san_addr)) { 8884 rtnl_lock(); 8885 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); 8886 rtnl_unlock(); 8887 8888 /* update SAN MAC vmdq pool selection */ 8889 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 8890 } 8891 return err; 8892 } 8893 8894 /** 8895 * 
ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding 8896 * netdev->dev_addrs 8897 * @dev: network interface device structure 8898 * 8899 * Returns non-zero on failure 8900 **/ 8901 static int ixgbe_del_sanmac_netdev(struct net_device *dev) 8902 { 8903 int err = 0; 8904 struct ixgbe_adapter *adapter = netdev_priv(dev); 8905 struct ixgbe_mac_info *mac = &adapter->hw.mac; 8906 8907 if (is_valid_ether_addr(mac->san_addr)) { 8908 rtnl_lock(); 8909 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); 8910 rtnl_unlock(); 8911 } 8912 return err; 8913 } 8914 8915 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, 8916 struct ixgbe_ring *ring) 8917 { 8918 u64 bytes, packets; 8919 unsigned int start; 8920 8921 if (ring) { 8922 do { 8923 start = u64_stats_fetch_begin_irq(&ring->syncp); 8924 packets = ring->stats.packets; 8925 bytes = ring->stats.bytes; 8926 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 8927 stats->tx_packets += packets; 8928 stats->tx_bytes += bytes; 8929 } 8930 } 8931 8932 static void ixgbe_get_stats64(struct net_device *netdev, 8933 struct rtnl_link_stats64 *stats) 8934 { 8935 struct ixgbe_adapter *adapter = netdev_priv(netdev); 8936 int i; 8937 8938 rcu_read_lock(); 8939 for (i = 0; i < adapter->num_rx_queues; i++) { 8940 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); 8941 u64 bytes, packets; 8942 unsigned int start; 8943 8944 if (ring) { 8945 do { 8946 start = u64_stats_fetch_begin_irq(&ring->syncp); 8947 packets = ring->stats.packets; 8948 bytes = ring->stats.bytes; 8949 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 8950 stats->rx_packets += packets; 8951 stats->rx_bytes += bytes; 8952 } 8953 } 8954 8955 for (i = 0; i < adapter->num_tx_queues; i++) { 8956 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); 8957 8958 ixgbe_get_ring_stats64(stats, ring); 8959 } 8960 for (i = 0; i < adapter->num_xdp_queues; i++) { 8961 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); 8962 8963 ixgbe_get_ring_stats64(stats, ring); 8964 } 8965 rcu_read_unlock(); 8966 8967 /* following stats updated by ixgbe_watchdog_task() */ 8968 stats->multicast = netdev->stats.multicast; 8969 stats->rx_errors = netdev->stats.rx_errors; 8970 stats->rx_length_errors = netdev->stats.rx_length_errors; 8971 stats->rx_crc_errors = netdev->stats.rx_crc_errors; 8972 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 8973 } 8974 8975 #ifdef CONFIG_IXGBE_DCB 8976 /** 8977 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 8978 * @adapter: pointer to ixgbe_adapter 8979 * @tc: number of traffic classes currently enabled 8980 * 8981 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm 8982 * 802.1Q priority maps to a packet buffer that exists. 8983 */ 8984 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) 8985 { 8986 struct ixgbe_hw *hw = &adapter->hw; 8987 u32 reg, rsave; 8988 int i; 8989 8990 /* 82598 have a static priority to TC mapping that can not 8991 * be changed so no validation is needed. 
8992 */ 8993 if (hw->mac.type == ixgbe_mac_82598EB) 8994 return; 8995 8996 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); 8997 rsave = reg; 8998 8999 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 9000 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); 9001 9002 /* If up2tc is out of bounds default to zero */ 9003 if (up2tc > tc) 9004 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); 9005 } 9006 9007 if (reg != rsave) 9008 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); 9009 9010 return; 9011 } 9012 9013 /** 9014 * ixgbe_set_prio_tc_map - Configure netdev prio tc map 9015 * @adapter: Pointer to adapter struct 9016 * 9017 * Populate the netdev user priority to tc map 9018 */ 9019 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) 9020 { 9021 struct net_device *dev = adapter->netdev; 9022 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; 9023 struct ieee_ets *ets = adapter->ixgbe_ieee_ets; 9024 u8 prio; 9025 9026 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { 9027 u8 tc = 0; 9028 9029 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) 9030 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); 9031 else if (ets) 9032 tc = ets->prio_tc[prio]; 9033 9034 netdev_set_prio_tc_map(dev, prio, tc); 9035 } 9036 } 9037 9038 #endif /* CONFIG_IXGBE_DCB */ 9039 static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, 9040 struct netdev_nested_priv *priv) 9041 { 9042 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; 9043 struct ixgbe_fwd_adapter *accel; 9044 int pool; 9045 9046 /* we only care about macvlans... */ 9047 if (!netif_is_macvlan(vdev)) 9048 return 0; 9049 9050 /* that have hardware offload enabled... */ 9051 accel = macvlan_accel_priv(vdev); 9052 if (!accel) 9053 return 0; 9054 9055 /* If we can relocate to a different bit do so */ 9056 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); 9057 if (pool < adapter->num_rx_pools) { 9058 set_bit(pool, adapter->fwd_bitmask); 9059 accel->pool = pool; 9060 return 0; 9061 } 9062 9063 /* if we cannot find a free pool then disable the offload */ 9064 netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); 9065 macvlan_release_l2fw_offload(vdev); 9066 9067 /* unbind the queues and drop the subordinate channel config */ 9068 netdev_unbind_sb_channel(adapter->netdev, vdev); 9069 netdev_set_sb_channel(vdev, 0); 9070 9071 kfree(accel); 9072 9073 return 0; 9074 } 9075 9076 static void ixgbe_defrag_macvlan_pools(struct net_device *dev) 9077 { 9078 struct ixgbe_adapter *adapter = netdev_priv(dev); 9079 struct netdev_nested_priv priv = { 9080 .data = (void *)adapter, 9081 }; 9082 9083 /* flush any stale bits out of the fwd bitmask */ 9084 bitmap_clear(adapter->fwd_bitmask, 1, 63); 9085 9086 /* walk through upper devices reassigning pools */ 9087 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool, 9088 &priv); 9089 } 9090 9091 /** 9092 * ixgbe_setup_tc - configure net_device for multiple traffic classes 9093 * 9094 * @dev: net device to configure 9095 * @tc: number of traffic classes to enable 9096 */ 9097 int ixgbe_setup_tc(struct net_device *dev, u8 tc) 9098 { 9099 struct ixgbe_adapter *adapter = netdev_priv(dev); 9100 struct ixgbe_hw *hw = &adapter->hw; 9101 9102 /* Hardware supports up to 8 traffic classes */ 9103 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) 9104 return -EINVAL; 9105 9106 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) 9107 return -EINVAL; 9108 9109 /* Hardware has to reinitialize queues and interrupts to 9110 * match packet buffer alignment. 
Unfortunately, the 9111 * hardware is not flexible enough to do this dynamically. 9112 */ 9113 if (netif_running(dev)) 9114 ixgbe_close(dev); 9115 else 9116 ixgbe_reset(adapter); 9117 9118 ixgbe_clear_interrupt_scheme(adapter); 9119 9120 #ifdef CONFIG_IXGBE_DCB 9121 if (tc) { 9122 if (adapter->xdp_prog) { 9123 e_warn(probe, "DCB is not supported with XDP\n"); 9124 9125 ixgbe_init_interrupt_scheme(adapter); 9126 if (netif_running(dev)) 9127 ixgbe_open(dev); 9128 return -EINVAL; 9129 } 9130 9131 netdev_set_num_tc(dev, tc); 9132 ixgbe_set_prio_tc_map(adapter); 9133 9134 adapter->hw_tcs = tc; 9135 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 9136 9137 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 9138 adapter->last_lfc_mode = adapter->hw.fc.requested_mode; 9139 adapter->hw.fc.requested_mode = ixgbe_fc_none; 9140 } 9141 } else { 9142 netdev_reset_tc(dev); 9143 9144 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 9145 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 9146 9147 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 9148 adapter->hw_tcs = tc; 9149 9150 adapter->temp_dcb_cfg.pfc_mode_enable = false; 9151 adapter->dcb_cfg.pfc_mode_enable = false; 9152 } 9153 9154 ixgbe_validate_rtr(adapter, tc); 9155 9156 #endif /* CONFIG_IXGBE_DCB */ 9157 ixgbe_init_interrupt_scheme(adapter); 9158 9159 ixgbe_defrag_macvlan_pools(dev); 9160 9161 if (netif_running(dev)) 9162 return ixgbe_open(dev); 9163 9164 return 0; 9165 } 9166 9167 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, 9168 struct tc_cls_u32_offload *cls) 9169 { 9170 u32 hdl = cls->knode.handle; 9171 u32 uhtid = TC_U32_USERHTID(cls->knode.handle); 9172 u32 loc = cls->knode.handle & 0xfffff; 9173 int err = 0, i, j; 9174 struct ixgbe_jump_table *jump = NULL; 9175 9176 if (loc > IXGBE_MAX_HW_ENTRIES) 9177 return -EINVAL; 9178 9179 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) 9180 return -EINVAL; 9181 9182 /* Clear this filter in the link data it is associated with */ 9183 if (uhtid != 0x800) { 9184 jump = adapter->jump_tables[uhtid]; 9185 if (!jump) 9186 return -EINVAL; 9187 if (!test_bit(loc - 1, jump->child_loc_map)) 9188 return -EINVAL; 9189 clear_bit(loc - 1, jump->child_loc_map); 9190 } 9191 9192 /* Check if the filter being deleted is a link */ 9193 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { 9194 jump = adapter->jump_tables[i]; 9195 if (jump && jump->link_hdl == hdl) { 9196 /* Delete filters in the hardware in the child hash 9197 * table associated with this link 9198 */ 9199 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { 9200 if (!test_bit(j, jump->child_loc_map)) 9201 continue; 9202 spin_lock(&adapter->fdir_perfect_lock); 9203 err = ixgbe_update_ethtool_fdir_entry(adapter, 9204 NULL, 9205 j + 1); 9206 spin_unlock(&adapter->fdir_perfect_lock); 9207 clear_bit(j, jump->child_loc_map); 9208 } 9209 /* Remove resources for this link */ 9210 kfree(jump->input); 9211 kfree(jump->mask); 9212 kfree(jump); 9213 adapter->jump_tables[i] = NULL; 9214 return err; 9215 } 9216 } 9217 9218 spin_lock(&adapter->fdir_perfect_lock); 9219 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); 9220 spin_unlock(&adapter->fdir_perfect_lock); 9221 return err; 9222 } 9223 9224 static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, 9225 struct tc_cls_u32_offload *cls) 9226 { 9227 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); 9228 9229 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 9230 return -EINVAL; 9231 9232 /* This ixgbe devices do not support hash tables at the moment 9233 * so abort when given hash tables. 
9234 */ 9235 if (cls->hnode.divisor > 0) 9236 return -EINVAL; 9237 9238 set_bit(uhtid - 1, &adapter->tables); 9239 return 0; 9240 } 9241 9242 static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, 9243 struct tc_cls_u32_offload *cls) 9244 { 9245 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); 9246 9247 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 9248 return -EINVAL; 9249 9250 clear_bit(uhtid - 1, &adapter->tables); 9251 return 0; 9252 } 9253 9254 #ifdef CONFIG_NET_CLS_ACT 9255 struct upper_walk_data { 9256 struct ixgbe_adapter *adapter; 9257 u64 action; 9258 int ifindex; 9259 u8 queue; 9260 }; 9261 9262 static int get_macvlan_queue(struct net_device *upper, 9263 struct netdev_nested_priv *priv) 9264 { 9265 if (netif_is_macvlan(upper)) { 9266 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper); 9267 struct ixgbe_adapter *adapter; 9268 struct upper_walk_data *data; 9269 int ifindex; 9270 9271 data = (struct upper_walk_data *)priv->data; 9272 ifindex = data->ifindex; 9273 adapter = data->adapter; 9274 if (vadapter && upper->ifindex == ifindex) { 9275 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; 9276 data->action = data->queue; 9277 return 1; 9278 } 9279 } 9280 9281 return 0; 9282 } 9283 9284 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, 9285 u8 *queue, u64 *action) 9286 { 9287 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 9288 unsigned int num_vfs = adapter->num_vfs, vf; 9289 struct netdev_nested_priv priv; 9290 struct upper_walk_data data; 9291 struct net_device *upper; 9292 9293 /* redirect to a SRIOV VF */ 9294 for (vf = 0; vf < num_vfs; ++vf) { 9295 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); 9296 if (upper->ifindex == ifindex) { 9297 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); 9298 *action = vf + 1; 9299 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 9300 return 0; 9301 } 9302 } 9303 9304 /* redirect to a offloaded macvlan netdev */ 9305 data.adapter = adapter; 9306 data.ifindex = ifindex; 9307 data.action = 0; 9308 data.queue = 0; 9309 priv.data = (void *)&data; 9310 if (netdev_walk_all_upper_dev_rcu(adapter->netdev, 9311 get_macvlan_queue, &priv)) { 9312 *action = data.action; 9313 *queue = data.queue; 9314 9315 return 0; 9316 } 9317 9318 return -EINVAL; 9319 } 9320 9321 static int parse_tc_actions(struct ixgbe_adapter *adapter, 9322 struct tcf_exts *exts, u64 *action, u8 *queue) 9323 { 9324 const struct tc_action *a; 9325 int i; 9326 9327 if (!tcf_exts_has_actions(exts)) 9328 return -EINVAL; 9329 9330 tcf_exts_for_each_action(i, a, exts) { 9331 /* Drop action */ 9332 if (is_tcf_gact_shot(a)) { 9333 *action = IXGBE_FDIR_DROP_QUEUE; 9334 *queue = IXGBE_FDIR_DROP_QUEUE; 9335 return 0; 9336 } 9337 9338 /* Redirect to a VF or a offloaded macvlan */ 9339 if (is_tcf_mirred_egress_redirect(a)) { 9340 struct net_device *dev = tcf_mirred_dev(a); 9341 9342 if (!dev) 9343 return -EINVAL; 9344 return handle_redirect_action(adapter, dev->ifindex, 9345 queue, action); 9346 } 9347 9348 return -EINVAL; 9349 } 9350 9351 return -EINVAL; 9352 } 9353 #else 9354 static int parse_tc_actions(struct ixgbe_adapter *adapter, 9355 struct tcf_exts *exts, u64 *action, u8 *queue) 9356 { 9357 return -EINVAL; 9358 } 9359 #endif /* CONFIG_NET_CLS_ACT */ 9360 9361 static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, 9362 union ixgbe_atr_input *mask, 9363 struct tc_cls_u32_offload *cls, 9364 struct ixgbe_mat_field *field_ptr, 9365 struct ixgbe_nexthdr *nexthdr) 9366 { 9367 int i, j, off; 9368 
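/* Each u32 key in the cls_u32 selector is matched by offset against the field_ptr table and folded into the hardware filter input/mask; when a nexthdr is supplied, one of the keys must also match it for the jump to be considered valid. */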
__be32 val, m; 9369 bool found_entry = false, found_jump_field = false; 9370 9371 for (i = 0; i < cls->knode.sel->nkeys; i++) { 9372 off = cls->knode.sel->keys[i].off; 9373 val = cls->knode.sel->keys[i].val; 9374 m = cls->knode.sel->keys[i].mask; 9375 9376 for (j = 0; field_ptr[j].val; j++) { 9377 if (field_ptr[j].off == off) { 9378 field_ptr[j].val(input, mask, (__force u32)val, 9379 (__force u32)m); 9380 input->filter.formatted.flow_type |= 9381 field_ptr[j].type; 9382 found_entry = true; 9383 break; 9384 } 9385 } 9386 if (nexthdr) { 9387 if (nexthdr->off == cls->knode.sel->keys[i].off && 9388 nexthdr->val == 9389 (__force u32)cls->knode.sel->keys[i].val && 9390 nexthdr->mask == 9391 (__force u32)cls->knode.sel->keys[i].mask) 9392 found_jump_field = true; 9393 else 9394 continue; 9395 } 9396 } 9397 9398 if (nexthdr && !found_jump_field) 9399 return -EINVAL; 9400 9401 if (!found_entry) 9402 return 0; 9403 9404 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | 9405 IXGBE_ATR_L4TYPE_MASK; 9406 9407 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) 9408 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; 9409 9410 return 0; 9411 } 9412 9413 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, 9414 struct tc_cls_u32_offload *cls) 9415 { 9416 __be16 protocol = cls->common.protocol; 9417 u32 loc = cls->knode.handle & 0xfffff; 9418 struct ixgbe_hw *hw = &adapter->hw; 9419 struct ixgbe_mat_field *field_ptr; 9420 struct ixgbe_fdir_filter *input = NULL; 9421 union ixgbe_atr_input *mask = NULL; 9422 struct ixgbe_jump_table *jump = NULL; 9423 int i, err = -EINVAL; 9424 u8 queue; 9425 u32 uhtid, link_uhtid; 9426 9427 uhtid = TC_U32_USERHTID(cls->knode.handle); 9428 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); 9429 9430 /* At the moment cls_u32 jumps to network layer and skips past 9431 * L2 headers. The canonical method to match L2 frames is to use 9432 * negative values. However this is error prone at best but really 9433 * just broken because there is no way to "know" what sort of hdr 9434 * is in front of the network layer. Fix cls_u32 to support L2 9435 * headers when needed. 9436 */ 9437 if (protocol != htons(ETH_P_IP)) 9438 return err; 9439 9440 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { 9441 e_err(drv, "Location out of range\n"); 9442 return err; 9443 } 9444 9445 /* cls u32 is a graph starting at root node 0x800. The driver tracks 9446 * links and also the fields used to advance the parser across each 9447 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map 9448 * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h 9449 * To add support for new nodes update ixgbe_model.h parse structures 9450 * this function _should_ be generic try not to hardcode values here. 9451 */ 9452 if (uhtid == 0x800) { 9453 field_ptr = (adapter->jump_tables[0])->mat; 9454 } else { 9455 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 9456 return err; 9457 if (!adapter->jump_tables[uhtid]) 9458 return err; 9459 field_ptr = (adapter->jump_tables[uhtid])->mat; 9460 } 9461 9462 if (!field_ptr) 9463 return err; 9464 9465 /* At this point we know the field_ptr is valid and need to either 9466 * build cls_u32 link or attach filter. Because adding a link to 9467 * a handle that does not exist is invalid and the same for adding 9468 * rules to handles that don't exist. 
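* The code below therefore either records a new jump table entry for link_uhtid or builds the filter input and programs a perfect filter at the requested location.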
9469 */ 9470 9471 if (link_uhtid) { 9472 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; 9473 9474 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) 9475 return err; 9476 9477 if (!test_bit(link_uhtid - 1, &adapter->tables)) 9478 return err; 9479 9480 /* Multiple filters as links to the same hash table are not 9481 * supported. To add a new filter with the same next header 9482 * but different match/jump conditions, create a new hash table 9483 * and link to it. 9484 */ 9485 if (adapter->jump_tables[link_uhtid] && 9486 (adapter->jump_tables[link_uhtid])->link_hdl) { 9487 e_err(drv, "Link filter exists for link: %x\n", 9488 link_uhtid); 9489 return err; 9490 } 9491 9492 for (i = 0; nexthdr[i].jump; i++) { 9493 if (nexthdr[i].o != cls->knode.sel->offoff || 9494 nexthdr[i].s != cls->knode.sel->offshift || 9495 nexthdr[i].m != 9496 (__force u32)cls->knode.sel->offmask) 9497 return err; 9498 9499 jump = kzalloc(sizeof(*jump), GFP_KERNEL); 9500 if (!jump) 9501 return -ENOMEM; 9502 input = kzalloc(sizeof(*input), GFP_KERNEL); 9503 if (!input) { 9504 err = -ENOMEM; 9505 goto free_jump; 9506 } 9507 mask = kzalloc(sizeof(*mask), GFP_KERNEL); 9508 if (!mask) { 9509 err = -ENOMEM; 9510 goto free_input; 9511 } 9512 jump->input = input; 9513 jump->mask = mask; 9514 jump->link_hdl = cls->knode.handle; 9515 9516 err = ixgbe_clsu32_build_input(input, mask, cls, 9517 field_ptr, &nexthdr[i]); 9518 if (!err) { 9519 jump->mat = nexthdr[i].jump; 9520 adapter->jump_tables[link_uhtid] = jump; 9521 break; 9522 } else { 9523 kfree(mask); 9524 kfree(input); 9525 kfree(jump); 9526 } 9527 } 9528 return 0; 9529 } 9530 9531 input = kzalloc(sizeof(*input), GFP_KERNEL); 9532 if (!input) 9533 return -ENOMEM; 9534 mask = kzalloc(sizeof(*mask), GFP_KERNEL); 9535 if (!mask) { 9536 err = -ENOMEM; 9537 goto free_input; 9538 } 9539 9540 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { 9541 if ((adapter->jump_tables[uhtid])->input) 9542 memcpy(input, (adapter->jump_tables[uhtid])->input, 9543 sizeof(*input)); 9544 if ((adapter->jump_tables[uhtid])->mask) 9545 memcpy(mask, (adapter->jump_tables[uhtid])->mask, 9546 sizeof(*mask)); 9547 9548 /* Lookup in all child hash tables if this location is already 9549 * filled with a filter 9550 */ 9551 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { 9552 struct ixgbe_jump_table *link = adapter->jump_tables[i]; 9553 9554 if (link && (test_bit(loc - 1, link->child_loc_map))) { 9555 e_err(drv, "Filter exists in location: %x\n", 9556 loc); 9557 err = -EINVAL; 9558 goto err_out; 9559 } 9560 } 9561 } 9562 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); 9563 if (err) 9564 goto err_out; 9565 9566 err = parse_tc_actions(adapter, cls->knode.exts, &input->action, 9567 &queue); 9568 if (err < 0) 9569 goto err_out; 9570 9571 input->sw_idx = loc; 9572 9573 spin_lock(&adapter->fdir_perfect_lock); 9574 9575 if (hlist_empty(&adapter->fdir_filter_list)) { 9576 memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); 9577 err = ixgbe_fdir_set_input_mask_82599(hw, mask); 9578 if (err) 9579 goto err_out_w_lock; 9580 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { 9581 err = -EINVAL; 9582 goto err_out_w_lock; 9583 } 9584 9585 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); 9586 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, 9587 input->sw_idx, queue); 9588 if (err) 9589 goto err_out_w_lock; 9590 9591 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 9592 spin_unlock(&adapter->fdir_perfect_lock); 9593 9594 if ((uhtid != 0x800) && 
(adapter->jump_tables[uhtid])) 9595 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); 9596 9597 kfree(mask); 9598 return err; 9599 err_out_w_lock: 9600 spin_unlock(&adapter->fdir_perfect_lock); 9601 err_out: 9602 kfree(mask); 9603 free_input: 9604 kfree(input); 9605 free_jump: 9606 kfree(jump); 9607 return err; 9608 } 9609 9610 static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter, 9611 struct tc_cls_u32_offload *cls_u32) 9612 { 9613 switch (cls_u32->command) { 9614 case TC_CLSU32_NEW_KNODE: 9615 case TC_CLSU32_REPLACE_KNODE: 9616 return ixgbe_configure_clsu32(adapter, cls_u32); 9617 case TC_CLSU32_DELETE_KNODE: 9618 return ixgbe_delete_clsu32(adapter, cls_u32); 9619 case TC_CLSU32_NEW_HNODE: 9620 case TC_CLSU32_REPLACE_HNODE: 9621 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32); 9622 case TC_CLSU32_DELETE_HNODE: 9623 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); 9624 default: 9625 return -EOPNOTSUPP; 9626 } 9627 } 9628 9629 static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 9630 void *cb_priv) 9631 { 9632 struct ixgbe_adapter *adapter = cb_priv; 9633 9634 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 9635 return -EOPNOTSUPP; 9636 9637 switch (type) { 9638 case TC_SETUP_CLSU32: 9639 return ixgbe_setup_tc_cls_u32(adapter, type_data); 9640 default: 9641 return -EOPNOTSUPP; 9642 } 9643 } 9644 9645 static int ixgbe_setup_tc_mqprio(struct net_device *dev, 9646 struct tc_mqprio_qopt *mqprio) 9647 { 9648 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 9649 return ixgbe_setup_tc(dev, mqprio->num_tc); 9650 } 9651 9652 static LIST_HEAD(ixgbe_block_cb_list); 9653 9654 static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, 9655 void *type_data) 9656 { 9657 struct ixgbe_adapter *adapter = netdev_priv(dev); 9658 9659 switch (type) { 9660 case TC_SETUP_BLOCK: 9661 return flow_block_cb_setup_simple(type_data, 9662 &ixgbe_block_cb_list, 9663 ixgbe_setup_tc_block_cb, 9664 adapter, adapter, true); 9665 case TC_SETUP_QDISC_MQPRIO: 9666 return ixgbe_setup_tc_mqprio(dev, type_data); 9667 default: 9668 return -EOPNOTSUPP; 9669 } 9670 } 9671 9672 #ifdef CONFIG_PCI_IOV 9673 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) 9674 { 9675 struct net_device *netdev = adapter->netdev; 9676 9677 rtnl_lock(); 9678 ixgbe_setup_tc(netdev, adapter->hw_tcs); 9679 rtnl_unlock(); 9680 } 9681 9682 #endif 9683 void ixgbe_do_reset(struct net_device *netdev) 9684 { 9685 struct ixgbe_adapter *adapter = netdev_priv(netdev); 9686 9687 if (netif_running(netdev)) 9688 ixgbe_reinit_locked(adapter); 9689 else 9690 ixgbe_reset(adapter); 9691 } 9692 9693 static netdev_features_t ixgbe_fix_features(struct net_device *netdev, 9694 netdev_features_t features) 9695 { 9696 struct ixgbe_adapter *adapter = netdev_priv(netdev); 9697 9698 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ 9699 if (!(features & NETIF_F_RXCSUM)) 9700 features &= ~NETIF_F_LRO; 9701 9702 /* Turn off LRO if not RSC capable */ 9703 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 9704 features &= ~NETIF_F_LRO; 9705 9706 if (adapter->xdp_prog && (features & NETIF_F_LRO)) { 9707 e_dev_err("LRO is not supported with XDP\n"); 9708 features &= ~NETIF_F_LRO; 9709 } 9710 9711 return features; 9712 } 9713 9714 static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter) 9715 { 9716 int rss = min_t(int, ixgbe_max_rss_indices(adapter), 9717 num_online_cpus()); 9718 9719 /* go back to full RSS if we're not running SR-IOV */ 9720 if 
(!adapter->ring_feature[RING_F_VMDQ].offset) 9721 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | 9722 IXGBE_FLAG_SRIOV_ENABLED); 9723 9724 adapter->ring_feature[RING_F_RSS].limit = rss; 9725 adapter->ring_feature[RING_F_VMDQ].limit = 1; 9726 9727 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs); 9728 } 9729 9730 static int ixgbe_set_features(struct net_device *netdev, 9731 netdev_features_t features) 9732 { 9733 struct ixgbe_adapter *adapter = netdev_priv(netdev); 9734 netdev_features_t changed = netdev->features ^ features; 9735 bool need_reset = false; 9736 9737 /* Make sure RSC matches LRO, reset if change */ 9738 if (!(features & NETIF_F_LRO)) { 9739 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 9740 need_reset = true; 9741 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 9742 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && 9743 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { 9744 if (adapter->rx_itr_setting == 1 || 9745 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { 9746 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 9747 need_reset = true; 9748 } else if ((changed ^ features) & NETIF_F_LRO) { 9749 e_info(probe, "rx-usecs set too low, " 9750 "disabling RSC\n"); 9751 } 9752 } 9753 9754 /* 9755 * Check if Flow Director n-tuple support or hw_tc support was 9756 * enabled or disabled. If the state changed, we need to reset. 9757 */ 9758 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { 9759 /* turn off ATR, enable perfect filters and reset */ 9760 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 9761 need_reset = true; 9762 9763 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 9764 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 9765 } else { 9766 /* turn off perfect filters, enable ATR and reset */ 9767 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 9768 need_reset = true; 9769 9770 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 9771 9772 /* We cannot enable ATR if SR-IOV is enabled */ 9773 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || 9774 /* We cannot enable ATR if we have 2 or more tcs */ 9775 (adapter->hw_tcs > 1) || 9776 /* We cannot enable ATR if RSS is disabled */ 9777 (adapter->ring_feature[RING_F_RSS].limit <= 1) || 9778 /* A sample rate of 0 indicates ATR disabled */ 9779 (!adapter->atr_sample_rate)) 9780 ; /* do nothing not supported */ 9781 else /* otherwise supported and set the flag */ 9782 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 9783 } 9784 9785 if (changed & NETIF_F_RXALL) 9786 need_reset = true; 9787 9788 netdev->features = features; 9789 9790 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) 9791 ixgbe_reset_l2fw_offload(adapter); 9792 else if (need_reset) 9793 ixgbe_do_reset(netdev); 9794 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | 9795 NETIF_F_HW_VLAN_CTAG_FILTER)) 9796 ixgbe_set_rx_mode(netdev); 9797 9798 return 1; 9799 } 9800 9801 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 9802 struct net_device *dev, 9803 const unsigned char *addr, u16 vid, 9804 u16 flags, 9805 struct netlink_ext_ack *extack) 9806 { 9807 /* guarantee we can provide a unique filter for the unicast address */ 9808 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { 9809 struct ixgbe_adapter *adapter = netdev_priv(dev); 9810 u16 pool = VMDQ_P(0); 9811 9812 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) 9813 return -ENOMEM; 9814 } 9815 9816 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); 9817 } 9818 9819 /** 9820 * ixgbe_configure_bridge_mode - set various 
bridge modes 9821 * @adapter: the private structure 9822 * @mode: requested bridge mode 9823 * 9824 * Configure the settings required for various bridge modes. 9825 **/ 9826 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, 9827 __u16 mode) 9828 { 9829 struct ixgbe_hw *hw = &adapter->hw; 9830 unsigned int p, num_pools; 9831 u32 vmdctl; 9832 9833 switch (mode) { 9834 case BRIDGE_MODE_VEPA: 9835 /* disable Tx loopback, rely on switch hairpin mode */ 9836 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); 9837 9838 /* must enable Rx switching replication to allow multicast 9839 * packet reception on all VFs, and to enable source address 9840 * pruning. 9841 */ 9842 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 9843 vmdctl |= IXGBE_VT_CTL_REPLEN; 9844 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 9845 9846 /* enable Rx source address pruning. Note, this requires 9847 * replication to be enabled or else it does nothing. 9848 */ 9849 num_pools = adapter->num_vfs + adapter->num_rx_pools; 9850 for (p = 0; p < num_pools; p++) { 9851 if (hw->mac.ops.set_source_address_pruning) 9852 hw->mac.ops.set_source_address_pruning(hw, 9853 true, 9854 p); 9855 } 9856 break; 9857 case BRIDGE_MODE_VEB: 9858 /* enable Tx loopback for internal VF/PF communication */ 9859 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 9860 IXGBE_PFDTXGSWC_VT_LBEN); 9861 9862 /* disable Rx switching replication unless we have SR-IOV 9863 * virtual functions 9864 */ 9865 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 9866 if (!adapter->num_vfs) 9867 vmdctl &= ~IXGBE_VT_CTL_REPLEN; 9868 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 9869 9870 /* disable Rx source address pruning, since we don't expect to 9871 * be receiving external loopback of our transmitted frames. 9872 */ 9873 num_pools = adapter->num_vfs + adapter->num_rx_pools; 9874 for (p = 0; p < num_pools; p++) { 9875 if (hw->mac.ops.set_source_address_pruning) 9876 hw->mac.ops.set_source_address_pruning(hw, 9877 false, 9878 p); 9879 } 9880 break; 9881 default: 9882 return -EINVAL; 9883 } 9884 9885 adapter->bridge_mode = mode; 9886 9887 e_info(drv, "enabling bridge mode: %s\n", 9888 mode == BRIDGE_MODE_VEPA ?
"VEPA" : "VEB"); 9889 9890 return 0; 9891 } 9892 9893 static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 9894 struct nlmsghdr *nlh, u16 flags, 9895 struct netlink_ext_ack *extack) 9896 { 9897 struct ixgbe_adapter *adapter = netdev_priv(dev); 9898 struct nlattr *attr, *br_spec; 9899 int rem; 9900 9901 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 9902 return -EOPNOTSUPP; 9903 9904 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 9905 if (!br_spec) 9906 return -EINVAL; 9907 9908 nla_for_each_nested(attr, br_spec, rem) { 9909 int status; 9910 __u16 mode; 9911 9912 if (nla_type(attr) != IFLA_BRIDGE_MODE) 9913 continue; 9914 9915 if (nla_len(attr) < sizeof(mode)) 9916 return -EINVAL; 9917 9918 mode = nla_get_u16(attr); 9919 status = ixgbe_configure_bridge_mode(adapter, mode); 9920 if (status) 9921 return status; 9922 9923 break; 9924 } 9925 9926 return 0; 9927 } 9928 9929 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 9930 struct net_device *dev, 9931 u32 filter_mask, int nlflags) 9932 { 9933 struct ixgbe_adapter *adapter = netdev_priv(dev); 9934 9935 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 9936 return 0; 9937 9938 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, 9939 adapter->bridge_mode, 0, 0, nlflags, 9940 filter_mask, NULL); 9941 } 9942 9943 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) 9944 { 9945 struct ixgbe_adapter *adapter = netdev_priv(pdev); 9946 struct ixgbe_fwd_adapter *accel; 9947 int tcs = adapter->hw_tcs ? : 1; 9948 int pool, err; 9949 9950 if (adapter->xdp_prog) { 9951 e_warn(probe, "L2FW offload is not supported with XDP\n"); 9952 return ERR_PTR(-EINVAL); 9953 } 9954 9955 /* The hardware supported by ixgbe only filters on the destination MAC 9956 * address. In order to avoid issues we only support offloading modes 9957 * where the hardware can actually provide the functionality. 9958 */ 9959 if (!macvlan_supports_dest_filter(vdev)) 9960 return ERR_PTR(-EMEDIUMTYPE); 9961 9962 /* We need to lock down the macvlan to be a single queue device so that 9963 * we can reuse the tc_to_txq field in the macvlan netdev to represent 9964 * the queue mapping to our netdev. 9965 */ 9966 if (netif_is_multiqueue(vdev)) 9967 return ERR_PTR(-ERANGE); 9968 9969 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); 9970 if (pool == adapter->num_rx_pools) { 9971 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; 9972 u16 reserved_pools; 9973 9974 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 9975 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || 9976 adapter->num_rx_pools > IXGBE_MAX_MACVLANS) 9977 return ERR_PTR(-EBUSY); 9978 9979 /* Hardware has a limited number of available pools. Each VF, 9980 * and the PF require a pool. Check to ensure we don't 9981 * attempt to use more then the available number of pools. 9982 */ 9983 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) 9984 return ERR_PTR(-EBUSY); 9985 9986 /* Enable VMDq flag so device will be set in VM mode */ 9987 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | 9988 IXGBE_FLAG_SRIOV_ENABLED; 9989 9990 /* Try to reserve as many queues per pool as possible, 9991 * we start with the configurations that support 4 queues 9992 * per pools, followed by 2, and then by just 1 per pool. 
9993 */ 9994 if (used_pools < 32 && adapter->num_rx_pools < 16) 9995 reserved_pools = min_t(u16, 9996 32 - used_pools, 9997 16 - adapter->num_rx_pools); 9998 else if (adapter->num_rx_pools < 32) 9999 reserved_pools = min_t(u16, 10000 64 - used_pools, 10001 32 - adapter->num_rx_pools); 10002 else 10003 reserved_pools = 64 - used_pools; 10004 10005 10006 if (!reserved_pools) 10007 return ERR_PTR(-EBUSY); 10008 10009 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; 10010 10011 /* Force reinit of ring allocation with VMDQ enabled */ 10012 err = ixgbe_setup_tc(pdev, adapter->hw_tcs); 10013 if (err) 10014 return ERR_PTR(err); 10015 10016 if (pool >= adapter->num_rx_pools) 10017 return ERR_PTR(-ENOMEM); 10018 } 10019 10020 accel = kzalloc(sizeof(*accel), GFP_KERNEL); 10021 if (!accel) 10022 return ERR_PTR(-ENOMEM); 10023 10024 set_bit(pool, adapter->fwd_bitmask); 10025 netdev_set_sb_channel(vdev, pool); 10026 accel->pool = pool; 10027 accel->netdev = vdev; 10028 10029 if (!netif_running(pdev)) 10030 return accel; 10031 10032 err = ixgbe_fwd_ring_up(adapter, accel); 10033 if (err) 10034 return ERR_PTR(err); 10035 10036 return accel; 10037 } 10038 10039 static void ixgbe_fwd_del(struct net_device *pdev, void *priv) 10040 { 10041 struct ixgbe_fwd_adapter *accel = priv; 10042 struct ixgbe_adapter *adapter = netdev_priv(pdev); 10043 unsigned int rxbase = accel->rx_base_queue; 10044 unsigned int i; 10045 10046 /* delete unicast filter associated with offloaded interface */ 10047 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, 10048 VMDQ_P(accel->pool)); 10049 10050 /* Allow remaining Rx packets to get flushed out of the 10051 * Rx FIFO before we drop the netdev for the ring. 10052 */ 10053 usleep_range(10000, 20000); 10054 10055 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { 10056 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; 10057 struct ixgbe_q_vector *qv = ring->q_vector; 10058 10059 /* Make sure we aren't processing any packets and clear 10060 * netdev to shut down the ring. 10061 */ 10062 if (netif_running(adapter->netdev)) 10063 napi_synchronize(&qv->napi); 10064 ring->netdev = NULL; 10065 } 10066 10067 /* unbind the queues and drop the subordinate channel config */ 10068 netdev_unbind_sb_channel(pdev, accel->netdev); 10069 netdev_set_sb_channel(accel->netdev, 0); 10070 10071 clear_bit(accel->pool, adapter->fwd_bitmask); 10072 kfree(accel); 10073 } 10074 10075 #define IXGBE_MAX_MAC_HDR_LEN 127 10076 #define IXGBE_MAX_NETWORK_HDR_LEN 511 10077 10078 static netdev_features_t 10079 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, 10080 netdev_features_t features) 10081 { 10082 unsigned int network_hdr_len, mac_hdr_len; 10083 10084 /* Make certain the headers can be described by a context descriptor */ 10085 mac_hdr_len = skb_network_header(skb) - skb->data; 10086 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) 10087 return features & ~(NETIF_F_HW_CSUM | 10088 NETIF_F_SCTP_CRC | 10089 NETIF_F_GSO_UDP_L4 | 10090 NETIF_F_HW_VLAN_CTAG_TX | 10091 NETIF_F_TSO | 10092 NETIF_F_TSO6); 10093 10094 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 10095 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) 10096 return features & ~(NETIF_F_HW_CSUM | 10097 NETIF_F_SCTP_CRC | 10098 NETIF_F_GSO_UDP_L4 | 10099 NETIF_F_TSO | 10100 NETIF_F_TSO6); 10101 10102 /* We can only support IPV4 TSO in tunnels if we can mangle the 10103 * inner IP ID field, so strip TSO if MANGLEID is not supported. 
10104 * IPsec offoad sets skb->encapsulation but still can handle 10105 * the TSO, so it's the exception. 10106 */ 10107 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { 10108 #ifdef CONFIG_IXGBE_IPSEC 10109 if (!secpath_exists(skb)) 10110 #endif 10111 features &= ~NETIF_F_TSO; 10112 } 10113 10114 return features; 10115 } 10116 10117 static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) 10118 { 10119 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 10120 struct ixgbe_adapter *adapter = netdev_priv(dev); 10121 struct bpf_prog *old_prog; 10122 bool need_reset; 10123 int num_queues; 10124 10125 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 10126 return -EINVAL; 10127 10128 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 10129 return -EINVAL; 10130 10131 /* verify ixgbe ring attributes are sufficient for XDP */ 10132 for (i = 0; i < adapter->num_rx_queues; i++) { 10133 struct ixgbe_ring *ring = adapter->rx_ring[i]; 10134 10135 if (ring_is_rsc_enabled(ring)) 10136 return -EINVAL; 10137 10138 if (frame_size > ixgbe_rx_bufsz(ring)) 10139 return -EINVAL; 10140 } 10141 10142 /* if the number of cpus is much larger than the maximum of queues, 10143 * we should stop it and then return with ENOMEM like before. 10144 */ 10145 if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2) 10146 return -ENOMEM; 10147 else if (nr_cpu_ids > IXGBE_MAX_XDP_QS) 10148 static_branch_inc(&ixgbe_xdp_locking_key); 10149 10150 old_prog = xchg(&adapter->xdp_prog, prog); 10151 need_reset = (!!prog != !!old_prog); 10152 10153 /* If transitioning XDP modes reconfigure rings */ 10154 if (need_reset) { 10155 int err; 10156 10157 if (!prog) 10158 /* Wait until ndo_xsk_wakeup completes. */ 10159 synchronize_rcu(); 10160 err = ixgbe_setup_tc(dev, adapter->hw_tcs); 10161 10162 if (err) { 10163 rcu_assign_pointer(adapter->xdp_prog, old_prog); 10164 return -EINVAL; 10165 } 10166 } else { 10167 for (i = 0; i < adapter->num_rx_queues; i++) 10168 (void)xchg(&adapter->rx_ring[i]->xdp_prog, 10169 adapter->xdp_prog); 10170 } 10171 10172 if (old_prog) 10173 bpf_prog_put(old_prog); 10174 10175 /* Kick start the NAPI context if there is an AF_XDP socket open 10176 * on that queue id. This so that receiving will start. 10177 */ 10178 if (need_reset && prog) { 10179 num_queues = min_t(int, adapter->num_rx_queues, 10180 adapter->num_xdp_queues); 10181 for (i = 0; i < num_queues; i++) 10182 if (adapter->xdp_ring[i]->xsk_pool) 10183 (void)ixgbe_xsk_wakeup(adapter->netdev, i, 10184 XDP_WAKEUP_RX); 10185 } 10186 10187 return 0; 10188 } 10189 10190 static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) 10191 { 10192 struct ixgbe_adapter *adapter = netdev_priv(dev); 10193 10194 switch (xdp->command) { 10195 case XDP_SETUP_PROG: 10196 return ixgbe_xdp_setup(dev, xdp->prog); 10197 case XDP_SETUP_XSK_POOL: 10198 return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool, 10199 xdp->xsk.queue_id); 10200 10201 default: 10202 return -EINVAL; 10203 } 10204 } 10205 10206 void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) 10207 { 10208 /* Force memory writes to complete before letting h/w know there 10209 * are new descriptors to fetch. 
10210 */ 10211 wmb(); 10212 writel(ring->next_to_use, ring->tail); 10213 } 10214 10215 void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring) 10216 { 10217 if (static_branch_unlikely(&ixgbe_xdp_locking_key)) 10218 spin_lock(&ring->tx_lock); 10219 ixgbe_xdp_ring_update_tail(ring); 10220 if (static_branch_unlikely(&ixgbe_xdp_locking_key)) 10221 spin_unlock(&ring->tx_lock); 10222 } 10223 10224 static int ixgbe_xdp_xmit(struct net_device *dev, int n, 10225 struct xdp_frame **frames, u32 flags) 10226 { 10227 struct ixgbe_adapter *adapter = netdev_priv(dev); 10228 struct ixgbe_ring *ring; 10229 int nxmit = 0; 10230 int i; 10231 10232 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) 10233 return -ENETDOWN; 10234 10235 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 10236 return -EINVAL; 10237 10238 /* During program transitions its possible adapter->xdp_prog is assigned 10239 * but ring has not been configured yet. In this case simply abort xmit. 10240 */ 10241 ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL; 10242 if (unlikely(!ring)) 10243 return -ENXIO; 10244 10245 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) 10246 return -ENXIO; 10247 10248 if (static_branch_unlikely(&ixgbe_xdp_locking_key)) 10249 spin_lock(&ring->tx_lock); 10250 10251 for (i = 0; i < n; i++) { 10252 struct xdp_frame *xdpf = frames[i]; 10253 int err; 10254 10255 err = ixgbe_xmit_xdp_ring(ring, xdpf); 10256 if (err != IXGBE_XDP_TX) 10257 break; 10258 nxmit++; 10259 } 10260 10261 if (unlikely(flags & XDP_XMIT_FLUSH)) 10262 ixgbe_xdp_ring_update_tail(ring); 10263 10264 if (static_branch_unlikely(&ixgbe_xdp_locking_key)) 10265 spin_unlock(&ring->tx_lock); 10266 10267 return nxmit; 10268 } 10269 10270 static const struct net_device_ops ixgbe_netdev_ops = { 10271 .ndo_open = ixgbe_open, 10272 .ndo_stop = ixgbe_close, 10273 .ndo_start_xmit = ixgbe_xmit_frame, 10274 .ndo_set_rx_mode = ixgbe_set_rx_mode, 10275 .ndo_validate_addr = eth_validate_addr, 10276 .ndo_set_mac_address = ixgbe_set_mac, 10277 .ndo_change_mtu = ixgbe_change_mtu, 10278 .ndo_tx_timeout = ixgbe_tx_timeout, 10279 .ndo_set_tx_maxrate = ixgbe_tx_maxrate, 10280 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 10281 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 10282 .ndo_eth_ioctl = ixgbe_ioctl, 10283 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, 10284 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, 10285 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, 10286 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, 10287 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, 10288 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, 10289 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 10290 .ndo_get_stats64 = ixgbe_get_stats64, 10291 .ndo_setup_tc = __ixgbe_setup_tc, 10292 #ifdef IXGBE_FCOE 10293 .ndo_select_queue = ixgbe_select_queue, 10294 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 10295 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, 10296 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 10297 .ndo_fcoe_enable = ixgbe_fcoe_enable, 10298 .ndo_fcoe_disable = ixgbe_fcoe_disable, 10299 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, 10300 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, 10301 #endif /* IXGBE_FCOE */ 10302 .ndo_set_features = ixgbe_set_features, 10303 .ndo_fix_features = ixgbe_fix_features, 10304 .ndo_fdb_add = ixgbe_ndo_fdb_add, 10305 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, 10306 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 10307 .ndo_dfwd_add_station = ixgbe_fwd_add, 10308 .ndo_dfwd_del_station = ixgbe_fwd_del, 10309 .ndo_features_check = 
ixgbe_features_check, 10310 .ndo_bpf = ixgbe_xdp, 10311 .ndo_xdp_xmit = ixgbe_xdp_xmit, 10312 .ndo_xsk_wakeup = ixgbe_xsk_wakeup, 10313 }; 10314 10315 static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, 10316 struct ixgbe_ring *tx_ring) 10317 { 10318 unsigned long wait_delay, delay_interval; 10319 struct ixgbe_hw *hw = &adapter->hw; 10320 u8 reg_idx = tx_ring->reg_idx; 10321 int wait_loop; 10322 u32 txdctl; 10323 10324 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 10325 10326 /* delay mechanism from ixgbe_disable_tx */ 10327 delay_interval = ixgbe_get_completion_timeout(adapter) / 100; 10328 10329 wait_loop = IXGBE_MAX_RX_DESC_POLL; 10330 wait_delay = delay_interval; 10331 10332 while (wait_loop--) { 10333 usleep_range(wait_delay, wait_delay + 10); 10334 wait_delay += delay_interval * 2; 10335 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 10336 10337 if (!(txdctl & IXGBE_TXDCTL_ENABLE)) 10338 return; 10339 } 10340 10341 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); 10342 } 10343 10344 static void ixgbe_disable_txr(struct ixgbe_adapter *adapter, 10345 struct ixgbe_ring *tx_ring) 10346 { 10347 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state); 10348 ixgbe_disable_txr_hw(adapter, tx_ring); 10349 } 10350 10351 static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter, 10352 struct ixgbe_ring *rx_ring) 10353 { 10354 unsigned long wait_delay, delay_interval; 10355 struct ixgbe_hw *hw = &adapter->hw; 10356 u8 reg_idx = rx_ring->reg_idx; 10357 int wait_loop; 10358 u32 rxdctl; 10359 10360 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 10361 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 10362 rxdctl |= IXGBE_RXDCTL_SWFLSH; 10363 10364 /* write value back with RXDCTL.ENABLE bit cleared */ 10365 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 10366 10367 /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ 10368 if (hw->mac.type == ixgbe_mac_82598EB && 10369 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 10370 return; 10371 10372 /* delay mechanism from ixgbe_disable_rx */ 10373 delay_interval = ixgbe_get_completion_timeout(adapter) / 100; 10374 10375 wait_loop = IXGBE_MAX_RX_DESC_POLL; 10376 wait_delay = delay_interval; 10377 10378 while (wait_loop--) { 10379 usleep_range(wait_delay, wait_delay + 10); 10380 wait_delay += delay_interval * 2; 10381 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 10382 10383 if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) 10384 return; 10385 } 10386 10387 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); 10388 } 10389 10390 static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring) 10391 { 10392 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); 10393 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); 10394 } 10395 10396 static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) 10397 { 10398 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); 10399 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); 10400 } 10401 10402 /** 10403 * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings 10404 * @adapter: adapter structure 10405 * @ring: ring index 10406 * 10407 * This function disables a certain Rx/Tx/XDP Tx ring. The function 10408 * assumes that the netdev is running. 
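* The Rx ring, the Tx ring and, when present, the XDP Tx ring sharing this index are quiesced, cleaned and have their statistics cleared.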
10409 **/ 10410 void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) 10411 { 10412 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; 10413 10414 rx_ring = adapter->rx_ring[ring]; 10415 tx_ring = adapter->tx_ring[ring]; 10416 xdp_ring = adapter->xdp_ring[ring]; 10417 10418 ixgbe_disable_txr(adapter, tx_ring); 10419 if (xdp_ring) 10420 ixgbe_disable_txr(adapter, xdp_ring); 10421 ixgbe_disable_rxr_hw(adapter, rx_ring); 10422 10423 if (xdp_ring) 10424 synchronize_rcu(); 10425 10426 /* Rx/Tx/XDP Tx share the same napi context. */ 10427 napi_disable(&rx_ring->q_vector->napi); 10428 10429 ixgbe_clean_tx_ring(tx_ring); 10430 if (xdp_ring) 10431 ixgbe_clean_tx_ring(xdp_ring); 10432 ixgbe_clean_rx_ring(rx_ring); 10433 10434 ixgbe_reset_txr_stats(tx_ring); 10435 if (xdp_ring) 10436 ixgbe_reset_txr_stats(xdp_ring); 10437 ixgbe_reset_rxr_stats(rx_ring); 10438 } 10439 10440 /** 10441 * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings 10442 * @adapter: adapter structure 10443 * @ring: ring index 10444 * 10445 * This function enables a certain Rx/Tx/XDP Tx ring. The function 10446 * assumes that the netdev is running. 10447 **/ 10448 void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) 10449 { 10450 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; 10451 10452 rx_ring = adapter->rx_ring[ring]; 10453 tx_ring = adapter->tx_ring[ring]; 10454 xdp_ring = adapter->xdp_ring[ring]; 10455 10456 /* Rx/Tx/XDP Tx share the same napi context. */ 10457 napi_enable(&rx_ring->q_vector->napi); 10458 10459 ixgbe_configure_tx_ring(adapter, tx_ring); 10460 if (xdp_ring) 10461 ixgbe_configure_tx_ring(adapter, xdp_ring); 10462 ixgbe_configure_rx_ring(adapter, rx_ring); 10463 10464 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); 10465 if (xdp_ring) 10466 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); 10467 } 10468 10469 /** 10470 * ixgbe_enumerate_functions - Get the number of ports this device has 10471 * @adapter: adapter structure 10472 * 10473 * This function enumerates the physical functions co-located on a single slot, 10474 * in order to determine how many ports a device has. This is most useful in 10475 * determining the required GT/s of PCIe bandwidth necessary for optimal 10476 * performance. 10477 **/ 10478 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) 10479 { 10480 struct pci_dev *entry, *pdev = adapter->pdev; 10481 int physfns = 0; 10482 10483 /* Some cards cannot use the generic count PCIe functions method, 10484 * because they are behind a parent switch, so we hardcode these with 10485 * the correct number of functions. 10486 */ 10487 if (ixgbe_pcie_from_parent(&adapter->hw)) 10488 physfns = 4; 10489 10490 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { 10491 /* don't count virtual functions */ 10492 if (entry->is_virtfn) 10493 continue; 10494 10495 /* When the devices on the bus don't all match our device ID, 10496 * we can't reliably determine the correct number of 10497 * functions. This can occur if a function has been direct 10498 * attached to a virtual machine using VT-d, for example. In 10499 * this case, simply return -1 to indicate this.
10500 */ 10501 if ((entry->vendor != pdev->vendor) || 10502 (entry->device != pdev->device)) 10503 return -1; 10504 10505 physfns++; 10506 } 10507 10508 return physfns; 10509 } 10510 10511 /** 10512 * ixgbe_wol_supported - Check whether device supports WoL 10513 * @adapter: the adapter private structure 10514 * @device_id: the device ID 10515 * @subdevice_id: the subsystem device ID 10516 * 10517 * This function is used by probe and ethtool to determine 10518 * which devices have WoL support 10519 * 10520 **/ 10521 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, 10522 u16 subdevice_id) 10523 { 10524 struct ixgbe_hw *hw = &adapter->hw; 10525 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; 10526 10527 /* WOL not supported on 82598 */ 10528 if (hw->mac.type == ixgbe_mac_82598EB) 10529 return false; 10530 10531 /* check eeprom to see if WOL is enabled for X540 and newer */ 10532 if (hw->mac.type >= ixgbe_mac_X540) { 10533 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 10534 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && 10535 (hw->bus.func == 0))) 10536 return true; 10537 } 10538 10539 /* WOL is determined based on device IDs for 82599 MACs */ 10540 switch (device_id) { 10541 case IXGBE_DEV_ID_82599_SFP: 10542 /* Only these subdevices support WOL */ 10543 switch (subdevice_id) { 10544 case IXGBE_SUBDEV_ID_82599_560FLR: 10545 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: 10546 case IXGBE_SUBDEV_ID_82599_SFP_WOL0: 10547 case IXGBE_SUBDEV_ID_82599_SFP_2OCP: 10548 /* only support first port */ 10549 if (hw->bus.func != 0) 10550 break; 10551 fallthrough; 10552 case IXGBE_SUBDEV_ID_82599_SP_560FLR: 10553 case IXGBE_SUBDEV_ID_82599_SFP: 10554 case IXGBE_SUBDEV_ID_82599_RNDC: 10555 case IXGBE_SUBDEV_ID_82599_ECNA_DP: 10556 case IXGBE_SUBDEV_ID_82599_SFP_1OCP: 10557 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: 10558 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: 10559 return true; 10560 } 10561 break; 10562 case IXGBE_DEV_ID_82599EN_SFP: 10563 /* Only these subdevices support WOL */ 10564 switch (subdevice_id) { 10565 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: 10566 return true; 10567 } 10568 break; 10569 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 10570 /* All except this subdevice support WOL */ 10571 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) 10572 return true; 10573 break; 10574 case IXGBE_DEV_ID_82599_KX4: 10575 return true; 10576 default: 10577 break; 10578 } 10579 10580 return false; 10581 } 10582 10583 /** 10584 * ixgbe_set_fw_version - Set FW version 10585 * @adapter: the adapter private structure 10586 * 10587 * This function is used by probe and ethtool to determine the FW version to 10588 * format for display. The FW version is taken from the EEPROM/NVM.
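* The resulting eeprom_id string uses one of the formats seen below: "%x.%x.%x" for an OEM product version, "0x%08x, %d.%d.%d" when an option ROM version is present, or the bare ETrack ID as "0x%08x".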
10589 */ 10590 static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) 10591 { 10592 struct ixgbe_hw *hw = &adapter->hw; 10593 struct ixgbe_nvm_version nvm_ver; 10594 10595 ixgbe_get_oem_prod_version(hw, &nvm_ver); 10596 if (nvm_ver.oem_valid) { 10597 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), 10598 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor, 10599 nvm_ver.oem_release); 10600 return; 10601 } 10602 10603 ixgbe_get_etk_id(hw, &nvm_ver); 10604 ixgbe_get_orom_version(hw, &nvm_ver); 10605 10606 if (nvm_ver.or_valid) { 10607 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), 10608 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major, 10609 nvm_ver.or_build, nvm_ver.or_patch); 10610 return; 10611 } 10612 10613 /* Set ETrack ID format */ 10614 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), 10615 "0x%08x", nvm_ver.etk_id); 10616 } 10617 10618 /** 10619 * ixgbe_probe - Device Initialization Routine 10620 * @pdev: PCI device information struct 10621 * @ent: entry in ixgbe_pci_tbl 10622 * 10623 * Returns 0 on success, negative on failure 10624 * 10625 * ixgbe_probe initializes an adapter identified by a pci_dev structure. 10626 * The OS initialization, configuring of the adapter private structure, 10627 * and a hardware reset occur. 10628 **/ 10629 static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 10630 { 10631 struct net_device *netdev; 10632 struct ixgbe_adapter *adapter = NULL; 10633 struct ixgbe_hw *hw; 10634 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 10635 int i, err, pci_using_dac, expected_gts; 10636 unsigned int indices = MAX_TX_QUEUES; 10637 u8 part_str[IXGBE_PBANUM_LENGTH]; 10638 bool disable_dev = false; 10639 #ifdef IXGBE_FCOE 10640 u16 device_caps; 10641 #endif 10642 u32 eec; 10643 10644 /* Catch broken hardware that put the wrong VF device ID in 10645 * the PCIe SR-IOV capability. 
10646 */ 10647 if (pdev->is_virtfn) { 10648 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", 10649 pci_name(pdev), pdev->vendor, pdev->device); 10650 return -EINVAL; 10651 } 10652 10653 err = pci_enable_device_mem(pdev); 10654 if (err) 10655 return err; 10656 10657 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 10658 pci_using_dac = 1; 10659 } else { 10660 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10661 if (err) { 10662 dev_err(&pdev->dev, 10663 "No usable DMA configuration, aborting\n"); 10664 goto err_dma; 10665 } 10666 pci_using_dac = 0; 10667 } 10668 10669 err = pci_request_mem_regions(pdev, ixgbe_driver_name); 10670 if (err) { 10671 dev_err(&pdev->dev, 10672 "pci_request_selected_regions failed 0x%x\n", err); 10673 goto err_pci_reg; 10674 } 10675 10676 pci_enable_pcie_error_reporting(pdev); 10677 10678 pci_set_master(pdev); 10679 pci_save_state(pdev); 10680 10681 if (ii->mac == ixgbe_mac_82598EB) { 10682 #ifdef CONFIG_IXGBE_DCB 10683 /* 8 TC w/ 4 queues per TC */ 10684 indices = 4 * MAX_TRAFFIC_CLASS; 10685 #else 10686 indices = IXGBE_MAX_RSS_INDICES; 10687 #endif 10688 } 10689 10690 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); 10691 if (!netdev) { 10692 err = -ENOMEM; 10693 goto err_alloc_etherdev; 10694 } 10695 10696 SET_NETDEV_DEV(netdev, &pdev->dev); 10697 10698 adapter = netdev_priv(netdev); 10699 10700 adapter->netdev = netdev; 10701 adapter->pdev = pdev; 10702 hw = &adapter->hw; 10703 hw->back = adapter; 10704 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 10705 10706 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 10707 pci_resource_len(pdev, 0)); 10708 adapter->io_addr = hw->hw_addr; 10709 if (!hw->hw_addr) { 10710 err = -EIO; 10711 goto err_ioremap; 10712 } 10713 10714 netdev->netdev_ops = &ixgbe_netdev_ops; 10715 ixgbe_set_ethtool_ops(netdev); 10716 netdev->watchdog_timeo = 5 * HZ; 10717 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 10718 10719 /* Setup hw api */ 10720 hw->mac.ops = *ii->mac_ops; 10721 hw->mac.type = ii->mac; 10722 hw->mvals = ii->mvals; 10723 if (ii->link_ops) 10724 hw->link.ops = *ii->link_ops; 10725 10726 /* EEPROM */ 10727 hw->eeprom.ops = *ii->eeprom_ops; 10728 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 10729 if (ixgbe_removed(hw->hw_addr)) { 10730 err = -EIO; 10731 goto err_ioremap; 10732 } 10733 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 10734 if (!(eec & BIT(8))) 10735 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; 10736 10737 /* PHY */ 10738 hw->phy.ops = *ii->phy_ops; 10739 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 10740 /* ixgbe_identify_phy_generic will set prtad and mmds properly */ 10741 hw->phy.mdio.prtad = MDIO_PRTAD_NONE; 10742 hw->phy.mdio.mmds = 0; 10743 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 10744 hw->phy.mdio.dev = netdev; 10745 hw->phy.mdio.mdio_read = ixgbe_mdio_read; 10746 hw->phy.mdio.mdio_write = ixgbe_mdio_write; 10747 10748 /* setup the private structure */ 10749 err = ixgbe_sw_init(adapter, ii); 10750 if (err) 10751 goto err_sw_init; 10752 10753 switch (adapter->hw.mac.type) { 10754 case ixgbe_mac_X550: 10755 case ixgbe_mac_X550EM_x: 10756 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; 10757 break; 10758 case ixgbe_mac_x550em_a: 10759 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a; 10760 break; 10761 default: 10762 break; 10763 } 10764 10765 /* Make sure the SWFW semaphore is in a valid state */ 10766 if (hw->mac.ops.init_swfw_sync) 10767 
hw->mac.ops.init_swfw_sync(hw); 10768 10769 /* Make it possible the adapter to be woken up via WOL */ 10770 switch (adapter->hw.mac.type) { 10771 case ixgbe_mac_82599EB: 10772 case ixgbe_mac_X540: 10773 case ixgbe_mac_X550: 10774 case ixgbe_mac_X550EM_x: 10775 case ixgbe_mac_x550em_a: 10776 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 10777 break; 10778 default: 10779 break; 10780 } 10781 10782 /* 10783 * If there is a fan on this device and it has failed log the 10784 * failure. 10785 */ 10786 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 10787 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 10788 if (esdp & IXGBE_ESDP_SDP1) 10789 e_crit(probe, "Fan has stopped, replace the adapter\n"); 10790 } 10791 10792 if (allow_unsupported_sfp) 10793 hw->allow_unsupported_sfp = allow_unsupported_sfp; 10794 10795 /* reset_hw fills in the perm_addr as well */ 10796 hw->phy.reset_if_overtemp = true; 10797 err = hw->mac.ops.reset_hw(hw); 10798 hw->phy.reset_if_overtemp = false; 10799 ixgbe_set_eee_capable(adapter); 10800 if (err == IXGBE_ERR_SFP_NOT_PRESENT) { 10801 err = 0; 10802 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 10803 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); 10804 e_dev_err("Reload the driver after installing a supported module.\n"); 10805 goto err_sw_init; 10806 } else if (err) { 10807 e_dev_err("HW Init failed: %d\n", err); 10808 goto err_sw_init; 10809 } 10810 10811 #ifdef CONFIG_PCI_IOV 10812 /* SR-IOV not supported on the 82598 */ 10813 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 10814 goto skip_sriov; 10815 /* Mailbox */ 10816 ixgbe_init_mbx_params_pf(hw); 10817 hw->mbx.ops = ii->mbx_ops; 10818 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); 10819 ixgbe_enable_sriov(adapter, max_vfs); 10820 skip_sriov: 10821 10822 #endif 10823 netdev->features = NETIF_F_SG | 10824 NETIF_F_TSO | 10825 NETIF_F_TSO6 | 10826 NETIF_F_RXHASH | 10827 NETIF_F_RXCSUM | 10828 NETIF_F_HW_CSUM; 10829 10830 #define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 10831 NETIF_F_GSO_GRE_CSUM | \ 10832 NETIF_F_GSO_IPXIP4 | \ 10833 NETIF_F_GSO_IPXIP6 | \ 10834 NETIF_F_GSO_UDP_TUNNEL | \ 10835 NETIF_F_GSO_UDP_TUNNEL_CSUM) 10836 10837 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; 10838 netdev->features |= NETIF_F_GSO_PARTIAL | 10839 IXGBE_GSO_PARTIAL_FEATURES; 10840 10841 if (hw->mac.type >= ixgbe_mac_82599EB) 10842 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; 10843 10844 #ifdef CONFIG_IXGBE_IPSEC 10845 #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ 10846 NETIF_F_HW_ESP_TX_CSUM | \ 10847 NETIF_F_GSO_ESP) 10848 10849 if (adapter->ipsec) 10850 netdev->features |= IXGBE_ESP_FEATURES; 10851 #endif 10852 /* copy netdev features into list of user selectable features */ 10853 netdev->hw_features |= netdev->features | 10854 NETIF_F_HW_VLAN_CTAG_FILTER | 10855 NETIF_F_HW_VLAN_CTAG_RX | 10856 NETIF_F_HW_VLAN_CTAG_TX | 10857 NETIF_F_RXALL | 10858 NETIF_F_HW_L2FW_DOFFLOAD; 10859 10860 if (hw->mac.type >= ixgbe_mac_82599EB) 10861 netdev->hw_features |= NETIF_F_NTUPLE | 10862 NETIF_F_HW_TC; 10863 10864 if (pci_using_dac) 10865 netdev->features |= NETIF_F_HIGHDMA; 10866 10867 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 10868 netdev->hw_enc_features |= netdev->vlan_features; 10869 netdev->mpls_features |= NETIF_F_SG | 10870 NETIF_F_TSO | 10871 NETIF_F_TSO6 | 10872 NETIF_F_HW_CSUM; 10873 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES; 10874 10875 /* set this bit last since it cannot be part of vlan_features */ 10876 
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 10877 NETIF_F_HW_VLAN_CTAG_RX | 10878 NETIF_F_HW_VLAN_CTAG_TX; 10879 10880 netdev->priv_flags |= IFF_UNICAST_FLT; 10881 netdev->priv_flags |= IFF_SUPP_NOFCS; 10882 10883 /* MTU range: 68 - 9710 */ 10884 netdev->min_mtu = ETH_MIN_MTU; 10885 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); 10886 10887 #ifdef CONFIG_IXGBE_DCB 10888 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) 10889 netdev->dcbnl_ops = &ixgbe_dcbnl_ops; 10890 #endif 10891 10892 #ifdef IXGBE_FCOE 10893 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { 10894 unsigned int fcoe_l; 10895 10896 if (hw->mac.ops.get_device_caps) { 10897 hw->mac.ops.get_device_caps(hw, &device_caps); 10898 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) 10899 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 10900 } 10901 10902 10903 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); 10904 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; 10905 10906 netdev->features |= NETIF_F_FSO | 10907 NETIF_F_FCOE_CRC; 10908 10909 netdev->vlan_features |= NETIF_F_FSO | 10910 NETIF_F_FCOE_CRC | 10911 NETIF_F_FCOE_MTU; 10912 } 10913 #endif /* IXGBE_FCOE */ 10914 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 10915 netdev->hw_features |= NETIF_F_LRO; 10916 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 10917 netdev->features |= NETIF_F_LRO; 10918 10919 if (ixgbe_check_fw_error(adapter)) { 10920 err = -EIO; 10921 goto err_sw_init; 10922 } 10923 10924 /* make sure the EEPROM is good */ 10925 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { 10926 e_dev_err("The EEPROM Checksum Is Not Valid\n"); 10927 err = -EIO; 10928 goto err_sw_init; 10929 } 10930 10931 eth_platform_get_mac_address(&adapter->pdev->dev, 10932 adapter->hw.mac.perm_addr); 10933 10934 eth_hw_addr_set(netdev, hw->mac.perm_addr); 10935 10936 if (!is_valid_ether_addr(netdev->dev_addr)) { 10937 e_dev_err("invalid MAC address\n"); 10938 err = -EIO; 10939 goto err_sw_init; 10940 } 10941 10942 /* Set hw->mac.addr to permanent MAC address */ 10943 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); 10944 ixgbe_mac_set_default_filter(adapter); 10945 10946 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); 10947 10948 if (ixgbe_removed(hw->hw_addr)) { 10949 err = -EIO; 10950 goto err_sw_init; 10951 } 10952 INIT_WORK(&adapter->service_task, ixgbe_service_task); 10953 set_bit(__IXGBE_SERVICE_INITED, &adapter->state); 10954 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 10955 10956 err = ixgbe_init_interrupt_scheme(adapter); 10957 if (err) 10958 goto err_sw_init; 10959 10960 for (i = 0; i < adapter->num_rx_queues; i++) 10961 u64_stats_init(&adapter->rx_ring[i]->syncp); 10962 for (i = 0; i < adapter->num_tx_queues; i++) 10963 u64_stats_init(&adapter->tx_ring[i]->syncp); 10964 for (i = 0; i < adapter->num_xdp_queues; i++) 10965 u64_stats_init(&adapter->xdp_ring[i]->syncp); 10966 10967 /* WOL not supported for all devices */ 10968 adapter->wol = 0; 10969 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); 10970 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, 10971 pdev->subsystem_device); 10972 if (hw->wol_enabled) 10973 adapter->wol = IXGBE_WUFC_MAG; 10974 10975 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 10976 10977 /* save off EEPROM version number */ 10978 ixgbe_set_fw_version(adapter); 10979 10980 /* pick up the PCI bus settings for reporting later */ 10981 if (ixgbe_pcie_from_parent(hw)) 10982 ixgbe_get_parent_bus_info(adapter); 10983 else 10984 hw->mac.ops.get_bus_info(hw); 10985 10986 /* 
calculate the expected PCIe bandwidth required for optimal 10987 * performance. Note that some older parts will never have enough 10988 * bandwidth due to being older generation PCIe parts. We clamp these 10989 * parts to ensure no warning is displayed if it can't be fixed. 10990 */ 10991 switch (hw->mac.type) { 10992 case ixgbe_mac_82598EB: 10993 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); 10994 break; 10995 default: 10996 expected_gts = ixgbe_enumerate_functions(adapter) * 10; 10997 break; 10998 } 10999 11000 /* don't check link if we failed to enumerate functions */ 11001 if (expected_gts > 0) 11002 ixgbe_check_minimum_link(adapter, expected_gts); 11003 11004 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str)); 11005 if (err) 11006 strlcpy(part_str, "Unknown", sizeof(part_str)); 11007 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 11008 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", 11009 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 11010 part_str); 11011 else 11012 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", 11013 hw->mac.type, hw->phy.type, part_str); 11014 11015 e_dev_info("%pM\n", netdev->dev_addr); 11016 11017 /* reset the hardware with the new settings */ 11018 err = hw->mac.ops.start_hw(hw); 11019 if (err == IXGBE_ERR_EEPROM_VERSION) { 11020 /* We are running on a pre-production device, log a warning */ 11021 e_dev_warn("This device is a pre-production adapter/LOM. " 11022 "Please be aware there may be issues associated " 11023 "with your hardware. If you are experiencing " 11024 "problems please contact your Intel or hardware " 11025 "representative who provided you with this " 11026 "hardware.\n"); 11027 } 11028 strcpy(netdev->name, "eth%d"); 11029 pci_set_drvdata(pdev, adapter); 11030 err = register_netdev(netdev); 11031 if (err) 11032 goto err_register; 11033 11034 11035 /* power down the optics for 82599 SFP+ fiber */ 11036 if (hw->mac.ops.disable_tx_laser) 11037 hw->mac.ops.disable_tx_laser(hw); 11038 11039 /* carrier off reporting is important to ethtool even BEFORE open */ 11040 netif_carrier_off(netdev); 11041 11042 #ifdef CONFIG_IXGBE_DCA 11043 if (dca_add_requester(&pdev->dev) == 0) { 11044 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 11045 ixgbe_setup_dca(adapter); 11046 } 11047 #endif 11048 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 11049 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); 11050 for (i = 0; i < adapter->num_vfs; i++) 11051 ixgbe_vf_configuration(pdev, (i | 0x10000000)); 11052 } 11053 11054 /* firmware requires driver version to be 0xFFFFFFFF 11055 * since os does not support feature 11056 */ 11057 if (hw->mac.ops.set_fw_drv_ver) 11058 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, 11059 sizeof(UTS_RELEASE) - 1, 11060 UTS_RELEASE); 11061 11062 /* add san mac addr to netdev */ 11063 ixgbe_add_sanmac_netdev(netdev); 11064 11065 e_dev_info("%s\n", ixgbe_default_device_descr); 11066 11067 #ifdef CONFIG_IXGBE_HWMON 11068 if (ixgbe_sysfs_init(adapter)) 11069 e_err(probe, "failed to allocate sysfs resources\n"); 11070 #endif /* CONFIG_IXGBE_HWMON */ 11071 11072 ixgbe_dbg_adapter_init(adapter); 11073 11074 /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ 11075 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) 11076 hw->mac.ops.setup_link(hw, 11077 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, 11078 true); 11079 11080 err = ixgbe_mii_bus_init(hw); 11081 if (err) 11082 goto err_netdev; 11083 11084 return 0; 11085 
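/* Error unwind for ixgbe_probe: each label below releases, in reverse order, the resources acquired before the corresponding failure point. */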

err_netdev:
	unregister_netdev(netdev);
err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(adapter->io_addr);
	kfree(adapter->jump_tables[0]);
	kfree(adapter->mac_table);
	kfree(adapter->rss_key);
	bitmap_free(adapter->af_xdp_zc_qps);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev;
	bool disable_dev;
	int i;

	/* if !adapter then we already cleaned up in probe */
	if (!adapter)
		return;

	netdev = adapter->netdev;
	ixgbe_dbg_adapter_exit(adapter);

	set_bit(__IXGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (adapter->mii_bus)
		mdiobus_unregister(adapter->mii_bus);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	ixgbe_disable_sriov(adapter);
#endif
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_stop_ipsec_offload(adapter);
	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->io_addr);
	pci_release_mem_regions(pdev);

	e_dev_info("complete\n");

	for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
		if (adapter->jump_tables[i]) {
			kfree(adapter->jump_tables[i]->input);
			kfree(adapter->jump_tables[i]->mask);
		}
		kfree(adapter->jump_tables[i]);
	}

	kfree(adapter->mac_table);
	kfree(adapter->rss_key);
	bitmap_free(adapter->af_xdp_zc_qps);
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
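
/* The three callbacks below implement the usual PCI AER recovery sequence
 * for this driver: the core reports the fault through
 * ixgbe_io_error_detected(), may reset the slot and call
 * ixgbe_io_slot_reset(), and finally allows traffic again via
 * ixgbe_io_resume().  They are wired up in ixgbe_err_handler further down.
 */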

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
	if (ixgbe_removed(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X550:
			device_id = IXGBE_DEV_ID_X550_VF;
			break;
		case ixgbe_mac_X550EM_x:
			device_id = IXGBE_DEV_ID_X550EM_X_VF;
			break;
		case ixgbe_mac_x550em_a:
			device_id = IXGBE_DEV_ID_X550EM_A_VF;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR. Just clean up the AER in that case.
		 */
		if (vfdev) {
			pcie_flr(vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}
	}
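
	/* Requester-ID decode above, as a worked example: req_id 0x0085 has
	 * bit 7 set, so the TLP came from a VF; pf_func = 0x0085 & 0x1 = 1,
	 * vf = (0x0085 & 0x7F) >> 1 = 2, and the low byte (0x85) is what gets
	 * matched against vfdev->devfn to locate the offending VF.
	 */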

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	if (!netif_device_present(netdev))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_close_suspend(adapter);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		smp_mb__before_atomic();
		clear_bit(__IXGBE_DISABLED, &adapter->state);
		adapter->hw.hw_addr = adapter->io_addr;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	rtnl_lock();
	if (netif_running(netdev))
		ixgbe_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();
}

static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);

static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = ixgbe_remove,
	.driver.pm = &ixgbe_pm_ops,
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};
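
/* A note on ordering in ixgbe_init_module() below: the workqueue is created
 * before pci_register_driver(), presumably so the service task can be queued
 * as soon as a device is probed, and a registration failure tears down both
 * the workqueue and the debugfs root set up by ixgbe_dbg_init().
 */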

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	pr_info("%s\n", ixgbe_driver_string);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
	if (!ixgbe_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
		return -ENOMEM;
	}

	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();
	if (ixgbe_wq) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_wq = NULL;
	}
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */