1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3 
4 #include <linux/types.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/netdevice.h>
8 #include <linux/vmalloc.h>
9 #include <linux/string.h>
10 #include <linux/in.h>
11 #include <linux/interrupt.h>
12 #include <linux/ip.h>
13 #include <linux/tcp.h>
14 #include <linux/sctp.h>
15 #include <linux/pkt_sched.h>
16 #include <linux/ipv6.h>
17 #include <linux/slab.h>
18 #include <net/checksum.h>
19 #include <net/ip6_checksum.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/if.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_macvlan.h>
25 #include <linux/if_bridge.h>
26 #include <linux/prefetch.h>
27 #include <linux/bpf.h>
28 #include <linux/bpf_trace.h>
29 #include <linux/atomic.h>
30 #include <linux/numa.h>
31 #include <generated/utsrelease.h>
32 #include <scsi/fc/fc_fcoe.h>
33 #include <net/udp_tunnel.h>
34 #include <net/pkt_cls.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_mirred.h>
37 #include <net/vxlan.h>
38 #include <net/mpls.h>
39 #include <net/xdp_sock_drv.h>
40 #include <net/xfrm.h>
41 
42 #include "ixgbe.h"
43 #include "ixgbe_common.h"
44 #include "ixgbe_dcb_82599.h"
45 #include "ixgbe_phy.h"
46 #include "ixgbe_sriov.h"
47 #include "ixgbe_model.h"
48 #include "ixgbe_txrx_common.h"
49 
50 char ixgbe_driver_name[] = "ixgbe";
51 static const char ixgbe_driver_string[] =
52 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
53 #ifdef IXGBE_FCOE
54 char ixgbe_default_device_descr[] =
55 			      "Intel(R) 10 Gigabit Network Connection";
56 #else
57 static char ixgbe_default_device_descr[] =
58 			      "Intel(R) 10 Gigabit Network Connection";
59 #endif
60 static const char ixgbe_copyright[] =
61 				"Copyright (c) 1999-2016 Intel Corporation.";
62 
63 static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
64 
65 static const struct ixgbe_info *ixgbe_info_tbl[] = {
66 	[board_82598]		= &ixgbe_82598_info,
67 	[board_82599]		= &ixgbe_82599_info,
68 	[board_X540]		= &ixgbe_X540_info,
69 	[board_X550]		= &ixgbe_X550_info,
70 	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
71 	[board_x550em_x_fw]	= &ixgbe_x550em_x_fw_info,
72 	[board_x550em_a]	= &ixgbe_x550em_a_info,
73 	[board_x550em_a_fw]	= &ixgbe_x550em_a_fw_info,
74 };
75 
76 /* ixgbe_pci_tbl - PCI Device ID Table
77  *
78  * Wildcard entries (PCI_ANY_ID) should come last
79  * Last entry must be all 0s
80  *
81  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
82  *   Class, Class Mask, private data (not used) }
83  */
84 static const struct pci_device_id ixgbe_pci_tbl[] = {
85 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
86 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
87 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
88 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
89 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
90 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
91 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
92 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
93 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
94 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
95 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
96 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
97 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
98 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
99 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
100 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
101 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
102 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
103 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
104 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
105 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
106 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
107 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
108 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
109 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
110 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
111 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
112 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
113 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
114 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
115 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
116 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
117 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
118 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
119 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
120 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
121 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
122 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
123 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
124 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
125 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
126 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
127 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
128 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
129 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
130 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
131 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
132 	/* required last entry */
133 	{0, }
134 };
135 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
136 
137 #ifdef CONFIG_IXGBE_DCA
138 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
139 			    void *p);
140 static struct notifier_block dca_notifier = {
141 	.notifier_call = ixgbe_notify_dca,
142 	.next          = NULL,
143 	.priority      = 0
144 };
145 #endif
146 
147 #ifdef CONFIG_PCI_IOV
148 static unsigned int max_vfs;
149 module_param(max_vfs, uint, 0);
150 MODULE_PARM_DESC(max_vfs,
151 		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
152 #endif /* CONFIG_PCI_IOV */
153 
154 static unsigned int allow_unsupported_sfp;
155 module_param(allow_unsupported_sfp, uint, 0);
156 MODULE_PARM_DESC(allow_unsupported_sfp,
157 		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
158 
159 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
160 static int debug = -1;
161 module_param(debug, int, 0);
162 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
163 
164 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
165 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
166 MODULE_LICENSE("GPL v2");
167 
168 static struct workqueue_struct *ixgbe_wq;
169 
170 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
171 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
172 
173 static const struct net_device_ops ixgbe_netdev_ops;
174 
175 static bool netif_is_ixgbe(struct net_device *dev)
176 {
177 	return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
178 }
179 
180 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
181 					  u32 reg, u16 *value)
182 {
183 	struct pci_dev *parent_dev;
184 	struct pci_bus *parent_bus;
185 
186 	parent_bus = adapter->pdev->bus->parent;
187 	if (!parent_bus)
188 		return -1;
189 
190 	parent_dev = parent_bus->self;
191 	if (!parent_dev)
192 		return -1;
193 
194 	if (!pci_is_pcie(parent_dev))
195 		return -1;
196 
197 	pcie_capability_read_word(parent_dev, reg, value);
198 	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
199 	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
200 		return -1;
201 	return 0;
202 }
203 
204 static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
205 {
206 	struct ixgbe_hw *hw = &adapter->hw;
207 	u16 link_status = 0;
208 	int err;
209 
210 	hw->bus.type = ixgbe_bus_type_pci_express;
211 
212 	/* Get the negotiated link width and speed from PCI config space of the
213 	 * parent, as this device is behind a switch
214 	 */
215 	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
216 
217 	/* assume caller will handle error case */
218 	if (err)
219 		return err;
220 
221 	hw->bus.width = ixgbe_convert_bus_width(link_status);
222 	hw->bus.speed = ixgbe_convert_bus_speed(link_status);
223 
224 	return 0;
225 }
226 
227 /**
228  * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
229  * @hw: hw specific details
230  *
231  * This function is used by probe to determine whether a device's PCI-Express
232  * bandwidth details should be gathered from the parent bus instead of from the
233  * device. Used to ensure that various locations all have the correct device ID
234  * checks.
235  */
236 static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
237 {
238 	switch (hw->device_id) {
239 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
240 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
241 		return true;
242 	default:
243 		return false;
244 	}
245 }
246 
247 static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
248 				     int expected_gts)
249 {
250 	struct ixgbe_hw *hw = &adapter->hw;
251 	struct pci_dev *pdev;
252 
253 	/* Some devices are not connected over PCIe and thus do not negotiate
254 	 * speed. These devices do not have valid bus info, and thus any report
255 	 * we generate may not be correct.
256 	 */
257 	if (hw->bus.type == ixgbe_bus_type_internal)
258 		return;
259 
260 	/* determine whether to use the parent device */
261 	if (ixgbe_pcie_from_parent(&adapter->hw))
262 		pdev = adapter->pdev->bus->parent->self;
263 	else
264 		pdev = adapter->pdev;
265 
266 	pcie_print_link_status(pdev);
267 }
268 
269 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
270 {
271 	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
272 	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
273 	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
274 		queue_work(ixgbe_wq, &adapter->service_task);
275 }
276 
277 static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
278 {
279 	struct ixgbe_adapter *adapter = hw->back;
280 
281 	if (!hw->hw_addr)
282 		return;
283 	hw->hw_addr = NULL;
284 	e_dev_err("Adapter removed\n");
285 	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
286 		ixgbe_service_event_schedule(adapter);
287 }
288 
289 static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
290 {
291 	u8 __iomem *reg_addr;
292 	u32 value;
293 	int i;
294 
295 	reg_addr = READ_ONCE(hw->hw_addr);
296 	if (ixgbe_removed(reg_addr))
297 		return IXGBE_FAILED_READ_REG;
298 
299 	/* Register read of 0xFFFFFFF can indicate the adapter has been removed,
300 	 * so perform several status register reads to determine if the adapter
301 	 * has been removed.
302 	 */
303 	for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
304 		value = readl(reg_addr + IXGBE_STATUS);
305 		if (value != IXGBE_FAILED_READ_REG)
306 			break;
307 		mdelay(3);
308 	}
309 
310 	if (value == IXGBE_FAILED_READ_REG)
311 		ixgbe_remove_adapter(hw);
312 	else
313 		value = readl(reg_addr + reg);
314 	return value;
315 }
316 
317 /**
318  * ixgbe_read_reg - Read from device register
319  * @hw: hw specific details
320  * @reg: offset of register to read
321  *
322  * Returns : value read or IXGBE_FAILED_READ_REG if removed
323  *
324  * This function is used to read device registers. It checks for device
325  * removal by confirming any read that returns all ones by checking the
326  * status register value for all ones. This function avoids reading from
327  * the hardware if a removal was previously detected in which case it
328  * returns IXGBE_FAILED_READ_REG (all ones).
329  */
330 u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
331 {
332 	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
333 	u32 value;
334 
335 	if (ixgbe_removed(reg_addr))
336 		return IXGBE_FAILED_READ_REG;
337 	if (unlikely(hw->phy.nw_mng_if_sel &
338 		     IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
339 		struct ixgbe_adapter *adapter;
340 		int i;
341 
342 		for (i = 0; i < 200; ++i) {
343 			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
344 			if (likely(!value))
345 				goto writes_completed;
346 			if (value == IXGBE_FAILED_READ_REG) {
347 				ixgbe_remove_adapter(hw);
348 				return IXGBE_FAILED_READ_REG;
349 			}
350 			udelay(5);
351 		}
352 
353 		adapter = hw->back;
354 		e_warn(hw, "register writes incomplete %08x\n", value);
355 	}
356 
357 writes_completed:
358 	value = readl(reg_addr + reg);
359 	if (unlikely(value == IXGBE_FAILED_READ_REG))
360 		value = ixgbe_check_remove(hw, reg);
361 	return value;
362 }
363 
364 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
365 {
366 	u16 value;
367 
368 	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
369 	if (value == IXGBE_FAILED_READ_CFG_WORD) {
370 		ixgbe_remove_adapter(hw);
371 		return true;
372 	}
373 	return false;
374 }
375 
376 u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
377 {
378 	struct ixgbe_adapter *adapter = hw->back;
379 	u16 value;
380 
381 	if (ixgbe_removed(hw->hw_addr))
382 		return IXGBE_FAILED_READ_CFG_WORD;
383 	pci_read_config_word(adapter->pdev, reg, &value);
384 	if (value == IXGBE_FAILED_READ_CFG_WORD &&
385 	    ixgbe_check_cfg_remove(hw, adapter->pdev))
386 		return IXGBE_FAILED_READ_CFG_WORD;
387 	return value;
388 }
389 
390 #ifdef CONFIG_PCI_IOV
391 static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
392 {
393 	struct ixgbe_adapter *adapter = hw->back;
394 	u32 value;
395 
396 	if (ixgbe_removed(hw->hw_addr))
397 		return IXGBE_FAILED_READ_CFG_DWORD;
398 	pci_read_config_dword(adapter->pdev, reg, &value);
399 	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
400 	    ixgbe_check_cfg_remove(hw, adapter->pdev))
401 		return IXGBE_FAILED_READ_CFG_DWORD;
402 	return value;
403 }
404 #endif /* CONFIG_PCI_IOV */
405 
406 void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
407 {
408 	struct ixgbe_adapter *adapter = hw->back;
409 
410 	if (ixgbe_removed(hw->hw_addr))
411 		return;
412 	pci_write_config_word(adapter->pdev, reg, value);
413 }
414 
415 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
416 {
417 	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
418 
419 	/* flush memory to make sure state is correct before next watchdog */
420 	smp_mb__before_atomic();
421 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
422 }
423 
424 struct ixgbe_reg_info {
425 	u32 ofs;
426 	char *name;
427 };
428 
429 static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
430 
431 	/* General Registers */
432 	{IXGBE_CTRL, "CTRL"},
433 	{IXGBE_STATUS, "STATUS"},
434 	{IXGBE_CTRL_EXT, "CTRL_EXT"},
435 
436 	/* Interrupt Registers */
437 	{IXGBE_EICR, "EICR"},
438 
439 	/* RX Registers */
440 	{IXGBE_SRRCTL(0), "SRRCTL"},
441 	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
442 	{IXGBE_RDLEN(0), "RDLEN"},
443 	{IXGBE_RDH(0), "RDH"},
444 	{IXGBE_RDT(0), "RDT"},
445 	{IXGBE_RXDCTL(0), "RXDCTL"},
446 	{IXGBE_RDBAL(0), "RDBAL"},
447 	{IXGBE_RDBAH(0), "RDBAH"},
448 
449 	/* TX Registers */
450 	{IXGBE_TDBAL(0), "TDBAL"},
451 	{IXGBE_TDBAH(0), "TDBAH"},
452 	{IXGBE_TDLEN(0), "TDLEN"},
453 	{IXGBE_TDH(0), "TDH"},
454 	{IXGBE_TDT(0), "TDT"},
455 	{IXGBE_TXDCTL(0), "TXDCTL"},
456 
457 	/* List Terminator */
458 	{ .name = NULL }
459 };
460 
461 
462 /*
463  * ixgbe_regdump - register printout routine
464  */
465 static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
466 {
467 	int i;
468 	char rname[16];
469 	u32 regs[64];
470 
471 	switch (reginfo->ofs) {
472 	case IXGBE_SRRCTL(0):
473 		for (i = 0; i < 64; i++)
474 			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
475 		break;
476 	case IXGBE_DCA_RXCTRL(0):
477 		for (i = 0; i < 64; i++)
478 			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
479 		break;
480 	case IXGBE_RDLEN(0):
481 		for (i = 0; i < 64; i++)
482 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
483 		break;
484 	case IXGBE_RDH(0):
485 		for (i = 0; i < 64; i++)
486 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
487 		break;
488 	case IXGBE_RDT(0):
489 		for (i = 0; i < 64; i++)
490 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
491 		break;
492 	case IXGBE_RXDCTL(0):
493 		for (i = 0; i < 64; i++)
494 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
495 		break;
496 	case IXGBE_RDBAL(0):
497 		for (i = 0; i < 64; i++)
498 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
499 		break;
500 	case IXGBE_RDBAH(0):
501 		for (i = 0; i < 64; i++)
502 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
503 		break;
504 	case IXGBE_TDBAL(0):
505 		for (i = 0; i < 64; i++)
506 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
507 		break;
508 	case IXGBE_TDBAH(0):
509 		for (i = 0; i < 64; i++)
510 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
511 		break;
512 	case IXGBE_TDLEN(0):
513 		for (i = 0; i < 64; i++)
514 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
515 		break;
516 	case IXGBE_TDH(0):
517 		for (i = 0; i < 64; i++)
518 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
519 		break;
520 	case IXGBE_TDT(0):
521 		for (i = 0; i < 64; i++)
522 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
523 		break;
524 	case IXGBE_TXDCTL(0):
525 		for (i = 0; i < 64; i++)
526 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
527 		break;
528 	default:
529 		pr_info("%-15s %08x\n",
530 			reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
531 		return;
532 	}
533 
534 	i = 0;
535 	while (i < 64) {
536 		int j;
537 		char buf[9 * 8 + 1];
538 		char *p = buf;
539 
540 		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
541 		for (j = 0; j < 8; j++)
542 			p += sprintf(p, " %08x", regs[i++]);
543 		pr_err("%-15s%s\n", rname, buf);
544 	}
545 
546 }
547 
548 static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
549 {
550 	struct ixgbe_tx_buffer *tx_buffer;
551 
552 	tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
553 	pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
554 		n, ring->next_to_use, ring->next_to_clean,
555 		(u64)dma_unmap_addr(tx_buffer, dma),
556 		dma_unmap_len(tx_buffer, len),
557 		tx_buffer->next_to_watch,
558 		(u64)tx_buffer->time_stamp);
559 }
560 
561 /*
562  * ixgbe_dump - Print registers, tx-rings and rx-rings
563  */
564 static void ixgbe_dump(struct ixgbe_adapter *adapter)
565 {
566 	struct net_device *netdev = adapter->netdev;
567 	struct ixgbe_hw *hw = &adapter->hw;
568 	struct ixgbe_reg_info *reginfo;
569 	int n = 0;
570 	struct ixgbe_ring *ring;
571 	struct ixgbe_tx_buffer *tx_buffer;
572 	union ixgbe_adv_tx_desc *tx_desc;
573 	struct my_u0 { u64 a; u64 b; } *u0;
574 	struct ixgbe_ring *rx_ring;
575 	union ixgbe_adv_rx_desc *rx_desc;
576 	struct ixgbe_rx_buffer *rx_buffer_info;
577 	int i = 0;
578 
579 	if (!netif_msg_hw(adapter))
580 		return;
581 
582 	/* Print netdevice Info */
583 	if (netdev) {
584 		dev_info(&adapter->pdev->dev, "Net device Info\n");
585 		pr_info("Device Name     state            "
586 			"trans_start\n");
587 		pr_info("%-15s %016lX %016lX\n",
588 			netdev->name,
589 			netdev->state,
590 			dev_trans_start(netdev));
591 	}
592 
593 	/* Print Registers */
594 	dev_info(&adapter->pdev->dev, "Register Dump\n");
595 	pr_info(" Register Name   Value\n");
596 	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
597 	     reginfo->name; reginfo++) {
598 		ixgbe_regdump(hw, reginfo);
599 	}
600 
601 	/* Print TX Ring Summary */
602 	if (!netdev || !netif_running(netdev))
603 		return;
604 
605 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
606 	pr_info(" %s     %s              %s        %s\n",
607 		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
608 		"leng", "ntw", "timestamp");
609 	for (n = 0; n < adapter->num_tx_queues; n++) {
610 		ring = adapter->tx_ring[n];
611 		ixgbe_print_buffer(ring, n);
612 	}
613 
614 	for (n = 0; n < adapter->num_xdp_queues; n++) {
615 		ring = adapter->xdp_ring[n];
616 		ixgbe_print_buffer(ring, n);
617 	}
618 
619 	/* Print TX Rings */
620 	if (!netif_msg_tx_done(adapter))
621 		goto rx_ring_summary;
622 
623 	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
624 
625 	/* Transmit Descriptor Formats
626 	 *
627 	 * 82598 Advanced Transmit Descriptor
628 	 *   +--------------------------------------------------------------+
629 	 * 0 |         Buffer Address [63:0]                                |
630 	 *   +--------------------------------------------------------------+
631 	 * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
632 	 *   +--------------------------------------------------------------+
633 	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
634 	 *
635 	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
636 	 *   +--------------------------------------------------------------+
637 	 * 0 |                          RSV [63:0]                          |
638 	 *   +--------------------------------------------------------------+
639 	 * 8 |            RSV           |  STA  |          NXTSEQ           |
640 	 *   +--------------------------------------------------------------+
641 	 *   63                       36 35   32 31                         0
642 	 *
643 	 * 82599+ Advanced Transmit Descriptor
644 	 *   +--------------------------------------------------------------+
645 	 * 0 |         Buffer Address [63:0]                                |
646 	 *   +--------------------------------------------------------------+
647 	 * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
648 	 *   +--------------------------------------------------------------+
649 	 *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
650 	 *
651 	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
652 	 *   +--------------------------------------------------------------+
653 	 * 0 |                          RSV [63:0]                          |
654 	 *   +--------------------------------------------------------------+
655 	 * 8 |            RSV           |  STA  |           RSV             |
656 	 *   +--------------------------------------------------------------+
657 	 *   63                       36 35   32 31                         0
658 	 */
659 
660 	for (n = 0; n < adapter->num_tx_queues; n++) {
661 		ring = adapter->tx_ring[n];
662 		pr_info("------------------------------------\n");
663 		pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
664 		pr_info("------------------------------------\n");
665 		pr_info("%s%s    %s              %s        %s          %s\n",
666 			"T [desc]     [address 63:0  ] ",
667 			"[PlPOIdStDDt Ln] [bi->dma       ] ",
668 			"leng", "ntw", "timestamp", "bi->skb");
669 
670 		for (i = 0; ring->desc && (i < ring->count); i++) {
671 			tx_desc = IXGBE_TX_DESC(ring, i);
672 			tx_buffer = &ring->tx_buffer_info[i];
673 			u0 = (struct my_u0 *)tx_desc;
674 			if (dma_unmap_len(tx_buffer, len) > 0) {
675 				const char *ring_desc;
676 
677 				if (i == ring->next_to_use &&
678 				    i == ring->next_to_clean)
679 					ring_desc = " NTC/U";
680 				else if (i == ring->next_to_use)
681 					ring_desc = " NTU";
682 				else if (i == ring->next_to_clean)
683 					ring_desc = " NTC";
684 				else
685 					ring_desc = "";
686 				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p%s",
687 					i,
688 					le64_to_cpu((__force __le64)u0->a),
689 					le64_to_cpu((__force __le64)u0->b),
690 					(u64)dma_unmap_addr(tx_buffer, dma),
691 					dma_unmap_len(tx_buffer, len),
692 					tx_buffer->next_to_watch,
693 					(u64)tx_buffer->time_stamp,
694 					tx_buffer->skb,
695 					ring_desc);
696 
697 				if (netif_msg_pktdata(adapter) &&
698 				    tx_buffer->skb)
699 					print_hex_dump(KERN_INFO, "",
700 						DUMP_PREFIX_ADDRESS, 16, 1,
701 						tx_buffer->skb->data,
702 						dma_unmap_len(tx_buffer, len),
703 						true);
704 			}
705 		}
706 	}
707 
708 	/* Print RX Rings Summary */
709 rx_ring_summary:
710 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
711 	pr_info("Queue [NTU] [NTC]\n");
712 	for (n = 0; n < adapter->num_rx_queues; n++) {
713 		rx_ring = adapter->rx_ring[n];
714 		pr_info("%5d %5X %5X\n",
715 			n, rx_ring->next_to_use, rx_ring->next_to_clean);
716 	}
717 
718 	/* Print RX Rings */
719 	if (!netif_msg_rx_status(adapter))
720 		return;
721 
722 	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
723 
724 	/* Receive Descriptor Formats
725 	 *
726 	 * 82598 Advanced Receive Descriptor (Read) Format
727 	 *    63                                           1        0
728 	 *    +-----------------------------------------------------+
729 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
730 	 *    +----------------------------------------------+------+
731 	 *  8 |       Header Buffer Address [63:1]           |  DD  |
732 	 *    +-----------------------------------------------------+
733 	 *
734 	 *
735 	 * 82598 Advanced Receive Descriptor (Write-Back) Format
736 	 *
737 	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
738 	 *   +------------------------------------------------------+
739 	 * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
740 	 *   | Packet   | IP     |   |          |     | Type | Type |
741 	 *   | Checksum | Ident  |   |          |     |      |      |
742 	 *   +------------------------------------------------------+
743 	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
744 	 *   +------------------------------------------------------+
745 	 *   63       48 47    32 31            20 19               0
746 	 *
747 	 * 82599+ Advanced Receive Descriptor (Read) Format
748 	 *    63                                           1        0
749 	 *    +-----------------------------------------------------+
750 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
751 	 *    +----------------------------------------------+------+
752 	 *  8 |       Header Buffer Address [63:1]           |  DD  |
753 	 *    +-----------------------------------------------------+
754 	 *
755 	 *
756 	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
757 	 *
758 	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
759 	 *   +------------------------------------------------------+
760 	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
761 	 *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
762 	 *   |/ Flow Dir Flt ID  |   |          |     |      |      |
763 	 *   +------------------------------------------------------+
764 	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
765 	 *   +------------------------------------------------------+
766 	 *   63       48 47    32 31          20 19                 0
767 	 */
768 
769 	for (n = 0; n < adapter->num_rx_queues; n++) {
770 		rx_ring = adapter->rx_ring[n];
771 		pr_info("------------------------------------\n");
772 		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
773 		pr_info("------------------------------------\n");
774 		pr_info("%s%s%s\n",
775 			"R  [desc]      [ PktBuf     A0] ",
776 			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
777 			"<-- Adv Rx Read format");
778 		pr_info("%s%s%s\n",
779 			"RWB[desc]      [PcsmIpSHl PtRs] ",
780 			"[vl er S cks ln] ---------------- [bi->skb       ] ",
781 			"<-- Adv Rx Write-Back format");
782 
783 		for (i = 0; i < rx_ring->count; i++) {
784 			const char *ring_desc;
785 
786 			if (i == rx_ring->next_to_use)
787 				ring_desc = " NTU";
788 			else if (i == rx_ring->next_to_clean)
789 				ring_desc = " NTC";
790 			else
791 				ring_desc = "";
792 
793 			rx_buffer_info = &rx_ring->rx_buffer_info[i];
794 			rx_desc = IXGBE_RX_DESC(rx_ring, i);
795 			u0 = (struct my_u0 *)rx_desc;
796 			if (rx_desc->wb.upper.length) {
797 				/* Descriptor Done */
798 				pr_info("RWB[0x%03X]     %016llX %016llX ---------------- %p%s\n",
799 					i,
800 					le64_to_cpu((__force __le64)u0->a),
801 					le64_to_cpu((__force __le64)u0->b),
802 					rx_buffer_info->skb,
803 					ring_desc);
804 			} else {
805 				pr_info("R  [0x%03X]     %016llX %016llX %016llX %p%s\n",
806 					i,
807 					le64_to_cpu((__force __le64)u0->a),
808 					le64_to_cpu((__force __le64)u0->b),
809 					(u64)rx_buffer_info->dma,
810 					rx_buffer_info->skb,
811 					ring_desc);
812 
813 				if (netif_msg_pktdata(adapter) &&
814 				    rx_buffer_info->dma) {
815 					print_hex_dump(KERN_INFO, "",
816 					   DUMP_PREFIX_ADDRESS, 16, 1,
817 					   page_address(rx_buffer_info->page) +
818 						    rx_buffer_info->page_offset,
819 					   ixgbe_rx_bufsz(rx_ring), true);
820 				}
821 			}
822 		}
823 	}
824 }
825 
826 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
827 {
828 	u32 ctrl_ext;
829 
830 	/* Let firmware take over control of h/w */
831 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
832 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
833 			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
834 }
835 
836 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
837 {
838 	u32 ctrl_ext;
839 
840 	/* Let firmware know the driver has taken over */
841 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
842 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
843 			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
844 }
845 
846 /**
847  * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
848  * @adapter: pointer to adapter struct
849  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
850  * @queue: queue to map the corresponding interrupt to
851  * @msix_vector: the vector to map to the corresponding queue
852  *
853  */
854 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
855 			   u8 queue, u8 msix_vector)
856 {
857 	u32 ivar, index;
858 	struct ixgbe_hw *hw = &adapter->hw;
859 	switch (hw->mac.type) {
860 	case ixgbe_mac_82598EB:
861 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
862 		if (direction == -1)
863 			direction = 0;
864 		index = (((direction * 64) + queue) >> 2) & 0x1F;
865 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
866 		ivar &= ~(0xFF << (8 * (queue & 0x3)));
867 		ivar |= (msix_vector << (8 * (queue & 0x3)));
868 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
869 		break;
870 	case ixgbe_mac_82599EB:
871 	case ixgbe_mac_X540:
872 	case ixgbe_mac_X550:
873 	case ixgbe_mac_X550EM_x:
874 	case ixgbe_mac_x550em_a:
875 		if (direction == -1) {
876 			/* other causes */
877 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
878 			index = ((queue & 1) * 8);
879 			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
880 			ivar &= ~(0xFF << index);
881 			ivar |= (msix_vector << index);
882 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
883 			break;
884 		} else {
885 			/* tx or rx causes */
886 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
887 			index = ((16 * (queue & 1)) + (8 * direction));
888 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
889 			ivar &= ~(0xFF << index);
890 			ivar |= (msix_vector << index);
891 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
892 			break;
893 		}
894 	default:
895 		break;
896 	}
897 }
898 
899 void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
900 			    u64 qmask)
901 {
902 	u32 mask;
903 
904 	switch (adapter->hw.mac.type) {
905 	case ixgbe_mac_82598EB:
906 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
907 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
908 		break;
909 	case ixgbe_mac_82599EB:
910 	case ixgbe_mac_X540:
911 	case ixgbe_mac_X550:
912 	case ixgbe_mac_X550EM_x:
913 	case ixgbe_mac_x550em_a:
914 		mask = (qmask & 0xFFFFFFFF);
915 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
916 		mask = (qmask >> 32);
917 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
918 		break;
919 	default:
920 		break;
921 	}
922 }
923 
924 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
925 {
926 	struct ixgbe_hw *hw = &adapter->hw;
927 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
928 	int i;
929 	u32 data;
930 
931 	if ((hw->fc.current_mode != ixgbe_fc_full) &&
932 	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
933 		return;
934 
935 	switch (hw->mac.type) {
936 	case ixgbe_mac_82598EB:
937 		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
938 		break;
939 	default:
940 		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
941 	}
942 	hwstats->lxoffrxc += data;
943 
944 	/* refill credits (no tx hang) if we received xoff */
945 	if (!data)
946 		return;
947 
948 	for (i = 0; i < adapter->num_tx_queues; i++)
949 		clear_bit(__IXGBE_HANG_CHECK_ARMED,
950 			  &adapter->tx_ring[i]->state);
951 
952 	for (i = 0; i < adapter->num_xdp_queues; i++)
953 		clear_bit(__IXGBE_HANG_CHECK_ARMED,
954 			  &adapter->xdp_ring[i]->state);
955 }
956 
957 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
958 {
959 	struct ixgbe_hw *hw = &adapter->hw;
960 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
961 	u32 xoff[8] = {0};
962 	u8 tc;
963 	int i;
964 	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
965 
966 	if (adapter->ixgbe_ieee_pfc)
967 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
968 
969 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
970 		ixgbe_update_xoff_rx_lfc(adapter);
971 		return;
972 	}
973 
974 	/* update stats for each tc, only valid with PFC enabled */
975 	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
976 		u32 pxoffrxc;
977 
978 		switch (hw->mac.type) {
979 		case ixgbe_mac_82598EB:
980 			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
981 			break;
982 		default:
983 			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
984 		}
985 		hwstats->pxoffrxc[i] += pxoffrxc;
986 		/* Get the TC for given UP */
987 		tc = netdev_get_prio_tc_map(adapter->netdev, i);
988 		xoff[tc] += pxoffrxc;
989 	}
990 
991 	/* disarm tx queues that have received xoff frames */
992 	for (i = 0; i < adapter->num_tx_queues; i++) {
993 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
994 
995 		tc = tx_ring->dcb_tc;
996 		if (xoff[tc])
997 			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
998 	}
999 
1000 	for (i = 0; i < adapter->num_xdp_queues; i++) {
1001 		struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1002 
1003 		tc = xdp_ring->dcb_tc;
1004 		if (xoff[tc])
1005 			clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1006 	}
1007 }
1008 
1009 static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1010 {
1011 	return ring->stats.packets;
1012 }
1013 
1014 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1015 {
1016 	unsigned int head, tail;
1017 
1018 	head = ring->next_to_clean;
1019 	tail = ring->next_to_use;
1020 
1021 	return ((head <= tail) ? tail : tail + ring->count) - head;
1022 }
1023 
1024 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1025 {
1026 	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1027 	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1028 	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1029 
1030 	clear_check_for_tx_hang(tx_ring);
1031 
1032 	/*
1033 	 * Check for a hung queue, but be thorough. This verifies
1034 	 * that a transmit has been completed since the previous
1035 	 * check AND there is at least one packet pending. The
1036 	 * ARMED bit is set to indicate a potential hang. The
1037 	 * bit is cleared if a pause frame is received to remove
1038 	 * false hang detection due to PFC or 802.3x frames. By
1039 	 * requiring this to fail twice we avoid races with
1040 	 * pfc clearing the ARMED bit and conditions where we
1041 	 * run the check_tx_hang logic with a transmit completion
1042 	 * pending but without time to complete it yet.
1043 	 */
1044 	if (tx_done_old == tx_done && tx_pending)
1045 		/* make sure it is true for two checks in a row */
1046 		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1047 					&tx_ring->state);
1048 	/* update completed stats and continue */
1049 	tx_ring->tx_stats.tx_done_old = tx_done;
1050 	/* reset the countdown */
1051 	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1052 
1053 	return false;
1054 }
1055 
1056 /**
1057  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
1058  * @adapter: driver private struct
1059  **/
1060 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1061 {
1062 
1063 	/* Do the reset outside of interrupt context */
1064 	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1065 		set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1066 		e_warn(drv, "initiating reset due to tx timeout\n");
1067 		ixgbe_service_event_schedule(adapter);
1068 	}
1069 }
1070 
1071 /**
1072  * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
1073  * @netdev: network interface device structure
1074  * @queue_index: Tx queue to set
1075  * @maxrate: desired maximum transmit bitrate
1076  **/
1077 static int ixgbe_tx_maxrate(struct net_device *netdev,
1078 			    int queue_index, u32 maxrate)
1079 {
1080 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1081 	struct ixgbe_hw *hw = &adapter->hw;
1082 	u32 bcnrc_val = ixgbe_link_mbps(adapter);
1083 
1084 	if (!maxrate)
1085 		return 0;
1086 
1087 	/* Calculate the rate factor values to set */
1088 	bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1089 	bcnrc_val /= maxrate;
1090 
1091 	/* clear everything but the rate factor */
1092 	bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1093 	IXGBE_RTTBCNRC_RF_DEC_MASK;
1094 
1095 	/* enable the rate scheduler */
1096 	bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1097 
1098 	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1099 	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1100 
1101 	return 0;
1102 }
1103 
1104 /**
1105  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
1106  * @q_vector: structure containing interrupt and ring information
1107  * @tx_ring: tx ring to clean
1108  * @napi_budget: Used to determine if we are in netpoll
1109  **/
1110 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1111 			       struct ixgbe_ring *tx_ring, int napi_budget)
1112 {
1113 	struct ixgbe_adapter *adapter = q_vector->adapter;
1114 	struct ixgbe_tx_buffer *tx_buffer;
1115 	union ixgbe_adv_tx_desc *tx_desc;
1116 	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1117 	unsigned int budget = q_vector->tx.work_limit;
1118 	unsigned int i = tx_ring->next_to_clean;
1119 
1120 	if (test_bit(__IXGBE_DOWN, &adapter->state))
1121 		return true;
1122 
1123 	tx_buffer = &tx_ring->tx_buffer_info[i];
1124 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
1125 	i -= tx_ring->count;
1126 
1127 	do {
1128 		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1129 
1130 		/* if next_to_watch is not set then there is no work pending */
1131 		if (!eop_desc)
1132 			break;
1133 
1134 		/* prevent any other reads prior to eop_desc */
1135 		smp_rmb();
1136 
1137 		/* if DD is not set pending work has not been completed */
1138 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1139 			break;
1140 
1141 		/* clear next_to_watch to prevent false hangs */
1142 		tx_buffer->next_to_watch = NULL;
1143 
1144 		/* update the statistics for this packet */
1145 		total_bytes += tx_buffer->bytecount;
1146 		total_packets += tx_buffer->gso_segs;
1147 		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1148 			total_ipsec++;
1149 
1150 		/* free the skb */
1151 		if (ring_is_xdp(tx_ring))
1152 			xdp_return_frame(tx_buffer->xdpf);
1153 		else
1154 			napi_consume_skb(tx_buffer->skb, napi_budget);
1155 
1156 		/* unmap skb header data */
1157 		dma_unmap_single(tx_ring->dev,
1158 				 dma_unmap_addr(tx_buffer, dma),
1159 				 dma_unmap_len(tx_buffer, len),
1160 				 DMA_TO_DEVICE);
1161 
1162 		/* clear tx_buffer data */
1163 		dma_unmap_len_set(tx_buffer, len, 0);
1164 
1165 		/* unmap remaining buffers */
1166 		while (tx_desc != eop_desc) {
1167 			tx_buffer++;
1168 			tx_desc++;
1169 			i++;
1170 			if (unlikely(!i)) {
1171 				i -= tx_ring->count;
1172 				tx_buffer = tx_ring->tx_buffer_info;
1173 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1174 			}
1175 
1176 			/* unmap any remaining paged data */
1177 			if (dma_unmap_len(tx_buffer, len)) {
1178 				dma_unmap_page(tx_ring->dev,
1179 					       dma_unmap_addr(tx_buffer, dma),
1180 					       dma_unmap_len(tx_buffer, len),
1181 					       DMA_TO_DEVICE);
1182 				dma_unmap_len_set(tx_buffer, len, 0);
1183 			}
1184 		}
1185 
1186 		/* move us one more past the eop_desc for start of next pkt */
1187 		tx_buffer++;
1188 		tx_desc++;
1189 		i++;
1190 		if (unlikely(!i)) {
1191 			i -= tx_ring->count;
1192 			tx_buffer = tx_ring->tx_buffer_info;
1193 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1194 		}
1195 
1196 		/* issue prefetch for next Tx descriptor */
1197 		prefetch(tx_desc);
1198 
1199 		/* update budget accounting */
1200 		budget--;
1201 	} while (likely(budget));
1202 
1203 	i += tx_ring->count;
1204 	tx_ring->next_to_clean = i;
1205 	u64_stats_update_begin(&tx_ring->syncp);
1206 	tx_ring->stats.bytes += total_bytes;
1207 	tx_ring->stats.packets += total_packets;
1208 	u64_stats_update_end(&tx_ring->syncp);
1209 	q_vector->tx.total_bytes += total_bytes;
1210 	q_vector->tx.total_packets += total_packets;
1211 	adapter->tx_ipsec += total_ipsec;
1212 
1213 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1214 		/* schedule immediate reset if we believe we hung */
1215 		struct ixgbe_hw *hw = &adapter->hw;
1216 		e_err(drv, "Detected Tx Unit Hang %s\n"
1217 			"  Tx Queue             <%d>\n"
1218 			"  TDH, TDT             <%x>, <%x>\n"
1219 			"  next_to_use          <%x>\n"
1220 			"  next_to_clean        <%x>\n"
1221 			"tx_buffer_info[next_to_clean]\n"
1222 			"  time_stamp           <%lx>\n"
1223 			"  jiffies              <%lx>\n",
1224 			ring_is_xdp(tx_ring) ? "(XDP)" : "",
1225 			tx_ring->queue_index,
1226 			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1227 			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1228 			tx_ring->next_to_use, i,
1229 			tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1230 
1231 		if (!ring_is_xdp(tx_ring))
1232 			netif_stop_subqueue(tx_ring->netdev,
1233 					    tx_ring->queue_index);
1234 
1235 		e_info(probe,
1236 		       "tx hang %d detected on queue %d, resetting adapter\n",
1237 			adapter->tx_timeout_count + 1, tx_ring->queue_index);
1238 
1239 		/* schedule immediate reset if we believe we hung */
1240 		ixgbe_tx_timeout_reset(adapter);
1241 
1242 		/* the adapter is about to reset, no point in enabling stuff */
1243 		return true;
1244 	}
1245 
1246 	if (ring_is_xdp(tx_ring))
1247 		return !!budget;
1248 
1249 	netdev_tx_completed_queue(txring_txq(tx_ring),
1250 				  total_packets, total_bytes);
1251 
1252 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1253 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1254 		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1255 		/* Make sure that anybody stopping the queue after this
1256 		 * sees the new next_to_clean.
1257 		 */
1258 		smp_mb();
1259 		if (__netif_subqueue_stopped(tx_ring->netdev,
1260 					     tx_ring->queue_index)
1261 		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1262 			netif_wake_subqueue(tx_ring->netdev,
1263 					    tx_ring->queue_index);
1264 			++tx_ring->tx_stats.restart_queue;
1265 		}
1266 	}
1267 
1268 	return !!budget;
1269 }
1270 
1271 #ifdef CONFIG_IXGBE_DCA
1272 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1273 				struct ixgbe_ring *tx_ring,
1274 				int cpu)
1275 {
1276 	struct ixgbe_hw *hw = &adapter->hw;
1277 	u32 txctrl = 0;
1278 	u16 reg_offset;
1279 
1280 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1281 		txctrl = dca3_get_tag(tx_ring->dev, cpu);
1282 
1283 	switch (hw->mac.type) {
1284 	case ixgbe_mac_82598EB:
1285 		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1286 		break;
1287 	case ixgbe_mac_82599EB:
1288 	case ixgbe_mac_X540:
1289 		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1290 		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1291 		break;
1292 	default:
1293 		/* for unknown hardware do not write register */
1294 		return;
1295 	}
1296 
1297 	/*
1298 	 * We can enable relaxed ordering for reads, but not writes when
1299 	 * DCA is enabled.  This is due to a known issue in some chipsets
1300 	 * which will cause the DCA tag to be cleared.
1301 	 */
1302 	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1303 		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1304 		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1305 
1306 	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1307 }
1308 
1309 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1310 				struct ixgbe_ring *rx_ring,
1311 				int cpu)
1312 {
1313 	struct ixgbe_hw *hw = &adapter->hw;
1314 	u32 rxctrl = 0;
1315 	u8 reg_idx = rx_ring->reg_idx;
1316 
1317 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1318 		rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1319 
1320 	switch (hw->mac.type) {
1321 	case ixgbe_mac_82599EB:
1322 	case ixgbe_mac_X540:
1323 		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1324 		break;
1325 	default:
1326 		break;
1327 	}
1328 
1329 	/*
1330 	 * We can enable relaxed ordering for reads, but not writes when
1331 	 * DCA is enabled.  This is due to a known issue in some chipsets
1332 	 * which will cause the DCA tag to be cleared.
1333 	 */
1334 	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1335 		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1336 		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1337 
1338 	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1339 }
1340 
1341 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1342 {
1343 	struct ixgbe_adapter *adapter = q_vector->adapter;
1344 	struct ixgbe_ring *ring;
1345 	int cpu = get_cpu();
1346 
1347 	if (q_vector->cpu == cpu)
1348 		goto out_no_update;
1349 
1350 	ixgbe_for_each_ring(ring, q_vector->tx)
1351 		ixgbe_update_tx_dca(adapter, ring, cpu);
1352 
1353 	ixgbe_for_each_ring(ring, q_vector->rx)
1354 		ixgbe_update_rx_dca(adapter, ring, cpu);
1355 
1356 	q_vector->cpu = cpu;
1357 out_no_update:
1358 	put_cpu();
1359 }
1360 
1361 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1362 {
1363 	int i;
1364 
1365 	/* always use CB2 mode, difference is masked in the CB driver */
1366 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1367 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1368 				IXGBE_DCA_CTRL_DCA_MODE_CB2);
1369 	else
1370 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1371 				IXGBE_DCA_CTRL_DCA_DISABLE);
1372 
1373 	for (i = 0; i < adapter->num_q_vectors; i++) {
1374 		adapter->q_vector[i]->cpu = -1;
1375 		ixgbe_update_dca(adapter->q_vector[i]);
1376 	}
1377 }
1378 
1379 static int __ixgbe_notify_dca(struct device *dev, void *data)
1380 {
1381 	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1382 	unsigned long event = *(unsigned long *)data;
1383 
1384 	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1385 		return 0;
1386 
1387 	switch (event) {
1388 	case DCA_PROVIDER_ADD:
1389 		/* if we're already enabled, don't do it again */
1390 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1391 			break;
1392 		if (dca_add_requester(dev) == 0) {
1393 			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1394 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1395 					IXGBE_DCA_CTRL_DCA_MODE_CB2);
1396 			break;
1397 		}
1398 		fallthrough; /* DCA is disabled. */
1399 	case DCA_PROVIDER_REMOVE:
1400 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1401 			dca_remove_requester(dev);
1402 			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1403 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1404 					IXGBE_DCA_CTRL_DCA_DISABLE);
1405 		}
1406 		break;
1407 	}
1408 
1409 	return 0;
1410 }
1411 
1412 #endif /* CONFIG_IXGBE_DCA */
1413 
1414 #define IXGBE_RSS_L4_TYPES_MASK \
1415 	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1416 	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1417 	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1418 	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1419 
1420 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1421 				 union ixgbe_adv_rx_desc *rx_desc,
1422 				 struct sk_buff *skb)
1423 {
1424 	u16 rss_type;
1425 
1426 	if (!(ring->netdev->features & NETIF_F_RXHASH))
1427 		return;
1428 
1429 	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1430 		   IXGBE_RXDADV_RSSTYPE_MASK;
1431 
1432 	if (!rss_type)
1433 		return;
1434 
1435 	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1436 		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1437 		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1438 }
1439 
1440 #ifdef IXGBE_FCOE
1441 /**
1442  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1443  * @ring: structure containing ring specific data
1444  * @rx_desc: advanced rx descriptor
1445  *
1446  * Returns : true if it is FCoE pkt
1447  */
1448 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1449 				    union ixgbe_adv_rx_desc *rx_desc)
1450 {
1451 	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1452 
1453 	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1454 	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1455 		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1456 			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1457 }
1458 
1459 #endif /* IXGBE_FCOE */
1460 /**
1461  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1462  * @ring: structure containing ring specific data
1463  * @rx_desc: current Rx descriptor being processed
1464  * @skb: skb currently being received and modified
1465  **/
1466 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1467 				     union ixgbe_adv_rx_desc *rx_desc,
1468 				     struct sk_buff *skb)
1469 {
1470 	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1471 	bool encap_pkt = false;
1472 
1473 	skb_checksum_none_assert(skb);
1474 
1475 	/* Rx csum disabled */
1476 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
1477 		return;
1478 
1479 	/* check for VXLAN and Geneve packets */
1480 	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1481 		encap_pkt = true;
1482 		skb->encapsulation = 1;
1483 	}
1484 
1485 	/* if IP and error */
1486 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1487 	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1488 		ring->rx_stats.csum_err++;
1489 		return;
1490 	}
1491 
1492 	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1493 		return;
1494 
1495 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1496 		/*
1497 		 * 82599 errata, UDP frames with a 0 checksum can be marked as
1498 		 * checksum errors.
1499 		 */
1500 		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1501 		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1502 			return;
1503 
1504 		ring->rx_stats.csum_err++;
1505 		return;
1506 	}
1507 
1508 	/* It must be a TCP or UDP packet with a valid checksum */
1509 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1510 	if (encap_pkt) {
1511 		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1512 			return;
1513 
1514 		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1515 			skb->ip_summed = CHECKSUM_NONE;
1516 			return;
1517 		}
1518 		/* If we checked the outer header let the stack know */
1519 		skb->csum_level = 1;
1520 	}
1521 }
1522 
1523 static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1524 {
1525 	return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1526 }
1527 
1528 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1529 				    struct ixgbe_rx_buffer *bi)
1530 {
1531 	struct page *page = bi->page;
1532 	dma_addr_t dma;
1533 
1534 	/* since we are recycling buffers we should seldom need to alloc */
1535 	if (likely(page))
1536 		return true;
1537 
1538 	/* alloc new page for storage */
1539 	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1540 	if (unlikely(!page)) {
1541 		rx_ring->rx_stats.alloc_rx_page_failed++;
1542 		return false;
1543 	}
1544 
1545 	/* map page for use */
1546 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1547 				 ixgbe_rx_pg_size(rx_ring),
1548 				 DMA_FROM_DEVICE,
1549 				 IXGBE_RX_DMA_ATTR);
1550 
1551 	/*
1552 	 * if mapping failed free memory back to system since
1553 	 * there isn't much point in holding memory we can't use
1554 	 */
1555 	if (dma_mapping_error(rx_ring->dev, dma)) {
1556 		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
1557 
1558 		rx_ring->rx_stats.alloc_rx_page_failed++;
1559 		return false;
1560 	}
1561 
1562 	bi->dma = dma;
1563 	bi->page = page;
1564 	bi->page_offset = rx_ring->rx_offset;
1565 	page_ref_add(page, USHRT_MAX - 1);
1566 	bi->pagecnt_bias = USHRT_MAX;
1567 	rx_ring->rx_stats.alloc_rx_page++;
1568 
1569 	return true;
1570 }
1571 
1572 /**
1573  * ixgbe_alloc_rx_buffers - Replace used receive buffers
1574  * @rx_ring: ring to place buffers on
1575  * @cleaned_count: number of buffers to replace
1576  **/
1577 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1578 {
1579 	union ixgbe_adv_rx_desc *rx_desc;
1580 	struct ixgbe_rx_buffer *bi;
1581 	u16 i = rx_ring->next_to_use;
1582 	u16 bufsz;
1583 
1584 	/* nothing to do */
1585 	if (!cleaned_count)
1586 		return;
1587 
1588 	rx_desc = IXGBE_RX_DESC(rx_ring, i);
1589 	bi = &rx_ring->rx_buffer_info[i];
1590 	i -= rx_ring->count;
1591 
1592 	bufsz = ixgbe_rx_bufsz(rx_ring);
1593 
1594 	do {
1595 		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1596 			break;
1597 
1598 		/* sync the buffer for use by the device */
1599 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1600 						 bi->page_offset, bufsz,
1601 						 DMA_FROM_DEVICE);
1602 
1603 		/*
1604 		 * Refresh the desc even if buffer_addrs didn't change
1605 		 * because each write-back erases this info.
1606 		 */
1607 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1608 
1609 		rx_desc++;
1610 		bi++;
1611 		i++;
1612 		if (unlikely(!i)) {
1613 			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1614 			bi = rx_ring->rx_buffer_info;
1615 			i -= rx_ring->count;
1616 		}
1617 
1618 		/* clear the length for the next_to_use descriptor */
1619 		rx_desc->wb.upper.length = 0;
1620 
1621 		cleaned_count--;
1622 	} while (cleaned_count);
1623 
1624 	i += rx_ring->count;
1625 
1626 	if (rx_ring->next_to_use != i) {
1627 		rx_ring->next_to_use = i;
1628 
1629 		/* update next to alloc since we have filled the ring */
1630 		rx_ring->next_to_alloc = i;
1631 
1632 		/* Force memory writes to complete before letting h/w
1633 		 * know there are new descriptors to fetch.  (Only
1634 		 * applicable for weak-ordered memory model archs,
1635 		 * such as IA-64).
1636 		 */
1637 		wmb();
1638 		writel(i, rx_ring->tail);
1639 	}
1640 }
1641 
1642 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1643 				   struct sk_buff *skb)
1644 {
1645 	u16 hdr_len = skb_headlen(skb);
1646 
1647 	/* set gso_size to avoid messing up TCP MSS */
1648 	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1649 						 IXGBE_CB(skb)->append_cnt);
1650 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1651 }
1652 
1653 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1654 				   struct sk_buff *skb)
1655 {
1656 	/* if append_cnt is 0 then frame is not RSC */
1657 	if (!IXGBE_CB(skb)->append_cnt)
1658 		return;
1659 
1660 	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1661 	rx_ring->rx_stats.rsc_flush++;
1662 
1663 	ixgbe_set_rsc_gso_size(rx_ring, skb);
1664 
1665 	/* gso_size is computed using append_cnt so always clear it last */
1666 	IXGBE_CB(skb)->append_cnt = 0;
1667 }
1668 
1669 /**
1670  * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1671  * @rx_ring: rx descriptor ring packet is being transacted on
1672  * @rx_desc: pointer to the EOP Rx descriptor
1673  * @skb: pointer to current skb being populated
1674  *
1675  * This function checks the ring, descriptor, and packet information in
1676  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
1677  * other fields within the skb.
1678  **/
1679 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1680 			      union ixgbe_adv_rx_desc *rx_desc,
1681 			      struct sk_buff *skb)
1682 {
1683 	struct net_device *dev = rx_ring->netdev;
1684 	u32 flags = rx_ring->q_vector->adapter->flags;
1685 
1686 	ixgbe_update_rsc_stats(rx_ring, skb);
1687 
1688 	ixgbe_rx_hash(rx_ring, rx_desc, skb);
1689 
1690 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1691 
1692 	if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1693 		ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1694 
1695 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1696 	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1697 		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1698 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1699 	}
1700 
1701 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1702 		ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1703 
1704 	/* record Rx queue, or update MACVLAN statistics */
1705 	if (netif_is_ixgbe(dev))
1706 		skb_record_rx_queue(skb, rx_ring->queue_index);
1707 	else
1708 		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
1709 				 false);
1710 
1711 	skb->protocol = eth_type_trans(skb, dev);
1712 }
1713 
1714 void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1715 		  struct sk_buff *skb)
1716 {
1717 	napi_gro_receive(&q_vector->napi, skb);
1718 }
1719 
1720 /**
1721  * ixgbe_is_non_eop - process handling of non-EOP buffers
1722  * @rx_ring: Rx ring being processed
1723  * @rx_desc: Rx descriptor for current buffer
1724  * @skb: Current socket buffer containing buffer in progress
1725  *
1726  * This function updates next to clean.  If the buffer is an EOP buffer
1727  * this function exits returning false, otherwise it will place the
1728  * sk_buff in the next buffer to be chained and return true indicating
1729  * that this is in fact a non-EOP buffer.
1730  **/
1731 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1732 			     union ixgbe_adv_rx_desc *rx_desc,
1733 			     struct sk_buff *skb)
1734 {
1735 	u32 ntc = rx_ring->next_to_clean + 1;
1736 
1737 	/* fetch, update, and store next to clean */
1738 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1739 	rx_ring->next_to_clean = ntc;
1740 
1741 	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1742 
1743 	/* update RSC append count if present */
1744 	if (ring_is_rsc_enabled(rx_ring)) {
1745 		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1746 				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1747 
1748 		if (unlikely(rsc_enabled)) {
1749 			u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1750 
1751 			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1752 			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1753 
1754 			/* update ntc based on RSC value */
1755 			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1756 			ntc &= IXGBE_RXDADV_NEXTP_MASK;
1757 			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1758 		}
1759 	}
1760 
1761 	/* if we are the last buffer then there is nothing else to do */
1762 	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1763 		return false;
1764 
1765 	/* place skb in next buffer to be received */
1766 	rx_ring->rx_buffer_info[ntc].skb = skb;
1767 	rx_ring->rx_stats.non_eop_descs++;
1768 
1769 	return true;
1770 }
1771 
1772 /**
1773  * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1774  * @rx_ring: rx descriptor ring packet is being transacted on
1775  * @skb: pointer to current skb being adjusted
1776  *
1777  * This function is an ixgbe specific version of __pskb_pull_tail.  The
1778  * main difference between this version and the original function is that
1779  * this function can make several assumptions about the state of things
1780  * that allow for significant optimizations versus the standard function.
1781  * As a result we can do things like drop a frag and maintain an accurate
1782  * truesize for the skb.
1783  */
1784 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1785 			    struct sk_buff *skb)
1786 {
1787 	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1788 	unsigned char *va;
1789 	unsigned int pull_len;
1790 
1791 	/*
1792 	 * it is valid to use page_address instead of kmap since we are
1793 	 * working with pages allocated out of the lomem pool per
1794 	 * alloc_page(GFP_ATOMIC)
1795 	 */
1796 	va = skb_frag_address(frag);
1797 
1798 	/*
1799 	 * we need the header to contain the greater of either ETH_HLEN or
1800 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
1801 	 */
1802 	pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1803 
1804 	/* align pull length to size of long to optimize memcpy performance */
1805 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1806 
1807 	/* update all of the pointers */
1808 	skb_frag_size_sub(frag, pull_len);
1809 	skb_frag_off_add(frag, pull_len);
1810 	skb->data_len -= pull_len;
1811 	skb->tail += pull_len;
1812 }
1813 
1814 /**
1815  * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1816  * @rx_ring: rx descriptor ring packet is being transacted on
1817  * @skb: pointer to current skb being updated
1818  *
1819  * This function provides a basic DMA sync up for the first fragment of an
1820  * skb.  The reason for doing this is that the first fragment cannot be
1821  * unmapped until we have reached the end of packet descriptor for a buffer
1822  * chain.
1823  */
1824 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1825 				struct sk_buff *skb)
1826 {
1827 	if (ring_uses_build_skb(rx_ring)) {
1828 		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1829 
1830 		dma_sync_single_range_for_cpu(rx_ring->dev,
1831 					      IXGBE_CB(skb)->dma,
1832 					      offset,
1833 					      skb_headlen(skb),
1834 					      DMA_FROM_DEVICE);
1835 	} else {
1836 		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1837 
1838 		dma_sync_single_range_for_cpu(rx_ring->dev,
1839 					      IXGBE_CB(skb)->dma,
1840 					      skb_frag_off(frag),
1841 					      skb_frag_size(frag),
1842 					      DMA_FROM_DEVICE);
1843 	}
1844 
1845 	/* If the page was released, just unmap it. */
1846 	if (unlikely(IXGBE_CB(skb)->page_released)) {
1847 		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1848 				     ixgbe_rx_pg_size(rx_ring),
1849 				     DMA_FROM_DEVICE,
1850 				     IXGBE_RX_DMA_ATTR);
1851 	}
1852 }
1853 
1854 /**
1855  * ixgbe_cleanup_headers - Correct corrupted or empty headers
1856  * @rx_ring: rx descriptor ring packet is being transacted on
1857  * @rx_desc: pointer to the EOP Rx descriptor
1858  * @skb: pointer to current skb being fixed
1859  *
1860  * Check if the skb is valid in the XDP case it will be an error pointer.
1861  * Return true in this case to abort processing and advance to next
1862  * descriptor.
1863  *
1864  * Check for corrupted packet headers caused by senders on the local L2
1865  * embedded NIC switch not setting up their Tx Descriptors right.  These
1866  * should be very rare.
1867  *
1868  * Also address the case where we are pulling data in on pages only
1869  * and as such no data is present in the skb header.
1870  *
1871  * In addition if skb is not at least 60 bytes we need to pad it so that
1872  * it is large enough to qualify as a valid Ethernet frame.
1873  *
1874  * Returns true if an error was encountered and skb was freed.
1875  **/
1876 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1877 			   union ixgbe_adv_rx_desc *rx_desc,
1878 			   struct sk_buff *skb)
1879 {
1880 	struct net_device *netdev = rx_ring->netdev;
1881 
1882 	/* XDP packets use error pointer so abort at this point */
1883 	if (IS_ERR(skb))
1884 		return true;
1885 
1886 	/* Verify netdev is present, and that packet does not have any
1887 	 * errors that would be unacceptable to the netdev.
1888 	 */
1889 	if (!netdev ||
1890 	    (unlikely(ixgbe_test_staterr(rx_desc,
1891 					 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1892 	     !(netdev->features & NETIF_F_RXALL)))) {
1893 		dev_kfree_skb_any(skb);
1894 		return true;
1895 	}
1896 
1897 	/* place header in linear portion of buffer */
1898 	if (!skb_headlen(skb))
1899 		ixgbe_pull_tail(rx_ring, skb);
1900 
1901 #ifdef IXGBE_FCOE
1902 	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
1903 	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1904 		return false;
1905 
1906 #endif
1907 	/* if eth_skb_pad returns an error the skb was freed */
1908 	if (eth_skb_pad(skb))
1909 		return true;
1910 
1911 	return false;
1912 }
1913 
1914 /**
1915  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1916  * @rx_ring: rx descriptor ring to store buffers on
1917  * @old_buff: donor buffer to have page reused
1918  *
1919  * Synchronizes page for reuse by the adapter
1920  **/
1921 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1922 				struct ixgbe_rx_buffer *old_buff)
1923 {
1924 	struct ixgbe_rx_buffer *new_buff;
1925 	u16 nta = rx_ring->next_to_alloc;
1926 
1927 	new_buff = &rx_ring->rx_buffer_info[nta];
1928 
1929 	/* update, and store next to alloc */
1930 	nta++;
1931 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1932 
1933 	/* Transfer page from old buffer to new buffer.
1934 	 * Move each member individually to avoid possible store
1935 	 * forwarding stalls and unnecessary copy of skb.
1936 	 */
1937 	new_buff->dma		= old_buff->dma;
1938 	new_buff->page		= old_buff->page;
1939 	new_buff->page_offset	= old_buff->page_offset;
1940 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1941 }
1942 
1943 static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
1944 				    int rx_buffer_pgcnt)
1945 {
1946 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1947 	struct page *page = rx_buffer->page;
1948 
1949 	/* avoid re-using remote and pfmemalloc pages */
1950 	if (!dev_page_is_reusable(page))
1951 		return false;
1952 
1953 #if (PAGE_SIZE < 8192)
1954 	/* if we are only owner of page we can reuse it */
1955 	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
1956 		return false;
1957 #else
1958 	/* The last offset is a bit aggressive in that we assume the
1959 	 * worst case of FCoE being enabled and using a 3K buffer.
1960 	 * However this should have minimal impact as the 1K extra is
1961 	 * still less than one buffer in size.
1962 	 */
1963 #define IXGBE_LAST_OFFSET \
1964 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
1965 	if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
1966 		return false;
1967 #endif
1968 
1969 	/* If we have drained the page fragment pool we need to update
1970 	 * the pagecnt_bias and page count so that we fully restock the
1971 	 * number of references the driver holds.
1972 	 */
1973 	if (unlikely(pagecnt_bias == 1)) {
1974 		page_ref_add(page, USHRT_MAX - 1);
1975 		rx_buffer->pagecnt_bias = USHRT_MAX;
1976 	}
1977 
1978 	return true;
1979 }
1980 
1981 /**
1982  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
1983  * @rx_ring: rx descriptor ring to transact packets on
1984  * @rx_buffer: buffer containing page to add
1985  * @skb: sk_buff to place the data into
1986  * @size: size of data in rx_buffer
1987  *
1988  * This function will add the data contained in rx_buffer->page to the skb.
1989  * This is done either through a direct copy if the data in the buffer is
1990  * less than the skb header size, otherwise it will just attach the page as
1991  * a frag to the skb.
1992  *
1993  * The function will then update the page offset if necessary and return
1994  * true if the buffer can be reused by the adapter.
1995  **/
1996 static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1997 			      struct ixgbe_rx_buffer *rx_buffer,
1998 			      struct sk_buff *skb,
1999 			      unsigned int size)
2000 {
2001 #if (PAGE_SIZE < 8192)
2002 	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2003 #else
2004 	unsigned int truesize = rx_ring->rx_offset ?
2005 				SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
2006 				SKB_DATA_ALIGN(size);
2007 #endif
2008 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2009 			rx_buffer->page_offset, size, truesize);
2010 #if (PAGE_SIZE < 8192)
2011 	rx_buffer->page_offset ^= truesize;
2012 #else
2013 	rx_buffer->page_offset += truesize;
2014 #endif
2015 }
2016 
2017 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2018 						   union ixgbe_adv_rx_desc *rx_desc,
2019 						   struct sk_buff **skb,
2020 						   const unsigned int size,
2021 						   int *rx_buffer_pgcnt)
2022 {
2023 	struct ixgbe_rx_buffer *rx_buffer;
2024 
2025 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2026 	*rx_buffer_pgcnt =
2027 #if (PAGE_SIZE < 8192)
2028 		page_count(rx_buffer->page);
2029 #else
2030 		0;
2031 #endif
2032 	prefetchw(rx_buffer->page);
2033 	*skb = rx_buffer->skb;
2034 
2035 	/* Delay unmapping of the first packet. It carries the header
2036 	 * information, HW may still access the header after the writeback.
2037 	 * Only unmap it when EOP is reached
2038 	 */
2039 	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2040 		if (!*skb)
2041 			goto skip_sync;
2042 	} else {
2043 		if (*skb)
2044 			ixgbe_dma_sync_frag(rx_ring, *skb);
2045 	}
2046 
2047 	/* we are reusing so sync this buffer for CPU use */
2048 	dma_sync_single_range_for_cpu(rx_ring->dev,
2049 				      rx_buffer->dma,
2050 				      rx_buffer->page_offset,
2051 				      size,
2052 				      DMA_FROM_DEVICE);
2053 skip_sync:
2054 	rx_buffer->pagecnt_bias--;
2055 
2056 	return rx_buffer;
2057 }
2058 
2059 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2060 				struct ixgbe_rx_buffer *rx_buffer,
2061 				struct sk_buff *skb,
2062 				int rx_buffer_pgcnt)
2063 {
2064 	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2065 		/* hand second half of page back to the ring */
2066 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2067 	} else {
2068 		if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2069 			/* the page has been released from the ring */
2070 			IXGBE_CB(skb)->page_released = true;
2071 		} else {
2072 			/* we are not reusing the buffer so unmap it */
2073 			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2074 					     ixgbe_rx_pg_size(rx_ring),
2075 					     DMA_FROM_DEVICE,
2076 					     IXGBE_RX_DMA_ATTR);
2077 		}
2078 		__page_frag_cache_drain(rx_buffer->page,
2079 					rx_buffer->pagecnt_bias);
2080 	}
2081 
2082 	/* clear contents of rx_buffer */
2083 	rx_buffer->page = NULL;
2084 	rx_buffer->skb = NULL;
2085 }
2086 
2087 static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2088 					   struct ixgbe_rx_buffer *rx_buffer,
2089 					   struct xdp_buff *xdp,
2090 					   union ixgbe_adv_rx_desc *rx_desc)
2091 {
2092 	unsigned int size = xdp->data_end - xdp->data;
2093 #if (PAGE_SIZE < 8192)
2094 	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2095 #else
2096 	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2097 					       xdp->data_hard_start);
2098 #endif
2099 	struct sk_buff *skb;
2100 
2101 	/* prefetch first cache line of first page */
2102 	net_prefetch(xdp->data);
2103 
2104 	/* Note, we get here by enabling legacy-rx via:
2105 	 *
2106 	 *    ethtool --set-priv-flags <dev> legacy-rx on
2107 	 *
2108 	 * In this mode, we currently get 0 extra XDP headroom as
2109 	 * opposed to having legacy-rx off, where we process XDP
2110 	 * packets going to stack via ixgbe_build_skb(). The latter
2111 	 * provides us currently with 192 bytes of headroom.
2112 	 *
2113 	 * For ixgbe_construct_skb() mode it means that the
2114 	 * xdp->data_meta will always point to xdp->data, since
2115 	 * the helper cannot expand the head. Should this ever
2116 	 * change in future for legacy-rx mode on, then lets also
2117 	 * add xdp->data_meta handling here.
2118 	 */
2119 
2120 	/* allocate a skb to store the frags */
2121 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2122 	if (unlikely(!skb))
2123 		return NULL;
2124 
2125 	if (size > IXGBE_RX_HDR_SIZE) {
2126 		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2127 			IXGBE_CB(skb)->dma = rx_buffer->dma;
2128 
2129 		skb_add_rx_frag(skb, 0, rx_buffer->page,
2130 				xdp->data - page_address(rx_buffer->page),
2131 				size, truesize);
2132 #if (PAGE_SIZE < 8192)
2133 		rx_buffer->page_offset ^= truesize;
2134 #else
2135 		rx_buffer->page_offset += truesize;
2136 #endif
2137 	} else {
2138 		memcpy(__skb_put(skb, size),
2139 		       xdp->data, ALIGN(size, sizeof(long)));
2140 		rx_buffer->pagecnt_bias++;
2141 	}
2142 
2143 	return skb;
2144 }
2145 
2146 static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2147 				       struct ixgbe_rx_buffer *rx_buffer,
2148 				       struct xdp_buff *xdp,
2149 				       union ixgbe_adv_rx_desc *rx_desc)
2150 {
2151 	unsigned int metasize = xdp->data - xdp->data_meta;
2152 #if (PAGE_SIZE < 8192)
2153 	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2154 #else
2155 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2156 				SKB_DATA_ALIGN(xdp->data_end -
2157 					       xdp->data_hard_start);
2158 #endif
2159 	struct sk_buff *skb;
2160 
2161 	/* Prefetch first cache line of first page. If xdp->data_meta
2162 	 * is unused, this points extactly as xdp->data, otherwise we
2163 	 * likely have a consumer accessing first few bytes of meta
2164 	 * data, and then actual data.
2165 	 */
2166 	net_prefetch(xdp->data_meta);
2167 
2168 	/* build an skb to around the page buffer */
2169 	skb = build_skb(xdp->data_hard_start, truesize);
2170 	if (unlikely(!skb))
2171 		return NULL;
2172 
2173 	/* update pointers within the skb to store the data */
2174 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2175 	__skb_put(skb, xdp->data_end - xdp->data);
2176 	if (metasize)
2177 		skb_metadata_set(skb, metasize);
2178 
2179 	/* record DMA address if this is the start of a chain of buffers */
2180 	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2181 		IXGBE_CB(skb)->dma = rx_buffer->dma;
2182 
2183 	/* update buffer offset */
2184 #if (PAGE_SIZE < 8192)
2185 	rx_buffer->page_offset ^= truesize;
2186 #else
2187 	rx_buffer->page_offset += truesize;
2188 #endif
2189 
2190 	return skb;
2191 }
2192 
2193 static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2194 				     struct ixgbe_ring *rx_ring,
2195 				     struct xdp_buff *xdp)
2196 {
2197 	int err, result = IXGBE_XDP_PASS;
2198 	struct bpf_prog *xdp_prog;
2199 	struct xdp_frame *xdpf;
2200 	u32 act;
2201 
2202 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2203 
2204 	if (!xdp_prog)
2205 		goto xdp_out;
2206 
2207 	prefetchw(xdp->data_hard_start); /* xdp_frame write */
2208 
2209 	act = bpf_prog_run_xdp(xdp_prog, xdp);
2210 	switch (act) {
2211 	case XDP_PASS:
2212 		break;
2213 	case XDP_TX:
2214 		xdpf = xdp_convert_buff_to_frame(xdp);
2215 		if (unlikely(!xdpf))
2216 			goto out_failure;
2217 		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
2218 		if (result == IXGBE_XDP_CONSUMED)
2219 			goto out_failure;
2220 		break;
2221 	case XDP_REDIRECT:
2222 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2223 		if (err)
2224 			goto out_failure;
2225 		result = IXGBE_XDP_REDIR;
2226 		break;
2227 	default:
2228 		bpf_warn_invalid_xdp_action(act);
2229 		fallthrough;
2230 	case XDP_ABORTED:
2231 out_failure:
2232 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2233 		fallthrough; /* handle aborts by dropping packet */
2234 	case XDP_DROP:
2235 		result = IXGBE_XDP_CONSUMED;
2236 		break;
2237 	}
2238 xdp_out:
2239 	return ERR_PTR(-result);
2240 }
2241 
2242 static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
2243 					    unsigned int size)
2244 {
2245 	unsigned int truesize;
2246 
2247 #if (PAGE_SIZE < 8192)
2248 	truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
2249 #else
2250 	truesize = rx_ring->rx_offset ?
2251 		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
2252 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2253 		SKB_DATA_ALIGN(size);
2254 #endif
2255 	return truesize;
2256 }
2257 
2258 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2259 				 struct ixgbe_rx_buffer *rx_buffer,
2260 				 unsigned int size)
2261 {
2262 	unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
2263 #if (PAGE_SIZE < 8192)
2264 	rx_buffer->page_offset ^= truesize;
2265 #else
2266 	rx_buffer->page_offset += truesize;
2267 #endif
2268 }
2269 
2270 /**
2271  * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2272  * @q_vector: structure containing interrupt and ring information
2273  * @rx_ring: rx descriptor ring to transact packets on
2274  * @budget: Total limit on number of packets to process
2275  *
2276  * This function provides a "bounce buffer" approach to Rx interrupt
2277  * processing.  The advantage to this is that on systems that have
2278  * expensive overhead for IOMMU access this provides a means of avoiding
2279  * it by maintaining the mapping of the page to the syste.
2280  *
2281  * Returns amount of work completed
2282  **/
2283 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2284 			       struct ixgbe_ring *rx_ring,
2285 			       const int budget)
2286 {
2287 	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
2288 	struct ixgbe_adapter *adapter = q_vector->adapter;
2289 #ifdef IXGBE_FCOE
2290 	int ddp_bytes;
2291 	unsigned int mss = 0;
2292 #endif /* IXGBE_FCOE */
2293 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2294 	unsigned int offset = rx_ring->rx_offset;
2295 	unsigned int xdp_xmit = 0;
2296 	struct xdp_buff xdp;
2297 
2298 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
2299 #if (PAGE_SIZE < 8192)
2300 	frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
2301 #endif
2302 	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2303 
2304 	while (likely(total_rx_packets < budget)) {
2305 		union ixgbe_adv_rx_desc *rx_desc;
2306 		struct ixgbe_rx_buffer *rx_buffer;
2307 		struct sk_buff *skb;
2308 		int rx_buffer_pgcnt;
2309 		unsigned int size;
2310 
2311 		/* return some buffers to hardware, one at a time is too slow */
2312 		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2313 			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2314 			cleaned_count = 0;
2315 		}
2316 
2317 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2318 		size = le16_to_cpu(rx_desc->wb.upper.length);
2319 		if (!size)
2320 			break;
2321 
2322 		/* This memory barrier is needed to keep us from reading
2323 		 * any other fields out of the rx_desc until we know the
2324 		 * descriptor has been written back
2325 		 */
2326 		dma_rmb();
2327 
2328 		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
2329 
2330 		/* retrieve a buffer from the ring */
2331 		if (!skb) {
2332 			unsigned char *hard_start;
2333 
2334 			hard_start = page_address(rx_buffer->page) +
2335 				     rx_buffer->page_offset - offset;
2336 			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
2337 #if (PAGE_SIZE > 4096)
2338 			/* At larger PAGE_SIZE, frame_sz depend on len size */
2339 			xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
2340 #endif
2341 			skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2342 		}
2343 
2344 		if (IS_ERR(skb)) {
2345 			unsigned int xdp_res = -PTR_ERR(skb);
2346 
2347 			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2348 				xdp_xmit |= xdp_res;
2349 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2350 			} else {
2351 				rx_buffer->pagecnt_bias++;
2352 			}
2353 			total_rx_packets++;
2354 			total_rx_bytes += size;
2355 		} else if (skb) {
2356 			ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2357 		} else if (ring_uses_build_skb(rx_ring)) {
2358 			skb = ixgbe_build_skb(rx_ring, rx_buffer,
2359 					      &xdp, rx_desc);
2360 		} else {
2361 			skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2362 						  &xdp, rx_desc);
2363 		}
2364 
2365 		/* exit if we failed to retrieve a buffer */
2366 		if (!skb) {
2367 			rx_ring->rx_stats.alloc_rx_buff_failed++;
2368 			rx_buffer->pagecnt_bias++;
2369 			break;
2370 		}
2371 
2372 		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
2373 		cleaned_count++;
2374 
2375 		/* place incomplete frames back on ring for completion */
2376 		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2377 			continue;
2378 
2379 		/* verify the packet layout is correct */
2380 		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2381 			continue;
2382 
2383 		/* probably a little skewed due to removing CRC */
2384 		total_rx_bytes += skb->len;
2385 
2386 		/* populate checksum, timestamp, VLAN, and protocol */
2387 		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2388 
2389 #ifdef IXGBE_FCOE
2390 		/* if ddp, not passing to ULD unless for FCP_RSP or error */
2391 		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2392 			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2393 			/* include DDPed FCoE data */
2394 			if (ddp_bytes > 0) {
2395 				if (!mss) {
2396 					mss = rx_ring->netdev->mtu -
2397 						sizeof(struct fcoe_hdr) -
2398 						sizeof(struct fc_frame_header) -
2399 						sizeof(struct fcoe_crc_eof);
2400 					if (mss > 512)
2401 						mss &= ~511;
2402 				}
2403 				total_rx_bytes += ddp_bytes;
2404 				total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2405 								 mss);
2406 			}
2407 			if (!ddp_bytes) {
2408 				dev_kfree_skb_any(skb);
2409 				continue;
2410 			}
2411 		}
2412 
2413 #endif /* IXGBE_FCOE */
2414 		ixgbe_rx_skb(q_vector, skb);
2415 
2416 		/* update budget accounting */
2417 		total_rx_packets++;
2418 	}
2419 
2420 	if (xdp_xmit & IXGBE_XDP_REDIR)
2421 		xdp_do_flush_map();
2422 
2423 	if (xdp_xmit & IXGBE_XDP_TX) {
2424 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2425 
2426 		/* Force memory writes to complete before letting h/w
2427 		 * know there are new descriptors to fetch.
2428 		 */
2429 		wmb();
2430 		writel(ring->next_to_use, ring->tail);
2431 	}
2432 
2433 	u64_stats_update_begin(&rx_ring->syncp);
2434 	rx_ring->stats.packets += total_rx_packets;
2435 	rx_ring->stats.bytes += total_rx_bytes;
2436 	u64_stats_update_end(&rx_ring->syncp);
2437 	q_vector->rx.total_packets += total_rx_packets;
2438 	q_vector->rx.total_bytes += total_rx_bytes;
2439 
2440 	return total_rx_packets;
2441 }
2442 
2443 /**
2444  * ixgbe_configure_msix - Configure MSI-X hardware
2445  * @adapter: board private structure
2446  *
2447  * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2448  * interrupts.
2449  **/
2450 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2451 {
2452 	struct ixgbe_q_vector *q_vector;
2453 	int v_idx;
2454 	u32 mask;
2455 
2456 	/* Populate MSIX to EITR Select */
2457 	if (adapter->num_vfs > 32) {
2458 		u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2459 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2460 	}
2461 
2462 	/*
2463 	 * Populate the IVAR table and set the ITR values to the
2464 	 * corresponding register.
2465 	 */
2466 	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2467 		struct ixgbe_ring *ring;
2468 		q_vector = adapter->q_vector[v_idx];
2469 
2470 		ixgbe_for_each_ring(ring, q_vector->rx)
2471 			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2472 
2473 		ixgbe_for_each_ring(ring, q_vector->tx)
2474 			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2475 
2476 		ixgbe_write_eitr(q_vector);
2477 	}
2478 
2479 	switch (adapter->hw.mac.type) {
2480 	case ixgbe_mac_82598EB:
2481 		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2482 			       v_idx);
2483 		break;
2484 	case ixgbe_mac_82599EB:
2485 	case ixgbe_mac_X540:
2486 	case ixgbe_mac_X550:
2487 	case ixgbe_mac_X550EM_x:
2488 	case ixgbe_mac_x550em_a:
2489 		ixgbe_set_ivar(adapter, -1, 1, v_idx);
2490 		break;
2491 	default:
2492 		break;
2493 	}
2494 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2495 
2496 	/* set up to autoclear timer, and the vectors */
2497 	mask = IXGBE_EIMS_ENABLE_MASK;
2498 	mask &= ~(IXGBE_EIMS_OTHER |
2499 		  IXGBE_EIMS_MAILBOX |
2500 		  IXGBE_EIMS_LSC);
2501 
2502 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2503 }
2504 
2505 /**
2506  * ixgbe_update_itr - update the dynamic ITR value based on statistics
2507  * @q_vector: structure containing interrupt and ring information
2508  * @ring_container: structure containing ring performance data
2509  *
2510  *      Stores a new ITR value based on packets and byte
2511  *      counts during the last interrupt.  The advantage of per interrupt
2512  *      computation is faster updates and more accurate ITR for the current
2513  *      traffic pattern.  Constants in this function were computed
2514  *      based on theoretical maximum wire speed and thresholds were set based
2515  *      on testing data as well as attempting to minimize response time
2516  *      while increasing bulk throughput.
2517  **/
2518 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2519 			     struct ixgbe_ring_container *ring_container)
2520 {
2521 	unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2522 			   IXGBE_ITR_ADAPTIVE_LATENCY;
2523 	unsigned int avg_wire_size, packets, bytes;
2524 	unsigned long next_update = jiffies;
2525 
2526 	/* If we don't have any rings just leave ourselves set for maximum
2527 	 * possible latency so we take ourselves out of the equation.
2528 	 */
2529 	if (!ring_container->ring)
2530 		return;
2531 
2532 	/* If we didn't update within up to 1 - 2 jiffies we can assume
2533 	 * that either packets are coming in so slow there hasn't been
2534 	 * any work, or that there is so much work that NAPI is dealing
2535 	 * with interrupt moderation and we don't need to do anything.
2536 	 */
2537 	if (time_after(next_update, ring_container->next_update))
2538 		goto clear_counts;
2539 
2540 	packets = ring_container->total_packets;
2541 
2542 	/* We have no packets to actually measure against. This means
2543 	 * either one of the other queues on this vector is active or
2544 	 * we are a Tx queue doing TSO with too high of an interrupt rate.
2545 	 *
2546 	 * When this occurs just tick up our delay by the minimum value
2547 	 * and hope that this extra delay will prevent us from being called
2548 	 * without any work on our queue.
2549 	 */
2550 	if (!packets) {
2551 		itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2552 		if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2553 			itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2554 		itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2555 		goto clear_counts;
2556 	}
2557 
2558 	bytes = ring_container->total_bytes;
2559 
2560 	/* If packets are less than 4 or bytes are less than 9000 assume
2561 	 * insufficient data to use bulk rate limiting approach. We are
2562 	 * likely latency driven.
2563 	 */
2564 	if (packets < 4 && bytes < 9000) {
2565 		itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2566 		goto adjust_by_size;
2567 	}
2568 
2569 	/* Between 4 and 48 we can assume that our current interrupt delay
2570 	 * is only slightly too low. As such we should increase it by a small
2571 	 * fixed amount.
2572 	 */
2573 	if (packets < 48) {
2574 		itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2575 		if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2576 			itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2577 		goto clear_counts;
2578 	}
2579 
2580 	/* Between 48 and 96 is our "goldilocks" zone where we are working
2581 	 * out "just right". Just report that our current ITR is good for us.
2582 	 */
2583 	if (packets < 96) {
2584 		itr = q_vector->itr >> 2;
2585 		goto clear_counts;
2586 	}
2587 
2588 	/* If packet count is 96 or greater we are likely looking at a slight
2589 	 * overrun of the delay we want. Try halving our delay to see if that
2590 	 * will cut the number of packets in half per interrupt.
2591 	 */
2592 	if (packets < 256) {
2593 		itr = q_vector->itr >> 3;
2594 		if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2595 			itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2596 		goto clear_counts;
2597 	}
2598 
2599 	/* The paths below assume we are dealing with a bulk ITR since number
2600 	 * of packets is 256 or greater. We are just going to have to compute
2601 	 * a value and try to bring the count under control, though for smaller
2602 	 * packet sizes there isn't much we can do as NAPI polling will likely
2603 	 * be kicking in sooner rather than later.
2604 	 */
2605 	itr = IXGBE_ITR_ADAPTIVE_BULK;
2606 
2607 adjust_by_size:
2608 	/* If packet counts are 256 or greater we can assume we have a gross
2609 	 * overestimation of what the rate should be. Instead of trying to fine
2610 	 * tune it just use the formula below to try and dial in an exact value
2611 	 * give the current packet size of the frame.
2612 	 */
2613 	avg_wire_size = bytes / packets;
2614 
2615 	/* The following is a crude approximation of:
2616 	 *  wmem_default / (size + overhead) = desired_pkts_per_int
2617 	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
2618 	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
2619 	 *
2620 	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
2621 	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
2622 	 * formula down to
2623 	 *
2624 	 *  (170 * (size + 24)) / (size + 640) = ITR
2625 	 *
2626 	 * We first do some math on the packet size and then finally bitshift
2627 	 * by 8 after rounding up. We also have to account for PCIe link speed
2628 	 * difference as ITR scales based on this.
2629 	 */
2630 	if (avg_wire_size <= 60) {
2631 		/* Start at 50k ints/sec */
2632 		avg_wire_size = 5120;
2633 	} else if (avg_wire_size <= 316) {
2634 		/* 50K ints/sec to 16K ints/sec */
2635 		avg_wire_size *= 40;
2636 		avg_wire_size += 2720;
2637 	} else if (avg_wire_size <= 1084) {
2638 		/* 16K ints/sec to 9.2K ints/sec */
2639 		avg_wire_size *= 15;
2640 		avg_wire_size += 11452;
2641 	} else if (avg_wire_size < 1968) {
2642 		/* 9.2K ints/sec to 8K ints/sec */
2643 		avg_wire_size *= 5;
2644 		avg_wire_size += 22420;
2645 	} else {
2646 		/* plateau at a limit of 8K ints/sec */
2647 		avg_wire_size = 32256;
2648 	}
2649 
2650 	/* If we are in low latency mode half our delay which doubles the rate
2651 	 * to somewhere between 100K to 16K ints/sec
2652 	 */
2653 	if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2654 		avg_wire_size >>= 1;
2655 
2656 	/* Resultant value is 256 times larger than it needs to be. This
2657 	 * gives us room to adjust the value as needed to either increase
2658 	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
2659 	 *
2660 	 * Use addition as we have already recorded the new latency flag
2661 	 * for the ITR value.
2662 	 */
2663 	switch (q_vector->adapter->link_speed) {
2664 	case IXGBE_LINK_SPEED_10GB_FULL:
2665 	case IXGBE_LINK_SPEED_100_FULL:
2666 	default:
2667 		itr += DIV_ROUND_UP(avg_wire_size,
2668 				    IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2669 		       IXGBE_ITR_ADAPTIVE_MIN_INC;
2670 		break;
2671 	case IXGBE_LINK_SPEED_2_5GB_FULL:
2672 	case IXGBE_LINK_SPEED_1GB_FULL:
2673 	case IXGBE_LINK_SPEED_10_FULL:
2674 		if (avg_wire_size > 8064)
2675 			avg_wire_size = 8064;
2676 		itr += DIV_ROUND_UP(avg_wire_size,
2677 				    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2678 		       IXGBE_ITR_ADAPTIVE_MIN_INC;
2679 		break;
2680 	}
2681 
2682 clear_counts:
2683 	/* write back value */
2684 	ring_container->itr = itr;
2685 
2686 	/* next update should occur within next jiffy */
2687 	ring_container->next_update = next_update + 1;
2688 
2689 	ring_container->total_bytes = 0;
2690 	ring_container->total_packets = 0;
2691 }
2692 
2693 /**
2694  * ixgbe_write_eitr - write EITR register in hardware specific way
2695  * @q_vector: structure containing interrupt and ring information
2696  *
2697  * This function is made to be called by ethtool and by the driver
2698  * when it needs to update EITR registers at runtime.  Hardware
2699  * specific quirks/differences are taken care of here.
2700  */
2701 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2702 {
2703 	struct ixgbe_adapter *adapter = q_vector->adapter;
2704 	struct ixgbe_hw *hw = &adapter->hw;
2705 	int v_idx = q_vector->v_idx;
2706 	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2707 
2708 	switch (adapter->hw.mac.type) {
2709 	case ixgbe_mac_82598EB:
2710 		/* must write high and low 16 bits to reset counter */
2711 		itr_reg |= (itr_reg << 16);
2712 		break;
2713 	case ixgbe_mac_82599EB:
2714 	case ixgbe_mac_X540:
2715 	case ixgbe_mac_X550:
2716 	case ixgbe_mac_X550EM_x:
2717 	case ixgbe_mac_x550em_a:
2718 		/*
2719 		 * set the WDIS bit to not clear the timer bits and cause an
2720 		 * immediate assertion of the interrupt
2721 		 */
2722 		itr_reg |= IXGBE_EITR_CNT_WDIS;
2723 		break;
2724 	default:
2725 		break;
2726 	}
2727 	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2728 }
2729 
2730 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2731 {
2732 	u32 new_itr;
2733 
2734 	ixgbe_update_itr(q_vector, &q_vector->tx);
2735 	ixgbe_update_itr(q_vector, &q_vector->rx);
2736 
2737 	/* use the smallest value of new ITR delay calculations */
2738 	new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2739 
2740 	/* Clear latency flag if set, shift into correct position */
2741 	new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2742 	new_itr <<= 2;
2743 
2744 	if (new_itr != q_vector->itr) {
2745 		/* save the algorithm value here */
2746 		q_vector->itr = new_itr;
2747 
2748 		ixgbe_write_eitr(q_vector);
2749 	}
2750 }
2751 
2752 /**
2753  * ixgbe_check_overtemp_subtask - check for over temperature
2754  * @adapter: pointer to adapter
2755  **/
2756 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2757 {
2758 	struct ixgbe_hw *hw = &adapter->hw;
2759 	u32 eicr = adapter->interrupt_event;
2760 	s32 rc;
2761 
2762 	if (test_bit(__IXGBE_DOWN, &adapter->state))
2763 		return;
2764 
2765 	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2766 		return;
2767 
2768 	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2769 
2770 	switch (hw->device_id) {
2771 	case IXGBE_DEV_ID_82599_T3_LOM:
2772 		/*
2773 		 * Since the warning interrupt is for both ports
2774 		 * we don't have to check if:
2775 		 *  - This interrupt wasn't for our port.
2776 		 *  - We may have missed the interrupt so always have to
2777 		 *    check if we  got a LSC
2778 		 */
2779 		if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2780 		    !(eicr & IXGBE_EICR_LSC))
2781 			return;
2782 
2783 		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2784 			u32 speed;
2785 			bool link_up = false;
2786 
2787 			hw->mac.ops.check_link(hw, &speed, &link_up, false);
2788 
2789 			if (link_up)
2790 				return;
2791 		}
2792 
2793 		/* Check if this is not due to overtemp */
2794 		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2795 			return;
2796 
2797 		break;
2798 	case IXGBE_DEV_ID_X550EM_A_1G_T:
2799 	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2800 		rc = hw->phy.ops.check_overtemp(hw);
2801 		if (rc != IXGBE_ERR_OVERTEMP)
2802 			return;
2803 		break;
2804 	default:
2805 		if (adapter->hw.mac.type >= ixgbe_mac_X540)
2806 			return;
2807 		if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2808 			return;
2809 		break;
2810 	}
2811 	e_crit(drv, "%s\n", ixgbe_overheat_msg);
2812 
2813 	adapter->interrupt_event = 0;
2814 }
2815 
2816 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2817 {
2818 	struct ixgbe_hw *hw = &adapter->hw;
2819 
2820 	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2821 	    (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2822 		e_crit(probe, "Fan has stopped, replace the adapter\n");
2823 		/* write to clear the interrupt */
2824 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2825 	}
2826 }
2827 
2828 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2829 {
2830 	struct ixgbe_hw *hw = &adapter->hw;
2831 
2832 	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2833 		return;
2834 
2835 	switch (adapter->hw.mac.type) {
2836 	case ixgbe_mac_82599EB:
2837 		/*
2838 		 * Need to check link state so complete overtemp check
2839 		 * on service task
2840 		 */
2841 		if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2842 		     (eicr & IXGBE_EICR_LSC)) &&
2843 		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2844 			adapter->interrupt_event = eicr;
2845 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2846 			ixgbe_service_event_schedule(adapter);
2847 			return;
2848 		}
2849 		return;
2850 	case ixgbe_mac_x550em_a:
2851 		if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2852 			adapter->interrupt_event = eicr;
2853 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2854 			ixgbe_service_event_schedule(adapter);
2855 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2856 					IXGBE_EICR_GPI_SDP0_X550EM_a);
2857 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2858 					IXGBE_EICR_GPI_SDP0_X550EM_a);
2859 		}
2860 		return;
2861 	case ixgbe_mac_X550:
2862 	case ixgbe_mac_X540:
2863 		if (!(eicr & IXGBE_EICR_TS))
2864 			return;
2865 		break;
2866 	default:
2867 		return;
2868 	}
2869 
2870 	e_crit(drv, "%s\n", ixgbe_overheat_msg);
2871 }
2872 
2873 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2874 {
2875 	switch (hw->mac.type) {
2876 	case ixgbe_mac_82598EB:
2877 		if (hw->phy.type == ixgbe_phy_nl)
2878 			return true;
2879 		return false;
2880 	case ixgbe_mac_82599EB:
2881 	case ixgbe_mac_X550EM_x:
2882 	case ixgbe_mac_x550em_a:
2883 		switch (hw->mac.ops.get_media_type(hw)) {
2884 		case ixgbe_media_type_fiber:
2885 		case ixgbe_media_type_fiber_qsfp:
2886 			return true;
2887 		default:
2888 			return false;
2889 		}
2890 	default:
2891 		return false;
2892 	}
2893 }
2894 
2895 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2896 {
2897 	struct ixgbe_hw *hw = &adapter->hw;
2898 	u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2899 
2900 	if (!ixgbe_is_sfp(hw))
2901 		return;
2902 
2903 	/* Later MAC's use different SDP */
2904 	if (hw->mac.type >= ixgbe_mac_X540)
2905 		eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2906 
2907 	if (eicr & eicr_mask) {
2908 		/* Clear the interrupt */
2909 		IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2910 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2911 			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2912 			adapter->sfp_poll_time = 0;
2913 			ixgbe_service_event_schedule(adapter);
2914 		}
2915 	}
2916 
2917 	if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2918 	    (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2919 		/* Clear the interrupt */
2920 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2921 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2922 			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2923 			ixgbe_service_event_schedule(adapter);
2924 		}
2925 	}
2926 }
2927 
2928 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2929 {
2930 	struct ixgbe_hw *hw = &adapter->hw;
2931 
2932 	adapter->lsc_int++;
2933 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2934 	adapter->link_check_timeout = jiffies;
2935 	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2936 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2937 		IXGBE_WRITE_FLUSH(hw);
2938 		ixgbe_service_event_schedule(adapter);
2939 	}
2940 }
2941 
2942 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2943 					   u64 qmask)
2944 {
2945 	u32 mask;
2946 	struct ixgbe_hw *hw = &adapter->hw;
2947 
2948 	switch (hw->mac.type) {
2949 	case ixgbe_mac_82598EB:
2950 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2951 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2952 		break;
2953 	case ixgbe_mac_82599EB:
2954 	case ixgbe_mac_X540:
2955 	case ixgbe_mac_X550:
2956 	case ixgbe_mac_X550EM_x:
2957 	case ixgbe_mac_x550em_a:
2958 		mask = (qmask & 0xFFFFFFFF);
2959 		if (mask)
2960 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2961 		mask = (qmask >> 32);
2962 		if (mask)
2963 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2964 		break;
2965 	default:
2966 		break;
2967 	}
2968 	/* skip the flush */
2969 }
2970 
2971 /**
2972  * ixgbe_irq_enable - Enable default interrupt generation settings
2973  * @adapter: board private structure
2974  * @queues: enable irqs for queues
2975  * @flush: flush register write
2976  **/
2977 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2978 				    bool flush)
2979 {
2980 	struct ixgbe_hw *hw = &adapter->hw;
2981 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2982 
2983 	/* don't reenable LSC while waiting for link */
2984 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2985 		mask &= ~IXGBE_EIMS_LSC;
2986 
2987 	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2988 		switch (adapter->hw.mac.type) {
2989 		case ixgbe_mac_82599EB:
2990 			mask |= IXGBE_EIMS_GPI_SDP0(hw);
2991 			break;
2992 		case ixgbe_mac_X540:
2993 		case ixgbe_mac_X550:
2994 		case ixgbe_mac_X550EM_x:
2995 		case ixgbe_mac_x550em_a:
2996 			mask |= IXGBE_EIMS_TS;
2997 			break;
2998 		default:
2999 			break;
3000 		}
3001 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3002 		mask |= IXGBE_EIMS_GPI_SDP1(hw);
3003 	switch (adapter->hw.mac.type) {
3004 	case ixgbe_mac_82599EB:
3005 		mask |= IXGBE_EIMS_GPI_SDP1(hw);
3006 		mask |= IXGBE_EIMS_GPI_SDP2(hw);
3007 		fallthrough;
3008 	case ixgbe_mac_X540:
3009 	case ixgbe_mac_X550:
3010 	case ixgbe_mac_X550EM_x:
3011 	case ixgbe_mac_x550em_a:
3012 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3013 		    adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3014 		    adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3015 			mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3016 		if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3017 			mask |= IXGBE_EICR_GPI_SDP0_X540;
3018 		mask |= IXGBE_EIMS_ECC;
3019 		mask |= IXGBE_EIMS_MAILBOX;
3020 		break;
3021 	default:
3022 		break;
3023 	}
3024 
3025 	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3026 	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3027 		mask |= IXGBE_EIMS_FLOW_DIR;
3028 
3029 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3030 	if (queues)
3031 		ixgbe_irq_enable_queues(adapter, ~0);
3032 	if (flush)
3033 		IXGBE_WRITE_FLUSH(&adapter->hw);
3034 }
3035 
3036 static irqreturn_t ixgbe_msix_other(int irq, void *data)
3037 {
3038 	struct ixgbe_adapter *adapter = data;
3039 	struct ixgbe_hw *hw = &adapter->hw;
3040 	u32 eicr;
3041 
3042 	/*
3043 	 * Workaround for Silicon errata.  Use clear-by-write instead
3044 	 * of clear-by-read.  Reading with EICS will return the
3045 	 * interrupt causes without clearing, which later be done
3046 	 * with the write to EICR.
3047 	 */
3048 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3049 
3050 	/* The lower 16bits of the EICR register are for the queue interrupts
3051 	 * which should be masked here in order to not accidentally clear them if
3052 	 * the bits are high when ixgbe_msix_other is called. There is a race
3053 	 * condition otherwise which results in possible performance loss
3054 	 * especially if the ixgbe_msix_other interrupt is triggering
3055 	 * consistently (as it would when PPS is turned on for the X540 device)
3056 	 */
3057 	eicr &= 0xFFFF0000;
3058 
3059 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3060 
3061 	if (eicr & IXGBE_EICR_LSC)
3062 		ixgbe_check_lsc(adapter);
3063 
3064 	if (eicr & IXGBE_EICR_MAILBOX)
3065 		ixgbe_msg_task(adapter);
3066 
3067 	switch (hw->mac.type) {
3068 	case ixgbe_mac_82599EB:
3069 	case ixgbe_mac_X540:
3070 	case ixgbe_mac_X550:
3071 	case ixgbe_mac_X550EM_x:
3072 	case ixgbe_mac_x550em_a:
3073 		if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3074 		    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3075 			adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3076 			ixgbe_service_event_schedule(adapter);
3077 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3078 					IXGBE_EICR_GPI_SDP0_X540);
3079 		}
3080 		if (eicr & IXGBE_EICR_ECC) {
3081 			e_info(link, "Received ECC Err, initiating reset\n");
3082 			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3083 			ixgbe_service_event_schedule(adapter);
3084 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3085 		}
3086 		/* Handle Flow Director Full threshold interrupt */
3087 		if (eicr & IXGBE_EICR_FLOW_DIR) {
3088 			int reinit_count = 0;
3089 			int i;
3090 			for (i = 0; i < adapter->num_tx_queues; i++) {
3091 				struct ixgbe_ring *ring = adapter->tx_ring[i];
3092 				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3093 						       &ring->state))
3094 					reinit_count++;
3095 			}
3096 			if (reinit_count) {
3097 				/* no more flow director interrupts until after init */
3098 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3099 				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3100 				ixgbe_service_event_schedule(adapter);
3101 			}
3102 		}
3103 		ixgbe_check_sfp_event(adapter, eicr);
3104 		ixgbe_check_overtemp_event(adapter, eicr);
3105 		break;
3106 	default:
3107 		break;
3108 	}
3109 
3110 	ixgbe_check_fan_failure(adapter, eicr);
3111 
3112 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3113 		ixgbe_ptp_check_pps_event(adapter);
3114 
3115 	/* re-enable the original interrupt state, no lsc, no queues */
3116 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
3117 		ixgbe_irq_enable(adapter, false, false);
3118 
3119 	return IRQ_HANDLED;
3120 }
3121 
3122 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3123 {
3124 	struct ixgbe_q_vector *q_vector = data;
3125 
3126 	/* EIAM disabled interrupts (on this vector) for us */
3127 
3128 	if (q_vector->rx.ring || q_vector->tx.ring)
3129 		napi_schedule_irqoff(&q_vector->napi);
3130 
3131 	return IRQ_HANDLED;
3132 }
3133 
3134 /**
3135  * ixgbe_poll - NAPI Rx polling callback
3136  * @napi: structure for representing this polling device
3137  * @budget: how many packets driver is allowed to clean
3138  *
3139  * This function is used for legacy and MSI, NAPI mode
3140  **/
3141 int ixgbe_poll(struct napi_struct *napi, int budget)
3142 {
3143 	struct ixgbe_q_vector *q_vector =
3144 				container_of(napi, struct ixgbe_q_vector, napi);
3145 	struct ixgbe_adapter *adapter = q_vector->adapter;
3146 	struct ixgbe_ring *ring;
3147 	int per_ring_budget, work_done = 0;
3148 	bool clean_complete = true;
3149 
3150 #ifdef CONFIG_IXGBE_DCA
3151 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3152 		ixgbe_update_dca(q_vector);
3153 #endif
3154 
3155 	ixgbe_for_each_ring(ring, q_vector->tx) {
3156 		bool wd = ring->xsk_pool ?
3157 			  ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3158 			  ixgbe_clean_tx_irq(q_vector, ring, budget);
3159 
3160 		if (!wd)
3161 			clean_complete = false;
3162 	}
3163 
3164 	/* Exit if we are called by netpoll */
3165 	if (budget <= 0)
3166 		return budget;
3167 
3168 	/* attempt to distribute budget to each queue fairly, but don't allow
3169 	 * the budget to go below 1 because we'll exit polling */
3170 	if (q_vector->rx.count > 1)
3171 		per_ring_budget = max(budget/q_vector->rx.count, 1);
3172 	else
3173 		per_ring_budget = budget;
3174 
3175 	ixgbe_for_each_ring(ring, q_vector->rx) {
3176 		int cleaned = ring->xsk_pool ?
3177 			      ixgbe_clean_rx_irq_zc(q_vector, ring,
3178 						    per_ring_budget) :
3179 			      ixgbe_clean_rx_irq(q_vector, ring,
3180 						 per_ring_budget);
3181 
3182 		work_done += cleaned;
3183 		if (cleaned >= per_ring_budget)
3184 			clean_complete = false;
3185 	}
3186 
3187 	/* If all work not completed, return budget and keep polling */
3188 	if (!clean_complete)
3189 		return budget;
3190 
3191 	/* all work done, exit the polling mode */
3192 	if (likely(napi_complete_done(napi, work_done))) {
3193 		if (adapter->rx_itr_setting & 1)
3194 			ixgbe_set_itr(q_vector);
3195 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
3196 			ixgbe_irq_enable_queues(adapter,
3197 						BIT_ULL(q_vector->v_idx));
3198 	}
3199 
3200 	return min(work_done, budget - 1);
3201 }
3202 
3203 /**
3204  * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
3205  * @adapter: board private structure
3206  *
3207  * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
3208  * interrupts from the kernel.
3209  **/
3210 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3211 {
3212 	struct net_device *netdev = adapter->netdev;
3213 	unsigned int ri = 0, ti = 0;
3214 	int vector, err;
3215 
3216 	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3217 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3218 		struct msix_entry *entry = &adapter->msix_entries[vector];
3219 
3220 		if (q_vector->tx.ring && q_vector->rx.ring) {
3221 			snprintf(q_vector->name, sizeof(q_vector->name),
3222 				 "%s-TxRx-%u", netdev->name, ri++);
3223 			ti++;
3224 		} else if (q_vector->rx.ring) {
3225 			snprintf(q_vector->name, sizeof(q_vector->name),
3226 				 "%s-rx-%u", netdev->name, ri++);
3227 		} else if (q_vector->tx.ring) {
3228 			snprintf(q_vector->name, sizeof(q_vector->name),
3229 				 "%s-tx-%u", netdev->name, ti++);
3230 		} else {
3231 			/* skip this unused q_vector */
3232 			continue;
3233 		}
3234 		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3235 				  q_vector->name, q_vector);
3236 		if (err) {
3237 			e_err(probe, "request_irq failed for MSIX interrupt "
3238 			      "Error: %d\n", err);
3239 			goto free_queue_irqs;
3240 		}
3241 		/* If Flow Director is enabled, set interrupt affinity */
3242 		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3243 			/* assign the mask for this irq */
3244 			irq_set_affinity_hint(entry->vector,
3245 					      &q_vector->affinity_mask);
3246 		}
3247 	}
3248 
3249 	err = request_irq(adapter->msix_entries[vector].vector,
3250 			  ixgbe_msix_other, 0, netdev->name, adapter);
3251 	if (err) {
3252 		e_err(probe, "request_irq for msix_other failed: %d\n", err);
3253 		goto free_queue_irqs;
3254 	}
3255 
3256 	return 0;
3257 
3258 free_queue_irqs:
3259 	while (vector) {
3260 		vector--;
3261 		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3262 				      NULL);
3263 		free_irq(adapter->msix_entries[vector].vector,
3264 			 adapter->q_vector[vector]);
3265 	}
3266 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3267 	pci_disable_msix(adapter->pdev);
3268 	kfree(adapter->msix_entries);
3269 	adapter->msix_entries = NULL;
3270 	return err;
3271 }
3272 
3273 /**
3274  * ixgbe_intr - legacy mode Interrupt Handler
3275  * @irq: interrupt number
3276  * @data: pointer to a network interface device structure
3277  **/
3278 static irqreturn_t ixgbe_intr(int irq, void *data)
3279 {
3280 	struct ixgbe_adapter *adapter = data;
3281 	struct ixgbe_hw *hw = &adapter->hw;
3282 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3283 	u32 eicr;
3284 
3285 	/*
3286 	 * Workaround for silicon errata #26 on 82598.  Mask the interrupt
3287 	 * before the read of EICR.
3288 	 */
3289 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3290 
3291 	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
3292 	 * therefore no explicit interrupt disable is necessary */
3293 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3294 	if (!eicr) {
3295 		/*
3296 		 * shared interrupt alert!
3297 		 * make sure interrupts are enabled because the read will
3298 		 * have disabled interrupts due to EIAM
3299 		 * finish the workaround of silicon errata on 82598.  Unmask
3300 		 * the interrupt that we masked before the EICR read.
3301 		 */
3302 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
3303 			ixgbe_irq_enable(adapter, true, true);
3304 		return IRQ_NONE;	/* Not our interrupt */
3305 	}
3306 
3307 	if (eicr & IXGBE_EICR_LSC)
3308 		ixgbe_check_lsc(adapter);
3309 
3310 	switch (hw->mac.type) {
3311 	case ixgbe_mac_82599EB:
3312 		ixgbe_check_sfp_event(adapter, eicr);
3313 		fallthrough;
3314 	case ixgbe_mac_X540:
3315 	case ixgbe_mac_X550:
3316 	case ixgbe_mac_X550EM_x:
3317 	case ixgbe_mac_x550em_a:
3318 		if (eicr & IXGBE_EICR_ECC) {
3319 			e_info(link, "Received ECC Err, initiating reset\n");
3320 			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3321 			ixgbe_service_event_schedule(adapter);
3322 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3323 		}
3324 		ixgbe_check_overtemp_event(adapter, eicr);
3325 		break;
3326 	default:
3327 		break;
3328 	}
3329 
3330 	ixgbe_check_fan_failure(adapter, eicr);
3331 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3332 		ixgbe_ptp_check_pps_event(adapter);
3333 
3334 	/* would disable interrupts here but EIAM disabled it */
3335 	napi_schedule_irqoff(&q_vector->napi);
3336 
3337 	/*
3338 	 * re-enable link(maybe) and non-queue interrupts, no flush.
3339 	 * ixgbe_poll will re-enable the queue interrupts
3340 	 */
3341 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
3342 		ixgbe_irq_enable(adapter, false, false);
3343 
3344 	return IRQ_HANDLED;
3345 }
3346 
3347 /**
3348  * ixgbe_request_irq - initialize interrupts
3349  * @adapter: board private structure
3350  *
3351  * Attempts to configure interrupts using the best available
3352  * capabilities of the hardware and kernel.
3353  **/
3354 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3355 {
3356 	struct net_device *netdev = adapter->netdev;
3357 	int err;
3358 
3359 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3360 		err = ixgbe_request_msix_irqs(adapter);
3361 	else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3362 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3363 				  netdev->name, adapter);
3364 	else
3365 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3366 				  netdev->name, adapter);
3367 
3368 	if (err)
3369 		e_err(probe, "request_irq failed, Error %d\n", err);
3370 
3371 	return err;
3372 }
3373 
3374 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3375 {
3376 	int vector;
3377 
3378 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3379 		free_irq(adapter->pdev->irq, adapter);
3380 		return;
3381 	}
3382 
3383 	if (!adapter->msix_entries)
3384 		return;
3385 
3386 	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3387 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3388 		struct msix_entry *entry = &adapter->msix_entries[vector];
3389 
3390 		/* free only the irqs that were actually requested */
3391 		if (!q_vector->rx.ring && !q_vector->tx.ring)
3392 			continue;
3393 
3394 		/* clear the affinity_mask in the IRQ descriptor */
3395 		irq_set_affinity_hint(entry->vector, NULL);
3396 
3397 		free_irq(entry->vector, q_vector);
3398 	}
3399 
3400 	free_irq(adapter->msix_entries[vector].vector, adapter);
3401 }
3402 
3403 /**
3404  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
3405  * @adapter: board private structure
3406  **/
3407 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3408 {
3409 	switch (adapter->hw.mac.type) {
3410 	case ixgbe_mac_82598EB:
3411 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3412 		break;
3413 	case ixgbe_mac_82599EB:
3414 	case ixgbe_mac_X540:
3415 	case ixgbe_mac_X550:
3416 	case ixgbe_mac_X550EM_x:
3417 	case ixgbe_mac_x550em_a:
3418 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3419 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3420 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3421 		break;
3422 	default:
3423 		break;
3424 	}
3425 	IXGBE_WRITE_FLUSH(&adapter->hw);
3426 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3427 		int vector;
3428 
3429 		for (vector = 0; vector < adapter->num_q_vectors; vector++)
3430 			synchronize_irq(adapter->msix_entries[vector].vector);
3431 
3432 		synchronize_irq(adapter->msix_entries[vector++].vector);
3433 	} else {
3434 		synchronize_irq(adapter->pdev->irq);
3435 	}
3436 }
3437 
3438 /**
3439  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
3440  * @adapter: board private structure
3441  *
3442  **/
3443 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3444 {
3445 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3446 
3447 	ixgbe_write_eitr(q_vector);
3448 
3449 	ixgbe_set_ivar(adapter, 0, 0, 0);
3450 	ixgbe_set_ivar(adapter, 1, 0, 0);
3451 
3452 	e_info(hw, "Legacy interrupt IVAR setup done\n");
3453 }
3454 
3455 /**
3456  * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
3457  * @adapter: board private structure
3458  * @ring: structure containing ring specific data
3459  *
3460  * Configure the Tx descriptor ring after a reset.
3461  **/
3462 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3463 			     struct ixgbe_ring *ring)
3464 {
3465 	struct ixgbe_hw *hw = &adapter->hw;
3466 	u64 tdba = ring->dma;
3467 	int wait_loop = 10;
3468 	u32 txdctl = IXGBE_TXDCTL_ENABLE;
3469 	u8 reg_idx = ring->reg_idx;
3470 
3471 	ring->xsk_pool = NULL;
3472 	if (ring_is_xdp(ring))
3473 		ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
3474 
3475 	/* disable queue to avoid issues while updating state */
3476 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3477 	IXGBE_WRITE_FLUSH(hw);
3478 
3479 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3480 			(tdba & DMA_BIT_MASK(32)));
3481 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3482 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3483 			ring->count * sizeof(union ixgbe_adv_tx_desc));
3484 	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3485 	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3486 	ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3487 
3488 	/*
3489 	 * set WTHRESH to encourage burst writeback, it should not be set
3490 	 * higher than 1 when:
3491 	 * - ITR is 0 as it could cause false TX hangs
3492 	 * - ITR is set to > 100k int/sec and BQL is enabled
3493 	 *
3494 	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
3495 	 * to or less than the number of on chip descriptors, which is
3496 	 * currently 40.
3497 	 */
3498 	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3499 		txdctl |= 1u << 16;	/* WTHRESH = 1 */
3500 	else
3501 		txdctl |= 8u << 16;	/* WTHRESH = 8 */
3502 
3503 	/*
3504 	 * Setting PTHRESH to 32 both improves performance
3505 	 * and avoids a TX hang with DFP enabled
3506 	 */
3507 	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
3508 		   32;		/* PTHRESH = 32 */
3509 
3510 	/* reinitialize flowdirector state */
3511 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3512 		ring->atr_sample_rate = adapter->atr_sample_rate;
3513 		ring->atr_count = 0;
3514 		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3515 	} else {
3516 		ring->atr_sample_rate = 0;
3517 	}
3518 
3519 	/* initialize XPS */
3520 	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3521 		struct ixgbe_q_vector *q_vector = ring->q_vector;
3522 
3523 		if (q_vector)
3524 			netif_set_xps_queue(ring->netdev,
3525 					    &q_vector->affinity_mask,
3526 					    ring->queue_index);
3527 	}
3528 
3529 	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3530 
3531 	/* reinitialize tx_buffer_info */
3532 	memset(ring->tx_buffer_info, 0,
3533 	       sizeof(struct ixgbe_tx_buffer) * ring->count);
3534 
3535 	/* enable queue */
3536 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3537 
3538 	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3539 	if (hw->mac.type == ixgbe_mac_82598EB &&
3540 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3541 		return;
3542 
3543 	/* poll to verify queue is enabled */
3544 	do {
3545 		usleep_range(1000, 2000);
3546 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3547 	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3548 	if (!wait_loop)
3549 		hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3550 }
3551 
3552 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3553 {
3554 	struct ixgbe_hw *hw = &adapter->hw;
3555 	u32 rttdcs, mtqc;
3556 	u8 tcs = adapter->hw_tcs;
3557 
3558 	if (hw->mac.type == ixgbe_mac_82598EB)
3559 		return;
3560 
3561 	/* disable the arbiter while setting MTQC */
3562 	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3563 	rttdcs |= IXGBE_RTTDCS_ARBDIS;
3564 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3565 
3566 	/* set transmit pool layout */
3567 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3568 		mtqc = IXGBE_MTQC_VT_ENA;
3569 		if (tcs > 4)
3570 			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3571 		else if (tcs > 1)
3572 			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3573 		else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3574 			 IXGBE_82599_VMDQ_4Q_MASK)
3575 			mtqc |= IXGBE_MTQC_32VF;
3576 		else
3577 			mtqc |= IXGBE_MTQC_64VF;
3578 	} else {
3579 		if (tcs > 4) {
3580 			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3581 		} else if (tcs > 1) {
3582 			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3583 		} else {
3584 			u8 max_txq = adapter->num_tx_queues +
3585 				adapter->num_xdp_queues;
3586 			if (max_txq > 63)
3587 				mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3588 			else
3589 				mtqc = IXGBE_MTQC_64Q_1PB;
3590 		}
3591 	}
3592 
3593 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3594 
3595 	/* Enable Security TX Buffer IFG for multiple pb */
3596 	if (tcs) {
3597 		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3598 		sectx |= IXGBE_SECTX_DCB;
3599 		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3600 	}
3601 
3602 	/* re-enable the arbiter */
3603 	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3604 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3605 }
3606 
3607 /**
3608  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3609  * @adapter: board private structure
3610  *
3611  * Configure the Tx unit of the MAC after a reset.
3612  **/
3613 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3614 {
3615 	struct ixgbe_hw *hw = &adapter->hw;
3616 	u32 dmatxctl;
3617 	u32 i;
3618 
3619 	ixgbe_setup_mtqc(adapter);
3620 
3621 	if (hw->mac.type != ixgbe_mac_82598EB) {
3622 		/* DMATXCTL.EN must be before Tx queues are enabled */
3623 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3624 		dmatxctl |= IXGBE_DMATXCTL_TE;
3625 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3626 	}
3627 
3628 	/* Setup the HW Tx Head and Tail descriptor pointers */
3629 	for (i = 0; i < adapter->num_tx_queues; i++)
3630 		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3631 	for (i = 0; i < adapter->num_xdp_queues; i++)
3632 		ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3633 }
3634 
3635 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3636 				 struct ixgbe_ring *ring)
3637 {
3638 	struct ixgbe_hw *hw = &adapter->hw;
3639 	u8 reg_idx = ring->reg_idx;
3640 	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3641 
3642 	srrctl |= IXGBE_SRRCTL_DROP_EN;
3643 
3644 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3645 }
3646 
3647 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3648 				  struct ixgbe_ring *ring)
3649 {
3650 	struct ixgbe_hw *hw = &adapter->hw;
3651 	u8 reg_idx = ring->reg_idx;
3652 	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3653 
3654 	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3655 
3656 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3657 }
3658 
3659 #ifdef CONFIG_IXGBE_DCB
3660 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3661 #else
3662 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3663 #endif
3664 {
3665 	int i;
3666 	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3667 
3668 	if (adapter->ixgbe_ieee_pfc)
3669 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3670 
3671 	/*
3672 	 * We should set the drop enable bit if:
3673 	 *  SR-IOV is enabled
3674 	 *   or
3675 	 *  Number of Rx queues > 1 and flow control is disabled
3676 	 *
3677 	 *  This allows us to avoid head of line blocking for security
3678 	 *  and performance reasons.
3679 	 */
3680 	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3681 	    !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3682 		for (i = 0; i < adapter->num_rx_queues; i++)
3683 			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3684 	} else {
3685 		for (i = 0; i < adapter->num_rx_queues; i++)
3686 			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3687 	}
3688 }
3689 
3690 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3691 
3692 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3693 				   struct ixgbe_ring *rx_ring)
3694 {
3695 	struct ixgbe_hw *hw = &adapter->hw;
3696 	u32 srrctl;
3697 	u8 reg_idx = rx_ring->reg_idx;
3698 
3699 	if (hw->mac.type == ixgbe_mac_82598EB) {
3700 		u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3701 
3702 		/*
3703 		 * if VMDq is not active we must program one srrctl register
3704 		 * per RSS queue since we have enabled RDRXCTL.MVMEN
3705 		 */
3706 		reg_idx &= mask;
3707 	}
3708 
3709 	/* configure header buffer length, needed for RSC */
3710 	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3711 
3712 	/* configure the packet buffer length */
3713 	if (rx_ring->xsk_pool) {
3714 		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
3715 
3716 		/* If the MAC support setting RXDCTL.RLPML, the
3717 		 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
3718 		 * RXDCTL.RLPML is set to the actual UMEM buffer
3719 		 * size. If not, then we are stuck with a 1k buffer
3720 		 * size resolution. In this case frames larger than
3721 		 * the UMEM buffer size viewed in a 1k resolution will
3722 		 * be dropped.
3723 		 */
3724 		if (hw->mac.type != ixgbe_mac_82599EB)
3725 			srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3726 		else
3727 			srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3728 	} else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3729 		srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3730 	} else {
3731 		srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3732 	}
3733 
3734 	/* configure descriptor type */
3735 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3736 
3737 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3738 }
3739 
3740 /**
3741  * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
3742  * @adapter: device handle
3743  *
3744  *  - 82598/82599/X540:     128
3745  *  - X550(non-SRIOV mode): 512
3746  *  - X550(SRIOV mode):     64
3747  */
3748 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3749 {
3750 	if (adapter->hw.mac.type < ixgbe_mac_X550)
3751 		return 128;
3752 	else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3753 		return 64;
3754 	else
3755 		return 512;
3756 }
3757 
3758 /**
3759  * ixgbe_store_key - Write the RSS key to HW
3760  * @adapter: device handle
3761  *
3762  * Write the RSS key stored in adapter.rss_key to HW.
3763  */
3764 void ixgbe_store_key(struct ixgbe_adapter *adapter)
3765 {
3766 	struct ixgbe_hw *hw = &adapter->hw;
3767 	int i;
3768 
3769 	for (i = 0; i < 10; i++)
3770 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3771 }
3772 
3773 /**
3774  * ixgbe_init_rss_key - Initialize adapter RSS key
3775  * @adapter: device handle
3776  *
3777  * Allocates and initializes the RSS key if it is not allocated.
3778  **/
3779 static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3780 {
3781 	u32 *rss_key;
3782 
3783 	if (!adapter->rss_key) {
3784 		rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3785 		if (unlikely(!rss_key))
3786 			return -ENOMEM;
3787 
3788 		netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3789 		adapter->rss_key = rss_key;
3790 	}
3791 
3792 	return 0;
3793 }
3794 
3795 /**
3796  * ixgbe_store_reta - Write the RETA table to HW
3797  * @adapter: device handle
3798  *
3799  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3800  */
3801 void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3802 {
3803 	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3804 	struct ixgbe_hw *hw = &adapter->hw;
3805 	u32 reta = 0;
3806 	u32 indices_multi;
3807 	u8 *indir_tbl = adapter->rss_indir_tbl;
3808 
3809 	/* Fill out the redirection table as follows:
3810 	 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
3811 	 *    indices.
3812 	 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
3813 	 *  - X550:       8 bit wide entries containing 6 bit RSS index
3814 	 */
3815 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3816 		indices_multi = 0x11;
3817 	else
3818 		indices_multi = 0x1;
3819 
3820 	/* Write redirection table to HW */
3821 	for (i = 0; i < reta_entries; i++) {
3822 		reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3823 		if ((i & 3) == 3) {
3824 			if (i < 128)
3825 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3826 			else
3827 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3828 						reta);
3829 			reta = 0;
3830 		}
3831 	}
3832 }
3833 
3834 /**
3835  * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
3836  * @adapter: device handle
3837  *
3838  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3839  */
3840 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3841 {
3842 	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3843 	struct ixgbe_hw *hw = &adapter->hw;
3844 	u32 vfreta = 0;
3845 
3846 	/* Write redirection table to HW */
3847 	for (i = 0; i < reta_entries; i++) {
3848 		u16 pool = adapter->num_rx_pools;
3849 
3850 		vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3851 		if ((i & 3) != 3)
3852 			continue;
3853 
3854 		while (pool--)
3855 			IXGBE_WRITE_REG(hw,
3856 					IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3857 					vfreta);
3858 		vfreta = 0;
3859 	}
3860 }
3861 
3862 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3863 {
3864 	u32 i, j;
3865 	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3866 	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3867 
3868 	/* Program table for at least 4 queues w/ SR-IOV so that VFs can
3869 	 * make full use of any rings they may have.  We will use the
3870 	 * PSRTYPE register to control how many rings we use within the PF.
3871 	 */
3872 	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3873 		rss_i = 4;
3874 
3875 	/* Fill out hash function seeds */
3876 	ixgbe_store_key(adapter);
3877 
3878 	/* Fill out redirection table */
3879 	memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3880 
3881 	for (i = 0, j = 0; i < reta_entries; i++, j++) {
3882 		if (j == rss_i)
3883 			j = 0;
3884 
3885 		adapter->rss_indir_tbl[i] = j;
3886 	}
3887 
3888 	ixgbe_store_reta(adapter);
3889 }
3890 
3891 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3892 {
3893 	struct ixgbe_hw *hw = &adapter->hw;
3894 	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3895 	int i, j;
3896 
3897 	/* Fill out hash function seeds */
3898 	for (i = 0; i < 10; i++) {
3899 		u16 pool = adapter->num_rx_pools;
3900 
3901 		while (pool--)
3902 			IXGBE_WRITE_REG(hw,
3903 					IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3904 					*(adapter->rss_key + i));
3905 	}
3906 
3907 	/* Fill out the redirection table */
3908 	for (i = 0, j = 0; i < 64; i++, j++) {
3909 		if (j == rss_i)
3910 			j = 0;
3911 
3912 		adapter->rss_indir_tbl[i] = j;
3913 	}
3914 
3915 	ixgbe_store_vfreta(adapter);
3916 }
3917 
3918 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3919 {
3920 	struct ixgbe_hw *hw = &adapter->hw;
3921 	u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3922 	u32 rxcsum;
3923 
3924 	/* Disable indicating checksum in descriptor, enables RSS hash */
3925 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3926 	rxcsum |= IXGBE_RXCSUM_PCSD;
3927 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3928 
3929 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3930 		if (adapter->ring_feature[RING_F_RSS].mask)
3931 			mrqc = IXGBE_MRQC_RSSEN;
3932 	} else {
3933 		u8 tcs = adapter->hw_tcs;
3934 
3935 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3936 			if (tcs > 4)
3937 				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */
3938 			else if (tcs > 1)
3939 				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */
3940 			else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3941 				 IXGBE_82599_VMDQ_4Q_MASK)
3942 				mrqc = IXGBE_MRQC_VMDQRSS32EN;
3943 			else
3944 				mrqc = IXGBE_MRQC_VMDQRSS64EN;
3945 
3946 			/* Enable L3/L4 for Tx Switched packets only for X550,
3947 			 * older devices do not support this feature
3948 			 */
3949 			if (hw->mac.type >= ixgbe_mac_X550)
3950 				mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3951 		} else {
3952 			if (tcs > 4)
3953 				mrqc = IXGBE_MRQC_RTRSS8TCEN;
3954 			else if (tcs > 1)
3955 				mrqc = IXGBE_MRQC_RTRSS4TCEN;
3956 			else
3957 				mrqc = IXGBE_MRQC_RSSEN;
3958 		}
3959 	}
3960 
3961 	/* Perform hash on these packet types */
3962 	rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3963 		     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3964 		     IXGBE_MRQC_RSS_FIELD_IPV6 |
3965 		     IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3966 
3967 	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3968 		rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3969 	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3970 		rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3971 
3972 	if ((hw->mac.type >= ixgbe_mac_X550) &&
3973 	    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3974 		u16 pool = adapter->num_rx_pools;
3975 
3976 		/* Enable VF RSS mode */
3977 		mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3978 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3979 
3980 		/* Setup RSS through the VF registers */
3981 		ixgbe_setup_vfreta(adapter);
3982 		vfmrqc = IXGBE_MRQC_RSSEN;
3983 		vfmrqc |= rss_field;
3984 
3985 		while (pool--)
3986 			IXGBE_WRITE_REG(hw,
3987 					IXGBE_PFVFMRQC(VMDQ_P(pool)),
3988 					vfmrqc);
3989 	} else {
3990 		ixgbe_setup_reta(adapter);
3991 		mrqc |= rss_field;
3992 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3993 	}
3994 }
3995 
3996 /**
3997  * ixgbe_configure_rscctl - enable RSC for the indicated ring
3998  * @adapter: address of board private structure
3999  * @ring: structure containing ring specific data
4000  **/
4001 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4002 				   struct ixgbe_ring *ring)
4003 {
4004 	struct ixgbe_hw *hw = &adapter->hw;
4005 	u32 rscctrl;
4006 	u8 reg_idx = ring->reg_idx;
4007 
4008 	if (!ring_is_rsc_enabled(ring))
4009 		return;
4010 
4011 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4012 	rscctrl |= IXGBE_RSCCTL_RSCEN;
4013 	/*
4014 	 * we must limit the number of descriptors so that the
4015 	 * total size of max desc * buf_len is not greater
4016 	 * than 65536
4017 	 */
4018 	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4019 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4020 }
4021 
4022 #define IXGBE_MAX_RX_DESC_POLL 10
4023 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4024 				       struct ixgbe_ring *ring)
4025 {
4026 	struct ixgbe_hw *hw = &adapter->hw;
4027 	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4028 	u32 rxdctl;
4029 	u8 reg_idx = ring->reg_idx;
4030 
4031 	if (ixgbe_removed(hw->hw_addr))
4032 		return;
4033 	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
4034 	if (hw->mac.type == ixgbe_mac_82598EB &&
4035 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4036 		return;
4037 
4038 	do {
4039 		usleep_range(1000, 2000);
4040 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4041 	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4042 
4043 	if (!wait_loop) {
4044 		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
4045 		      "the polling period\n", reg_idx);
4046 	}
4047 }
4048 
4049 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4050 			     struct ixgbe_ring *ring)
4051 {
4052 	struct ixgbe_hw *hw = &adapter->hw;
4053 	union ixgbe_adv_rx_desc *rx_desc;
4054 	u64 rdba = ring->dma;
4055 	u32 rxdctl;
4056 	u8 reg_idx = ring->reg_idx;
4057 
4058 	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4059 	ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
4060 	if (ring->xsk_pool) {
4061 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4062 						   MEM_TYPE_XSK_BUFF_POOL,
4063 						   NULL));
4064 		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
4065 	} else {
4066 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4067 						   MEM_TYPE_PAGE_SHARED, NULL));
4068 	}
4069 
4070 	/* disable queue to avoid use of these values while updating state */
4071 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4072 	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4073 
4074 	/* write value back with RXDCTL.ENABLE bit cleared */
4075 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4076 	IXGBE_WRITE_FLUSH(hw);
4077 
4078 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4079 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4080 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4081 			ring->count * sizeof(union ixgbe_adv_rx_desc));
4082 	/* Force flushing of IXGBE_RDLEN to prevent MDD */
4083 	IXGBE_WRITE_FLUSH(hw);
4084 
4085 	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4086 	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4087 	ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4088 
4089 	ixgbe_configure_srrctl(adapter, ring);
4090 	ixgbe_configure_rscctl(adapter, ring);
4091 
4092 	if (hw->mac.type == ixgbe_mac_82598EB) {
4093 		/*
4094 		 * enable cache line friendly hardware writes:
4095 		 * PTHRESH=32 descriptors (half the internal cache),
4096 		 * this also removes ugly rx_no_buffer_count increment
4097 		 * HTHRESH=4 descriptors (to minimize latency on fetch)
4098 		 * WTHRESH=8 burst writeback up to two cache lines
4099 		 */
4100 		rxdctl &= ~0x3FFFFF;
4101 		rxdctl |=  0x080420;
4102 #if (PAGE_SIZE < 8192)
4103 	/* RXDCTL.RLPML does not work on 82599 */
4104 	} else if (hw->mac.type != ixgbe_mac_82599EB) {
4105 		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4106 			    IXGBE_RXDCTL_RLPML_EN);
4107 
4108 		/* Limit the maximum frame size so we don't overrun the skb.
4109 		 * This can happen in SRIOV mode when the MTU of the VF is
4110 		 * higher than the MTU of the PF.
4111 		 */
4112 		if (ring_uses_build_skb(ring) &&
4113 		    !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4114 			rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4115 				  IXGBE_RXDCTL_RLPML_EN;
4116 #endif
4117 	}
4118 
4119 	ring->rx_offset = ixgbe_rx_offset(ring);
4120 
4121 	if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
4122 		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
4123 
4124 		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4125 			    IXGBE_RXDCTL_RLPML_EN);
4126 		rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4127 
4128 		ring->rx_buf_len = xsk_buf_len;
4129 	}
4130 
4131 	/* initialize rx_buffer_info */
4132 	memset(ring->rx_buffer_info, 0,
4133 	       sizeof(struct ixgbe_rx_buffer) * ring->count);
4134 
4135 	/* initialize Rx descriptor 0 */
4136 	rx_desc = IXGBE_RX_DESC(ring, 0);
4137 	rx_desc->wb.upper.length = 0;
4138 
4139 	/* enable receive descriptor ring */
4140 	rxdctl |= IXGBE_RXDCTL_ENABLE;
4141 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4142 
4143 	ixgbe_rx_desc_queue_enable(adapter, ring);
4144 	if (ring->xsk_pool)
4145 		ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4146 	else
4147 		ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4148 }
4149 
4150 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4151 {
4152 	struct ixgbe_hw *hw = &adapter->hw;
4153 	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4154 	u16 pool = adapter->num_rx_pools;
4155 
4156 	/* PSRTYPE must be initialized in non 82598 adapters */
4157 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4158 		      IXGBE_PSRTYPE_UDPHDR |
4159 		      IXGBE_PSRTYPE_IPV4HDR |
4160 		      IXGBE_PSRTYPE_L2HDR |
4161 		      IXGBE_PSRTYPE_IPV6HDR;
4162 
4163 	if (hw->mac.type == ixgbe_mac_82598EB)
4164 		return;
4165 
4166 	if (rss_i > 3)
4167 		psrtype |= 2u << 29;
4168 	else if (rss_i > 1)
4169 		psrtype |= 1u << 29;
4170 
4171 	while (pool--)
4172 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4173 }
4174 
4175 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4176 {
4177 	struct ixgbe_hw *hw = &adapter->hw;
4178 	u16 pool = adapter->num_rx_pools;
4179 	u32 reg_offset, vf_shift, vmolr;
4180 	u32 gcr_ext, vmdctl;
4181 	int i;
4182 
4183 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4184 		return;
4185 
4186 	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4187 	vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4188 	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4189 	vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4190 	vmdctl |= IXGBE_VT_CTL_REPLEN;
4191 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4192 
4193 	/* accept untagged packets until a vlan tag is
4194 	 * specifically set for the VMDQ queue/pool
4195 	 */
4196 	vmolr = IXGBE_VMOLR_AUPE;
4197 	while (pool--)
4198 		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4199 
4200 	vf_shift = VMDQ_P(0) % 32;
4201 	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4202 
4203 	/* Enable only the PF's pool for Tx/Rx */
4204 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4205 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4206 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4207 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4208 	if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4209 		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4210 
4211 	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
4212 	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4213 
4214 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
4215 	adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4216 
4217 	/*
4218 	 * Set up VF register offsets for selected VT Mode,
4219 	 * i.e. 32 or 64 VFs for SR-IOV
4220 	 */
4221 	switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4222 	case IXGBE_82599_VMDQ_8Q_MASK:
4223 		gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4224 		break;
4225 	case IXGBE_82599_VMDQ_4Q_MASK:
4226 		gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4227 		break;
4228 	default:
4229 		gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4230 		break;
4231 	}
4232 
4233 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4234 
4235 	for (i = 0; i < adapter->num_vfs; i++) {
4236 		/* configure spoof checking */
4237 		ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4238 					  adapter->vfinfo[i].spoofchk_enabled);
4239 
4240 		/* Enable/Disable RSS query feature  */
4241 		ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4242 					  adapter->vfinfo[i].rss_query_enabled);
4243 	}
4244 }
4245 
4246 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4247 {
4248 	struct ixgbe_hw *hw = &adapter->hw;
4249 	struct net_device *netdev = adapter->netdev;
4250 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4251 	struct ixgbe_ring *rx_ring;
4252 	int i;
4253 	u32 mhadd, hlreg0;
4254 
4255 #ifdef IXGBE_FCOE
4256 	/* adjust max frame to be able to do baby jumbo for FCoE */
4257 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4258 	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4259 		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4260 
4261 #endif /* IXGBE_FCOE */
4262 
4263 	/* adjust max frame to be at least the size of a standard frame */
4264 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4265 		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4266 
4267 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4268 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4269 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
4270 		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4271 
4272 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4273 	}
4274 
4275 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4276 	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
4277 	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4278 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4279 
4280 	/*
4281 	 * Setup the HW Rx Head and Tail Descriptor Pointers and
4282 	 * the Base and Length of the Rx Descriptor Ring
4283 	 */
4284 	for (i = 0; i < adapter->num_rx_queues; i++) {
4285 		rx_ring = adapter->rx_ring[i];
4286 
4287 		clear_ring_rsc_enabled(rx_ring);
4288 		clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4289 		clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4290 
4291 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4292 			set_ring_rsc_enabled(rx_ring);
4293 
4294 		if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4295 			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4296 
4297 		if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4298 			continue;
4299 
4300 		set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4301 
4302 #if (PAGE_SIZE < 8192)
4303 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4304 			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4305 
4306 		if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4307 		    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4308 			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4309 #endif
4310 	}
4311 }
4312 
4313 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4314 {
4315 	struct ixgbe_hw *hw = &adapter->hw;
4316 	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4317 
4318 	switch (hw->mac.type) {
4319 	case ixgbe_mac_82598EB:
4320 		/*
4321 		 * For VMDq support of different descriptor types or
4322 		 * buffer sizes through the use of multiple SRRCTL
4323 		 * registers, RDRXCTL.MVMEN must be set to 1
4324 		 *
4325 		 * also, the manual doesn't mention it clearly but DCA hints
4326 		 * will only use queue 0's tags unless this bit is set.  Side
4327 		 * effects of setting this bit are only that SRRCTL must be
4328 		 * fully programmed [0..15]
4329 		 */
4330 		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4331 		break;
4332 	case ixgbe_mac_X550:
4333 	case ixgbe_mac_X550EM_x:
4334 	case ixgbe_mac_x550em_a:
4335 		if (adapter->num_vfs)
4336 			rdrxctl |= IXGBE_RDRXCTL_PSP;
4337 		fallthrough;
4338 	case ixgbe_mac_82599EB:
4339 	case ixgbe_mac_X540:
4340 		/* Disable RSC for ACK packets */
4341 		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4342 		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4343 		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4344 		/* hardware requires some bits to be set by default */
4345 		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4346 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4347 		break;
4348 	default:
4349 		/* We should do nothing since we don't know this hardware */
4350 		return;
4351 	}
4352 
4353 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4354 }
4355 
4356 /**
4357  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
4358  * @adapter: board private structure
4359  *
4360  * Configure the Rx unit of the MAC after a reset.
4361  **/
4362 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4363 {
4364 	struct ixgbe_hw *hw = &adapter->hw;
4365 	int i;
4366 	u32 rxctrl, rfctl;
4367 
4368 	/* disable receives while setting up the descriptors */
4369 	hw->mac.ops.disable_rx(hw);
4370 
4371 	ixgbe_setup_psrtype(adapter);
4372 	ixgbe_setup_rdrxctl(adapter);
4373 
4374 	/* RSC Setup */
4375 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4376 	rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4377 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4378 		rfctl |= IXGBE_RFCTL_RSC_DIS;
4379 
4380 	/* disable NFS filtering */
4381 	rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4382 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4383 
4384 	/* Program registers for the distribution of queues */
4385 	ixgbe_setup_mrqc(adapter);
4386 
4387 	/* set_rx_buffer_len must be called before ring initialization */
4388 	ixgbe_set_rx_buffer_len(adapter);
4389 
4390 	/*
4391 	 * Setup the HW Rx Head and Tail Descriptor Pointers and
4392 	 * the Base and Length of the Rx Descriptor Ring
4393 	 */
4394 	for (i = 0; i < adapter->num_rx_queues; i++)
4395 		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4396 
4397 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4398 	/* disable drop enable for 82598 parts */
4399 	if (hw->mac.type == ixgbe_mac_82598EB)
4400 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
4401 
4402 	/* enable all receives */
4403 	rxctrl |= IXGBE_RXCTRL_RXEN;
4404 	hw->mac.ops.enable_rx_dma(hw, rxctrl);
4405 }
4406 
4407 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4408 				 __be16 proto, u16 vid)
4409 {
4410 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4411 	struct ixgbe_hw *hw = &adapter->hw;
4412 
4413 	/* add VID to filter table */
4414 	if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4415 		hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4416 
4417 	set_bit(vid, adapter->active_vlans);
4418 
4419 	return 0;
4420 }
4421 
4422 static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4423 {
4424 	u32 vlvf;
4425 	int idx;
4426 
4427 	/* short cut the special case */
4428 	if (vlan == 0)
4429 		return 0;
4430 
4431 	/* Search for the vlan id in the VLVF entries */
4432 	for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4433 		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4434 		if ((vlvf & VLAN_VID_MASK) == vlan)
4435 			break;
4436 	}
4437 
4438 	return idx;
4439 }
4440 
4441 void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4442 {
4443 	struct ixgbe_hw *hw = &adapter->hw;
4444 	u32 bits, word;
4445 	int idx;
4446 
4447 	idx = ixgbe_find_vlvf_entry(hw, vid);
4448 	if (!idx)
4449 		return;
4450 
4451 	/* See if any other pools are set for this VLAN filter
4452 	 * entry other than the PF.
4453 	 */
4454 	word = idx * 2 + (VMDQ_P(0) / 32);
4455 	bits = ~BIT(VMDQ_P(0) % 32);
4456 	bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4457 
4458 	/* Disable the filter so this falls into the default pool. */
4459 	if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4460 		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4461 			IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4462 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4463 	}
4464 }
4465 
4466 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4467 				  __be16 proto, u16 vid)
4468 {
4469 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4470 	struct ixgbe_hw *hw = &adapter->hw;
4471 
4472 	/* remove VID from filter table */
4473 	if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4474 		hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4475 
4476 	clear_bit(vid, adapter->active_vlans);
4477 
4478 	return 0;
4479 }
4480 
4481 /**
4482  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
4483  * @adapter: driver data
4484  */
4485 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4486 {
4487 	struct ixgbe_hw *hw = &adapter->hw;
4488 	u32 vlnctrl;
4489 	int i, j;
4490 
4491 	switch (hw->mac.type) {
4492 	case ixgbe_mac_82598EB:
4493 		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4494 		vlnctrl &= ~IXGBE_VLNCTRL_VME;
4495 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4496 		break;
4497 	case ixgbe_mac_82599EB:
4498 	case ixgbe_mac_X540:
4499 	case ixgbe_mac_X550:
4500 	case ixgbe_mac_X550EM_x:
4501 	case ixgbe_mac_x550em_a:
4502 		for (i = 0; i < adapter->num_rx_queues; i++) {
4503 			struct ixgbe_ring *ring = adapter->rx_ring[i];
4504 
4505 			if (!netif_is_ixgbe(ring->netdev))
4506 				continue;
4507 
4508 			j = ring->reg_idx;
4509 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4510 			vlnctrl &= ~IXGBE_RXDCTL_VME;
4511 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4512 		}
4513 		break;
4514 	default:
4515 		break;
4516 	}
4517 }
4518 
4519 /**
4520  * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
4521  * @adapter: driver data
4522  */
4523 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4524 {
4525 	struct ixgbe_hw *hw = &adapter->hw;
4526 	u32 vlnctrl;
4527 	int i, j;
4528 
4529 	switch (hw->mac.type) {
4530 	case ixgbe_mac_82598EB:
4531 		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4532 		vlnctrl |= IXGBE_VLNCTRL_VME;
4533 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4534 		break;
4535 	case ixgbe_mac_82599EB:
4536 	case ixgbe_mac_X540:
4537 	case ixgbe_mac_X550:
4538 	case ixgbe_mac_X550EM_x:
4539 	case ixgbe_mac_x550em_a:
4540 		for (i = 0; i < adapter->num_rx_queues; i++) {
4541 			struct ixgbe_ring *ring = adapter->rx_ring[i];
4542 
4543 			if (!netif_is_ixgbe(ring->netdev))
4544 				continue;
4545 
4546 			j = ring->reg_idx;
4547 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4548 			vlnctrl |= IXGBE_RXDCTL_VME;
4549 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4550 		}
4551 		break;
4552 	default:
4553 		break;
4554 	}
4555 }
4556 
4557 static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4558 {
4559 	struct ixgbe_hw *hw = &adapter->hw;
4560 	u32 vlnctrl, i;
4561 
4562 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4563 
4564 	if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4565 	/* For VMDq and SR-IOV we must leave VLAN filtering enabled */
4566 		vlnctrl |= IXGBE_VLNCTRL_VFE;
4567 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4568 	} else {
4569 		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4570 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4571 		return;
4572 	}
4573 
4574 	/* Nothing to do for 82598 */
4575 	if (hw->mac.type == ixgbe_mac_82598EB)
4576 		return;
4577 
4578 	/* We are already in VLAN promisc, nothing to do */
4579 	if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4580 		return;
4581 
4582 	/* Set flag so we don't redo unnecessary work */
4583 	adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4584 
4585 	/* Add PF to all active pools */
4586 	for (i = IXGBE_VLVF_ENTRIES; --i;) {
4587 		u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4588 		u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4589 
4590 		vlvfb |= BIT(VMDQ_P(0) % 32);
4591 		IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4592 	}
4593 
4594 	/* Set all bits in the VLAN filter table array */
4595 	for (i = hw->mac.vft_size; i--;)
4596 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4597 }
4598 
4599 #define VFTA_BLOCK_SIZE 8
4600 static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4601 {
4602 	struct ixgbe_hw *hw = &adapter->hw;
4603 	u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4604 	u32 vid_start = vfta_offset * 32;
4605 	u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4606 	u32 i, vid, word, bits;
4607 
4608 	for (i = IXGBE_VLVF_ENTRIES; --i;) {
4609 		u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4610 
4611 		/* pull VLAN ID from VLVF */
4612 		vid = vlvf & VLAN_VID_MASK;
4613 
4614 		/* only concern outselves with a certain range */
4615 		if (vid < vid_start || vid >= vid_end)
4616 			continue;
4617 
4618 		if (vlvf) {
4619 			/* record VLAN ID in VFTA */
4620 			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4621 
4622 			/* if PF is part of this then continue */
4623 			if (test_bit(vid, adapter->active_vlans))
4624 				continue;
4625 		}
4626 
4627 		/* remove PF from the pool */
4628 		word = i * 2 + VMDQ_P(0) / 32;
4629 		bits = ~BIT(VMDQ_P(0) % 32);
4630 		bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4631 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4632 	}
4633 
4634 	/* extract values from active_vlans and write back to VFTA */
4635 	for (i = VFTA_BLOCK_SIZE; i--;) {
4636 		vid = (vfta_offset + i) * 32;
4637 		word = vid / BITS_PER_LONG;
4638 		bits = vid % BITS_PER_LONG;
4639 
4640 		vfta[i] |= adapter->active_vlans[word] >> bits;
4641 
4642 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4643 	}
4644 }
4645 
4646 static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4647 {
4648 	struct ixgbe_hw *hw = &adapter->hw;
4649 	u32 vlnctrl, i;
4650 
4651 	/* Set VLAN filtering to enabled */
4652 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4653 	vlnctrl |= IXGBE_VLNCTRL_VFE;
4654 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4655 
4656 	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4657 	    hw->mac.type == ixgbe_mac_82598EB)
4658 		return;
4659 
4660 	/* We are not in VLAN promisc, nothing to do */
4661 	if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4662 		return;
4663 
4664 	/* Set flag so we don't redo unnecessary work */
4665 	adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4666 
4667 	for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4668 		ixgbe_scrub_vfta(adapter, i);
4669 }
4670 
4671 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4672 {
4673 	u16 vid = 1;
4674 
4675 	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4676 
4677 	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4678 		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4679 }
4680 
4681 /**
4682  * ixgbe_write_mc_addr_list - write multicast addresses to MTA
4683  * @netdev: network interface device structure
4684  *
4685  * Writes multicast address list to the MTA hash table.
4686  * Returns: -ENOMEM on failure
4687  *                0 on no addresses written
4688  *                X on writing X addresses to MTA
4689  **/
4690 static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4691 {
4692 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4693 	struct ixgbe_hw *hw = &adapter->hw;
4694 
4695 	if (!netif_running(netdev))
4696 		return 0;
4697 
4698 	if (hw->mac.ops.update_mc_addr_list)
4699 		hw->mac.ops.update_mc_addr_list(hw, netdev);
4700 	else
4701 		return -ENOMEM;
4702 
4703 #ifdef CONFIG_PCI_IOV
4704 	ixgbe_restore_vf_multicasts(adapter);
4705 #endif
4706 
4707 	return netdev_mc_count(netdev);
4708 }
4709 
4710 #ifdef CONFIG_PCI_IOV
4711 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4712 {
4713 	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4714 	struct ixgbe_hw *hw = &adapter->hw;
4715 	int i;
4716 
4717 	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4718 		mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4719 
4720 		if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4721 			hw->mac.ops.set_rar(hw, i,
4722 					    mac_table->addr,
4723 					    mac_table->pool,
4724 					    IXGBE_RAH_AV);
4725 		else
4726 			hw->mac.ops.clear_rar(hw, i);
4727 	}
4728 }
4729 
4730 #endif
4731 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4732 {
4733 	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4734 	struct ixgbe_hw *hw = &adapter->hw;
4735 	int i;
4736 
4737 	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4738 		if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4739 			continue;
4740 
4741 		mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4742 
4743 		if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4744 			hw->mac.ops.set_rar(hw, i,
4745 					    mac_table->addr,
4746 					    mac_table->pool,
4747 					    IXGBE_RAH_AV);
4748 		else
4749 			hw->mac.ops.clear_rar(hw, i);
4750 	}
4751 }
4752 
4753 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4754 {
4755 	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4756 	struct ixgbe_hw *hw = &adapter->hw;
4757 	int i;
4758 
4759 	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4760 		mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4761 		mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4762 	}
4763 
4764 	ixgbe_sync_mac_table(adapter);
4765 }
4766 
4767 static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4768 {
4769 	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4770 	struct ixgbe_hw *hw = &adapter->hw;
4771 	int i, count = 0;
4772 
4773 	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4774 		/* do not count default RAR as available */
4775 		if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4776 			continue;
4777 
4778 		/* only count unused and addresses that belong to us */
4779 		if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4780 			if (mac_table->pool != pool)
4781 				continue;
4782 		}
4783 
4784 		count++;
4785 	}
4786 
4787 	return count;
4788 }
4789 
4790 /* this function destroys the first RAR entry */
4791 static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4792 {
4793 	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4794 	struct ixgbe_hw *hw = &adapter->hw;
4795 
4796 	memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4797 	mac_table->pool = VMDQ_P(0);
4798 
4799 	mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4800 
4801 	hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4802 			    IXGBE_RAH_AV);
4803 }
4804 
4805 int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4806 			 const u8 *addr, u16 pool)
4807 {
4808 	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4809 	struct ixgbe_hw *hw = &adapter->hw;
4810 	int i;
4811 
4812 	if (is_zero_ether_addr(addr))
4813 		return -EINVAL;
4814 
4815 	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4816 		if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4817 			continue;
4818 
4819 		ether_addr_copy(mac_table->addr, addr);
4820 		mac_table->pool = pool;
4821 
4822 		mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4823 				    IXGBE_MAC_STATE_IN_USE;
4824 
4825 		ixgbe_sync_mac_table(adapter);
4826 
4827 		return i;
4828 	}
4829 
4830 	return -ENOMEM;
4831 }
4832 
4833 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4834 			 const u8 *addr, u16 pool)
4835 {
4836 	struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4837 	struct ixgbe_hw *hw = &adapter->hw;
4838 	int i;
4839 
4840 	if (is_zero_ether_addr(addr))
4841 		return -EINVAL;
4842 
4843 	/* search table for addr, if found clear IN_USE flag and sync */
4844 	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4845 		/* we can only delete an entry if it is in use */
4846 		if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4847 			continue;
4848 		/* we only care about entries that belong to the given pool */
4849 		if (mac_table->pool != pool)
4850 			continue;
4851 		/* we only care about a specific MAC address */
4852 		if (!ether_addr_equal(addr, mac_table->addr))
4853 			continue;
4854 
4855 		mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4856 		mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4857 
4858 		ixgbe_sync_mac_table(adapter);
4859 
4860 		return 0;
4861 	}
4862 
4863 	return -ENOMEM;
4864 }
4865 
4866 static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4867 {
4868 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4869 	int ret;
4870 
4871 	ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4872 
4873 	return min_t(int, ret, 0);
4874 }
4875 
4876 static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4877 {
4878 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4879 
4880 	ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4881 
4882 	return 0;
4883 }
4884 
4885 /**
4886  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4887  * @netdev: network interface device structure
4888  *
4889  * The set_rx_method entry point is called whenever the unicast/multicast
4890  * address list or the network interface flags are updated.  This routine is
4891  * responsible for configuring the hardware for proper unicast, multicast and
4892  * promiscuous mode.
4893  **/
4894 void ixgbe_set_rx_mode(struct net_device *netdev)
4895 {
4896 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4897 	struct ixgbe_hw *hw = &adapter->hw;
4898 	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4899 	netdev_features_t features = netdev->features;
4900 	int count;
4901 
4902 	/* Check for Promiscuous and All Multicast modes */
4903 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4904 
4905 	/* set all bits that we expect to always be set */
4906 	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
4907 	fctrl |= IXGBE_FCTRL_BAM;
4908 	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
4909 	fctrl |= IXGBE_FCTRL_PMCF;
4910 
4911 	/* clear the bits we are changing the status of */
4912 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4913 	if (netdev->flags & IFF_PROMISC) {
4914 		hw->addr_ctrl.user_set_promisc = true;
4915 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4916 		vmolr |= IXGBE_VMOLR_MPE;
4917 		features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4918 	} else {
4919 		if (netdev->flags & IFF_ALLMULTI) {
4920 			fctrl |= IXGBE_FCTRL_MPE;
4921 			vmolr |= IXGBE_VMOLR_MPE;
4922 		}
4923 		hw->addr_ctrl.user_set_promisc = false;
4924 	}
4925 
4926 	/*
4927 	 * Write addresses to available RAR registers, if there is not
4928 	 * sufficient space to store all the addresses then enable
4929 	 * unicast promiscuous mode
4930 	 */
4931 	if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4932 		fctrl |= IXGBE_FCTRL_UPE;
4933 		vmolr |= IXGBE_VMOLR_ROPE;
4934 	}
4935 
4936 	/* Write addresses to the MTA, if the attempt fails
4937 	 * then we should just turn on promiscuous mode so
4938 	 * that we can at least receive multicast traffic
4939 	 */
4940 	count = ixgbe_write_mc_addr_list(netdev);
4941 	if (count < 0) {
4942 		fctrl |= IXGBE_FCTRL_MPE;
4943 		vmolr |= IXGBE_VMOLR_MPE;
4944 	} else if (count) {
4945 		vmolr |= IXGBE_VMOLR_ROMPE;
4946 	}
4947 
4948 	if (hw->mac.type != ixgbe_mac_82598EB) {
4949 		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4950 			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4951 			   IXGBE_VMOLR_ROPE);
4952 		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4953 	}
4954 
4955 	/* This is useful for sniffing bad packets. */
4956 	if (features & NETIF_F_RXALL) {
4957 		/* UPE and MPE will be handled by normal PROMISC logic
4958 		 * in e1000e_set_rx_mode */
4959 		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
4960 			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
4961 			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
4962 
4963 		fctrl &= ~(IXGBE_FCTRL_DPF);
4964 		/* NOTE:  VLAN filtering is disabled by setting PROMISC */
4965 	}
4966 
4967 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4968 
4969 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
4970 		ixgbe_vlan_strip_enable(adapter);
4971 	else
4972 		ixgbe_vlan_strip_disable(adapter);
4973 
4974 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4975 		ixgbe_vlan_promisc_disable(adapter);
4976 	else
4977 		ixgbe_vlan_promisc_enable(adapter);
4978 }
4979 
4980 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4981 {
4982 	int q_idx;
4983 
4984 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4985 		napi_enable(&adapter->q_vector[q_idx]->napi);
4986 }
4987 
4988 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4989 {
4990 	int q_idx;
4991 
4992 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4993 		napi_disable(&adapter->q_vector[q_idx]->napi);
4994 }
4995 
4996 static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
4997 {
4998 	struct ixgbe_adapter *adapter = netdev_priv(dev);
4999 	struct ixgbe_hw *hw = &adapter->hw;
5000 	struct udp_tunnel_info ti;
5001 
5002 	udp_tunnel_nic_get_port(dev, table, 0, &ti);
5003 	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
5004 		adapter->vxlan_port = ti.port;
5005 	else
5006 		adapter->geneve_port = ti.port;
5007 
5008 	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
5009 			ntohs(adapter->vxlan_port) |
5010 			ntohs(adapter->geneve_port) <<
5011 				IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
5012 	return 0;
5013 }
5014 
5015 static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
5016 	.sync_table	= ixgbe_udp_tunnel_sync,
5017 	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5018 	.tables		= {
5019 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
5020 	},
5021 };
5022 
5023 static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
5024 	.sync_table	= ixgbe_udp_tunnel_sync,
5025 	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5026 	.tables		= {
5027 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
5028 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
5029 	},
5030 };
5031 
5032 #ifdef CONFIG_IXGBE_DCB
5033 /**
5034  * ixgbe_configure_dcb - Configure DCB hardware
5035  * @adapter: ixgbe adapter struct
5036  *
5037  * This is called by the driver on open to configure the DCB hardware.
5038  * This is also called by the gennetlink interface when reconfiguring
5039  * the DCB state.
5040  */
5041 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5042 {
5043 	struct ixgbe_hw *hw = &adapter->hw;
5044 	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5045 
5046 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5047 		if (hw->mac.type == ixgbe_mac_82598EB)
5048 			netif_set_gso_max_size(adapter->netdev, 65536);
5049 		return;
5050 	}
5051 
5052 	if (hw->mac.type == ixgbe_mac_82598EB)
5053 		netif_set_gso_max_size(adapter->netdev, 32768);
5054 
5055 #ifdef IXGBE_FCOE
5056 	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5057 		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5058 #endif
5059 
5060 	/* reconfigure the hardware */
5061 	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5062 		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5063 						DCB_TX_CONFIG);
5064 		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5065 						DCB_RX_CONFIG);
5066 		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5067 	} else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5068 		ixgbe_dcb_hw_ets(&adapter->hw,
5069 				 adapter->ixgbe_ieee_ets,
5070 				 max_frame);
5071 		ixgbe_dcb_hw_pfc_config(&adapter->hw,
5072 					adapter->ixgbe_ieee_pfc->pfc_en,
5073 					adapter->ixgbe_ieee_ets->prio_tc);
5074 	}
5075 
5076 	/* Enable RSS Hash per TC */
5077 	if (hw->mac.type != ixgbe_mac_82598EB) {
5078 		u32 msb = 0;
5079 		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5080 
5081 		while (rss_i) {
5082 			msb++;
5083 			rss_i >>= 1;
5084 		}
5085 
5086 		/* write msb to all 8 TCs in one write */
5087 		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5088 	}
5089 }
5090 #endif
5091 
5092 /* Additional bittime to account for IXGBE framing */
5093 #define IXGBE_ETH_FRAMING 20
5094 
5095 /**
5096  * ixgbe_hpbthresh - calculate high water mark for flow control
5097  *
5098  * @adapter: board private structure to calculate for
5099  * @pb: packet buffer to calculate
5100  */
5101 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5102 {
5103 	struct ixgbe_hw *hw = &adapter->hw;
5104 	struct net_device *dev = adapter->netdev;
5105 	int link, tc, kb, marker;
5106 	u32 dv_id, rx_pba;
5107 
5108 	/* Calculate max LAN frame size */
5109 	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5110 
5111 #ifdef IXGBE_FCOE
5112 	/* FCoE traffic class uses FCOE jumbo frames */
5113 	if ((dev->features & NETIF_F_FCOE_MTU) &&
5114 	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5115 	    (pb == ixgbe_fcoe_get_tc(adapter)))
5116 		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5117 #endif
5118 
5119 	/* Calculate delay value for device */
5120 	switch (hw->mac.type) {
5121 	case ixgbe_mac_X540:
5122 	case ixgbe_mac_X550:
5123 	case ixgbe_mac_X550EM_x:
5124 	case ixgbe_mac_x550em_a:
5125 		dv_id = IXGBE_DV_X540(link, tc);
5126 		break;
5127 	default:
5128 		dv_id = IXGBE_DV(link, tc);
5129 		break;
5130 	}
5131 
5132 	/* Loopback switch introduces additional latency */
5133 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5134 		dv_id += IXGBE_B2BT(tc);
5135 
5136 	/* Delay value is calculated in bit times convert to KB */
5137 	kb = IXGBE_BT2KB(dv_id);
5138 	rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5139 
5140 	marker = rx_pba - kb;
5141 
5142 	/* It is possible that the packet buffer is not large enough
5143 	 * to provide required headroom. In this case throw an error
5144 	 * to user and a do the best we can.
5145 	 */
5146 	if (marker < 0) {
5147 		e_warn(drv, "Packet Buffer(%i) can not provide enough"
5148 			    "headroom to support flow control."
5149 			    "Decrease MTU or number of traffic classes\n", pb);
5150 		marker = tc + 1;
5151 	}
5152 
5153 	return marker;
5154 }
5155 
5156 /**
5157  * ixgbe_lpbthresh - calculate low water mark for for flow control
5158  *
5159  * @adapter: board private structure to calculate for
5160  * @pb: packet buffer to calculate
5161  */
5162 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5163 {
5164 	struct ixgbe_hw *hw = &adapter->hw;
5165 	struct net_device *dev = adapter->netdev;
5166 	int tc;
5167 	u32 dv_id;
5168 
5169 	/* Calculate max LAN frame size */
5170 	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5171 
5172 #ifdef IXGBE_FCOE
5173 	/* FCoE traffic class uses FCOE jumbo frames */
5174 	if ((dev->features & NETIF_F_FCOE_MTU) &&
5175 	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5176 	    (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5177 		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5178 #endif
5179 
5180 	/* Calculate delay value for device */
5181 	switch (hw->mac.type) {
5182 	case ixgbe_mac_X540:
5183 	case ixgbe_mac_X550:
5184 	case ixgbe_mac_X550EM_x:
5185 	case ixgbe_mac_x550em_a:
5186 		dv_id = IXGBE_LOW_DV_X540(tc);
5187 		break;
5188 	default:
5189 		dv_id = IXGBE_LOW_DV(tc);
5190 		break;
5191 	}
5192 
5193 	/* Delay value is calculated in bit times convert to KB */
5194 	return IXGBE_BT2KB(dv_id);
5195 }
5196 
5197 /*
5198  * ixgbe_pbthresh_setup - calculate and setup high low water marks
5199  */
5200 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5201 {
5202 	struct ixgbe_hw *hw = &adapter->hw;
5203 	int num_tc = adapter->hw_tcs;
5204 	int i;
5205 
5206 	if (!num_tc)
5207 		num_tc = 1;
5208 
5209 	for (i = 0; i < num_tc; i++) {
5210 		hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5211 		hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5212 
5213 		/* Low water marks must not be larger than high water marks */
5214 		if (hw->fc.low_water[i] > hw->fc.high_water[i])
5215 			hw->fc.low_water[i] = 0;
5216 	}
5217 
5218 	for (; i < MAX_TRAFFIC_CLASS; i++)
5219 		hw->fc.high_water[i] = 0;
5220 }
5221 
5222 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5223 {
5224 	struct ixgbe_hw *hw = &adapter->hw;
5225 	int hdrm;
5226 	u8 tc = adapter->hw_tcs;
5227 
5228 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5229 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5230 		hdrm = 32 << adapter->fdir_pballoc;
5231 	else
5232 		hdrm = 0;
5233 
5234 	hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5235 	ixgbe_pbthresh_setup(adapter);
5236 }
5237 
5238 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5239 {
5240 	struct ixgbe_hw *hw = &adapter->hw;
5241 	struct hlist_node *node2;
5242 	struct ixgbe_fdir_filter *filter;
5243 	u8 queue;
5244 
5245 	spin_lock(&adapter->fdir_perfect_lock);
5246 
5247 	if (!hlist_empty(&adapter->fdir_filter_list))
5248 		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5249 
5250 	hlist_for_each_entry_safe(filter, node2,
5251 				  &adapter->fdir_filter_list, fdir_node) {
5252 		if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
5253 			queue = IXGBE_FDIR_DROP_QUEUE;
5254 		} else {
5255 			u32 ring = ethtool_get_flow_spec_ring(filter->action);
5256 			u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
5257 
5258 			if (!vf && (ring >= adapter->num_rx_queues)) {
5259 				e_err(drv, "FDIR restore failed without VF, ring: %u\n",
5260 				      ring);
5261 				continue;
5262 			} else if (vf &&
5263 				   ((vf > adapter->num_vfs) ||
5264 				     ring >= adapter->num_rx_queues_per_pool)) {
5265 				e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
5266 				      vf, ring);
5267 				continue;
5268 			}
5269 
5270 			/* Map the ring onto the absolute queue index */
5271 			if (!vf)
5272 				queue = adapter->rx_ring[ring]->reg_idx;
5273 			else
5274 				queue = ((vf - 1) *
5275 					adapter->num_rx_queues_per_pool) + ring;
5276 		}
5277 
5278 		ixgbe_fdir_write_perfect_filter_82599(hw,
5279 				&filter->filter, filter->sw_idx, queue);
5280 	}
5281 
5282 	spin_unlock(&adapter->fdir_perfect_lock);
5283 }
5284 
5285 /**
5286  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5287  * @rx_ring: ring to free buffers from
5288  **/
5289 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5290 {
5291 	u16 i = rx_ring->next_to_clean;
5292 	struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5293 
5294 	if (rx_ring->xsk_pool) {
5295 		ixgbe_xsk_clean_rx_ring(rx_ring);
5296 		goto skip_free;
5297 	}
5298 
5299 	/* Free all the Rx ring sk_buffs */
5300 	while (i != rx_ring->next_to_alloc) {
5301 		if (rx_buffer->skb) {
5302 			struct sk_buff *skb = rx_buffer->skb;
5303 			if (IXGBE_CB(skb)->page_released)
5304 				dma_unmap_page_attrs(rx_ring->dev,
5305 						     IXGBE_CB(skb)->dma,
5306 						     ixgbe_rx_pg_size(rx_ring),
5307 						     DMA_FROM_DEVICE,
5308 						     IXGBE_RX_DMA_ATTR);
5309 			dev_kfree_skb(skb);
5310 		}
5311 
5312 		/* Invalidate cache lines that may have been written to by
5313 		 * device so that we avoid corrupting memory.
5314 		 */
5315 		dma_sync_single_range_for_cpu(rx_ring->dev,
5316 					      rx_buffer->dma,
5317 					      rx_buffer->page_offset,
5318 					      ixgbe_rx_bufsz(rx_ring),
5319 					      DMA_FROM_DEVICE);
5320 
5321 		/* free resources associated with mapping */
5322 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5323 				     ixgbe_rx_pg_size(rx_ring),
5324 				     DMA_FROM_DEVICE,
5325 				     IXGBE_RX_DMA_ATTR);
5326 		__page_frag_cache_drain(rx_buffer->page,
5327 					rx_buffer->pagecnt_bias);
5328 
5329 		i++;
5330 		rx_buffer++;
5331 		if (i == rx_ring->count) {
5332 			i = 0;
5333 			rx_buffer = rx_ring->rx_buffer_info;
5334 		}
5335 	}
5336 
5337 skip_free:
5338 	rx_ring->next_to_alloc = 0;
5339 	rx_ring->next_to_clean = 0;
5340 	rx_ring->next_to_use = 0;
5341 }
5342 
5343 static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5344 			     struct ixgbe_fwd_adapter *accel)
5345 {
5346 	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5347 	int num_tc = netdev_get_num_tc(adapter->netdev);
5348 	struct net_device *vdev = accel->netdev;
5349 	int i, baseq, err;
5350 
5351 	baseq = accel->pool * adapter->num_rx_queues_per_pool;
5352 	netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5353 		   accel->pool, adapter->num_rx_pools,
5354 		   baseq, baseq + adapter->num_rx_queues_per_pool);
5355 
5356 	accel->rx_base_queue = baseq;
5357 	accel->tx_base_queue = baseq;
5358 
5359 	/* record configuration for macvlan interface in vdev */
5360 	for (i = 0; i < num_tc; i++)
5361 		netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5362 					     i, rss_i, baseq + (rss_i * i));
5363 
5364 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5365 		adapter->rx_ring[baseq + i]->netdev = vdev;
5366 
5367 	/* Guarantee all rings are updated before we update the
5368 	 * MAC address filter.
5369 	 */
5370 	wmb();
5371 
5372 	/* ixgbe_add_mac_filter will return an index if it succeeds, so we
5373 	 * need to only treat it as an error value if it is negative.
5374 	 */
5375 	err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5376 				   VMDQ_P(accel->pool));
5377 	if (err >= 0)
5378 		return 0;
5379 
5380 	/* if we cannot add the MAC rule then disable the offload */
5381 	macvlan_release_l2fw_offload(vdev);
5382 
5383 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5384 		adapter->rx_ring[baseq + i]->netdev = NULL;
5385 
5386 	netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5387 
5388 	/* unbind the queues and drop the subordinate channel config */
5389 	netdev_unbind_sb_channel(adapter->netdev, vdev);
5390 	netdev_set_sb_channel(vdev, 0);
5391 
5392 	clear_bit(accel->pool, adapter->fwd_bitmask);
5393 	kfree(accel);
5394 
5395 	return err;
5396 }
5397 
5398 static int ixgbe_macvlan_up(struct net_device *vdev,
5399 			    struct netdev_nested_priv *priv)
5400 {
5401 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
5402 	struct ixgbe_fwd_adapter *accel;
5403 
5404 	if (!netif_is_macvlan(vdev))
5405 		return 0;
5406 
5407 	accel = macvlan_accel_priv(vdev);
5408 	if (!accel)
5409 		return 0;
5410 
5411 	ixgbe_fwd_ring_up(adapter, accel);
5412 
5413 	return 0;
5414 }
5415 
5416 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5417 {
5418 	struct netdev_nested_priv priv = {
5419 		.data = (void *)adapter,
5420 	};
5421 
5422 	netdev_walk_all_upper_dev_rcu(adapter->netdev,
5423 				      ixgbe_macvlan_up, &priv);
5424 }
5425 
5426 static void ixgbe_configure(struct ixgbe_adapter *adapter)
5427 {
5428 	struct ixgbe_hw *hw = &adapter->hw;
5429 
5430 	ixgbe_configure_pb(adapter);
5431 #ifdef CONFIG_IXGBE_DCB
5432 	ixgbe_configure_dcb(adapter);
5433 #endif
5434 	/*
5435 	 * We must restore virtualization before VLANs or else
5436 	 * the VLVF registers will not be populated
5437 	 */
5438 	ixgbe_configure_virtualization(adapter);
5439 
5440 	ixgbe_set_rx_mode(adapter->netdev);
5441 	ixgbe_restore_vlan(adapter);
5442 	ixgbe_ipsec_restore(adapter);
5443 
5444 	switch (hw->mac.type) {
5445 	case ixgbe_mac_82599EB:
5446 	case ixgbe_mac_X540:
5447 		hw->mac.ops.disable_rx_buff(hw);
5448 		break;
5449 	default:
5450 		break;
5451 	}
5452 
5453 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5454 		ixgbe_init_fdir_signature_82599(&adapter->hw,
5455 						adapter->fdir_pballoc);
5456 	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5457 		ixgbe_init_fdir_perfect_82599(&adapter->hw,
5458 					      adapter->fdir_pballoc);
5459 		ixgbe_fdir_filter_restore(adapter);
5460 	}
5461 
5462 	switch (hw->mac.type) {
5463 	case ixgbe_mac_82599EB:
5464 	case ixgbe_mac_X540:
5465 		hw->mac.ops.enable_rx_buff(hw);
5466 		break;
5467 	default:
5468 		break;
5469 	}
5470 
5471 #ifdef CONFIG_IXGBE_DCA
5472 	/* configure DCA */
5473 	if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5474 		ixgbe_setup_dca(adapter);
5475 #endif /* CONFIG_IXGBE_DCA */
5476 
5477 #ifdef IXGBE_FCOE
5478 	/* configure FCoE L2 filters, redirection table, and Rx control */
5479 	ixgbe_configure_fcoe(adapter);
5480 
5481 #endif /* IXGBE_FCOE */
5482 	ixgbe_configure_tx(adapter);
5483 	ixgbe_configure_rx(adapter);
5484 	ixgbe_configure_dfwd(adapter);
5485 }
5486 
5487 /**
5488  * ixgbe_sfp_link_config - set up SFP+ link
5489  * @adapter: pointer to private adapter struct
5490  **/
5491 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5492 {
5493 	/*
5494 	 * We are assuming the worst case scenario here, and that
5495 	 * is that an SFP was inserted/removed after the reset
5496 	 * but before SFP detection was enabled.  As such the best
5497 	 * solution is to just start searching as soon as we start
5498 	 */
5499 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5500 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5501 
5502 	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5503 	adapter->sfp_poll_time = 0;
5504 }
5505 
5506 /**
5507  * ixgbe_non_sfp_link_config - set up non-SFP+ link
5508  * @hw: pointer to private hardware struct
5509  *
5510  * Returns 0 on success, negative on failure
5511  **/
5512 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5513 {
5514 	u32 speed;
5515 	bool autoneg, link_up = false;
5516 	int ret = IXGBE_ERR_LINK_SETUP;
5517 
5518 	if (hw->mac.ops.check_link)
5519 		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5520 
5521 	if (ret)
5522 		return ret;
5523 
5524 	speed = hw->phy.autoneg_advertised;
5525 	if (!speed && hw->mac.ops.get_link_capabilities) {
5526 		ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5527 							&autoneg);
5528 		speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
5529 			   IXGBE_LINK_SPEED_2_5GB_FULL);
5530 	}
5531 
5532 	if (ret)
5533 		return ret;
5534 
5535 	if (hw->mac.ops.setup_link)
5536 		ret = hw->mac.ops.setup_link(hw, speed, link_up);
5537 
5538 	return ret;
5539 }
5540 
5541 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5542 {
5543 	struct ixgbe_hw *hw = &adapter->hw;
5544 	u32 gpie = 0;
5545 
5546 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5547 		gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5548 		       IXGBE_GPIE_OCD;
5549 		gpie |= IXGBE_GPIE_EIAME;
5550 		/*
5551 		 * use EIAM to auto-mask when MSI-X interrupt is asserted
5552 		 * this saves a register write for every interrupt
5553 		 */
5554 		switch (hw->mac.type) {
5555 		case ixgbe_mac_82598EB:
5556 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5557 			break;
5558 		case ixgbe_mac_82599EB:
5559 		case ixgbe_mac_X540:
5560 		case ixgbe_mac_X550:
5561 		case ixgbe_mac_X550EM_x:
5562 		case ixgbe_mac_x550em_a:
5563 		default:
5564 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5565 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5566 			break;
5567 		}
5568 	} else {
5569 		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
5570 		 * specifically only auto mask tx and rx interrupts */
5571 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5572 	}
5573 
5574 	/* XXX: to interrupt immediately for EICS writes, enable this */
5575 	/* gpie |= IXGBE_GPIE_EIMEN; */
5576 
5577 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5578 		gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5579 
5580 		switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5581 		case IXGBE_82599_VMDQ_8Q_MASK:
5582 			gpie |= IXGBE_GPIE_VTMODE_16;
5583 			break;
5584 		case IXGBE_82599_VMDQ_4Q_MASK:
5585 			gpie |= IXGBE_GPIE_VTMODE_32;
5586 			break;
5587 		default:
5588 			gpie |= IXGBE_GPIE_VTMODE_64;
5589 			break;
5590 		}
5591 	}
5592 
5593 	/* Enable Thermal over heat sensor interrupt */
5594 	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5595 		switch (adapter->hw.mac.type) {
5596 		case ixgbe_mac_82599EB:
5597 			gpie |= IXGBE_SDP0_GPIEN_8259X;
5598 			break;
5599 		default:
5600 			break;
5601 		}
5602 	}
5603 
5604 	/* Enable fan failure interrupt */
5605 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5606 		gpie |= IXGBE_SDP1_GPIEN(hw);
5607 
5608 	switch (hw->mac.type) {
5609 	case ixgbe_mac_82599EB:
5610 		gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5611 		break;
5612 	case ixgbe_mac_X550EM_x:
5613 	case ixgbe_mac_x550em_a:
5614 		gpie |= IXGBE_SDP0_GPIEN_X540;
5615 		break;
5616 	default:
5617 		break;
5618 	}
5619 
5620 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5621 }
5622 
5623 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5624 {
5625 	struct ixgbe_hw *hw = &adapter->hw;
5626 	int err;
5627 	u32 ctrl_ext;
5628 
5629 	ixgbe_get_hw_control(adapter);
5630 	ixgbe_setup_gpie(adapter);
5631 
5632 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5633 		ixgbe_configure_msix(adapter);
5634 	else
5635 		ixgbe_configure_msi_and_legacy(adapter);
5636 
5637 	/* enable the optics for 82599 SFP+ fiber */
5638 	if (hw->mac.ops.enable_tx_laser)
5639 		hw->mac.ops.enable_tx_laser(hw);
5640 
5641 	if (hw->phy.ops.set_phy_power)
5642 		hw->phy.ops.set_phy_power(hw, true);
5643 
5644 	smp_mb__before_atomic();
5645 	clear_bit(__IXGBE_DOWN, &adapter->state);
5646 	ixgbe_napi_enable_all(adapter);
5647 
5648 	if (ixgbe_is_sfp(hw)) {
5649 		ixgbe_sfp_link_config(adapter);
5650 	} else {
5651 		err = ixgbe_non_sfp_link_config(hw);
5652 		if (err)
5653 			e_err(probe, "link_config FAILED %d\n", err);
5654 	}
5655 
5656 	/* clear any pending interrupts, may auto mask */
5657 	IXGBE_READ_REG(hw, IXGBE_EICR);
5658 	ixgbe_irq_enable(adapter, true, true);
5659 
5660 	/*
5661 	 * If this adapter has a fan, check to see if we had a failure
5662 	 * before we enabled the interrupt.
5663 	 */
5664 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5665 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5666 		if (esdp & IXGBE_ESDP_SDP1)
5667 			e_crit(drv, "Fan has stopped, replace the adapter\n");
5668 	}
5669 
5670 	/* bring the link up in the watchdog, this could race with our first
5671 	 * link up interrupt but shouldn't be a problem */
5672 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5673 	adapter->link_check_timeout = jiffies;
5674 	mod_timer(&adapter->service_timer, jiffies);
5675 
5676 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
5677 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5678 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5679 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5680 }
5681 
5682 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5683 {
5684 	/* put off any impending NetWatchDogTimeout */
5685 	netif_trans_update(adapter->netdev);
5686 
5687 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5688 		usleep_range(1000, 2000);
5689 	if (adapter->hw.phy.type == ixgbe_phy_fw)
5690 		ixgbe_watchdog_link_is_down(adapter);
5691 	ixgbe_down(adapter);
5692 	/*
5693 	 * If SR-IOV enabled then wait a bit before bringing the adapter
5694 	 * back up to give the VFs time to respond to the reset.  The
5695 	 * two second wait is based upon the watchdog timer cycle in
5696 	 * the VF driver.
5697 	 */
5698 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5699 		msleep(2000);
5700 	ixgbe_up(adapter);
5701 	clear_bit(__IXGBE_RESETTING, &adapter->state);
5702 }
5703 
5704 void ixgbe_up(struct ixgbe_adapter *adapter)
5705 {
5706 	/* hardware has been reset, we need to reload some things */
5707 	ixgbe_configure(adapter);
5708 
5709 	ixgbe_up_complete(adapter);
5710 }
5711 
5712 static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5713 {
5714 	u16 devctl2;
5715 
5716 	pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5717 
5718 	switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5719 	case IXGBE_PCIDEVCTRL2_17_34s:
5720 	case IXGBE_PCIDEVCTRL2_4_8s:
5721 		/* For now we cap the upper limit on delay to 2 seconds
5722 		 * as we end up going up to 34 seconds of delay in worst
5723 		 * case timeout value.
5724 		 */
5725 	case IXGBE_PCIDEVCTRL2_1_2s:
5726 		return 2000000ul;	/* 2.0 s */
5727 	case IXGBE_PCIDEVCTRL2_260_520ms:
5728 		return 520000ul;	/* 520 ms */
5729 	case IXGBE_PCIDEVCTRL2_65_130ms:
5730 		return 130000ul;	/* 130 ms */
5731 	case IXGBE_PCIDEVCTRL2_16_32ms:
5732 		return 32000ul;		/* 32 ms */
5733 	case IXGBE_PCIDEVCTRL2_1_2ms:
5734 		return 2000ul;		/* 2 ms */
5735 	case IXGBE_PCIDEVCTRL2_50_100us:
5736 		return 100ul;		/* 100 us */
5737 	case IXGBE_PCIDEVCTRL2_16_32ms_def:
5738 		return 32000ul;		/* 32 ms */
5739 	default:
5740 		break;
5741 	}
5742 
5743 	/* We shouldn't need to hit this path, but just in case default as
5744 	 * though completion timeout is not supported and support 32ms.
5745 	 */
5746 	return 32000ul;
5747 }
5748 
5749 void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5750 {
5751 	unsigned long wait_delay, delay_interval;
5752 	struct ixgbe_hw *hw = &adapter->hw;
5753 	int i, wait_loop;
5754 	u32 rxdctl;
5755 
5756 	/* disable receives */
5757 	hw->mac.ops.disable_rx(hw);
5758 
5759 	if (ixgbe_removed(hw->hw_addr))
5760 		return;
5761 
5762 	/* disable all enabled Rx queues */
5763 	for (i = 0; i < adapter->num_rx_queues; i++) {
5764 		struct ixgbe_ring *ring = adapter->rx_ring[i];
5765 		u8 reg_idx = ring->reg_idx;
5766 
5767 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5768 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5769 		rxdctl |= IXGBE_RXDCTL_SWFLSH;
5770 
5771 		/* write value back with RXDCTL.ENABLE bit cleared */
5772 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5773 	}
5774 
5775 	/* RXDCTL.EN may not change on 82598 if link is down, so skip it */
5776 	if (hw->mac.type == ixgbe_mac_82598EB &&
5777 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5778 		return;
5779 
5780 	/* Determine our minimum delay interval. We will increase this value
5781 	 * with each subsequent test. This way if the device returns quickly
5782 	 * we should spend as little time as possible waiting, however as
5783 	 * the time increases we will wait for larger periods of time.
5784 	 *
5785 	 * The trick here is that we increase the interval using the
5786 	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
5787 	 * of that wait is that it totals up to 100x whatever interval we
5788 	 * choose. Since our minimum wait is 100us we can just divide the
5789 	 * total timeout by 100 to get our minimum delay interval.
5790 	 */
5791 	delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5792 
5793 	wait_loop = IXGBE_MAX_RX_DESC_POLL;
5794 	wait_delay = delay_interval;
5795 
5796 	while (wait_loop--) {
5797 		usleep_range(wait_delay, wait_delay + 10);
5798 		wait_delay += delay_interval * 2;
5799 		rxdctl = 0;
5800 
5801 		/* OR together the reading of all the active RXDCTL registers,
5802 		 * and then test the result. We need the disable to complete
5803 		 * before we start freeing the memory and invalidating the
5804 		 * DMA mappings.
5805 		 */
5806 		for (i = 0; i < adapter->num_rx_queues; i++) {
5807 			struct ixgbe_ring *ring = adapter->rx_ring[i];
5808 			u8 reg_idx = ring->reg_idx;
5809 
5810 			rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5811 		}
5812 
5813 		if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5814 			return;
5815 	}
5816 
5817 	e_err(drv,
5818 	      "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5819 }
5820 
5821 void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5822 {
5823 	unsigned long wait_delay, delay_interval;
5824 	struct ixgbe_hw *hw = &adapter->hw;
5825 	int i, wait_loop;
5826 	u32 txdctl;
5827 
5828 	if (ixgbe_removed(hw->hw_addr))
5829 		return;
5830 
5831 	/* disable all enabled Tx queues */
5832 	for (i = 0; i < adapter->num_tx_queues; i++) {
5833 		struct ixgbe_ring *ring = adapter->tx_ring[i];
5834 		u8 reg_idx = ring->reg_idx;
5835 
5836 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5837 	}
5838 
5839 	/* disable all enabled XDP Tx queues */
5840 	for (i = 0; i < adapter->num_xdp_queues; i++) {
5841 		struct ixgbe_ring *ring = adapter->xdp_ring[i];
5842 		u8 reg_idx = ring->reg_idx;
5843 
5844 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5845 	}
5846 
5847 	/* If the link is not up there shouldn't be much in the way of
5848 	 * pending transactions. Those that are left will be flushed out
5849 	 * when the reset logic goes through the flush sequence to clean out
5850 	 * the pending Tx transactions.
5851 	 */
5852 	if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5853 		goto dma_engine_disable;
5854 
5855 	/* Determine our minimum delay interval. We will increase this value
5856 	 * with each subsequent test. This way if the device returns quickly
5857 	 * we should spend as little time as possible waiting, however as
5858 	 * the time increases we will wait for larger periods of time.
5859 	 *
5860 	 * The trick here is that we increase the interval using the
5861 	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
5862 	 * of that wait is that it totals up to 100x whatever interval we
5863 	 * choose. Since our minimum wait is 100us we can just divide the
5864 	 * total timeout by 100 to get our minimum delay interval.
5865 	 */
5866 	delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5867 
5868 	wait_loop = IXGBE_MAX_RX_DESC_POLL;
5869 	wait_delay = delay_interval;
5870 
5871 	while (wait_loop--) {
5872 		usleep_range(wait_delay, wait_delay + 10);
5873 		wait_delay += delay_interval * 2;
5874 		txdctl = 0;
5875 
5876 		/* OR together the reading of all the active TXDCTL registers,
5877 		 * and then test the result. We need the disable to complete
5878 		 * before we start freeing the memory and invalidating the
5879 		 * DMA mappings.
5880 		 */
5881 		for (i = 0; i < adapter->num_tx_queues; i++) {
5882 			struct ixgbe_ring *ring = adapter->tx_ring[i];
5883 			u8 reg_idx = ring->reg_idx;
5884 
5885 			txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5886 		}
5887 		for (i = 0; i < adapter->num_xdp_queues; i++) {
5888 			struct ixgbe_ring *ring = adapter->xdp_ring[i];
5889 			u8 reg_idx = ring->reg_idx;
5890 
5891 			txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5892 		}
5893 
5894 		if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5895 			goto dma_engine_disable;
5896 	}
5897 
5898 	e_err(drv,
5899 	      "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5900 
5901 dma_engine_disable:
5902 	/* Disable the Tx DMA engine on 82599 and later MAC */
5903 	switch (hw->mac.type) {
5904 	case ixgbe_mac_82599EB:
5905 	case ixgbe_mac_X540:
5906 	case ixgbe_mac_X550:
5907 	case ixgbe_mac_X550EM_x:
5908 	case ixgbe_mac_x550em_a:
5909 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5910 				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5911 				 ~IXGBE_DMATXCTL_TE));
5912 		fallthrough;
5913 	default:
5914 		break;
5915 	}
5916 }
5917 
5918 void ixgbe_reset(struct ixgbe_adapter *adapter)
5919 {
5920 	struct ixgbe_hw *hw = &adapter->hw;
5921 	struct net_device *netdev = adapter->netdev;
5922 	int err;
5923 
5924 	if (ixgbe_removed(hw->hw_addr))
5925 		return;
5926 	/* lock SFP init bit to prevent race conditions with the watchdog */
5927 	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5928 		usleep_range(1000, 2000);
5929 
5930 	/* clear all SFP and link config related flags while holding SFP_INIT */
5931 	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5932 			     IXGBE_FLAG2_SFP_NEEDS_RESET);
5933 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5934 
5935 	err = hw->mac.ops.init_hw(hw);
5936 	switch (err) {
5937 	case 0:
5938 	case IXGBE_ERR_SFP_NOT_PRESENT:
5939 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
5940 		break;
5941 	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5942 		e_dev_err("master disable timed out\n");
5943 		break;
5944 	case IXGBE_ERR_EEPROM_VERSION:
5945 		/* We are running on a pre-production device, log a warning */
5946 		e_dev_warn("This device is a pre-production adapter/LOM. "
5947 			   "Please be aware there may be issues associated with "
5948 			   "your hardware.  If you are experiencing problems "
5949 			   "please contact your Intel or hardware "
5950 			   "representative who provided you with this "
5951 			   "hardware.\n");
5952 		break;
5953 	default:
5954 		e_dev_err("Hardware Error: %d\n", err);
5955 	}
5956 
5957 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5958 
5959 	/* flush entries out of MAC table */
5960 	ixgbe_flush_sw_mac_table(adapter);
5961 	__dev_uc_unsync(netdev, NULL);
5962 
5963 	/* do not flush user set addresses */
5964 	ixgbe_mac_set_default_filter(adapter);
5965 
5966 	/* update SAN MAC vmdq pool selection */
5967 	if (hw->mac.san_mac_rar_index)
5968 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5969 
5970 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5971 		ixgbe_ptp_reset(adapter);
5972 
5973 	if (hw->phy.ops.set_phy_power) {
5974 		if (!netif_running(adapter->netdev) && !adapter->wol)
5975 			hw->phy.ops.set_phy_power(hw, false);
5976 		else
5977 			hw->phy.ops.set_phy_power(hw, true);
5978 	}
5979 }
5980 
5981 /**
5982  * ixgbe_clean_tx_ring - Free Tx Buffers
5983  * @tx_ring: ring to be cleaned
5984  **/
5985 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5986 {
5987 	u16 i = tx_ring->next_to_clean;
5988 	struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5989 
5990 	if (tx_ring->xsk_pool) {
5991 		ixgbe_xsk_clean_tx_ring(tx_ring);
5992 		goto out;
5993 	}
5994 
5995 	while (i != tx_ring->next_to_use) {
5996 		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5997 
5998 		/* Free all the Tx ring sk_buffs */
5999 		if (ring_is_xdp(tx_ring))
6000 			xdp_return_frame(tx_buffer->xdpf);
6001 		else
6002 			dev_kfree_skb_any(tx_buffer->skb);
6003 
6004 		/* unmap skb header data */
6005 		dma_unmap_single(tx_ring->dev,
6006 				 dma_unmap_addr(tx_buffer, dma),
6007 				 dma_unmap_len(tx_buffer, len),
6008 				 DMA_TO_DEVICE);
6009 
6010 		/* check for eop_desc to determine the end of the packet */
6011 		eop_desc = tx_buffer->next_to_watch;
6012 		tx_desc = IXGBE_TX_DESC(tx_ring, i);
6013 
6014 		/* unmap remaining buffers */
6015 		while (tx_desc != eop_desc) {
6016 			tx_buffer++;
6017 			tx_desc++;
6018 			i++;
6019 			if (unlikely(i == tx_ring->count)) {
6020 				i = 0;
6021 				tx_buffer = tx_ring->tx_buffer_info;
6022 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6023 			}
6024 
6025 			/* unmap any remaining paged data */
6026 			if (dma_unmap_len(tx_buffer, len))
6027 				dma_unmap_page(tx_ring->dev,
6028 					       dma_unmap_addr(tx_buffer, dma),
6029 					       dma_unmap_len(tx_buffer, len),
6030 					       DMA_TO_DEVICE);
6031 		}
6032 
6033 		/* move us one more past the eop_desc for start of next pkt */
6034 		tx_buffer++;
6035 		i++;
6036 		if (unlikely(i == tx_ring->count)) {
6037 			i = 0;
6038 			tx_buffer = tx_ring->tx_buffer_info;
6039 		}
6040 	}
6041 
6042 	/* reset BQL for queue */
6043 	if (!ring_is_xdp(tx_ring))
6044 		netdev_tx_reset_queue(txring_txq(tx_ring));
6045 
6046 out:
6047 	/* reset next_to_use and next_to_clean */
6048 	tx_ring->next_to_use = 0;
6049 	tx_ring->next_to_clean = 0;
6050 }
6051 
6052 /**
6053  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
6054  * @adapter: board private structure
6055  **/
6056 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6057 {
6058 	int i;
6059 
6060 	for (i = 0; i < adapter->num_rx_queues; i++)
6061 		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6062 }
6063 
6064 /**
6065  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
6066  * @adapter: board private structure
6067  **/
6068 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6069 {
6070 	int i;
6071 
6072 	for (i = 0; i < adapter->num_tx_queues; i++)
6073 		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6074 	for (i = 0; i < adapter->num_xdp_queues; i++)
6075 		ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6076 }
6077 
6078 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6079 {
6080 	struct hlist_node *node2;
6081 	struct ixgbe_fdir_filter *filter;
6082 
6083 	spin_lock(&adapter->fdir_perfect_lock);
6084 
6085 	hlist_for_each_entry_safe(filter, node2,
6086 				  &adapter->fdir_filter_list, fdir_node) {
6087 		hlist_del(&filter->fdir_node);
6088 		kfree(filter);
6089 	}
6090 	adapter->fdir_filter_count = 0;
6091 
6092 	spin_unlock(&adapter->fdir_perfect_lock);
6093 }
6094 
6095 void ixgbe_down(struct ixgbe_adapter *adapter)
6096 {
6097 	struct net_device *netdev = adapter->netdev;
6098 	struct ixgbe_hw *hw = &adapter->hw;
6099 	int i;
6100 
6101 	/* signal that we are down to the interrupt handler */
6102 	if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6103 		return; /* do nothing if already down */
6104 
6105 	/* Shut off incoming Tx traffic */
6106 	netif_tx_stop_all_queues(netdev);
6107 
6108 	/* call carrier off first to avoid false dev_watchdog timeouts */
6109 	netif_carrier_off(netdev);
6110 	netif_tx_disable(netdev);
6111 
6112 	/* Disable Rx */
6113 	ixgbe_disable_rx(adapter);
6114 
6115 	/* synchronize_rcu() needed for pending XDP buffers to drain */
6116 	if (adapter->xdp_ring[0])
6117 		synchronize_rcu();
6118 
6119 	ixgbe_irq_disable(adapter);
6120 
6121 	ixgbe_napi_disable_all(adapter);
6122 
6123 	clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6124 	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6125 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6126 
6127 	del_timer_sync(&adapter->service_timer);
6128 
6129 	if (adapter->num_vfs) {
6130 		/* Clear EITR Select mapping */
6131 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6132 
6133 		/* Mark all the VFs as inactive */
6134 		for (i = 0 ; i < adapter->num_vfs; i++)
6135 			adapter->vfinfo[i].clear_to_send = false;
6136 
6137 		/* ping all the active vfs to let them know we are going down */
6138 		ixgbe_ping_all_vfs(adapter);
6139 
6140 		/* Disable all VFTE/VFRE TX/RX */
6141 		ixgbe_disable_tx_rx(adapter);
6142 	}
6143 
6144 	/* disable transmits in the hardware now that interrupts are off */
6145 	ixgbe_disable_tx(adapter);
6146 
6147 	if (!pci_channel_offline(adapter->pdev))
6148 		ixgbe_reset(adapter);
6149 
6150 	/* power down the optics for 82599 SFP+ fiber */
6151 	if (hw->mac.ops.disable_tx_laser)
6152 		hw->mac.ops.disable_tx_laser(hw);
6153 
6154 	ixgbe_clean_all_tx_rings(adapter);
6155 	ixgbe_clean_all_rx_rings(adapter);
6156 }
6157 
6158 /**
6159  * ixgbe_set_eee_capable - helper function to determine EEE support on X550
6160  * @adapter: board private structure
6161  */
6162 static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6163 {
6164 	struct ixgbe_hw *hw = &adapter->hw;
6165 
6166 	switch (hw->device_id) {
6167 	case IXGBE_DEV_ID_X550EM_A_1G_T:
6168 	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6169 		if (!hw->phy.eee_speeds_supported)
6170 			break;
6171 		adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6172 		if (!hw->phy.eee_speeds_advertised)
6173 			break;
6174 		adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6175 		break;
6176 	default:
6177 		adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6178 		adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6179 		break;
6180 	}
6181 }
6182 
6183 /**
6184  * ixgbe_tx_timeout - Respond to a Tx Hang
6185  * @netdev: network interface device structure
6186  * @txqueue: queue number that timed out
6187  **/
6188 static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6189 {
6190 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6191 
6192 	/* Do the reset outside of interrupt context */
6193 	ixgbe_tx_timeout_reset(adapter);
6194 }
6195 
6196 #ifdef CONFIG_IXGBE_DCB
6197 static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6198 {
6199 	struct ixgbe_hw *hw = &adapter->hw;
6200 	struct tc_configuration *tc;
6201 	int j;
6202 
6203 	switch (hw->mac.type) {
6204 	case ixgbe_mac_82598EB:
6205 	case ixgbe_mac_82599EB:
6206 		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6207 		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6208 		break;
6209 	case ixgbe_mac_X540:
6210 	case ixgbe_mac_X550:
6211 		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6212 		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6213 		break;
6214 	case ixgbe_mac_X550EM_x:
6215 	case ixgbe_mac_x550em_a:
6216 	default:
6217 		adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6218 		adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6219 		break;
6220 	}
6221 
6222 	/* Configure DCB traffic classes */
6223 	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6224 		tc = &adapter->dcb_cfg.tc_config[j];
6225 		tc->path[DCB_TX_CONFIG].bwg_id = 0;
6226 		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6227 		tc->path[DCB_RX_CONFIG].bwg_id = 0;
6228 		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6229 		tc->dcb_pfc = pfc_disabled;
6230 	}
6231 
6232 	/* Initialize default user to priority mapping, UPx->TC0 */
6233 	tc = &adapter->dcb_cfg.tc_config[0];
6234 	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6235 	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6236 
6237 	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6238 	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6239 	adapter->dcb_cfg.pfc_mode_enable = false;
6240 	adapter->dcb_set_bitmap = 0x00;
6241 	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6242 		adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6243 	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6244 	       sizeof(adapter->temp_dcb_cfg));
6245 }
6246 #endif
6247 
6248 /**
6249  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
6250  * @adapter: board private structure to initialize
6251  * @ii: pointer to ixgbe_info for device
6252  *
6253  * ixgbe_sw_init initializes the Adapter private data structure.
6254  * Fields are initialized based on PCI device information and
6255  * OS network device settings (MTU size).
6256  **/
6257 static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6258 			 const struct ixgbe_info *ii)
6259 {
6260 	struct ixgbe_hw *hw = &adapter->hw;
6261 	struct pci_dev *pdev = adapter->pdev;
6262 	unsigned int rss, fdir;
6263 	u32 fwsm;
6264 	int i;
6265 
6266 	/* PCI config space info */
6267 
6268 	hw->vendor_id = pdev->vendor;
6269 	hw->device_id = pdev->device;
6270 	hw->revision_id = pdev->revision;
6271 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
6272 	hw->subsystem_device_id = pdev->subsystem_device;
6273 
6274 	/* get_invariants needs the device IDs */
6275 	ii->get_invariants(hw);
6276 
6277 	/* Set common capability flags and settings */
6278 	rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6279 	adapter->ring_feature[RING_F_RSS].limit = rss;
6280 	adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6281 	adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6282 	adapter->atr_sample_rate = 20;
6283 	fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6284 	adapter->ring_feature[RING_F_FDIR].limit = fdir;
6285 	adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6286 	adapter->ring_feature[RING_F_VMDQ].limit = 1;
6287 #ifdef CONFIG_IXGBE_DCA
6288 	adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6289 #endif
6290 #ifdef CONFIG_IXGBE_DCB
6291 	adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6292 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6293 #endif
6294 #ifdef IXGBE_FCOE
6295 	adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6296 	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6297 #ifdef CONFIG_IXGBE_DCB
6298 	/* Default traffic class to use for FCoE */
6299 	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6300 #endif /* CONFIG_IXGBE_DCB */
6301 #endif /* IXGBE_FCOE */
6302 
6303 	/* initialize static ixgbe jump table entries */
6304 	adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6305 					  GFP_KERNEL);
6306 	if (!adapter->jump_tables[0])
6307 		return -ENOMEM;
6308 	adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6309 
6310 	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6311 		adapter->jump_tables[i] = NULL;
6312 
6313 	adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6314 				     sizeof(struct ixgbe_mac_addr),
6315 				     GFP_KERNEL);
6316 	if (!adapter->mac_table)
6317 		return -ENOMEM;
6318 
6319 	if (ixgbe_init_rss_key(adapter))
6320 		return -ENOMEM;
6321 
6322 	adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6323 	if (!adapter->af_xdp_zc_qps)
6324 		return -ENOMEM;
6325 
6326 	/* Set MAC specific capability flags and exceptions */
6327 	switch (hw->mac.type) {
6328 	case ixgbe_mac_82598EB:
6329 		adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6330 
6331 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
6332 			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6333 
6334 		adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6335 		adapter->ring_feature[RING_F_FDIR].limit = 0;
6336 		adapter->atr_sample_rate = 0;
6337 		adapter->fdir_pballoc = 0;
6338 #ifdef IXGBE_FCOE
6339 		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6340 		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6341 #ifdef CONFIG_IXGBE_DCB
6342 		adapter->fcoe.up = 0;
6343 #endif /* IXGBE_DCB */
6344 #endif /* IXGBE_FCOE */
6345 		break;
6346 	case ixgbe_mac_82599EB:
6347 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6348 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6349 		break;
6350 	case ixgbe_mac_X540:
6351 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6352 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
6353 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6354 		break;
6355 	case ixgbe_mac_x550em_a:
6356 		switch (hw->device_id) {
6357 		case IXGBE_DEV_ID_X550EM_A_1G_T:
6358 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6359 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6360 			break;
6361 		default:
6362 			break;
6363 		}
6364 		fallthrough;
6365 	case ixgbe_mac_X550EM_x:
6366 #ifdef CONFIG_IXGBE_DCB
6367 		adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6368 #endif
6369 #ifdef IXGBE_FCOE
6370 		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6371 #ifdef CONFIG_IXGBE_DCB
6372 		adapter->fcoe.up = 0;
6373 #endif /* IXGBE_DCB */
6374 #endif /* IXGBE_FCOE */
6375 		fallthrough;
6376 	case ixgbe_mac_X550:
6377 		if (hw->mac.type == ixgbe_mac_X550)
6378 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6379 #ifdef CONFIG_IXGBE_DCA
6380 		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6381 #endif
6382 		break;
6383 	default:
6384 		break;
6385 	}
6386 
6387 #ifdef IXGBE_FCOE
6388 	/* FCoE support exists, always init the FCoE lock */
6389 	spin_lock_init(&adapter->fcoe.lock);
6390 
6391 #endif
6392 	/* n-tuple support exists, always init our spinlock */
6393 	spin_lock_init(&adapter->fdir_perfect_lock);
6394 
6395 #ifdef CONFIG_IXGBE_DCB
6396 	ixgbe_init_dcb(adapter);
6397 #endif
6398 	ixgbe_init_ipsec_offload(adapter);
6399 
6400 	/* default flow control settings */
6401 	hw->fc.requested_mode = ixgbe_fc_full;
6402 	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
6403 	ixgbe_pbthresh_setup(adapter);
6404 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6405 	hw->fc.send_xon = true;
6406 	hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6407 
6408 #ifdef CONFIG_PCI_IOV
6409 	if (max_vfs > 0)
6410 		e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6411 
6412 	/* assign number of SR-IOV VFs */
6413 	if (hw->mac.type != ixgbe_mac_82598EB) {
6414 		if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6415 			max_vfs = 0;
6416 			e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6417 		}
6418 	}
6419 #endif /* CONFIG_PCI_IOV */
6420 
6421 	/* enable itr by default in dynamic mode */
6422 	adapter->rx_itr_setting = 1;
6423 	adapter->tx_itr_setting = 1;
6424 
6425 	/* set default ring sizes */
6426 	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6427 	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6428 
6429 	/* set default work limits */
6430 	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6431 
6432 	/* initialize eeprom parameters */
6433 	if (ixgbe_init_eeprom_params_generic(hw)) {
6434 		e_dev_err("EEPROM initialization failed\n");
6435 		return -EIO;
6436 	}
6437 
6438 	/* PF holds first pool slot */
6439 	set_bit(0, adapter->fwd_bitmask);
6440 	set_bit(__IXGBE_DOWN, &adapter->state);
6441 
6442 	return 0;
6443 }
6444 
6445 /**
6446  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
6447  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
6448  *
6449  * Return 0 on success, negative on failure
6450  **/
6451 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6452 {
6453 	struct device *dev = tx_ring->dev;
6454 	int orig_node = dev_to_node(dev);
6455 	int ring_node = NUMA_NO_NODE;
6456 	int size;
6457 
6458 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6459 
6460 	if (tx_ring->q_vector)
6461 		ring_node = tx_ring->q_vector->numa_node;
6462 
6463 	tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6464 	if (!tx_ring->tx_buffer_info)
6465 		tx_ring->tx_buffer_info = vmalloc(size);
6466 	if (!tx_ring->tx_buffer_info)
6467 		goto err;
6468 
6469 	/* round up to nearest 4K */
6470 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6471 	tx_ring->size = ALIGN(tx_ring->size, 4096);
6472 
6473 	set_dev_node(dev, ring_node);
6474 	tx_ring->desc = dma_alloc_coherent(dev,
6475 					   tx_ring->size,
6476 					   &tx_ring->dma,
6477 					   GFP_KERNEL);
6478 	set_dev_node(dev, orig_node);
6479 	if (!tx_ring->desc)
6480 		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6481 						   &tx_ring->dma, GFP_KERNEL);
6482 	if (!tx_ring->desc)
6483 		goto err;
6484 
6485 	tx_ring->next_to_use = 0;
6486 	tx_ring->next_to_clean = 0;
6487 	return 0;
6488 
6489 err:
6490 	vfree(tx_ring->tx_buffer_info);
6491 	tx_ring->tx_buffer_info = NULL;
6492 	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6493 	return -ENOMEM;
6494 }
6495 
6496 /**
6497  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
6498  * @adapter: board private structure
6499  *
6500  * If this function returns with an error, then it's possible one or
6501  * more of the rings is populated (while the rest are not).  It is the
6502  * callers duty to clean those orphaned rings.
6503  *
6504  * Return 0 on success, negative on failure
6505  **/
6506 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6507 {
6508 	int i, j = 0, err = 0;
6509 
6510 	for (i = 0; i < adapter->num_tx_queues; i++) {
6511 		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6512 		if (!err)
6513 			continue;
6514 
6515 		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6516 		goto err_setup_tx;
6517 	}
6518 	for (j = 0; j < adapter->num_xdp_queues; j++) {
6519 		err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6520 		if (!err)
6521 			continue;
6522 
6523 		e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6524 		goto err_setup_tx;
6525 	}
6526 
6527 	return 0;
6528 err_setup_tx:
6529 	/* rewind the index freeing the rings as we go */
6530 	while (j--)
6531 		ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6532 	while (i--)
6533 		ixgbe_free_tx_resources(adapter->tx_ring[i]);
6534 	return err;
6535 }
6536 
6537 static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
6538 {
6539 	struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
6540 
6541 	return q_vector ? q_vector->napi.napi_id : 0;
6542 }
6543 
6544 /**
6545  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
6546  * @adapter: pointer to ixgbe_adapter
6547  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
6548  *
6549  * Returns 0 on success, negative on failure
6550  **/
6551 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6552 			     struct ixgbe_ring *rx_ring)
6553 {
6554 	struct device *dev = rx_ring->dev;
6555 	int orig_node = dev_to_node(dev);
6556 	int ring_node = NUMA_NO_NODE;
6557 	int size;
6558 
6559 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6560 
6561 	if (rx_ring->q_vector)
6562 		ring_node = rx_ring->q_vector->numa_node;
6563 
6564 	rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6565 	if (!rx_ring->rx_buffer_info)
6566 		rx_ring->rx_buffer_info = vmalloc(size);
6567 	if (!rx_ring->rx_buffer_info)
6568 		goto err;
6569 
6570 	/* Round up to nearest 4K */
6571 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6572 	rx_ring->size = ALIGN(rx_ring->size, 4096);
6573 
6574 	set_dev_node(dev, ring_node);
6575 	rx_ring->desc = dma_alloc_coherent(dev,
6576 					   rx_ring->size,
6577 					   &rx_ring->dma,
6578 					   GFP_KERNEL);
6579 	set_dev_node(dev, orig_node);
6580 	if (!rx_ring->desc)
6581 		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6582 						   &rx_ring->dma, GFP_KERNEL);
6583 	if (!rx_ring->desc)
6584 		goto err;
6585 
6586 	rx_ring->next_to_clean = 0;
6587 	rx_ring->next_to_use = 0;
6588 
6589 	/* XDP RX-queue info */
6590 	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6591 			     rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
6592 		goto err;
6593 
6594 	rx_ring->xdp_prog = adapter->xdp_prog;
6595 
6596 	return 0;
6597 err:
6598 	vfree(rx_ring->rx_buffer_info);
6599 	rx_ring->rx_buffer_info = NULL;
6600 	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6601 	return -ENOMEM;
6602 }
6603 
6604 /**
6605  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
6606  * @adapter: board private structure
6607  *
6608  * If this function returns with an error, then it's possible one or
6609  * more of the rings is populated (while the rest are not).  It is the
6610  * callers duty to clean those orphaned rings.
6611  *
6612  * Return 0 on success, negative on failure
6613  **/
6614 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6615 {
6616 	int i, err = 0;
6617 
6618 	for (i = 0; i < adapter->num_rx_queues; i++) {
6619 		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6620 		if (!err)
6621 			continue;
6622 
6623 		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6624 		goto err_setup_rx;
6625 	}
6626 
6627 #ifdef IXGBE_FCOE
6628 	err = ixgbe_setup_fcoe_ddp_resources(adapter);
6629 	if (!err)
6630 #endif
6631 		return 0;
6632 err_setup_rx:
6633 	/* rewind the index freeing the rings as we go */
6634 	while (i--)
6635 		ixgbe_free_rx_resources(adapter->rx_ring[i]);
6636 	return err;
6637 }
6638 
6639 /**
6640  * ixgbe_free_tx_resources - Free Tx Resources per Queue
6641  * @tx_ring: Tx descriptor ring for a specific queue
6642  *
6643  * Free all transmit software resources
6644  **/
6645 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6646 {
6647 	ixgbe_clean_tx_ring(tx_ring);
6648 
6649 	vfree(tx_ring->tx_buffer_info);
6650 	tx_ring->tx_buffer_info = NULL;
6651 
6652 	/* if not set, then don't free */
6653 	if (!tx_ring->desc)
6654 		return;
6655 
6656 	dma_free_coherent(tx_ring->dev, tx_ring->size,
6657 			  tx_ring->desc, tx_ring->dma);
6658 
6659 	tx_ring->desc = NULL;
6660 }
6661 
6662 /**
6663  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
6664  * @adapter: board private structure
6665  *
6666  * Free all transmit software resources
6667  **/
6668 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6669 {
6670 	int i;
6671 
6672 	for (i = 0; i < adapter->num_tx_queues; i++)
6673 		if (adapter->tx_ring[i]->desc)
6674 			ixgbe_free_tx_resources(adapter->tx_ring[i]);
6675 	for (i = 0; i < adapter->num_xdp_queues; i++)
6676 		if (adapter->xdp_ring[i]->desc)
6677 			ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6678 }
6679 
6680 /**
6681  * ixgbe_free_rx_resources - Free Rx Resources
6682  * @rx_ring: ring to clean the resources from
6683  *
6684  * Free all receive software resources
6685  **/
6686 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6687 {
6688 	ixgbe_clean_rx_ring(rx_ring);
6689 
6690 	rx_ring->xdp_prog = NULL;
6691 	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6692 	vfree(rx_ring->rx_buffer_info);
6693 	rx_ring->rx_buffer_info = NULL;
6694 
6695 	/* if not set, then don't free */
6696 	if (!rx_ring->desc)
6697 		return;
6698 
6699 	dma_free_coherent(rx_ring->dev, rx_ring->size,
6700 			  rx_ring->desc, rx_ring->dma);
6701 
6702 	rx_ring->desc = NULL;
6703 }
6704 
6705 /**
6706  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
6707  * @adapter: board private structure
6708  *
6709  * Free all receive software resources
6710  **/
6711 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6712 {
6713 	int i;
6714 
6715 #ifdef IXGBE_FCOE
6716 	ixgbe_free_fcoe_ddp_resources(adapter);
6717 
6718 #endif
6719 	for (i = 0; i < adapter->num_rx_queues; i++)
6720 		if (adapter->rx_ring[i]->desc)
6721 			ixgbe_free_rx_resources(adapter->rx_ring[i]);
6722 }
6723 
6724 /**
6725  * ixgbe_change_mtu - Change the Maximum Transfer Unit
6726  * @netdev: network interface device structure
6727  * @new_mtu: new value for maximum frame size
6728  *
6729  * Returns 0 on success, negative on failure
6730  **/
6731 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6732 {
6733 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6734 
6735 	if (adapter->xdp_prog) {
6736 		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6737 				     VLAN_HLEN;
6738 		int i;
6739 
6740 		for (i = 0; i < adapter->num_rx_queues; i++) {
6741 			struct ixgbe_ring *ring = adapter->rx_ring[i];
6742 
6743 			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6744 				e_warn(probe, "Requested MTU size is not supported with XDP\n");
6745 				return -EINVAL;
6746 			}
6747 		}
6748 	}
6749 
6750 	/*
6751 	 * For 82599EB we cannot allow legacy VFs to enable their receive
6752 	 * paths when MTU greater than 1500 is configured.  So display a
6753 	 * warning that legacy VFs will be disabled.
6754 	 */
6755 	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6756 	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6757 	    (new_mtu > ETH_DATA_LEN))
6758 		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6759 
6760 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
6761 		   netdev->mtu, new_mtu);
6762 
6763 	/* must set new MTU before calling down or up */
6764 	netdev->mtu = new_mtu;
6765 
6766 	if (netif_running(netdev))
6767 		ixgbe_reinit_locked(adapter);
6768 
6769 	return 0;
6770 }
6771 
6772 /**
6773  * ixgbe_open - Called when a network interface is made active
6774  * @netdev: network interface device structure
6775  *
6776  * Returns 0 on success, negative value on failure
6777  *
6778  * The open entry point is called when a network interface is made
6779  * active by the system (IFF_UP).  At this point all resources needed
6780  * for transmit and receive operations are allocated, the interrupt
6781  * handler is registered with the OS, the watchdog timer is started,
6782  * and the stack is notified that the interface is ready.
6783  **/
6784 int ixgbe_open(struct net_device *netdev)
6785 {
6786 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6787 	struct ixgbe_hw *hw = &adapter->hw;
6788 	int err, queues;
6789 
6790 	/* disallow open during test */
6791 	if (test_bit(__IXGBE_TESTING, &adapter->state))
6792 		return -EBUSY;
6793 
6794 	netif_carrier_off(netdev);
6795 
6796 	/* allocate transmit descriptors */
6797 	err = ixgbe_setup_all_tx_resources(adapter);
6798 	if (err)
6799 		goto err_setup_tx;
6800 
6801 	/* allocate receive descriptors */
6802 	err = ixgbe_setup_all_rx_resources(adapter);
6803 	if (err)
6804 		goto err_setup_rx;
6805 
6806 	ixgbe_configure(adapter);
6807 
6808 	err = ixgbe_request_irq(adapter);
6809 	if (err)
6810 		goto err_req_irq;
6811 
6812 	/* Notify the stack of the actual queue counts. */
6813 	queues = adapter->num_tx_queues;
6814 	err = netif_set_real_num_tx_queues(netdev, queues);
6815 	if (err)
6816 		goto err_set_queues;
6817 
6818 	queues = adapter->num_rx_queues;
6819 	err = netif_set_real_num_rx_queues(netdev, queues);
6820 	if (err)
6821 		goto err_set_queues;
6822 
6823 	ixgbe_ptp_init(adapter);
6824 
6825 	ixgbe_up_complete(adapter);
6826 
6827 	udp_tunnel_nic_reset_ntf(netdev);
6828 
6829 	return 0;
6830 
6831 err_set_queues:
6832 	ixgbe_free_irq(adapter);
6833 err_req_irq:
6834 	ixgbe_free_all_rx_resources(adapter);
6835 	if (hw->phy.ops.set_phy_power && !adapter->wol)
6836 		hw->phy.ops.set_phy_power(&adapter->hw, false);
6837 err_setup_rx:
6838 	ixgbe_free_all_tx_resources(adapter);
6839 err_setup_tx:
6840 	ixgbe_reset(adapter);
6841 
6842 	return err;
6843 }
6844 
6845 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6846 {
6847 	ixgbe_ptp_suspend(adapter);
6848 
6849 	if (adapter->hw.phy.ops.enter_lplu) {
6850 		adapter->hw.phy.reset_disable = true;
6851 		ixgbe_down(adapter);
6852 		adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6853 		adapter->hw.phy.reset_disable = false;
6854 	} else {
6855 		ixgbe_down(adapter);
6856 	}
6857 
6858 	ixgbe_free_irq(adapter);
6859 
6860 	ixgbe_free_all_tx_resources(adapter);
6861 	ixgbe_free_all_rx_resources(adapter);
6862 }
6863 
6864 /**
6865  * ixgbe_close - Disables a network interface
6866  * @netdev: network interface device structure
6867  *
6868  * Returns 0, this is not allowed to fail
6869  *
6870  * The close entry point is called when an interface is de-activated
6871  * by the OS.  The hardware is still under the drivers control, but
6872  * needs to be disabled.  A global MAC reset is issued to stop the
6873  * hardware, and all transmit and receive resources are freed.
6874  **/
6875 int ixgbe_close(struct net_device *netdev)
6876 {
6877 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6878 
6879 	ixgbe_ptp_stop(adapter);
6880 
6881 	if (netif_device_present(netdev))
6882 		ixgbe_close_suspend(adapter);
6883 
6884 	ixgbe_fdir_filter_exit(adapter);
6885 
6886 	ixgbe_release_hw_control(adapter);
6887 
6888 	return 0;
6889 }
6890 
6891 static int __maybe_unused ixgbe_resume(struct device *dev_d)
6892 {
6893 	struct pci_dev *pdev = to_pci_dev(dev_d);
6894 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6895 	struct net_device *netdev = adapter->netdev;
6896 	u32 err;
6897 
6898 	adapter->hw.hw_addr = adapter->io_addr;
6899 
6900 	err = pci_enable_device_mem(pdev);
6901 	if (err) {
6902 		e_dev_err("Cannot enable PCI device from suspend\n");
6903 		return err;
6904 	}
6905 	smp_mb__before_atomic();
6906 	clear_bit(__IXGBE_DISABLED, &adapter->state);
6907 	pci_set_master(pdev);
6908 
6909 	device_wakeup_disable(dev_d);
6910 
6911 	ixgbe_reset(adapter);
6912 
6913 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6914 
6915 	rtnl_lock();
6916 	err = ixgbe_init_interrupt_scheme(adapter);
6917 	if (!err && netif_running(netdev))
6918 		err = ixgbe_open(netdev);
6919 
6920 
6921 	if (!err)
6922 		netif_device_attach(netdev);
6923 	rtnl_unlock();
6924 
6925 	return err;
6926 }
6927 
6928 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6929 {
6930 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6931 	struct net_device *netdev = adapter->netdev;
6932 	struct ixgbe_hw *hw = &adapter->hw;
6933 	u32 ctrl;
6934 	u32 wufc = adapter->wol;
6935 
6936 	rtnl_lock();
6937 	netif_device_detach(netdev);
6938 
6939 	if (netif_running(netdev))
6940 		ixgbe_close_suspend(adapter);
6941 
6942 	ixgbe_clear_interrupt_scheme(adapter);
6943 	rtnl_unlock();
6944 
6945 	if (hw->mac.ops.stop_link_on_d3)
6946 		hw->mac.ops.stop_link_on_d3(hw);
6947 
6948 	if (wufc) {
6949 		u32 fctrl;
6950 
6951 		ixgbe_set_rx_mode(netdev);
6952 
6953 		/* enable the optics for 82599 SFP+ fiber as we can WoL */
6954 		if (hw->mac.ops.enable_tx_laser)
6955 			hw->mac.ops.enable_tx_laser(hw);
6956 
6957 		/* enable the reception of multicast packets */
6958 		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6959 		fctrl |= IXGBE_FCTRL_MPE;
6960 		IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6961 
6962 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6963 		ctrl |= IXGBE_CTRL_GIO_DIS;
6964 		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6965 
6966 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6967 	} else {
6968 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6969 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6970 	}
6971 
6972 	switch (hw->mac.type) {
6973 	case ixgbe_mac_82598EB:
6974 		pci_wake_from_d3(pdev, false);
6975 		break;
6976 	case ixgbe_mac_82599EB:
6977 	case ixgbe_mac_X540:
6978 	case ixgbe_mac_X550:
6979 	case ixgbe_mac_X550EM_x:
6980 	case ixgbe_mac_x550em_a:
6981 		pci_wake_from_d3(pdev, !!wufc);
6982 		break;
6983 	default:
6984 		break;
6985 	}
6986 
6987 	*enable_wake = !!wufc;
6988 	if (hw->phy.ops.set_phy_power && !*enable_wake)
6989 		hw->phy.ops.set_phy_power(hw, false);
6990 
6991 	ixgbe_release_hw_control(adapter);
6992 
6993 	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6994 		pci_disable_device(pdev);
6995 
6996 	return 0;
6997 }
6998 
6999 static int __maybe_unused ixgbe_suspend(struct device *dev_d)
7000 {
7001 	struct pci_dev *pdev = to_pci_dev(dev_d);
7002 	int retval;
7003 	bool wake;
7004 
7005 	retval = __ixgbe_shutdown(pdev, &wake);
7006 
7007 	device_set_wakeup_enable(dev_d, wake);
7008 
7009 	return retval;
7010 }
7011 
7012 static void ixgbe_shutdown(struct pci_dev *pdev)
7013 {
7014 	bool wake;
7015 
7016 	__ixgbe_shutdown(pdev, &wake);
7017 
7018 	if (system_state == SYSTEM_POWER_OFF) {
7019 		pci_wake_from_d3(pdev, wake);
7020 		pci_set_power_state(pdev, PCI_D3hot);
7021 	}
7022 }
7023 
7024 /**
7025  * ixgbe_update_stats - Update the board statistics counters.
7026  * @adapter: board private structure
7027  **/
7028 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7029 {
7030 	struct net_device *netdev = adapter->netdev;
7031 	struct ixgbe_hw *hw = &adapter->hw;
7032 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
7033 	u64 total_mpc = 0;
7034 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7035 	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7036 	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7037 	u64 alloc_rx_page = 0;
7038 	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7039 
7040 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7041 	    test_bit(__IXGBE_RESETTING, &adapter->state))
7042 		return;
7043 
7044 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7045 		u64 rsc_count = 0;
7046 		u64 rsc_flush = 0;
7047 		for (i = 0; i < adapter->num_rx_queues; i++) {
7048 			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7049 			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7050 		}
7051 		adapter->rsc_total_count = rsc_count;
7052 		adapter->rsc_total_flush = rsc_flush;
7053 	}
7054 
7055 	for (i = 0; i < adapter->num_rx_queues; i++) {
7056 		struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
7057 
7058 		if (!rx_ring)
7059 			continue;
7060 		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7061 		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7062 		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7063 		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7064 		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7065 		bytes += rx_ring->stats.bytes;
7066 		packets += rx_ring->stats.packets;
7067 	}
7068 	adapter->non_eop_descs = non_eop_descs;
7069 	adapter->alloc_rx_page = alloc_rx_page;
7070 	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7071 	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7072 	adapter->hw_csum_rx_error = hw_csum_rx_error;
7073 	netdev->stats.rx_bytes = bytes;
7074 	netdev->stats.rx_packets = packets;
7075 
7076 	bytes = 0;
7077 	packets = 0;
7078 	/* gather some stats to the adapter struct that are per queue */
7079 	for (i = 0; i < adapter->num_tx_queues; i++) {
7080 		struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
7081 
7082 		if (!tx_ring)
7083 			continue;
7084 		restart_queue += tx_ring->tx_stats.restart_queue;
7085 		tx_busy += tx_ring->tx_stats.tx_busy;
7086 		bytes += tx_ring->stats.bytes;
7087 		packets += tx_ring->stats.packets;
7088 	}
7089 	for (i = 0; i < adapter->num_xdp_queues; i++) {
7090 		struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
7091 
7092 		if (!xdp_ring)
7093 			continue;
7094 		restart_queue += xdp_ring->tx_stats.restart_queue;
7095 		tx_busy += xdp_ring->tx_stats.tx_busy;
7096 		bytes += xdp_ring->stats.bytes;
7097 		packets += xdp_ring->stats.packets;
7098 	}
7099 	adapter->restart_queue = restart_queue;
7100 	adapter->tx_busy = tx_busy;
7101 	netdev->stats.tx_bytes = bytes;
7102 	netdev->stats.tx_packets = packets;
7103 
7104 	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7105 
7106 	/* 8 register reads */
7107 	for (i = 0; i < 8; i++) {
7108 		/* for packet buffers not used, the register should read 0 */
7109 		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7110 		missed_rx += mpc;
7111 		hwstats->mpc[i] += mpc;
7112 		total_mpc += hwstats->mpc[i];
7113 		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7114 		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7115 		switch (hw->mac.type) {
7116 		case ixgbe_mac_82598EB:
7117 			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7118 			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7119 			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7120 			hwstats->pxonrxc[i] +=
7121 				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7122 			break;
7123 		case ixgbe_mac_82599EB:
7124 		case ixgbe_mac_X540:
7125 		case ixgbe_mac_X550:
7126 		case ixgbe_mac_X550EM_x:
7127 		case ixgbe_mac_x550em_a:
7128 			hwstats->pxonrxc[i] +=
7129 				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7130 			break;
7131 		default:
7132 			break;
7133 		}
7134 	}
7135 
7136 	/*16 register reads */
7137 	for (i = 0; i < 16; i++) {
7138 		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7139 		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7140 		if ((hw->mac.type == ixgbe_mac_82599EB) ||
7141 		    (hw->mac.type == ixgbe_mac_X540) ||
7142 		    (hw->mac.type == ixgbe_mac_X550) ||
7143 		    (hw->mac.type == ixgbe_mac_X550EM_x) ||
7144 		    (hw->mac.type == ixgbe_mac_x550em_a)) {
7145 			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7146 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
7147 			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7148 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
7149 		}
7150 	}
7151 
7152 	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
7153 	/* work around hardware counting issue */
7154 	hwstats->gprc -= missed_rx;
7155 
7156 	ixgbe_update_xoff_received(adapter);
7157 
7158 	/* 82598 hardware only has a 32 bit counter in the high register */
7159 	switch (hw->mac.type) {
7160 	case ixgbe_mac_82598EB:
7161 		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7162 		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7163 		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7164 		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7165 		break;
7166 	case ixgbe_mac_X540:
7167 	case ixgbe_mac_X550:
7168 	case ixgbe_mac_X550EM_x:
7169 	case ixgbe_mac_x550em_a:
7170 		/* OS2BMC stats are X540 and later */
7171 		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7172 		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7173 		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7174 		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7175 		fallthrough;
7176 	case ixgbe_mac_82599EB:
7177 		for (i = 0; i < 16; i++)
7178 			adapter->hw_rx_no_dma_resources +=
7179 					     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7180 		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7181 		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
7182 		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7183 		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
7184 		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7185 		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
7186 		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7187 		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7188 		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7189 #ifdef IXGBE_FCOE
7190 		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7191 		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7192 		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7193 		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7194 		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7195 		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7196 		/* Add up per cpu counters for total ddp aloc fail */
7197 		if (adapter->fcoe.ddp_pool) {
7198 			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7199 			struct ixgbe_fcoe_ddp_pool *ddp_pool;
7200 			unsigned int cpu;
7201 			u64 noddp = 0, noddp_ext_buff = 0;
7202 			for_each_possible_cpu(cpu) {
7203 				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7204 				noddp += ddp_pool->noddp;
7205 				noddp_ext_buff += ddp_pool->noddp_ext_buff;
7206 			}
7207 			hwstats->fcoe_noddp = noddp;
7208 			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7209 		}
7210 #endif /* IXGBE_FCOE */
7211 		break;
7212 	default:
7213 		break;
7214 	}
7215 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7216 	hwstats->bprc += bprc;
7217 	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7218 	if (hw->mac.type == ixgbe_mac_82598EB)
7219 		hwstats->mprc -= bprc;
7220 	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7221 	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7222 	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7223 	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7224 	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7225 	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7226 	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7227 	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7228 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7229 	hwstats->lxontxc += lxon;
7230 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7231 	hwstats->lxofftxc += lxoff;
7232 	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7233 	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7234 	/*
7235 	 * 82598 errata - tx of flow control packets is included in tx counters
7236 	 */
7237 	xon_off_tot = lxon + lxoff;
7238 	hwstats->gptc -= xon_off_tot;
7239 	hwstats->mptc -= xon_off_tot;
7240 	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7241 	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7242 	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7243 	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7244 	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7245 	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7246 	hwstats->ptc64 -= xon_off_tot;
7247 	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7248 	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7249 	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7250 	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7251 	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7252 	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7253 
7254 	/* Fill out the OS statistics structure */
7255 	netdev->stats.multicast = hwstats->mprc;
7256 
7257 	/* Rx Errors */
7258 	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7259 	netdev->stats.rx_dropped = 0;
7260 	netdev->stats.rx_length_errors = hwstats->rlec;
7261 	netdev->stats.rx_crc_errors = hwstats->crcerrs;
7262 	netdev->stats.rx_missed_errors = total_mpc;
7263 }
7264 
7265 /**
7266  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
7267  * @adapter: pointer to the device adapter structure
7268  **/
7269 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7270 {
7271 	struct ixgbe_hw *hw = &adapter->hw;
7272 	int i;
7273 
7274 	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7275 		return;
7276 
7277 	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7278 
7279 	/* if interface is down do nothing */
7280 	if (test_bit(__IXGBE_DOWN, &adapter->state))
7281 		return;
7282 
7283 	/* do nothing if we are not using signature filters */
7284 	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7285 		return;
7286 
7287 	adapter->fdir_overflow++;
7288 
7289 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7290 		for (i = 0; i < adapter->num_tx_queues; i++)
7291 			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7292 				&(adapter->tx_ring[i]->state));
7293 		for (i = 0; i < adapter->num_xdp_queues; i++)
7294 			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7295 				&adapter->xdp_ring[i]->state);
7296 		/* re-enable flow director interrupts */
7297 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7298 	} else {
7299 		e_err(probe, "failed to finish FDIR re-initialization, "
7300 		      "ignored adding FDIR ATR filters\n");
7301 	}
7302 }
7303 
7304 /**
7305  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
7306  * @adapter: pointer to the device adapter structure
7307  *
7308  * This function serves two purposes.  First it strobes the interrupt lines
7309  * in order to make certain interrupts are occurring.  Secondly it sets the
7310  * bits needed to check for TX hangs.  As a result we should immediately
7311  * determine if a hang has occurred.
7312  */
7313 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7314 {
7315 	struct ixgbe_hw *hw = &adapter->hw;
7316 	u64 eics = 0;
7317 	int i;
7318 
7319 	/* If we're down, removing or resetting, just bail */
7320 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7321 	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
7322 	    test_bit(__IXGBE_RESETTING, &adapter->state))
7323 		return;
7324 
7325 	/* Force detection of hung controller */
7326 	if (netif_carrier_ok(adapter->netdev)) {
7327 		for (i = 0; i < adapter->num_tx_queues; i++)
7328 			set_check_for_tx_hang(adapter->tx_ring[i]);
7329 		for (i = 0; i < adapter->num_xdp_queues; i++)
7330 			set_check_for_tx_hang(adapter->xdp_ring[i]);
7331 	}
7332 
7333 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
7334 		/*
7335 		 * for legacy and MSI interrupts don't set any bits
7336 		 * that are enabled for EIAM, because this operation
7337 		 * would set *both* EIMS and EICS for any bit in EIAM
7338 		 */
7339 		IXGBE_WRITE_REG(hw, IXGBE_EICS,
7340 			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7341 	} else {
7342 		/* get one bit for every active tx/rx interrupt vector */
7343 		for (i = 0; i < adapter->num_q_vectors; i++) {
7344 			struct ixgbe_q_vector *qv = adapter->q_vector[i];
7345 			if (qv->rx.ring || qv->tx.ring)
7346 				eics |= BIT_ULL(i);
7347 		}
7348 	}
7349 
7350 	/* Cause software interrupt to ensure rings are cleaned */
7351 	ixgbe_irq_rearm_queues(adapter, eics);
7352 }
7353 
7354 /**
7355  * ixgbe_watchdog_update_link - update the link status
7356  * @adapter: pointer to the device adapter structure
7357  **/
7358 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7359 {
7360 	struct ixgbe_hw *hw = &adapter->hw;
7361 	u32 link_speed = adapter->link_speed;
7362 	bool link_up = adapter->link_up;
7363 	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7364 
7365 	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7366 		return;
7367 
7368 	if (hw->mac.ops.check_link) {
7369 		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7370 	} else {
7371 		/* always assume link is up, if no check link function */
7372 		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7373 		link_up = true;
7374 	}
7375 
7376 	if (adapter->ixgbe_ieee_pfc)
7377 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7378 
7379 	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7380 		hw->mac.ops.fc_enable(hw);
7381 		ixgbe_set_rx_drop_en(adapter);
7382 	}
7383 
7384 	if (link_up ||
7385 	    time_after(jiffies, (adapter->link_check_timeout +
7386 				 IXGBE_TRY_LINK_TIMEOUT))) {
7387 		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7388 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7389 		IXGBE_WRITE_FLUSH(hw);
7390 	}
7391 
7392 	adapter->link_up = link_up;
7393 	adapter->link_speed = link_speed;
7394 }
7395 
7396 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7397 {
7398 #ifdef CONFIG_IXGBE_DCB
7399 	struct net_device *netdev = adapter->netdev;
7400 	struct dcb_app app = {
7401 			      .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7402 			      .protocol = 0,
7403 			     };
7404 	u8 up = 0;
7405 
7406 	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7407 		up = dcb_ieee_getapp_mask(netdev, &app);
7408 
7409 	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7410 #endif
7411 }
7412 
7413 /**
7414  * ixgbe_watchdog_link_is_up - update netif_carrier status and
7415  *                             print link up message
7416  * @adapter: pointer to the device adapter structure
7417  **/
7418 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7419 {
7420 	struct net_device *netdev = adapter->netdev;
7421 	struct ixgbe_hw *hw = &adapter->hw;
7422 	u32 link_speed = adapter->link_speed;
7423 	const char *speed_str;
7424 	bool flow_rx, flow_tx;
7425 
7426 	/* only continue if link was previously down */
7427 	if (netif_carrier_ok(netdev))
7428 		return;
7429 
7430 	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7431 
7432 	switch (hw->mac.type) {
7433 	case ixgbe_mac_82598EB: {
7434 		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7435 		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7436 		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7437 		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7438 	}
7439 		break;
7440 	case ixgbe_mac_X540:
7441 	case ixgbe_mac_X550:
7442 	case ixgbe_mac_X550EM_x:
7443 	case ixgbe_mac_x550em_a:
7444 	case ixgbe_mac_82599EB: {
7445 		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7446 		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7447 		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7448 		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7449 	}
7450 		break;
7451 	default:
7452 		flow_tx = false;
7453 		flow_rx = false;
7454 		break;
7455 	}
7456 
7457 	adapter->last_rx_ptp_check = jiffies;
7458 
7459 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7460 		ixgbe_ptp_start_cyclecounter(adapter);
7461 
7462 	switch (link_speed) {
7463 	case IXGBE_LINK_SPEED_10GB_FULL:
7464 		speed_str = "10 Gbps";
7465 		break;
7466 	case IXGBE_LINK_SPEED_5GB_FULL:
7467 		speed_str = "5 Gbps";
7468 		break;
7469 	case IXGBE_LINK_SPEED_2_5GB_FULL:
7470 		speed_str = "2.5 Gbps";
7471 		break;
7472 	case IXGBE_LINK_SPEED_1GB_FULL:
7473 		speed_str = "1 Gbps";
7474 		break;
7475 	case IXGBE_LINK_SPEED_100_FULL:
7476 		speed_str = "100 Mbps";
7477 		break;
7478 	case IXGBE_LINK_SPEED_10_FULL:
7479 		speed_str = "10 Mbps";
7480 		break;
7481 	default:
7482 		speed_str = "unknown speed";
7483 		break;
7484 	}
7485 	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7486 	       ((flow_rx && flow_tx) ? "RX/TX" :
7487 	       (flow_rx ? "RX" :
7488 	       (flow_tx ? "TX" : "None"))));
7489 
7490 	netif_carrier_on(netdev);
7491 	ixgbe_check_vf_rate_limit(adapter);
7492 
7493 	/* enable transmits */
7494 	netif_tx_wake_all_queues(adapter->netdev);
7495 
7496 	/* update the default user priority for VFs */
7497 	ixgbe_update_default_up(adapter);
7498 
7499 	/* ping all the active vfs to let them know link has changed */
7500 	ixgbe_ping_all_vfs(adapter);
7501 }
7502 
7503 /**
7504  * ixgbe_watchdog_link_is_down - update netif_carrier status and
7505  *                               print link down message
7506  * @adapter: pointer to the adapter structure
7507  **/
7508 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7509 {
7510 	struct net_device *netdev = adapter->netdev;
7511 	struct ixgbe_hw *hw = &adapter->hw;
7512 
7513 	adapter->link_up = false;
7514 	adapter->link_speed = 0;
7515 
7516 	/* only continue if link was up previously */
7517 	if (!netif_carrier_ok(netdev))
7518 		return;
7519 
7520 	/* poll for SFP+ cable when link is down */
7521 	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7522 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7523 
7524 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7525 		ixgbe_ptp_start_cyclecounter(adapter);
7526 
7527 	e_info(drv, "NIC Link is Down\n");
7528 	netif_carrier_off(netdev);
7529 
7530 	/* ping all the active vfs to let them know link has changed */
7531 	ixgbe_ping_all_vfs(adapter);
7532 }
7533 
7534 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7535 {
7536 	int i;
7537 
7538 	for (i = 0; i < adapter->num_tx_queues; i++) {
7539 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7540 
7541 		if (tx_ring->next_to_use != tx_ring->next_to_clean)
7542 			return true;
7543 	}
7544 
7545 	for (i = 0; i < adapter->num_xdp_queues; i++) {
7546 		struct ixgbe_ring *ring = adapter->xdp_ring[i];
7547 
7548 		if (ring->next_to_use != ring->next_to_clean)
7549 			return true;
7550 	}
7551 
7552 	return false;
7553 }
7554 
7555 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7556 {
7557 	struct ixgbe_hw *hw = &adapter->hw;
7558 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7559 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7560 
7561 	int i, j;
7562 
7563 	if (!adapter->num_vfs)
7564 		return false;
7565 
7566 	/* resetting the PF is only needed for MAC before X550 */
7567 	if (hw->mac.type >= ixgbe_mac_X550)
7568 		return false;
7569 
7570 	for (i = 0; i < adapter->num_vfs; i++) {
7571 		for (j = 0; j < q_per_pool; j++) {
7572 			u32 h, t;
7573 
7574 			h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7575 			t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7576 
7577 			if (h != t)
7578 				return true;
7579 		}
7580 	}
7581 
7582 	return false;
7583 }
7584 
7585 /**
7586  * ixgbe_watchdog_flush_tx - flush queues on link down
7587  * @adapter: pointer to the device adapter structure
7588  **/
7589 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7590 {
7591 	if (!netif_carrier_ok(adapter->netdev)) {
7592 		if (ixgbe_ring_tx_pending(adapter) ||
7593 		    ixgbe_vf_tx_pending(adapter)) {
7594 			/* We've lost link, so the controller stops DMA,
7595 			 * but we've got queued Tx work that's never going
7596 			 * to get done, so reset controller to flush Tx.
7597 			 * (Do the reset outside of interrupt context).
7598 			 */
7599 			e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7600 			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7601 		}
7602 	}
7603 }
7604 
7605 #ifdef CONFIG_PCI_IOV
7606 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7607 {
7608 	struct ixgbe_hw *hw = &adapter->hw;
7609 	struct pci_dev *pdev = adapter->pdev;
7610 	unsigned int vf;
7611 	u32 gpc;
7612 
7613 	if (!(netif_carrier_ok(adapter->netdev)))
7614 		return;
7615 
7616 	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7617 	if (gpc) /* If incrementing then no need for the check below */
7618 		return;
7619 	/* Check to see if a bad DMA write target from an errant or
7620 	 * malicious VF has caused a PCIe error.  If so then we can
7621 	 * issue a VFLR to the offending VF(s) and then resume without
7622 	 * requesting a full slot reset.
7623 	 */
7624 
7625 	if (!pdev)
7626 		return;
7627 
7628 	/* check status reg for all VFs owned by this PF */
7629 	for (vf = 0; vf < adapter->num_vfs; ++vf) {
7630 		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7631 		u16 status_reg;
7632 
7633 		if (!vfdev)
7634 			continue;
7635 		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7636 		if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7637 		    status_reg & PCI_STATUS_REC_MASTER_ABORT)
7638 			pcie_flr(vfdev);
7639 	}
7640 }
7641 
7642 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7643 {
7644 	u32 ssvpc;
7645 
7646 	/* Do not perform spoof check for 82598 or if not in IOV mode */
7647 	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7648 	    adapter->num_vfs == 0)
7649 		return;
7650 
7651 	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7652 
7653 	/*
7654 	 * ssvpc register is cleared on read, if zero then no
7655 	 * spoofed packets in the last interval.
7656 	 */
7657 	if (!ssvpc)
7658 		return;
7659 
7660 	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7661 }
7662 #else
7663 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7664 {
7665 }
7666 
7667 static void
7668 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7669 {
7670 }
7671 #endif /* CONFIG_PCI_IOV */
7672 
7673 
7674 /**
7675  * ixgbe_watchdog_subtask - check and bring link up
7676  * @adapter: pointer to the device adapter structure
7677  **/
7678 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7679 {
7680 	/* if interface is down, removing or resetting, do nothing */
7681 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7682 	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
7683 	    test_bit(__IXGBE_RESETTING, &adapter->state))
7684 		return;
7685 
7686 	ixgbe_watchdog_update_link(adapter);
7687 
7688 	if (adapter->link_up)
7689 		ixgbe_watchdog_link_is_up(adapter);
7690 	else
7691 		ixgbe_watchdog_link_is_down(adapter);
7692 
7693 	ixgbe_check_for_bad_vf(adapter);
7694 	ixgbe_spoof_check(adapter);
7695 	ixgbe_update_stats(adapter);
7696 
7697 	ixgbe_watchdog_flush_tx(adapter);
7698 }
7699 
7700 /**
7701  * ixgbe_sfp_detection_subtask - poll for SFP+ cable
7702  * @adapter: the ixgbe adapter structure
7703  **/
7704 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7705 {
7706 	struct ixgbe_hw *hw = &adapter->hw;
7707 	s32 err;
7708 
7709 	/* not searching for SFP so there is nothing to do here */
7710 	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7711 	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7712 		return;
7713 
7714 	if (adapter->sfp_poll_time &&
7715 	    time_after(adapter->sfp_poll_time, jiffies))
7716 		return; /* If not yet time to poll for SFP */
7717 
7718 	/* someone else is in init, wait until next service event */
7719 	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7720 		return;
7721 
7722 	adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7723 
7724 	err = hw->phy.ops.identify_sfp(hw);
7725 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7726 		goto sfp_out;
7727 
7728 	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7729 		/* If no cable is present, then we need to reset
7730 		 * the next time we find a good cable. */
7731 		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7732 	}
7733 
7734 	/* exit on error */
7735 	if (err)
7736 		goto sfp_out;
7737 
7738 	/* exit if reset not needed */
7739 	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7740 		goto sfp_out;
7741 
7742 	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7743 
7744 	/*
7745 	 * A module may be identified correctly, but the EEPROM may not have
7746 	 * support for that module.  setup_sfp() will fail in that case, so
7747 	 * we should not allow that module to load.
7748 	 */
7749 	if (hw->mac.type == ixgbe_mac_82598EB)
7750 		err = hw->phy.ops.reset(hw);
7751 	else
7752 		err = hw->mac.ops.setup_sfp(hw);
7753 
7754 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7755 		goto sfp_out;
7756 
7757 	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7758 	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7759 
7760 sfp_out:
7761 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7762 
7763 	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7764 	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7765 		e_dev_err("failed to initialize because an unsupported "
7766 			  "SFP+ module type was detected.\n");
7767 		e_dev_err("Reload the driver after installing a "
7768 			  "supported module.\n");
7769 		unregister_netdev(adapter->netdev);
7770 	}
7771 }
7772 
7773 /**
7774  * ixgbe_sfp_link_config_subtask - set up link SFP after module install
7775  * @adapter: the ixgbe adapter structure
7776  **/
7777 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7778 {
7779 	struct ixgbe_hw *hw = &adapter->hw;
7780 	u32 cap_speed;
7781 	u32 speed;
7782 	bool autoneg = false;
7783 
7784 	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7785 		return;
7786 
7787 	/* someone else is in init, wait until next service event */
7788 	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7789 		return;
7790 
7791 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7792 
7793 	hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7794 
7795 	/* advertise highest capable link speed */
7796 	if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7797 		speed = IXGBE_LINK_SPEED_10GB_FULL;
7798 	else
7799 		speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7800 				     IXGBE_LINK_SPEED_1GB_FULL);
7801 
7802 	if (hw->mac.ops.setup_link)
7803 		hw->mac.ops.setup_link(hw, speed, true);
7804 
7805 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7806 	adapter->link_check_timeout = jiffies;
7807 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7808 }
7809 
7810 /**
7811  * ixgbe_service_timer - Timer Call-back
7812  * @t: pointer to timer_list structure
7813  **/
7814 static void ixgbe_service_timer(struct timer_list *t)
7815 {
7816 	struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7817 	unsigned long next_event_offset;
7818 
7819 	/* poll faster when waiting for link */
7820 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7821 		next_event_offset = HZ / 10;
7822 	else
7823 		next_event_offset = HZ * 2;
7824 
7825 	/* Reset the timer */
7826 	mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7827 
7828 	ixgbe_service_event_schedule(adapter);
7829 }
7830 
7831 static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7832 {
7833 	struct ixgbe_hw *hw = &adapter->hw;
7834 	u32 status;
7835 
7836 	if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7837 		return;
7838 
7839 	adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7840 
7841 	if (!hw->phy.ops.handle_lasi)
7842 		return;
7843 
7844 	status = hw->phy.ops.handle_lasi(&adapter->hw);
7845 	if (status != IXGBE_ERR_OVERTEMP)
7846 		return;
7847 
7848 	e_crit(drv, "%s\n", ixgbe_overheat_msg);
7849 }
7850 
7851 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7852 {
7853 	if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7854 		return;
7855 
7856 	rtnl_lock();
7857 	/* If we're already down, removing or resetting, just bail */
7858 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7859 	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
7860 	    test_bit(__IXGBE_RESETTING, &adapter->state)) {
7861 		rtnl_unlock();
7862 		return;
7863 	}
7864 
7865 	ixgbe_dump(adapter);
7866 	netdev_err(adapter->netdev, "Reset adapter\n");
7867 	adapter->tx_timeout_count++;
7868 
7869 	ixgbe_reinit_locked(adapter);
7870 	rtnl_unlock();
7871 }
7872 
7873 /**
7874  * ixgbe_check_fw_error - Check firmware for errors
7875  * @adapter: the adapter private structure
7876  *
7877  * Check firmware errors in register FWSM
7878  */
7879 static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7880 {
7881 	struct ixgbe_hw *hw = &adapter->hw;
7882 	u32 fwsm;
7883 
7884 	/* read fwsm.ext_err_ind register and log errors */
7885 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7886 
7887 	if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7888 	    !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7889 		e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
7890 			   fwsm);
7891 
7892 	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7893 		e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7894 		return true;
7895 	}
7896 
7897 	return false;
7898 }
7899 
7900 /**
7901  * ixgbe_service_task - manages and runs subtasks
7902  * @work: pointer to work_struct containing our data
7903  **/
7904 static void ixgbe_service_task(struct work_struct *work)
7905 {
7906 	struct ixgbe_adapter *adapter = container_of(work,
7907 						     struct ixgbe_adapter,
7908 						     service_task);
7909 	if (ixgbe_removed(adapter->hw.hw_addr)) {
7910 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7911 			rtnl_lock();
7912 			ixgbe_down(adapter);
7913 			rtnl_unlock();
7914 		}
7915 		ixgbe_service_event_complete(adapter);
7916 		return;
7917 	}
7918 	if (ixgbe_check_fw_error(adapter)) {
7919 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
7920 			unregister_netdev(adapter->netdev);
7921 		ixgbe_service_event_complete(adapter);
7922 		return;
7923 	}
7924 	ixgbe_reset_subtask(adapter);
7925 	ixgbe_phy_interrupt_subtask(adapter);
7926 	ixgbe_sfp_detection_subtask(adapter);
7927 	ixgbe_sfp_link_config_subtask(adapter);
7928 	ixgbe_check_overtemp_subtask(adapter);
7929 	ixgbe_watchdog_subtask(adapter);
7930 	ixgbe_fdir_reinit_subtask(adapter);
7931 	ixgbe_check_hang_subtask(adapter);
7932 
7933 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7934 		ixgbe_ptp_overflow_check(adapter);
7935 		if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
7936 			ixgbe_ptp_rx_hang(adapter);
7937 		ixgbe_ptp_tx_hang(adapter);
7938 	}
7939 
7940 	ixgbe_service_event_complete(adapter);
7941 }
7942 
7943 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7944 		     struct ixgbe_tx_buffer *first,
7945 		     u8 *hdr_len,
7946 		     struct ixgbe_ipsec_tx_data *itd)
7947 {
7948 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7949 	struct sk_buff *skb = first->skb;
7950 	union {
7951 		struct iphdr *v4;
7952 		struct ipv6hdr *v6;
7953 		unsigned char *hdr;
7954 	} ip;
7955 	union {
7956 		struct tcphdr *tcp;
7957 		struct udphdr *udp;
7958 		unsigned char *hdr;
7959 	} l4;
7960 	u32 paylen, l4_offset;
7961 	u32 fceof_saidx = 0;
7962 	int err;
7963 
7964 	if (skb->ip_summed != CHECKSUM_PARTIAL)
7965 		return 0;
7966 
7967 	if (!skb_is_gso(skb))
7968 		return 0;
7969 
7970 	err = skb_cow_head(skb, 0);
7971 	if (err < 0)
7972 		return err;
7973 
7974 	if (eth_p_mpls(first->protocol))
7975 		ip.hdr = skb_inner_network_header(skb);
7976 	else
7977 		ip.hdr = skb_network_header(skb);
7978 	l4.hdr = skb_checksum_start(skb);
7979 
7980 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
7981 	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
7982 		      IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
7983 
7984 	/* initialize outer IP header fields */
7985 	if (ip.v4->version == 4) {
7986 		unsigned char *csum_start = skb_checksum_start(skb);
7987 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7988 		int len = csum_start - trans_start;
7989 
7990 		/* IP header will have to cancel out any data that
7991 		 * is not a part of the outer IP header, so set to
7992 		 * a reverse csum if needed, else init check to 0.
7993 		 */
7994 		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
7995 					   csum_fold(csum_partial(trans_start,
7996 								  len, 0)) : 0;
7997 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7998 
7999 		ip.v4->tot_len = 0;
8000 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8001 				   IXGBE_TX_FLAGS_CSUM |
8002 				   IXGBE_TX_FLAGS_IPV4;
8003 	} else {
8004 		ip.v6->payload_len = 0;
8005 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8006 				   IXGBE_TX_FLAGS_CSUM;
8007 	}
8008 
8009 	/* determine offset of inner transport header */
8010 	l4_offset = l4.hdr - skb->data;
8011 
8012 	/* remove payload length from inner checksum */
8013 	paylen = skb->len - l4_offset;
8014 
8015 	if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
8016 		/* compute length of segmentation header */
8017 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
8018 		csum_replace_by_diff(&l4.tcp->check,
8019 				     (__force __wsum)htonl(paylen));
8020 	} else {
8021 		/* compute length of segmentation header */
8022 		*hdr_len = sizeof(*l4.udp) + l4_offset;
8023 		csum_replace_by_diff(&l4.udp->check,
8024 				     (__force __wsum)htonl(paylen));
8025 	}
8026 
8027 	/* update gso size and bytecount with header size */
8028 	first->gso_segs = skb_shinfo(skb)->gso_segs;
8029 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
8030 
8031 	/* mss_l4len_id: use 0 as index for TSO */
8032 	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
8033 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
8034 
8035 	fceof_saidx |= itd->sa_idx;
8036 	type_tucmd |= itd->flags | itd->trailer_len;
8037 
8038 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
8039 	vlan_macip_lens = l4.hdr - ip.hdr;
8040 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
8041 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8042 
8043 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8044 			  mss_l4len_idx);
8045 
8046 	return 1;
8047 }
8048 
8049 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8050 			  struct ixgbe_tx_buffer *first,
8051 			  struct ixgbe_ipsec_tx_data *itd)
8052 {
8053 	struct sk_buff *skb = first->skb;
8054 	u32 vlan_macip_lens = 0;
8055 	u32 fceof_saidx = 0;
8056 	u32 type_tucmd = 0;
8057 
8058 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
8059 csum_failed:
8060 		if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
8061 					 IXGBE_TX_FLAGS_CC)))
8062 			return;
8063 		goto no_csum;
8064 	}
8065 
8066 	switch (skb->csum_offset) {
8067 	case offsetof(struct tcphdr, check):
8068 		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8069 		fallthrough;
8070 	case offsetof(struct udphdr, check):
8071 		break;
8072 	case offsetof(struct sctphdr, checksum):
8073 		/* validate that this is actually an SCTP request */
8074 		if (skb_csum_is_sctp(skb)) {
8075 			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8076 			break;
8077 		}
8078 		fallthrough;
8079 	default:
8080 		skb_checksum_help(skb);
8081 		goto csum_failed;
8082 	}
8083 
8084 	/* update TX checksum flag */
8085 	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8086 	vlan_macip_lens = skb_checksum_start_offset(skb) -
8087 			  skb_network_offset(skb);
8088 no_csum:
8089 	/* vlan_macip_lens: MACLEN, VLAN tag */
8090 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
8091 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8092 
8093 	fceof_saidx |= itd->sa_idx;
8094 	type_tucmd |= itd->flags | itd->trailer_len;
8095 
8096 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8097 }
8098 
8099 #define IXGBE_SET_FLAG(_input, _flag, _result) \
8100 	((_flag <= _result) ? \
8101 	 ((u32)(_input & _flag) * (_result / _flag)) : \
8102 	 ((u32)(_input & _flag) / (_flag / _result)))
8103 
8104 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
8105 {
8106 	/* set type for advanced descriptor with frame checksum insertion */
8107 	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8108 		       IXGBE_ADVTXD_DCMD_DEXT |
8109 		       IXGBE_ADVTXD_DCMD_IFCS;
8110 
8111 	/* set HW vlan bit if vlan is present */
8112 	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
8113 				   IXGBE_ADVTXD_DCMD_VLE);
8114 
8115 	/* set segmentation enable bits for TSO/FSO */
8116 	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
8117 				   IXGBE_ADVTXD_DCMD_TSE);
8118 
8119 	/* set timestamp bit if present */
8120 	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
8121 				   IXGBE_ADVTXD_MAC_TSTAMP);
8122 
8123 	/* insert frame checksum */
8124 	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
8125 
8126 	return cmd_type;
8127 }
8128 
8129 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
8130 				   u32 tx_flags, unsigned int paylen)
8131 {
8132 	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
8133 
8134 	/* enable L4 checksum for TSO and TX checksum offload */
8135 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8136 					IXGBE_TX_FLAGS_CSUM,
8137 					IXGBE_ADVTXD_POPTS_TXSM);
8138 
8139 	/* enable IPv4 checksum for TSO */
8140 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8141 					IXGBE_TX_FLAGS_IPV4,
8142 					IXGBE_ADVTXD_POPTS_IXSM);
8143 
8144 	/* enable IPsec */
8145 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8146 					IXGBE_TX_FLAGS_IPSEC,
8147 					IXGBE_ADVTXD_POPTS_IPSEC);
8148 
8149 	/*
8150 	 * Check Context must be set if Tx switch is enabled, which it
8151 	 * always is for case where virtual functions are running
8152 	 */
8153 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8154 					IXGBE_TX_FLAGS_CC,
8155 					IXGBE_ADVTXD_CC);
8156 
8157 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
8158 }
8159 
8160 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8161 {
8162 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8163 
8164 	/* Herbert's original patch had:
8165 	 *  smp_mb__after_netif_stop_queue();
8166 	 * but since that doesn't exist yet, just open code it.
8167 	 */
8168 	smp_mb();
8169 
8170 	/* We need to check again in a case another CPU has just
8171 	 * made room available.
8172 	 */
8173 	if (likely(ixgbe_desc_unused(tx_ring) < size))
8174 		return -EBUSY;
8175 
8176 	/* A reprieve! - use start_queue because it doesn't call schedule */
8177 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
8178 	++tx_ring->tx_stats.restart_queue;
8179 	return 0;
8180 }
8181 
8182 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8183 {
8184 	if (likely(ixgbe_desc_unused(tx_ring) >= size))
8185 		return 0;
8186 
8187 	return __ixgbe_maybe_stop_tx(tx_ring, size);
8188 }
8189 
8190 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8191 			struct ixgbe_tx_buffer *first,
8192 			const u8 hdr_len)
8193 {
8194 	struct sk_buff *skb = first->skb;
8195 	struct ixgbe_tx_buffer *tx_buffer;
8196 	union ixgbe_adv_tx_desc *tx_desc;
8197 	skb_frag_t *frag;
8198 	dma_addr_t dma;
8199 	unsigned int data_len, size;
8200 	u32 tx_flags = first->tx_flags;
8201 	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
8202 	u16 i = tx_ring->next_to_use;
8203 
8204 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
8205 
8206 	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8207 
8208 	size = skb_headlen(skb);
8209 	data_len = skb->data_len;
8210 
8211 #ifdef IXGBE_FCOE
8212 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8213 		if (data_len < sizeof(struct fcoe_crc_eof)) {
8214 			size -= sizeof(struct fcoe_crc_eof) - data_len;
8215 			data_len = 0;
8216 		} else {
8217 			data_len -= sizeof(struct fcoe_crc_eof);
8218 		}
8219 	}
8220 
8221 #endif
8222 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8223 
8224 	tx_buffer = first;
8225 
8226 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8227 		if (dma_mapping_error(tx_ring->dev, dma))
8228 			goto dma_error;
8229 
8230 		/* record length, and DMA address */
8231 		dma_unmap_len_set(tx_buffer, len, size);
8232 		dma_unmap_addr_set(tx_buffer, dma, dma);
8233 
8234 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
8235 
8236 		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8237 			tx_desc->read.cmd_type_len =
8238 				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8239 
8240 			i++;
8241 			tx_desc++;
8242 			if (i == tx_ring->count) {
8243 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8244 				i = 0;
8245 			}
8246 			tx_desc->read.olinfo_status = 0;
8247 
8248 			dma += IXGBE_MAX_DATA_PER_TXD;
8249 			size -= IXGBE_MAX_DATA_PER_TXD;
8250 
8251 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
8252 		}
8253 
8254 		if (likely(!data_len))
8255 			break;
8256 
8257 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8258 
8259 		i++;
8260 		tx_desc++;
8261 		if (i == tx_ring->count) {
8262 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8263 			i = 0;
8264 		}
8265 		tx_desc->read.olinfo_status = 0;
8266 
8267 #ifdef IXGBE_FCOE
8268 		size = min_t(unsigned int, data_len, skb_frag_size(frag));
8269 #else
8270 		size = skb_frag_size(frag);
8271 #endif
8272 		data_len -= size;
8273 
8274 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8275 				       DMA_TO_DEVICE);
8276 
8277 		tx_buffer = &tx_ring->tx_buffer_info[i];
8278 	}
8279 
8280 	/* write last descriptor with RS and EOP bits */
8281 	cmd_type |= size | IXGBE_TXD_CMD;
8282 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8283 
8284 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8285 
8286 	/* set the timestamp */
8287 	first->time_stamp = jiffies;
8288 
8289 	skb_tx_timestamp(skb);
8290 
8291 	/*
8292 	 * Force memory writes to complete before letting h/w know there
8293 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
8294 	 * memory model archs, such as IA-64).
8295 	 *
8296 	 * We also need this memory barrier to make certain all of the
8297 	 * status bits have been updated before next_to_watch is written.
8298 	 */
8299 	wmb();
8300 
8301 	/* set next_to_watch value indicating a packet is present */
8302 	first->next_to_watch = tx_desc;
8303 
8304 	i++;
8305 	if (i == tx_ring->count)
8306 		i = 0;
8307 
8308 	tx_ring->next_to_use = i;
8309 
8310 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8311 
8312 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8313 		writel(i, tx_ring->tail);
8314 	}
8315 
8316 	return 0;
8317 dma_error:
8318 	dev_err(tx_ring->dev, "TX DMA map failed\n");
8319 
8320 	/* clear dma mappings for failed tx_buffer_info map */
8321 	for (;;) {
8322 		tx_buffer = &tx_ring->tx_buffer_info[i];
8323 		if (dma_unmap_len(tx_buffer, len))
8324 			dma_unmap_page(tx_ring->dev,
8325 				       dma_unmap_addr(tx_buffer, dma),
8326 				       dma_unmap_len(tx_buffer, len),
8327 				       DMA_TO_DEVICE);
8328 		dma_unmap_len_set(tx_buffer, len, 0);
8329 		if (tx_buffer == first)
8330 			break;
8331 		if (i == 0)
8332 			i += tx_ring->count;
8333 		i--;
8334 	}
8335 
8336 	dev_kfree_skb_any(first->skb);
8337 	first->skb = NULL;
8338 
8339 	tx_ring->next_to_use = i;
8340 
8341 	return -1;
8342 }
8343 
8344 static void ixgbe_atr(struct ixgbe_ring *ring,
8345 		      struct ixgbe_tx_buffer *first)
8346 {
8347 	struct ixgbe_q_vector *q_vector = ring->q_vector;
8348 	union ixgbe_atr_hash_dword input = { .dword = 0 };
8349 	union ixgbe_atr_hash_dword common = { .dword = 0 };
8350 	union {
8351 		unsigned char *network;
8352 		struct iphdr *ipv4;
8353 		struct ipv6hdr *ipv6;
8354 	} hdr;
8355 	struct tcphdr *th;
8356 	unsigned int hlen;
8357 	struct sk_buff *skb;
8358 	__be16 vlan_id;
8359 	int l4_proto;
8360 
8361 	/* if ring doesn't have a interrupt vector, cannot perform ATR */
8362 	if (!q_vector)
8363 		return;
8364 
8365 	/* do nothing if sampling is disabled */
8366 	if (!ring->atr_sample_rate)
8367 		return;
8368 
8369 	ring->atr_count++;
8370 
8371 	/* currently only IPv4/IPv6 with TCP is supported */
8372 	if ((first->protocol != htons(ETH_P_IP)) &&
8373 	    (first->protocol != htons(ETH_P_IPV6)))
8374 		return;
8375 
8376 	/* snag network header to get L4 type and address */
8377 	skb = first->skb;
8378 	hdr.network = skb_network_header(skb);
8379 	if (unlikely(hdr.network <= skb->data))
8380 		return;
8381 	if (skb->encapsulation &&
8382 	    first->protocol == htons(ETH_P_IP) &&
8383 	    hdr.ipv4->protocol == IPPROTO_UDP) {
8384 		struct ixgbe_adapter *adapter = q_vector->adapter;
8385 
8386 		if (unlikely(skb_tail_pointer(skb) < hdr.network +
8387 			     VXLAN_HEADROOM))
8388 			return;
8389 
8390 		/* verify the port is recognized as VXLAN */
8391 		if (adapter->vxlan_port &&
8392 		    udp_hdr(skb)->dest == adapter->vxlan_port)
8393 			hdr.network = skb_inner_network_header(skb);
8394 
8395 		if (adapter->geneve_port &&
8396 		    udp_hdr(skb)->dest == adapter->geneve_port)
8397 			hdr.network = skb_inner_network_header(skb);
8398 	}
8399 
8400 	/* Make sure we have at least [minimum IPv4 header + TCP]
8401 	 * or [IPv6 header] bytes
8402 	 */
8403 	if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8404 		return;
8405 
8406 	/* Currently only IPv4/IPv6 with TCP is supported */
8407 	switch (hdr.ipv4->version) {
8408 	case IPVERSION:
8409 		/* access ihl as u8 to avoid unaligned access on ia64 */
8410 		hlen = (hdr.network[0] & 0x0F) << 2;
8411 		l4_proto = hdr.ipv4->protocol;
8412 		break;
8413 	case 6:
8414 		hlen = hdr.network - skb->data;
8415 		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8416 		hlen -= hdr.network - skb->data;
8417 		break;
8418 	default:
8419 		return;
8420 	}
8421 
8422 	if (l4_proto != IPPROTO_TCP)
8423 		return;
8424 
8425 	if (unlikely(skb_tail_pointer(skb) < hdr.network +
8426 		     hlen + sizeof(struct tcphdr)))
8427 		return;
8428 
8429 	th = (struct tcphdr *)(hdr.network + hlen);
8430 
8431 	/* skip this packet since the socket is closing */
8432 	if (th->fin)
8433 		return;
8434 
8435 	/* sample on all syn packets or once every atr sample count */
8436 	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8437 		return;
8438 
8439 	/* reset sample count */
8440 	ring->atr_count = 0;
8441 
8442 	vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8443 
8444 	/*
8445 	 * src and dst are inverted, think how the receiver sees them
8446 	 *
8447 	 * The input is broken into two sections, a non-compressed section
8448 	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
8449 	 * is XORed together and stored in the compressed dword.
8450 	 */
8451 	input.formatted.vlan_id = vlan_id;
8452 
8453 	/*
8454 	 * since src port and flex bytes occupy the same word XOR them together
8455 	 * and write the value to source port portion of compressed dword
8456 	 */
8457 	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8458 		common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8459 	else
8460 		common.port.src ^= th->dest ^ first->protocol;
8461 	common.port.dst ^= th->source;
8462 
8463 	switch (hdr.ipv4->version) {
8464 	case IPVERSION:
8465 		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8466 		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8467 		break;
8468 	case 6:
8469 		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8470 		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8471 			     hdr.ipv6->saddr.s6_addr32[1] ^
8472 			     hdr.ipv6->saddr.s6_addr32[2] ^
8473 			     hdr.ipv6->saddr.s6_addr32[3] ^
8474 			     hdr.ipv6->daddr.s6_addr32[0] ^
8475 			     hdr.ipv6->daddr.s6_addr32[1] ^
8476 			     hdr.ipv6->daddr.s6_addr32[2] ^
8477 			     hdr.ipv6->daddr.s6_addr32[3];
8478 		break;
8479 	default:
8480 		break;
8481 	}
8482 
8483 	if (hdr.network != skb_network_header(skb))
8484 		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8485 
8486 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
8487 	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8488 					      input, common, ring->queue_index);
8489 }
8490 
8491 #ifdef IXGBE_FCOE
8492 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8493 			      struct net_device *sb_dev)
8494 {
8495 	struct ixgbe_adapter *adapter;
8496 	struct ixgbe_ring_feature *f;
8497 	int txq;
8498 
8499 	if (sb_dev) {
8500 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8501 		struct net_device *vdev = sb_dev;
8502 
8503 		txq = vdev->tc_to_txq[tc].offset;
8504 		txq += reciprocal_scale(skb_get_hash(skb),
8505 					vdev->tc_to_txq[tc].count);
8506 
8507 		return txq;
8508 	}
8509 
8510 	/*
8511 	 * only execute the code below if protocol is FCoE
8512 	 * or FIP and we have FCoE enabled on the adapter
8513 	 */
8514 	switch (vlan_get_protocol(skb)) {
8515 	case htons(ETH_P_FCOE):
8516 	case htons(ETH_P_FIP):
8517 		adapter = netdev_priv(dev);
8518 
8519 		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8520 			break;
8521 		fallthrough;
8522 	default:
8523 		return netdev_pick_tx(dev, skb, sb_dev);
8524 	}
8525 
8526 	f = &adapter->ring_feature[RING_F_FCOE];
8527 
8528 	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8529 					   smp_processor_id();
8530 
8531 	while (txq >= f->indices)
8532 		txq -= f->indices;
8533 
8534 	return txq + f->offset;
8535 }
8536 
8537 #endif
8538 int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8539 			struct xdp_frame *xdpf)
8540 {
8541 	struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8542 	struct ixgbe_tx_buffer *tx_buffer;
8543 	union ixgbe_adv_tx_desc *tx_desc;
8544 	u32 len, cmd_type;
8545 	dma_addr_t dma;
8546 	u16 i;
8547 
8548 	len = xdpf->len;
8549 
8550 	if (unlikely(!ixgbe_desc_unused(ring)))
8551 		return IXGBE_XDP_CONSUMED;
8552 
8553 	dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
8554 	if (dma_mapping_error(ring->dev, dma))
8555 		return IXGBE_XDP_CONSUMED;
8556 
8557 	/* record the location of the first descriptor for this packet */
8558 	tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8559 	tx_buffer->bytecount = len;
8560 	tx_buffer->gso_segs = 1;
8561 	tx_buffer->protocol = 0;
8562 
8563 	i = ring->next_to_use;
8564 	tx_desc = IXGBE_TX_DESC(ring, i);
8565 
8566 	dma_unmap_len_set(tx_buffer, len, len);
8567 	dma_unmap_addr_set(tx_buffer, dma, dma);
8568 	tx_buffer->xdpf = xdpf;
8569 
8570 	tx_desc->read.buffer_addr = cpu_to_le64(dma);
8571 
8572 	/* put descriptor type bits */
8573 	cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8574 		   IXGBE_ADVTXD_DCMD_DEXT |
8575 		   IXGBE_ADVTXD_DCMD_IFCS;
8576 	cmd_type |= len | IXGBE_TXD_CMD;
8577 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8578 	tx_desc->read.olinfo_status =
8579 		cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8580 
8581 	/* Avoid any potential race with xdp_xmit and cleanup */
8582 	smp_wmb();
8583 
8584 	/* set next_to_watch value indicating a packet is present */
8585 	i++;
8586 	if (i == ring->count)
8587 		i = 0;
8588 
8589 	tx_buffer->next_to_watch = tx_desc;
8590 	ring->next_to_use = i;
8591 
8592 	return IXGBE_XDP_TX;
8593 }
8594 
8595 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8596 			  struct ixgbe_adapter *adapter,
8597 			  struct ixgbe_ring *tx_ring)
8598 {
8599 	struct ixgbe_tx_buffer *first;
8600 	int tso;
8601 	u32 tx_flags = 0;
8602 	unsigned short f;
8603 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
8604 	struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8605 	__be16 protocol = skb->protocol;
8606 	u8 hdr_len = 0;
8607 
8608 	/*
8609 	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
8610 	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
8611 	 *       + 2 desc gap to keep tail from touching head,
8612 	 *       + 1 desc for context descriptor,
8613 	 * otherwise try next time
8614 	 */
8615 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8616 		count += TXD_USE_COUNT(skb_frag_size(
8617 						&skb_shinfo(skb)->frags[f]));
8618 
8619 	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8620 		tx_ring->tx_stats.tx_busy++;
8621 		return NETDEV_TX_BUSY;
8622 	}
8623 
8624 	/* record the location of the first descriptor for this packet */
8625 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8626 	first->skb = skb;
8627 	first->bytecount = skb->len;
8628 	first->gso_segs = 1;
8629 
8630 	/* if we have a HW VLAN tag being added default to the HW one */
8631 	if (skb_vlan_tag_present(skb)) {
8632 		tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8633 		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8634 	/* else if it is a SW VLAN check the next protocol and store the tag */
8635 	} else if (protocol == htons(ETH_P_8021Q)) {
8636 		struct vlan_hdr *vhdr, _vhdr;
8637 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8638 		if (!vhdr)
8639 			goto out_drop;
8640 
8641 		tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8642 				  IXGBE_TX_FLAGS_VLAN_SHIFT;
8643 		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8644 	}
8645 	protocol = vlan_get_protocol(skb);
8646 
8647 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8648 	    adapter->ptp_clock) {
8649 		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
8650 		    !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8651 					   &adapter->state)) {
8652 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8653 			tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8654 
8655 			/* schedule check for Tx timestamp */
8656 			adapter->ptp_tx_skb = skb_get(skb);
8657 			adapter->ptp_tx_start = jiffies;
8658 			schedule_work(&adapter->ptp_tx_work);
8659 		} else {
8660 			adapter->tx_hwtstamp_skipped++;
8661 		}
8662 	}
8663 
8664 #ifdef CONFIG_PCI_IOV
8665 	/*
8666 	 * Use the l2switch_enable flag - would be false if the DMA
8667 	 * Tx switch had been disabled.
8668 	 */
8669 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8670 		tx_flags |= IXGBE_TX_FLAGS_CC;
8671 
8672 #endif
8673 	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
8674 	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8675 	    ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8676 	     (skb->priority != TC_PRIO_CONTROL))) {
8677 		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8678 		tx_flags |= (skb->priority & 0x7) <<
8679 					IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8680 		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8681 			struct vlan_ethhdr *vhdr;
8682 
8683 			if (skb_cow_head(skb, 0))
8684 				goto out_drop;
8685 			vhdr = (struct vlan_ethhdr *)skb->data;
8686 			vhdr->h_vlan_TCI = htons(tx_flags >>
8687 						 IXGBE_TX_FLAGS_VLAN_SHIFT);
8688 		} else {
8689 			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8690 		}
8691 	}
8692 
8693 	/* record initial flags and protocol */
8694 	first->tx_flags = tx_flags;
8695 	first->protocol = protocol;
8696 
8697 #ifdef IXGBE_FCOE
8698 	/* setup tx offload for FCoE */
8699 	if ((protocol == htons(ETH_P_FCOE)) &&
8700 	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8701 		tso = ixgbe_fso(tx_ring, first, &hdr_len);
8702 		if (tso < 0)
8703 			goto out_drop;
8704 
8705 		goto xmit_fcoe;
8706 	}
8707 
8708 #endif /* IXGBE_FCOE */
8709 
8710 #ifdef CONFIG_IXGBE_IPSEC
8711 	if (xfrm_offload(skb) &&
8712 	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8713 		goto out_drop;
8714 #endif
8715 	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8716 	if (tso < 0)
8717 		goto out_drop;
8718 	else if (!tso)
8719 		ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8720 
8721 	/* add the ATR filter if ATR is on */
8722 	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8723 		ixgbe_atr(tx_ring, first);
8724 
8725 #ifdef IXGBE_FCOE
8726 xmit_fcoe:
8727 #endif /* IXGBE_FCOE */
8728 	if (ixgbe_tx_map(tx_ring, first, hdr_len))
8729 		goto cleanup_tx_timestamp;
8730 
8731 	return NETDEV_TX_OK;
8732 
8733 out_drop:
8734 	dev_kfree_skb_any(first->skb);
8735 	first->skb = NULL;
8736 cleanup_tx_timestamp:
8737 	if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8738 		dev_kfree_skb_any(adapter->ptp_tx_skb);
8739 		adapter->ptp_tx_skb = NULL;
8740 		cancel_work_sync(&adapter->ptp_tx_work);
8741 		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8742 	}
8743 
8744 	return NETDEV_TX_OK;
8745 }
8746 
8747 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8748 				      struct net_device *netdev,
8749 				      struct ixgbe_ring *ring)
8750 {
8751 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8752 	struct ixgbe_ring *tx_ring;
8753 
8754 	/*
8755 	 * The minimum packet size for olinfo paylen is 17 so pad the skb
8756 	 * in order to meet this minimum size requirement.
8757 	 */
8758 	if (skb_put_padto(skb, 17))
8759 		return NETDEV_TX_OK;
8760 
8761 	tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8762 	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8763 		return NETDEV_TX_BUSY;
8764 
8765 	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8766 }
8767 
8768 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8769 				    struct net_device *netdev)
8770 {
8771 	return __ixgbe_xmit_frame(skb, netdev, NULL);
8772 }
8773 
8774 /**
8775  * ixgbe_set_mac - Change the Ethernet Address of the NIC
8776  * @netdev: network interface device structure
8777  * @p: pointer to an address structure
8778  *
8779  * Returns 0 on success, negative on failure
8780  **/
8781 static int ixgbe_set_mac(struct net_device *netdev, void *p)
8782 {
8783 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8784 	struct ixgbe_hw *hw = &adapter->hw;
8785 	struct sockaddr *addr = p;
8786 
8787 	if (!is_valid_ether_addr(addr->sa_data))
8788 		return -EADDRNOTAVAIL;
8789 
8790 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8791 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8792 
8793 	ixgbe_mac_set_default_filter(adapter);
8794 
8795 	return 0;
8796 }
8797 
8798 static int
8799 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8800 {
8801 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8802 	struct ixgbe_hw *hw = &adapter->hw;
8803 	u16 value;
8804 	int rc;
8805 
8806 	if (adapter->mii_bus) {
8807 		int regnum = addr;
8808 
8809 		if (devad != MDIO_DEVAD_NONE)
8810 			regnum |= (devad << 16) | MII_ADDR_C45;
8811 
8812 		return mdiobus_read(adapter->mii_bus, prtad, regnum);
8813 	}
8814 
8815 	if (prtad != hw->phy.mdio.prtad)
8816 		return -EINVAL;
8817 	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8818 	if (!rc)
8819 		rc = value;
8820 	return rc;
8821 }
8822 
8823 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8824 			    u16 addr, u16 value)
8825 {
8826 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8827 	struct ixgbe_hw *hw = &adapter->hw;
8828 
8829 	if (adapter->mii_bus) {
8830 		int regnum = addr;
8831 
8832 		if (devad != MDIO_DEVAD_NONE)
8833 			regnum |= (devad << 16) | MII_ADDR_C45;
8834 
8835 		return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8836 	}
8837 
8838 	if (prtad != hw->phy.mdio.prtad)
8839 		return -EINVAL;
8840 	return hw->phy.ops.write_reg(hw, addr, devad, value);
8841 }
8842 
8843 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8844 {
8845 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8846 
8847 	switch (cmd) {
8848 	case SIOCSHWTSTAMP:
8849 		return ixgbe_ptp_set_ts_config(adapter, req);
8850 	case SIOCGHWTSTAMP:
8851 		return ixgbe_ptp_get_ts_config(adapter, req);
8852 	case SIOCGMIIPHY:
8853 		if (!adapter->hw.phy.ops.read_reg)
8854 			return -EOPNOTSUPP;
8855 		fallthrough;
8856 	default:
8857 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8858 	}
8859 }
8860 
8861 /**
8862  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
8863  * netdev->dev_addrs
8864  * @dev: network interface device structure
8865  *
8866  * Returns non-zero on failure
8867  **/
8868 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8869 {
8870 	int err = 0;
8871 	struct ixgbe_adapter *adapter = netdev_priv(dev);
8872 	struct ixgbe_hw *hw = &adapter->hw;
8873 
8874 	if (is_valid_ether_addr(hw->mac.san_addr)) {
8875 		rtnl_lock();
8876 		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8877 		rtnl_unlock();
8878 
8879 		/* update SAN MAC vmdq pool selection */
8880 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8881 	}
8882 	return err;
8883 }
8884 
8885 /**
8886  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
8887  * netdev->dev_addrs
8888  * @dev: network interface device structure
8889  *
8890  * Returns non-zero on failure
8891  **/
8892 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8893 {
8894 	int err = 0;
8895 	struct ixgbe_adapter *adapter = netdev_priv(dev);
8896 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
8897 
8898 	if (is_valid_ether_addr(mac->san_addr)) {
8899 		rtnl_lock();
8900 		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8901 		rtnl_unlock();
8902 	}
8903 	return err;
8904 }
8905 
8906 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8907 				   struct ixgbe_ring *ring)
8908 {
8909 	u64 bytes, packets;
8910 	unsigned int start;
8911 
8912 	if (ring) {
8913 		do {
8914 			start = u64_stats_fetch_begin_irq(&ring->syncp);
8915 			packets = ring->stats.packets;
8916 			bytes   = ring->stats.bytes;
8917 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8918 		stats->tx_packets += packets;
8919 		stats->tx_bytes   += bytes;
8920 	}
8921 }
8922 
8923 static void ixgbe_get_stats64(struct net_device *netdev,
8924 			      struct rtnl_link_stats64 *stats)
8925 {
8926 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8927 	int i;
8928 
8929 	rcu_read_lock();
8930 	for (i = 0; i < adapter->num_rx_queues; i++) {
8931 		struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8932 		u64 bytes, packets;
8933 		unsigned int start;
8934 
8935 		if (ring) {
8936 			do {
8937 				start = u64_stats_fetch_begin_irq(&ring->syncp);
8938 				packets = ring->stats.packets;
8939 				bytes   = ring->stats.bytes;
8940 			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8941 			stats->rx_packets += packets;
8942 			stats->rx_bytes   += bytes;
8943 		}
8944 	}
8945 
8946 	for (i = 0; i < adapter->num_tx_queues; i++) {
8947 		struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8948 
8949 		ixgbe_get_ring_stats64(stats, ring);
8950 	}
8951 	for (i = 0; i < adapter->num_xdp_queues; i++) {
8952 		struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8953 
8954 		ixgbe_get_ring_stats64(stats, ring);
8955 	}
8956 	rcu_read_unlock();
8957 
8958 	/* following stats updated by ixgbe_watchdog_task() */
8959 	stats->multicast	= netdev->stats.multicast;
8960 	stats->rx_errors	= netdev->stats.rx_errors;
8961 	stats->rx_length_errors	= netdev->stats.rx_length_errors;
8962 	stats->rx_crc_errors	= netdev->stats.rx_crc_errors;
8963 	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;
8964 }
8965 
8966 #ifdef CONFIG_IXGBE_DCB
8967 /**
8968  * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
8969  * @adapter: pointer to ixgbe_adapter
8970  * @tc: number of traffic classes currently enabled
8971  *
8972  * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
8973  * 802.1Q priority maps to a packet buffer that exists.
8974  */
8975 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8976 {
8977 	struct ixgbe_hw *hw = &adapter->hw;
8978 	u32 reg, rsave;
8979 	int i;
8980 
8981 	/* 82598 have a static priority to TC mapping that can not
8982 	 * be changed so no validation is needed.
8983 	 */
8984 	if (hw->mac.type == ixgbe_mac_82598EB)
8985 		return;
8986 
8987 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8988 	rsave = reg;
8989 
8990 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8991 		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8992 
8993 		/* If up2tc is out of bounds default to zero */
8994 		if (up2tc > tc)
8995 			reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8996 	}
8997 
8998 	if (reg != rsave)
8999 		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
9000 
9001 	return;
9002 }
9003 
9004 /**
9005  * ixgbe_set_prio_tc_map - Configure netdev prio tc map
9006  * @adapter: Pointer to adapter struct
9007  *
9008  * Populate the netdev user priority to tc map
9009  */
9010 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
9011 {
9012 	struct net_device *dev = adapter->netdev;
9013 	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
9014 	struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
9015 	u8 prio;
9016 
9017 	for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
9018 		u8 tc = 0;
9019 
9020 		if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
9021 			tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
9022 		else if (ets)
9023 			tc = ets->prio_tc[prio];
9024 
9025 		netdev_set_prio_tc_map(dev, prio, tc);
9026 	}
9027 }
9028 
9029 #endif /* CONFIG_IXGBE_DCB */
9030 static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
9031 				       struct netdev_nested_priv *priv)
9032 {
9033 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
9034 	struct ixgbe_fwd_adapter *accel;
9035 	int pool;
9036 
9037 	/* we only care about macvlans... */
9038 	if (!netif_is_macvlan(vdev))
9039 		return 0;
9040 
9041 	/* that have hardware offload enabled... */
9042 	accel = macvlan_accel_priv(vdev);
9043 	if (!accel)
9044 		return 0;
9045 
9046 	/* If we can relocate to a different bit do so */
9047 	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9048 	if (pool < adapter->num_rx_pools) {
9049 		set_bit(pool, adapter->fwd_bitmask);
9050 		accel->pool = pool;
9051 		return 0;
9052 	}
9053 
9054 	/* if we cannot find a free pool then disable the offload */
9055 	netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
9056 	macvlan_release_l2fw_offload(vdev);
9057 
9058 	/* unbind the queues and drop the subordinate channel config */
9059 	netdev_unbind_sb_channel(adapter->netdev, vdev);
9060 	netdev_set_sb_channel(vdev, 0);
9061 
9062 	kfree(accel);
9063 
9064 	return 0;
9065 }
9066 
9067 static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
9068 {
9069 	struct ixgbe_adapter *adapter = netdev_priv(dev);
9070 	struct netdev_nested_priv priv = {
9071 		.data = (void *)adapter,
9072 	};
9073 
9074 	/* flush any stale bits out of the fwd bitmask */
9075 	bitmap_clear(adapter->fwd_bitmask, 1, 63);
9076 
9077 	/* walk through upper devices reassigning pools */
9078 	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9079 				      &priv);
9080 }
9081 
9082 /**
9083  * ixgbe_setup_tc - configure net_device for multiple traffic classes
9084  *
9085  * @dev: net device to configure
9086  * @tc: number of traffic classes to enable
9087  */
9088 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
9089 {
9090 	struct ixgbe_adapter *adapter = netdev_priv(dev);
9091 	struct ixgbe_hw *hw = &adapter->hw;
9092 
9093 	/* Hardware supports up to 8 traffic classes */
9094 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
9095 		return -EINVAL;
9096 
9097 	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
9098 		return -EINVAL;
9099 
9100 	/* Hardware has to reinitialize queues and interrupts to
9101 	 * match packet buffer alignment. Unfortunately, the
9102 	 * hardware is not flexible enough to do this dynamically.
9103 	 */
9104 	if (netif_running(dev))
9105 		ixgbe_close(dev);
9106 	else
9107 		ixgbe_reset(adapter);
9108 
9109 	ixgbe_clear_interrupt_scheme(adapter);
9110 
9111 #ifdef CONFIG_IXGBE_DCB
9112 	if (tc) {
9113 		if (adapter->xdp_prog) {
9114 			e_warn(probe, "DCB is not supported with XDP\n");
9115 
9116 			ixgbe_init_interrupt_scheme(adapter);
9117 			if (netif_running(dev))
9118 				ixgbe_open(dev);
9119 			return -EINVAL;
9120 		}
9121 
9122 		netdev_set_num_tc(dev, tc);
9123 		ixgbe_set_prio_tc_map(adapter);
9124 
9125 		adapter->hw_tcs = tc;
9126 		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
9127 
9128 		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
9129 			adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
9130 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
9131 		}
9132 	} else {
9133 		netdev_reset_tc(dev);
9134 
9135 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9136 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
9137 
9138 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
9139 		adapter->hw_tcs = tc;
9140 
9141 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
9142 		adapter->dcb_cfg.pfc_mode_enable = false;
9143 	}
9144 
9145 	ixgbe_validate_rtr(adapter, tc);
9146 
9147 #endif /* CONFIG_IXGBE_DCB */
9148 	ixgbe_init_interrupt_scheme(adapter);
9149 
9150 	ixgbe_defrag_macvlan_pools(dev);
9151 
9152 	if (netif_running(dev))
9153 		return ixgbe_open(dev);
9154 
9155 	return 0;
9156 }
9157 
9158 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
9159 			       struct tc_cls_u32_offload *cls)
9160 {
9161 	u32 hdl = cls->knode.handle;
9162 	u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
9163 	u32 loc = cls->knode.handle & 0xfffff;
9164 	int err = 0, i, j;
9165 	struct ixgbe_jump_table *jump = NULL;
9166 
9167 	if (loc > IXGBE_MAX_HW_ENTRIES)
9168 		return -EINVAL;
9169 
9170 	if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
9171 		return -EINVAL;
9172 
9173 	/* Clear this filter in the link data it is associated with */
9174 	if (uhtid != 0x800) {
9175 		jump = adapter->jump_tables[uhtid];
9176 		if (!jump)
9177 			return -EINVAL;
9178 		if (!test_bit(loc - 1, jump->child_loc_map))
9179 			return -EINVAL;
9180 		clear_bit(loc - 1, jump->child_loc_map);
9181 	}
9182 
9183 	/* Check if the filter being deleted is a link */
9184 	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9185 		jump = adapter->jump_tables[i];
9186 		if (jump && jump->link_hdl == hdl) {
9187 			/* Delete filters in the hardware in the child hash
9188 			 * table associated with this link
9189 			 */
9190 			for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
9191 				if (!test_bit(j, jump->child_loc_map))
9192 					continue;
9193 				spin_lock(&adapter->fdir_perfect_lock);
9194 				err = ixgbe_update_ethtool_fdir_entry(adapter,
9195 								      NULL,
9196 								      j + 1);
9197 				spin_unlock(&adapter->fdir_perfect_lock);
9198 				clear_bit(j, jump->child_loc_map);
9199 			}
9200 			/* Remove resources for this link */
9201 			kfree(jump->input);
9202 			kfree(jump->mask);
9203 			kfree(jump);
9204 			adapter->jump_tables[i] = NULL;
9205 			return err;
9206 		}
9207 	}
9208 
9209 	spin_lock(&adapter->fdir_perfect_lock);
9210 	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
9211 	spin_unlock(&adapter->fdir_perfect_lock);
9212 	return err;
9213 }
9214 
9215 static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
9216 					    struct tc_cls_u32_offload *cls)
9217 {
9218 	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9219 
9220 	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9221 		return -EINVAL;
9222 
9223 	/* This ixgbe devices do not support hash tables at the moment
9224 	 * so abort when given hash tables.
9225 	 */
9226 	if (cls->hnode.divisor > 0)
9227 		return -EINVAL;
9228 
9229 	set_bit(uhtid - 1, &adapter->tables);
9230 	return 0;
9231 }
9232 
9233 static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
9234 					    struct tc_cls_u32_offload *cls)
9235 {
9236 	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9237 
9238 	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9239 		return -EINVAL;
9240 
9241 	clear_bit(uhtid - 1, &adapter->tables);
9242 	return 0;
9243 }
9244 
9245 #ifdef CONFIG_NET_CLS_ACT
9246 struct upper_walk_data {
9247 	struct ixgbe_adapter *adapter;
9248 	u64 action;
9249 	int ifindex;
9250 	u8 queue;
9251 };
9252 
9253 static int get_macvlan_queue(struct net_device *upper,
9254 			     struct netdev_nested_priv *priv)
9255 {
9256 	if (netif_is_macvlan(upper)) {
9257 		struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9258 		struct ixgbe_adapter *adapter;
9259 		struct upper_walk_data *data;
9260 		int ifindex;
9261 
9262 		data = (struct upper_walk_data *)priv->data;
9263 		ifindex = data->ifindex;
9264 		adapter = data->adapter;
9265 		if (vadapter && upper->ifindex == ifindex) {
9266 			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9267 			data->action = data->queue;
9268 			return 1;
9269 		}
9270 	}
9271 
9272 	return 0;
9273 }
9274 
9275 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9276 				  u8 *queue, u64 *action)
9277 {
9278 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9279 	unsigned int num_vfs = adapter->num_vfs, vf;
9280 	struct netdev_nested_priv priv;
9281 	struct upper_walk_data data;
9282 	struct net_device *upper;
9283 
9284 	/* redirect to a SRIOV VF */
9285 	for (vf = 0; vf < num_vfs; ++vf) {
9286 		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9287 		if (upper->ifindex == ifindex) {
9288 			*queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9289 			*action = vf + 1;
9290 			*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9291 			return 0;
9292 		}
9293 	}
9294 
9295 	/* redirect to a offloaded macvlan netdev */
9296 	data.adapter = adapter;
9297 	data.ifindex = ifindex;
9298 	data.action = 0;
9299 	data.queue = 0;
9300 	priv.data = (void *)&data;
9301 	if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9302 					  get_macvlan_queue, &priv)) {
9303 		*action = data.action;
9304 		*queue = data.queue;
9305 
9306 		return 0;
9307 	}
9308 
9309 	return -EINVAL;
9310 }
9311 
9312 static int parse_tc_actions(struct ixgbe_adapter *adapter,
9313 			    struct tcf_exts *exts, u64 *action, u8 *queue)
9314 {
9315 	const struct tc_action *a;
9316 	int i;
9317 
9318 	if (!tcf_exts_has_actions(exts))
9319 		return -EINVAL;
9320 
9321 	tcf_exts_for_each_action(i, a, exts) {
9322 		/* Drop action */
9323 		if (is_tcf_gact_shot(a)) {
9324 			*action = IXGBE_FDIR_DROP_QUEUE;
9325 			*queue = IXGBE_FDIR_DROP_QUEUE;
9326 			return 0;
9327 		}
9328 
9329 		/* Redirect to a VF or a offloaded macvlan */
9330 		if (is_tcf_mirred_egress_redirect(a)) {
9331 			struct net_device *dev = tcf_mirred_dev(a);
9332 
9333 			if (!dev)
9334 				return -EINVAL;
9335 			return handle_redirect_action(adapter, dev->ifindex,
9336 						      queue, action);
9337 		}
9338 
9339 		return -EINVAL;
9340 	}
9341 
9342 	return -EINVAL;
9343 }
9344 #else
9345 static int parse_tc_actions(struct ixgbe_adapter *adapter,
9346 			    struct tcf_exts *exts, u64 *action, u8 *queue)
9347 {
9348 	return -EINVAL;
9349 }
9350 #endif /* CONFIG_NET_CLS_ACT */
9351 
9352 static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9353 				    union ixgbe_atr_input *mask,
9354 				    struct tc_cls_u32_offload *cls,
9355 				    struct ixgbe_mat_field *field_ptr,
9356 				    struct ixgbe_nexthdr *nexthdr)
9357 {
9358 	int i, j, off;
9359 	__be32 val, m;
9360 	bool found_entry = false, found_jump_field = false;
9361 
9362 	for (i = 0; i < cls->knode.sel->nkeys; i++) {
9363 		off = cls->knode.sel->keys[i].off;
9364 		val = cls->knode.sel->keys[i].val;
9365 		m = cls->knode.sel->keys[i].mask;
9366 
9367 		for (j = 0; field_ptr[j].val; j++) {
9368 			if (field_ptr[j].off == off) {
9369 				field_ptr[j].val(input, mask, (__force u32)val,
9370 						 (__force u32)m);
9371 				input->filter.formatted.flow_type |=
9372 					field_ptr[j].type;
9373 				found_entry = true;
9374 				break;
9375 			}
9376 		}
9377 		if (nexthdr) {
9378 			if (nexthdr->off == cls->knode.sel->keys[i].off &&
9379 			    nexthdr->val ==
9380 			    (__force u32)cls->knode.sel->keys[i].val &&
9381 			    nexthdr->mask ==
9382 			    (__force u32)cls->knode.sel->keys[i].mask)
9383 				found_jump_field = true;
9384 			else
9385 				continue;
9386 		}
9387 	}
9388 
9389 	if (nexthdr && !found_jump_field)
9390 		return -EINVAL;
9391 
9392 	if (!found_entry)
9393 		return 0;
9394 
9395 	mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9396 				    IXGBE_ATR_L4TYPE_MASK;
9397 
9398 	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9399 		mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9400 
9401 	return 0;
9402 }
9403 
9404 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9405 				  struct tc_cls_u32_offload *cls)
9406 {
9407 	__be16 protocol = cls->common.protocol;
9408 	u32 loc = cls->knode.handle & 0xfffff;
9409 	struct ixgbe_hw *hw = &adapter->hw;
9410 	struct ixgbe_mat_field *field_ptr;
9411 	struct ixgbe_fdir_filter *input = NULL;
9412 	union ixgbe_atr_input *mask = NULL;
9413 	struct ixgbe_jump_table *jump = NULL;
9414 	int i, err = -EINVAL;
9415 	u8 queue;
9416 	u32 uhtid, link_uhtid;
9417 
9418 	uhtid = TC_U32_USERHTID(cls->knode.handle);
9419 	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9420 
9421 	/* At the moment cls_u32 jumps to network layer and skips past
9422 	 * L2 headers. The canonical method to match L2 frames is to use
9423 	 * negative values. However this is error prone at best but really
9424 	 * just broken because there is no way to "know" what sort of hdr
9425 	 * is in front of the network layer. Fix cls_u32 to support L2
9426 	 * headers when needed.
9427 	 */
9428 	if (protocol != htons(ETH_P_IP))
9429 		return err;
9430 
9431 	if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9432 		e_err(drv, "Location out of range\n");
9433 		return err;
9434 	}
9435 
9436 	/* cls u32 is a graph starting at root node 0x800. The driver tracks
9437 	 * links and also the fields used to advance the parser across each
9438 	 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
9439 	 * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h
9440 	 * To add support for new nodes update ixgbe_model.h parse structures
9441 	 * this function _should_ be generic try not to hardcode values here.
9442 	 */
9443 	if (uhtid == 0x800) {
9444 		field_ptr = (adapter->jump_tables[0])->mat;
9445 	} else {
9446 		if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9447 			return err;
9448 		if (!adapter->jump_tables[uhtid])
9449 			return err;
9450 		field_ptr = (adapter->jump_tables[uhtid])->mat;
9451 	}
9452 
9453 	if (!field_ptr)
9454 		return err;
9455 
9456 	/* At this point we know the field_ptr is valid and need to either
9457 	 * build cls_u32 link or attach filter. Because adding a link to
9458 	 * a handle that does not exist is invalid and the same for adding
9459 	 * rules to handles that don't exist.
9460 	 */
9461 
9462 	if (link_uhtid) {
9463 		struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9464 
9465 		if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9466 			return err;
9467 
9468 		if (!test_bit(link_uhtid - 1, &adapter->tables))
9469 			return err;
9470 
9471 		/* Multiple filters as links to the same hash table are not
9472 		 * supported. To add a new filter with the same next header
9473 		 * but different match/jump conditions, create a new hash table
9474 		 * and link to it.
9475 		 */
9476 		if (adapter->jump_tables[link_uhtid] &&
9477 		    (adapter->jump_tables[link_uhtid])->link_hdl) {
9478 			e_err(drv, "Link filter exists for link: %x\n",
9479 			      link_uhtid);
9480 			return err;
9481 		}
9482 
9483 		for (i = 0; nexthdr[i].jump; i++) {
9484 			if (nexthdr[i].o != cls->knode.sel->offoff ||
9485 			    nexthdr[i].s != cls->knode.sel->offshift ||
9486 			    nexthdr[i].m !=
9487 			    (__force u32)cls->knode.sel->offmask)
9488 				return err;
9489 
9490 			jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9491 			if (!jump)
9492 				return -ENOMEM;
9493 			input = kzalloc(sizeof(*input), GFP_KERNEL);
9494 			if (!input) {
9495 				err = -ENOMEM;
9496 				goto free_jump;
9497 			}
9498 			mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9499 			if (!mask) {
9500 				err = -ENOMEM;
9501 				goto free_input;
9502 			}
9503 			jump->input = input;
9504 			jump->mask = mask;
9505 			jump->link_hdl = cls->knode.handle;
9506 
9507 			err = ixgbe_clsu32_build_input(input, mask, cls,
9508 						       field_ptr, &nexthdr[i]);
9509 			if (!err) {
9510 				jump->mat = nexthdr[i].jump;
9511 				adapter->jump_tables[link_uhtid] = jump;
9512 				break;
9513 			} else {
9514 				kfree(mask);
9515 				kfree(input);
9516 				kfree(jump);
9517 			}
9518 		}
9519 		return 0;
9520 	}
9521 
9522 	input = kzalloc(sizeof(*input), GFP_KERNEL);
9523 	if (!input)
9524 		return -ENOMEM;
9525 	mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9526 	if (!mask) {
9527 		err = -ENOMEM;
9528 		goto free_input;
9529 	}
9530 
9531 	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9532 		if ((adapter->jump_tables[uhtid])->input)
9533 			memcpy(input, (adapter->jump_tables[uhtid])->input,
9534 			       sizeof(*input));
9535 		if ((adapter->jump_tables[uhtid])->mask)
9536 			memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9537 			       sizeof(*mask));
9538 
9539 		/* Lookup in all child hash tables if this location is already
9540 		 * filled with a filter
9541 		 */
9542 		for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9543 			struct ixgbe_jump_table *link = adapter->jump_tables[i];
9544 
9545 			if (link && (test_bit(loc - 1, link->child_loc_map))) {
9546 				e_err(drv, "Filter exists in location: %x\n",
9547 				      loc);
9548 				err = -EINVAL;
9549 				goto err_out;
9550 			}
9551 		}
9552 	}
9553 	err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9554 	if (err)
9555 		goto err_out;
9556 
9557 	err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9558 			       &queue);
9559 	if (err < 0)
9560 		goto err_out;
9561 
9562 	input->sw_idx = loc;
9563 
9564 	spin_lock(&adapter->fdir_perfect_lock);
9565 
9566 	if (hlist_empty(&adapter->fdir_filter_list)) {
9567 		memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9568 		err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9569 		if (err)
9570 			goto err_out_w_lock;
9571 	} else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9572 		err = -EINVAL;
9573 		goto err_out_w_lock;
9574 	}
9575 
9576 	ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9577 	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9578 						    input->sw_idx, queue);
9579 	if (err)
9580 		goto err_out_w_lock;
9581 
9582 	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9583 	spin_unlock(&adapter->fdir_perfect_lock);
9584 
9585 	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9586 		set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9587 
9588 	kfree(mask);
9589 	return err;
9590 err_out_w_lock:
9591 	spin_unlock(&adapter->fdir_perfect_lock);
9592 err_out:
9593 	kfree(mask);
9594 free_input:
9595 	kfree(input);
9596 free_jump:
9597 	kfree(jump);
9598 	return err;
9599 }
9600 
9601 static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9602 				  struct tc_cls_u32_offload *cls_u32)
9603 {
9604 	switch (cls_u32->command) {
9605 	case TC_CLSU32_NEW_KNODE:
9606 	case TC_CLSU32_REPLACE_KNODE:
9607 		return ixgbe_configure_clsu32(adapter, cls_u32);
9608 	case TC_CLSU32_DELETE_KNODE:
9609 		return ixgbe_delete_clsu32(adapter, cls_u32);
9610 	case TC_CLSU32_NEW_HNODE:
9611 	case TC_CLSU32_REPLACE_HNODE:
9612 		return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9613 	case TC_CLSU32_DELETE_HNODE:
9614 		return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9615 	default:
9616 		return -EOPNOTSUPP;
9617 	}
9618 }
9619 
9620 static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9621 				   void *cb_priv)
9622 {
9623 	struct ixgbe_adapter *adapter = cb_priv;
9624 
9625 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9626 		return -EOPNOTSUPP;
9627 
9628 	switch (type) {
9629 	case TC_SETUP_CLSU32:
9630 		return ixgbe_setup_tc_cls_u32(adapter, type_data);
9631 	default:
9632 		return -EOPNOTSUPP;
9633 	}
9634 }
9635 
9636 static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9637 				 struct tc_mqprio_qopt *mqprio)
9638 {
9639 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9640 	return ixgbe_setup_tc(dev, mqprio->num_tc);
9641 }
9642 
9643 static LIST_HEAD(ixgbe_block_cb_list);
9644 
9645 static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9646 			    void *type_data)
9647 {
9648 	struct ixgbe_adapter *adapter = netdev_priv(dev);
9649 
9650 	switch (type) {
9651 	case TC_SETUP_BLOCK:
9652 		return flow_block_cb_setup_simple(type_data,
9653 						  &ixgbe_block_cb_list,
9654 						  ixgbe_setup_tc_block_cb,
9655 						  adapter, adapter, true);
9656 	case TC_SETUP_QDISC_MQPRIO:
9657 		return ixgbe_setup_tc_mqprio(dev, type_data);
9658 	default:
9659 		return -EOPNOTSUPP;
9660 	}
9661 }
9662 
9663 #ifdef CONFIG_PCI_IOV
9664 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9665 {
9666 	struct net_device *netdev = adapter->netdev;
9667 
9668 	rtnl_lock();
9669 	ixgbe_setup_tc(netdev, adapter->hw_tcs);
9670 	rtnl_unlock();
9671 }
9672 
9673 #endif
9674 void ixgbe_do_reset(struct net_device *netdev)
9675 {
9676 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9677 
9678 	if (netif_running(netdev))
9679 		ixgbe_reinit_locked(adapter);
9680 	else
9681 		ixgbe_reset(adapter);
9682 }
9683 
9684 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9685 					    netdev_features_t features)
9686 {
9687 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9688 
9689 	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
9690 	if (!(features & NETIF_F_RXCSUM))
9691 		features &= ~NETIF_F_LRO;
9692 
9693 	/* Turn off LRO if not RSC capable */
9694 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9695 		features &= ~NETIF_F_LRO;
9696 
9697 	if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9698 		e_dev_err("LRO is not supported with XDP\n");
9699 		features &= ~NETIF_F_LRO;
9700 	}
9701 
9702 	return features;
9703 }
9704 
9705 static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9706 {
9707 	int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9708 			num_online_cpus());
9709 
9710 	/* go back to full RSS if we're not running SR-IOV */
9711 	if (!adapter->ring_feature[RING_F_VMDQ].offset)
9712 		adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9713 				    IXGBE_FLAG_SRIOV_ENABLED);
9714 
9715 	adapter->ring_feature[RING_F_RSS].limit = rss;
9716 	adapter->ring_feature[RING_F_VMDQ].limit = 1;
9717 
9718 	ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9719 }
9720 
9721 static int ixgbe_set_features(struct net_device *netdev,
9722 			      netdev_features_t features)
9723 {
9724 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9725 	netdev_features_t changed = netdev->features ^ features;
9726 	bool need_reset = false;
9727 
9728 	/* Make sure RSC matches LRO, reset if change */
9729 	if (!(features & NETIF_F_LRO)) {
9730 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9731 			need_reset = true;
9732 		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9733 	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9734 		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9735 		if (adapter->rx_itr_setting == 1 ||
9736 		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9737 			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9738 			need_reset = true;
9739 		} else if ((changed ^ features) & NETIF_F_LRO) {
9740 			e_info(probe, "rx-usecs set too low, "
9741 			       "disabling RSC\n");
9742 		}
9743 	}
9744 
9745 	/*
9746 	 * Check if Flow Director n-tuple support or hw_tc support was
9747 	 * enabled or disabled.  If the state changed, we need to reset.
9748 	 */
9749 	if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9750 		/* turn off ATR, enable perfect filters and reset */
9751 		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9752 			need_reset = true;
9753 
9754 		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9755 		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9756 	} else {
9757 		/* turn off perfect filters, enable ATR and reset */
9758 		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9759 			need_reset = true;
9760 
9761 		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9762 
9763 		/* We cannot enable ATR if SR-IOV is enabled */
9764 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
9765 		    /* We cannot enable ATR if we have 2 or more tcs */
9766 		    (adapter->hw_tcs > 1) ||
9767 		    /* We cannot enable ATR if RSS is disabled */
9768 		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
9769 		    /* A sample rate of 0 indicates ATR disabled */
9770 		    (!adapter->atr_sample_rate))
9771 			; /* do nothing not supported */
9772 		else /* otherwise supported and set the flag */
9773 			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9774 	}
9775 
9776 	if (changed & NETIF_F_RXALL)
9777 		need_reset = true;
9778 
9779 	netdev->features = features;
9780 
9781 	if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9782 		ixgbe_reset_l2fw_offload(adapter);
9783 	else if (need_reset)
9784 		ixgbe_do_reset(netdev);
9785 	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9786 			    NETIF_F_HW_VLAN_CTAG_FILTER))
9787 		ixgbe_set_rx_mode(netdev);
9788 
9789 	return 1;
9790 }
9791 
9792 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9793 			     struct net_device *dev,
9794 			     const unsigned char *addr, u16 vid,
9795 			     u16 flags,
9796 			     struct netlink_ext_ack *extack)
9797 {
9798 	/* guarantee we can provide a unique filter for the unicast address */
9799 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9800 		struct ixgbe_adapter *adapter = netdev_priv(dev);
9801 		u16 pool = VMDQ_P(0);
9802 
9803 		if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9804 			return -ENOMEM;
9805 	}
9806 
9807 	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9808 }
9809 
9810 /**
9811  * ixgbe_configure_bridge_mode - set various bridge modes
9812  * @adapter: the private structure
9813  * @mode: requested bridge mode
9814  *
9815  * Configure some settings require for various bridge modes.
9816  **/
9817 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9818 				       __u16 mode)
9819 {
9820 	struct ixgbe_hw *hw = &adapter->hw;
9821 	unsigned int p, num_pools;
9822 	u32 vmdctl;
9823 
9824 	switch (mode) {
9825 	case BRIDGE_MODE_VEPA:
9826 		/* disable Tx loopback, rely on switch hairpin mode */
9827 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9828 
9829 		/* must enable Rx switching replication to allow multicast
9830 		 * packet reception on all VFs, and to enable source address
9831 		 * pruning.
9832 		 */
9833 		vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9834 		vmdctl |= IXGBE_VT_CTL_REPLEN;
9835 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9836 
9837 		/* enable Rx source address pruning. Note, this requires
9838 		 * replication to be enabled or else it does nothing.
9839 		 */
9840 		num_pools = adapter->num_vfs + adapter->num_rx_pools;
9841 		for (p = 0; p < num_pools; p++) {
9842 			if (hw->mac.ops.set_source_address_pruning)
9843 				hw->mac.ops.set_source_address_pruning(hw,
9844 								       true,
9845 								       p);
9846 		}
9847 		break;
9848 	case BRIDGE_MODE_VEB:
9849 		/* enable Tx loopback for internal VF/PF communication */
9850 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9851 				IXGBE_PFDTXGSWC_VT_LBEN);
9852 
9853 		/* disable Rx switching replication unless we have SR-IOV
9854 		 * virtual functions
9855 		 */
9856 		vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9857 		if (!adapter->num_vfs)
9858 			vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9859 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9860 
9861 		/* disable Rx source address pruning, since we don't expect to
9862 		 * be receiving external loopback of our transmitted frames.
9863 		 */
9864 		num_pools = adapter->num_vfs + adapter->num_rx_pools;
9865 		for (p = 0; p < num_pools; p++) {
9866 			if (hw->mac.ops.set_source_address_pruning)
9867 				hw->mac.ops.set_source_address_pruning(hw,
9868 								       false,
9869 								       p);
9870 		}
9871 		break;
9872 	default:
9873 		return -EINVAL;
9874 	}
9875 
9876 	adapter->bridge_mode = mode;
9877 
9878 	e_info(drv, "enabling bridge mode: %s\n",
9879 	       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9880 
9881 	return 0;
9882 }
9883 
9884 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9885 				    struct nlmsghdr *nlh, u16 flags,
9886 				    struct netlink_ext_ack *extack)
9887 {
9888 	struct ixgbe_adapter *adapter = netdev_priv(dev);
9889 	struct nlattr *attr, *br_spec;
9890 	int rem;
9891 
9892 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9893 		return -EOPNOTSUPP;
9894 
9895 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9896 	if (!br_spec)
9897 		return -EINVAL;
9898 
9899 	nla_for_each_nested(attr, br_spec, rem) {
9900 		int status;
9901 		__u16 mode;
9902 
9903 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
9904 			continue;
9905 
9906 		if (nla_len(attr) < sizeof(mode))
9907 			return -EINVAL;
9908 
9909 		mode = nla_get_u16(attr);
9910 		status = ixgbe_configure_bridge_mode(adapter, mode);
9911 		if (status)
9912 			return status;
9913 
9914 		break;
9915 	}
9916 
9917 	return 0;
9918 }
9919 
9920 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9921 				    struct net_device *dev,
9922 				    u32 filter_mask, int nlflags)
9923 {
9924 	struct ixgbe_adapter *adapter = netdev_priv(dev);
9925 
9926 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9927 		return 0;
9928 
9929 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
9930 				       adapter->bridge_mode, 0, 0, nlflags,
9931 				       filter_mask, NULL);
9932 }
9933 
9934 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9935 {
9936 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
9937 	struct ixgbe_fwd_adapter *accel;
9938 	int tcs = adapter->hw_tcs ? : 1;
9939 	int pool, err;
9940 
9941 	if (adapter->xdp_prog) {
9942 		e_warn(probe, "L2FW offload is not supported with XDP\n");
9943 		return ERR_PTR(-EINVAL);
9944 	}
9945 
9946 	/* The hardware supported by ixgbe only filters on the destination MAC
9947 	 * address. In order to avoid issues we only support offloading modes
9948 	 * where the hardware can actually provide the functionality.
9949 	 */
9950 	if (!macvlan_supports_dest_filter(vdev))
9951 		return ERR_PTR(-EMEDIUMTYPE);
9952 
9953 	/* We need to lock down the macvlan to be a single queue device so that
9954 	 * we can reuse the tc_to_txq field in the macvlan netdev to represent
9955 	 * the queue mapping to our netdev.
9956 	 */
9957 	if (netif_is_multiqueue(vdev))
9958 		return ERR_PTR(-ERANGE);
9959 
9960 	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9961 	if (pool == adapter->num_rx_pools) {
9962 		u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
9963 		u16 reserved_pools;
9964 
9965 		if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
9966 		     adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
9967 		    adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
9968 			return ERR_PTR(-EBUSY);
9969 
9970 		/* Hardware has a limited number of available pools. Each VF,
9971 		 * and the PF require a pool. Check to ensure we don't
9972 		 * attempt to use more then the available number of pools.
9973 		 */
9974 		if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
9975 			return ERR_PTR(-EBUSY);
9976 
9977 		/* Enable VMDq flag so device will be set in VM mode */
9978 		adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
9979 				  IXGBE_FLAG_SRIOV_ENABLED;
9980 
9981 		/* Try to reserve as many queues per pool as possible,
9982 		 * we start with the configurations that support 4 queues
9983 		 * per pools, followed by 2, and then by just 1 per pool.
9984 		 */
9985 		if (used_pools < 32 && adapter->num_rx_pools < 16)
9986 			reserved_pools = min_t(u16,
9987 					       32 - used_pools,
9988 					       16 - adapter->num_rx_pools);
9989 		else if (adapter->num_rx_pools < 32)
9990 			reserved_pools = min_t(u16,
9991 					       64 - used_pools,
9992 					       32 - adapter->num_rx_pools);
9993 		else
9994 			reserved_pools = 64 - used_pools;
9995 
9996 
9997 		if (!reserved_pools)
9998 			return ERR_PTR(-EBUSY);
9999 
10000 		adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
10001 
10002 		/* Force reinit of ring allocation with VMDQ enabled */
10003 		err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10004 		if (err)
10005 			return ERR_PTR(err);
10006 
10007 		if (pool >= adapter->num_rx_pools)
10008 			return ERR_PTR(-ENOMEM);
10009 	}
10010 
10011 	accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10012 	if (!accel)
10013 		return ERR_PTR(-ENOMEM);
10014 
10015 	set_bit(pool, adapter->fwd_bitmask);
10016 	netdev_set_sb_channel(vdev, pool);
10017 	accel->pool = pool;
10018 	accel->netdev = vdev;
10019 
10020 	if (!netif_running(pdev))
10021 		return accel;
10022 
10023 	err = ixgbe_fwd_ring_up(adapter, accel);
10024 	if (err)
10025 		return ERR_PTR(err);
10026 
10027 	return accel;
10028 }
10029 
10030 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10031 {
10032 	struct ixgbe_fwd_adapter *accel = priv;
10033 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
10034 	unsigned int rxbase = accel->rx_base_queue;
10035 	unsigned int i;
10036 
10037 	/* delete unicast filter associated with offloaded interface */
10038 	ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10039 			     VMDQ_P(accel->pool));
10040 
10041 	/* Allow remaining Rx packets to get flushed out of the
10042 	 * Rx FIFO before we drop the netdev for the ring.
10043 	 */
10044 	usleep_range(10000, 20000);
10045 
10046 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10047 		struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10048 		struct ixgbe_q_vector *qv = ring->q_vector;
10049 
10050 		/* Make sure we aren't processing any packets and clear
10051 		 * netdev to shut down the ring.
10052 		 */
10053 		if (netif_running(adapter->netdev))
10054 			napi_synchronize(&qv->napi);
10055 		ring->netdev = NULL;
10056 	}
10057 
10058 	/* unbind the queues and drop the subordinate channel config */
10059 	netdev_unbind_sb_channel(pdev, accel->netdev);
10060 	netdev_set_sb_channel(accel->netdev, 0);
10061 
10062 	clear_bit(accel->pool, adapter->fwd_bitmask);
10063 	kfree(accel);
10064 }
10065 
10066 #define IXGBE_MAX_MAC_HDR_LEN		127
10067 #define IXGBE_MAX_NETWORK_HDR_LEN	511
10068 
10069 static netdev_features_t
10070 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10071 		     netdev_features_t features)
10072 {
10073 	unsigned int network_hdr_len, mac_hdr_len;
10074 
10075 	/* Make certain the headers can be described by a context descriptor */
10076 	mac_hdr_len = skb_network_header(skb) - skb->data;
10077 	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10078 		return features & ~(NETIF_F_HW_CSUM |
10079 				    NETIF_F_SCTP_CRC |
10080 				    NETIF_F_GSO_UDP_L4 |
10081 				    NETIF_F_HW_VLAN_CTAG_TX |
10082 				    NETIF_F_TSO |
10083 				    NETIF_F_TSO6);
10084 
10085 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10086 	if (unlikely(network_hdr_len >  IXGBE_MAX_NETWORK_HDR_LEN))
10087 		return features & ~(NETIF_F_HW_CSUM |
10088 				    NETIF_F_SCTP_CRC |
10089 				    NETIF_F_GSO_UDP_L4 |
10090 				    NETIF_F_TSO |
10091 				    NETIF_F_TSO6);
10092 
10093 	/* We can only support IPV4 TSO in tunnels if we can mangle the
10094 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
10095 	 * IPsec offoad sets skb->encapsulation but still can handle
10096 	 * the TSO, so it's the exception.
10097 	 */
10098 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10099 #ifdef CONFIG_IXGBE_IPSEC
10100 		if (!secpath_exists(skb))
10101 #endif
10102 			features &= ~NETIF_F_TSO;
10103 	}
10104 
10105 	return features;
10106 }
10107 
10108 static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10109 {
10110 	int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10111 	struct ixgbe_adapter *adapter = netdev_priv(dev);
10112 	struct bpf_prog *old_prog;
10113 	bool need_reset;
10114 
10115 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10116 		return -EINVAL;
10117 
10118 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10119 		return -EINVAL;
10120 
10121 	/* verify ixgbe ring attributes are sufficient for XDP */
10122 	for (i = 0; i < adapter->num_rx_queues; i++) {
10123 		struct ixgbe_ring *ring = adapter->rx_ring[i];
10124 
10125 		if (ring_is_rsc_enabled(ring))
10126 			return -EINVAL;
10127 
10128 		if (frame_size > ixgbe_rx_bufsz(ring))
10129 			return -EINVAL;
10130 	}
10131 
10132 	if (nr_cpu_ids > MAX_XDP_QUEUES)
10133 		return -ENOMEM;
10134 
10135 	old_prog = xchg(&adapter->xdp_prog, prog);
10136 	need_reset = (!!prog != !!old_prog);
10137 
10138 	/* If transitioning XDP modes reconfigure rings */
10139 	if (need_reset) {
10140 		int err;
10141 
10142 		if (!prog)
10143 			/* Wait until ndo_xsk_wakeup completes. */
10144 			synchronize_rcu();
10145 		err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10146 
10147 		if (err) {
10148 			rcu_assign_pointer(adapter->xdp_prog, old_prog);
10149 			return -EINVAL;
10150 		}
10151 	} else {
10152 		for (i = 0; i < adapter->num_rx_queues; i++)
10153 			(void)xchg(&adapter->rx_ring[i]->xdp_prog,
10154 			    adapter->xdp_prog);
10155 	}
10156 
10157 	if (old_prog)
10158 		bpf_prog_put(old_prog);
10159 
10160 	/* Kick start the NAPI context if there is an AF_XDP socket open
10161 	 * on that queue id. This so that receiving will start.
10162 	 */
10163 	if (need_reset && prog)
10164 		for (i = 0; i < adapter->num_rx_queues; i++)
10165 			if (adapter->xdp_ring[i]->xsk_pool)
10166 				(void)ixgbe_xsk_wakeup(adapter->netdev, i,
10167 						       XDP_WAKEUP_RX);
10168 
10169 	return 0;
10170 }
10171 
10172 static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10173 {
10174 	struct ixgbe_adapter *adapter = netdev_priv(dev);
10175 
10176 	switch (xdp->command) {
10177 	case XDP_SETUP_PROG:
10178 		return ixgbe_xdp_setup(dev, xdp->prog);
10179 	case XDP_SETUP_XSK_POOL:
10180 		return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
10181 					    xdp->xsk.queue_id);
10182 
10183 	default:
10184 		return -EINVAL;
10185 	}
10186 }
10187 
10188 void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10189 {
10190 	/* Force memory writes to complete before letting h/w know there
10191 	 * are new descriptors to fetch.
10192 	 */
10193 	wmb();
10194 	writel(ring->next_to_use, ring->tail);
10195 }
10196 
10197 static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10198 			  struct xdp_frame **frames, u32 flags)
10199 {
10200 	struct ixgbe_adapter *adapter = netdev_priv(dev);
10201 	struct ixgbe_ring *ring;
10202 	int nxmit = 0;
10203 	int i;
10204 
10205 	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10206 		return -ENETDOWN;
10207 
10208 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10209 		return -EINVAL;
10210 
10211 	/* During program transitions its possible adapter->xdp_prog is assigned
10212 	 * but ring has not been configured yet. In this case simply abort xmit.
10213 	 */
10214 	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10215 	if (unlikely(!ring))
10216 		return -ENXIO;
10217 
10218 	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10219 		return -ENXIO;
10220 
10221 	for (i = 0; i < n; i++) {
10222 		struct xdp_frame *xdpf = frames[i];
10223 		int err;
10224 
10225 		err = ixgbe_xmit_xdp_ring(adapter, xdpf);
10226 		if (err != IXGBE_XDP_TX)
10227 			break;
10228 		nxmit++;
10229 	}
10230 
10231 	if (unlikely(flags & XDP_XMIT_FLUSH))
10232 		ixgbe_xdp_ring_update_tail(ring);
10233 
10234 	return nxmit;
10235 }
10236 
10237 static const struct net_device_ops ixgbe_netdev_ops = {
10238 	.ndo_open		= ixgbe_open,
10239 	.ndo_stop		= ixgbe_close,
10240 	.ndo_start_xmit		= ixgbe_xmit_frame,
10241 	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
10242 	.ndo_validate_addr	= eth_validate_addr,
10243 	.ndo_set_mac_address	= ixgbe_set_mac,
10244 	.ndo_change_mtu		= ixgbe_change_mtu,
10245 	.ndo_tx_timeout		= ixgbe_tx_timeout,
10246 	.ndo_set_tx_maxrate	= ixgbe_tx_maxrate,
10247 	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
10248 	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
10249 	.ndo_do_ioctl		= ixgbe_ioctl,
10250 	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
10251 	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
10252 	.ndo_set_vf_rate	= ixgbe_ndo_set_vf_bw,
10253 	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
10254 	.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10255 	.ndo_set_vf_trust	= ixgbe_ndo_set_vf_trust,
10256 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
10257 	.ndo_get_stats64	= ixgbe_get_stats64,
10258 	.ndo_setup_tc		= __ixgbe_setup_tc,
10259 #ifdef IXGBE_FCOE
10260 	.ndo_select_queue	= ixgbe_select_queue,
10261 	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10262 	.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10263 	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10264 	.ndo_fcoe_enable = ixgbe_fcoe_enable,
10265 	.ndo_fcoe_disable = ixgbe_fcoe_disable,
10266 	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10267 	.ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10268 #endif /* IXGBE_FCOE */
10269 	.ndo_set_features = ixgbe_set_features,
10270 	.ndo_fix_features = ixgbe_fix_features,
10271 	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
10272 	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
10273 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
10274 	.ndo_dfwd_add_station	= ixgbe_fwd_add,
10275 	.ndo_dfwd_del_station	= ixgbe_fwd_del,
10276 	.ndo_features_check	= ixgbe_features_check,
10277 	.ndo_bpf		= ixgbe_xdp,
10278 	.ndo_xdp_xmit		= ixgbe_xdp_xmit,
10279 	.ndo_xsk_wakeup         = ixgbe_xsk_wakeup,
10280 };
10281 
10282 static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10283 				 struct ixgbe_ring *tx_ring)
10284 {
10285 	unsigned long wait_delay, delay_interval;
10286 	struct ixgbe_hw *hw = &adapter->hw;
10287 	u8 reg_idx = tx_ring->reg_idx;
10288 	int wait_loop;
10289 	u32 txdctl;
10290 
10291 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10292 
10293 	/* delay mechanism from ixgbe_disable_tx */
10294 	delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10295 
10296 	wait_loop = IXGBE_MAX_RX_DESC_POLL;
10297 	wait_delay = delay_interval;
10298 
10299 	while (wait_loop--) {
10300 		usleep_range(wait_delay, wait_delay + 10);
10301 		wait_delay += delay_interval * 2;
10302 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10303 
10304 		if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10305 			return;
10306 	}
10307 
10308 	e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10309 }
10310 
10311 static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10312 			      struct ixgbe_ring *tx_ring)
10313 {
10314 	set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10315 	ixgbe_disable_txr_hw(adapter, tx_ring);
10316 }
10317 
10318 static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10319 				 struct ixgbe_ring *rx_ring)
10320 {
10321 	unsigned long wait_delay, delay_interval;
10322 	struct ixgbe_hw *hw = &adapter->hw;
10323 	u8 reg_idx = rx_ring->reg_idx;
10324 	int wait_loop;
10325 	u32 rxdctl;
10326 
10327 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10328 	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10329 	rxdctl |= IXGBE_RXDCTL_SWFLSH;
10330 
10331 	/* write value back with RXDCTL.ENABLE bit cleared */
10332 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10333 
10334 	/* RXDCTL.EN may not change on 82598 if link is down, so skip it */
10335 	if (hw->mac.type == ixgbe_mac_82598EB &&
10336 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10337 		return;
10338 
10339 	/* delay mechanism from ixgbe_disable_rx */
10340 	delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10341 
10342 	wait_loop = IXGBE_MAX_RX_DESC_POLL;
10343 	wait_delay = delay_interval;
10344 
10345 	while (wait_loop--) {
10346 		usleep_range(wait_delay, wait_delay + 10);
10347 		wait_delay += delay_interval * 2;
10348 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10349 
10350 		if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10351 			return;
10352 	}
10353 
10354 	e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10355 }
10356 
10357 static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10358 {
10359 	memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10360 	memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10361 }
10362 
10363 static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10364 {
10365 	memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10366 	memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10367 }
10368 
10369 /**
10370  * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
10371  * @adapter: adapter structure
10372  * @ring: ring index
10373  *
10374  * This function disables a certain Rx/Tx/XDP Tx ring. The function
10375  * assumes that the netdev is running.
10376  **/
10377 void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10378 {
10379 	struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10380 
10381 	rx_ring = adapter->rx_ring[ring];
10382 	tx_ring = adapter->tx_ring[ring];
10383 	xdp_ring = adapter->xdp_ring[ring];
10384 
10385 	ixgbe_disable_txr(adapter, tx_ring);
10386 	if (xdp_ring)
10387 		ixgbe_disable_txr(adapter, xdp_ring);
10388 	ixgbe_disable_rxr_hw(adapter, rx_ring);
10389 
10390 	if (xdp_ring)
10391 		synchronize_rcu();
10392 
10393 	/* Rx/Tx/XDP Tx share the same napi context. */
10394 	napi_disable(&rx_ring->q_vector->napi);
10395 
10396 	ixgbe_clean_tx_ring(tx_ring);
10397 	if (xdp_ring)
10398 		ixgbe_clean_tx_ring(xdp_ring);
10399 	ixgbe_clean_rx_ring(rx_ring);
10400 
10401 	ixgbe_reset_txr_stats(tx_ring);
10402 	if (xdp_ring)
10403 		ixgbe_reset_txr_stats(xdp_ring);
10404 	ixgbe_reset_rxr_stats(rx_ring);
10405 }
10406 
10407 /**
10408  * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
10409  * @adapter: adapter structure
10410  * @ring: ring index
10411  *
10412  * This function enables a certain Rx/Tx/XDP Tx ring. The function
10413  * assumes that the netdev is running.
10414  **/
10415 void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10416 {
10417 	struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10418 
10419 	rx_ring = adapter->rx_ring[ring];
10420 	tx_ring = adapter->tx_ring[ring];
10421 	xdp_ring = adapter->xdp_ring[ring];
10422 
10423 	/* Rx/Tx/XDP Tx share the same napi context. */
10424 	napi_enable(&rx_ring->q_vector->napi);
10425 
10426 	ixgbe_configure_tx_ring(adapter, tx_ring);
10427 	if (xdp_ring)
10428 		ixgbe_configure_tx_ring(adapter, xdp_ring);
10429 	ixgbe_configure_rx_ring(adapter, rx_ring);
10430 
10431 	clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10432 	if (xdp_ring)
10433 		clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10434 }
10435 
10436 /**
10437  * ixgbe_enumerate_functions - Get the number of ports this device has
10438  * @adapter: adapter structure
10439  *
10440  * This function enumerates the phsyical functions co-located on a single slot,
10441  * in order to determine how many ports a device has. This is most useful in
10442  * determining the required GT/s of PCIe bandwidth necessary for optimal
10443  * performance.
10444  **/
10445 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10446 {
10447 	struct pci_dev *entry, *pdev = adapter->pdev;
10448 	int physfns = 0;
10449 
10450 	/* Some cards can not use the generic count PCIe functions method,
10451 	 * because they are behind a parent switch, so we hardcode these with
10452 	 * the correct number of functions.
10453 	 */
10454 	if (ixgbe_pcie_from_parent(&adapter->hw))
10455 		physfns = 4;
10456 
10457 	list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
10458 		/* don't count virtual functions */
10459 		if (entry->is_virtfn)
10460 			continue;
10461 
10462 		/* When the devices on the bus don't all match our device ID,
10463 		 * we can't reliably determine the correct number of
10464 		 * functions. This can occur if a function has been direct
10465 		 * attached to a virtual machine using VT-d, for example. In
10466 		 * this case, simply return -1 to indicate this.
10467 		 */
10468 		if ((entry->vendor != pdev->vendor) ||
10469 		    (entry->device != pdev->device))
10470 			return -1;
10471 
10472 		physfns++;
10473 	}
10474 
10475 	return physfns;
10476 }
10477 
10478 /**
10479  * ixgbe_wol_supported - Check whether device supports WoL
10480  * @adapter: the adapter private structure
10481  * @device_id: the device ID
10482  * @subdevice_id: the subsystem device ID
10483  *
10484  * This function is used by probe and ethtool to determine
10485  * which devices have WoL support
10486  *
10487  **/
10488 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10489 			 u16 subdevice_id)
10490 {
10491 	struct ixgbe_hw *hw = &adapter->hw;
10492 	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10493 
10494 	/* WOL not supported on 82598 */
10495 	if (hw->mac.type == ixgbe_mac_82598EB)
10496 		return false;
10497 
10498 	/* check eeprom to see if WOL is enabled for X540 and newer */
10499 	if (hw->mac.type >= ixgbe_mac_X540) {
10500 		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10501 		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10502 		     (hw->bus.func == 0)))
10503 			return true;
10504 	}
10505 
10506 	/* WOL is determined based on device IDs for 82599 MACs */
10507 	switch (device_id) {
10508 	case IXGBE_DEV_ID_82599_SFP:
10509 		/* Only these subdevices could supports WOL */
10510 		switch (subdevice_id) {
10511 		case IXGBE_SUBDEV_ID_82599_560FLR:
10512 		case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10513 		case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10514 		case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
10515 			/* only support first port */
10516 			if (hw->bus.func != 0)
10517 				break;
10518 			fallthrough;
10519 		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10520 		case IXGBE_SUBDEV_ID_82599_SFP:
10521 		case IXGBE_SUBDEV_ID_82599_RNDC:
10522 		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10523 		case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10524 		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10525 		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10526 			return true;
10527 		}
10528 		break;
10529 	case IXGBE_DEV_ID_82599EN_SFP:
10530 		/* Only these subdevices support WOL */
10531 		switch (subdevice_id) {
10532 		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10533 			return true;
10534 		}
10535 		break;
10536 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
10537 		/* All except this subdevice support WOL */
10538 		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10539 			return true;
10540 		break;
10541 	case IXGBE_DEV_ID_82599_KX4:
10542 		return  true;
10543 	default:
10544 		break;
10545 	}
10546 
10547 	return false;
10548 }
10549 
10550 /**
10551  * ixgbe_set_fw_version - Set FW version
10552  * @adapter: the adapter private structure
10553  *
10554  * This function is used by probe and ethtool to determine the FW version to
10555  * format to display. The FW version is taken from the EEPROM/NVM.
10556  */
10557 static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10558 {
10559 	struct ixgbe_hw *hw = &adapter->hw;
10560 	struct ixgbe_nvm_version nvm_ver;
10561 
10562 	ixgbe_get_oem_prod_version(hw, &nvm_ver);
10563 	if (nvm_ver.oem_valid) {
10564 		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10565 			 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10566 			 nvm_ver.oem_release);
10567 		return;
10568 	}
10569 
10570 	ixgbe_get_etk_id(hw, &nvm_ver);
10571 	ixgbe_get_orom_version(hw, &nvm_ver);
10572 
10573 	if (nvm_ver.or_valid) {
10574 		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10575 			 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10576 			 nvm_ver.or_build, nvm_ver.or_patch);
10577 		return;
10578 	}
10579 
10580 	/* Set ETrack ID format */
10581 	snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10582 		 "0x%08x", nvm_ver.etk_id);
10583 }
10584 
10585 /**
10586  * ixgbe_probe - Device Initialization Routine
10587  * @pdev: PCI device information struct
10588  * @ent: entry in ixgbe_pci_tbl
10589  *
10590  * Returns 0 on success, negative on failure
10591  *
10592  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
10593  * The OS initialization, configuring of the adapter private structure,
10594  * and a hardware reset occur.
10595  **/
10596 static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10597 {
10598 	struct net_device *netdev;
10599 	struct ixgbe_adapter *adapter = NULL;
10600 	struct ixgbe_hw *hw;
10601 	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10602 	int i, err, pci_using_dac, expected_gts;
10603 	unsigned int indices = MAX_TX_QUEUES;
10604 	u8 part_str[IXGBE_PBANUM_LENGTH];
10605 	bool disable_dev = false;
10606 #ifdef IXGBE_FCOE
10607 	u16 device_caps;
10608 #endif
10609 	u32 eec;
10610 
10611 	/* Catch broken hardware that put the wrong VF device ID in
10612 	 * the PCIe SR-IOV capability.
10613 	 */
10614 	if (pdev->is_virtfn) {
10615 		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10616 		     pci_name(pdev), pdev->vendor, pdev->device);
10617 		return -EINVAL;
10618 	}
10619 
10620 	err = pci_enable_device_mem(pdev);
10621 	if (err)
10622 		return err;
10623 
10624 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10625 		pci_using_dac = 1;
10626 	} else {
10627 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10628 		if (err) {
10629 			dev_err(&pdev->dev,
10630 				"No usable DMA configuration, aborting\n");
10631 			goto err_dma;
10632 		}
10633 		pci_using_dac = 0;
10634 	}
10635 
10636 	err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10637 	if (err) {
10638 		dev_err(&pdev->dev,
10639 			"pci_request_selected_regions failed 0x%x\n", err);
10640 		goto err_pci_reg;
10641 	}
10642 
10643 	pci_enable_pcie_error_reporting(pdev);
10644 
10645 	pci_set_master(pdev);
10646 	pci_save_state(pdev);
10647 
10648 	if (ii->mac == ixgbe_mac_82598EB) {
10649 #ifdef CONFIG_IXGBE_DCB
10650 		/* 8 TC w/ 4 queues per TC */
10651 		indices = 4 * MAX_TRAFFIC_CLASS;
10652 #else
10653 		indices = IXGBE_MAX_RSS_INDICES;
10654 #endif
10655 	}
10656 
10657 	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10658 	if (!netdev) {
10659 		err = -ENOMEM;
10660 		goto err_alloc_etherdev;
10661 	}
10662 
10663 	SET_NETDEV_DEV(netdev, &pdev->dev);
10664 
10665 	adapter = netdev_priv(netdev);
10666 
10667 	adapter->netdev = netdev;
10668 	adapter->pdev = pdev;
10669 	hw = &adapter->hw;
10670 	hw->back = adapter;
10671 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10672 
10673 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10674 			      pci_resource_len(pdev, 0));
10675 	adapter->io_addr = hw->hw_addr;
10676 	if (!hw->hw_addr) {
10677 		err = -EIO;
10678 		goto err_ioremap;
10679 	}
10680 
10681 	netdev->netdev_ops = &ixgbe_netdev_ops;
10682 	ixgbe_set_ethtool_ops(netdev);
10683 	netdev->watchdog_timeo = 5 * HZ;
10684 	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10685 
10686 	/* Setup hw api */
10687 	hw->mac.ops   = *ii->mac_ops;
10688 	hw->mac.type  = ii->mac;
10689 	hw->mvals     = ii->mvals;
10690 	if (ii->link_ops)
10691 		hw->link.ops  = *ii->link_ops;
10692 
10693 	/* EEPROM */
10694 	hw->eeprom.ops = *ii->eeprom_ops;
10695 	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10696 	if (ixgbe_removed(hw->hw_addr)) {
10697 		err = -EIO;
10698 		goto err_ioremap;
10699 	}
10700 	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
10701 	if (!(eec & BIT(8)))
10702 		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10703 
10704 	/* PHY */
10705 	hw->phy.ops = *ii->phy_ops;
10706 	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10707 	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
10708 	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10709 	hw->phy.mdio.mmds = 0;
10710 	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10711 	hw->phy.mdio.dev = netdev;
10712 	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10713 	hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10714 
10715 	/* setup the private structure */
10716 	err = ixgbe_sw_init(adapter, ii);
10717 	if (err)
10718 		goto err_sw_init;
10719 
10720 	switch (adapter->hw.mac.type) {
10721 	case ixgbe_mac_X550:
10722 	case ixgbe_mac_X550EM_x:
10723 		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
10724 		break;
10725 	case ixgbe_mac_x550em_a:
10726 		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
10727 		break;
10728 	default:
10729 		break;
10730 	}
10731 
10732 	/* Make sure the SWFW semaphore is in a valid state */
10733 	if (hw->mac.ops.init_swfw_sync)
10734 		hw->mac.ops.init_swfw_sync(hw);
10735 
10736 	/* Make it possible the adapter to be woken up via WOL */
10737 	switch (adapter->hw.mac.type) {
10738 	case ixgbe_mac_82599EB:
10739 	case ixgbe_mac_X540:
10740 	case ixgbe_mac_X550:
10741 	case ixgbe_mac_X550EM_x:
10742 	case ixgbe_mac_x550em_a:
10743 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10744 		break;
10745 	default:
10746 		break;
10747 	}
10748 
10749 	/*
10750 	 * If there is a fan on this device and it has failed log the
10751 	 * failure.
10752 	 */
10753 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10754 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10755 		if (esdp & IXGBE_ESDP_SDP1)
10756 			e_crit(probe, "Fan has stopped, replace the adapter\n");
10757 	}
10758 
10759 	if (allow_unsupported_sfp)
10760 		hw->allow_unsupported_sfp = allow_unsupported_sfp;
10761 
10762 	/* reset_hw fills in the perm_addr as well */
10763 	hw->phy.reset_if_overtemp = true;
10764 	err = hw->mac.ops.reset_hw(hw);
10765 	hw->phy.reset_if_overtemp = false;
10766 	ixgbe_set_eee_capable(adapter);
10767 	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10768 		err = 0;
10769 	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10770 		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10771 		e_dev_err("Reload the driver after installing a supported module.\n");
10772 		goto err_sw_init;
10773 	} else if (err) {
10774 		e_dev_err("HW Init failed: %d\n", err);
10775 		goto err_sw_init;
10776 	}
10777 
10778 #ifdef CONFIG_PCI_IOV
10779 	/* SR-IOV not supported on the 82598 */
10780 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10781 		goto skip_sriov;
10782 	/* Mailbox */
10783 	ixgbe_init_mbx_params_pf(hw);
10784 	hw->mbx.ops = ii->mbx_ops;
10785 	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10786 	ixgbe_enable_sriov(adapter, max_vfs);
10787 skip_sriov:
10788 
10789 #endif
10790 	netdev->features = NETIF_F_SG |
10791 			   NETIF_F_TSO |
10792 			   NETIF_F_TSO6 |
10793 			   NETIF_F_RXHASH |
10794 			   NETIF_F_RXCSUM |
10795 			   NETIF_F_HW_CSUM;
10796 
10797 #define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10798 				    NETIF_F_GSO_GRE_CSUM | \
10799 				    NETIF_F_GSO_IPXIP4 | \
10800 				    NETIF_F_GSO_IPXIP6 | \
10801 				    NETIF_F_GSO_UDP_TUNNEL | \
10802 				    NETIF_F_GSO_UDP_TUNNEL_CSUM)
10803 
10804 	netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10805 	netdev->features |= NETIF_F_GSO_PARTIAL |
10806 			    IXGBE_GSO_PARTIAL_FEATURES;
10807 
10808 	if (hw->mac.type >= ixgbe_mac_82599EB)
10809 		netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
10810 
10811 #ifdef CONFIG_IXGBE_IPSEC
10812 #define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
10813 				 NETIF_F_HW_ESP_TX_CSUM | \
10814 				 NETIF_F_GSO_ESP)
10815 
10816 	if (adapter->ipsec)
10817 		netdev->features |= IXGBE_ESP_FEATURES;
10818 #endif
10819 	/* copy netdev features into list of user selectable features */
10820 	netdev->hw_features |= netdev->features |
10821 			       NETIF_F_HW_VLAN_CTAG_FILTER |
10822 			       NETIF_F_HW_VLAN_CTAG_RX |
10823 			       NETIF_F_HW_VLAN_CTAG_TX |
10824 			       NETIF_F_RXALL |
10825 			       NETIF_F_HW_L2FW_DOFFLOAD;
10826 
10827 	if (hw->mac.type >= ixgbe_mac_82599EB)
10828 		netdev->hw_features |= NETIF_F_NTUPLE |
10829 				       NETIF_F_HW_TC;
10830 
10831 	if (pci_using_dac)
10832 		netdev->features |= NETIF_F_HIGHDMA;
10833 
10834 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10835 	netdev->hw_enc_features |= netdev->vlan_features;
10836 	netdev->mpls_features |= NETIF_F_SG |
10837 				 NETIF_F_TSO |
10838 				 NETIF_F_TSO6 |
10839 				 NETIF_F_HW_CSUM;
10840 	netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10841 
10842 	/* set this bit last since it cannot be part of vlan_features */
10843 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10844 			    NETIF_F_HW_VLAN_CTAG_RX |
10845 			    NETIF_F_HW_VLAN_CTAG_TX;
10846 
10847 	netdev->priv_flags |= IFF_UNICAST_FLT;
10848 	netdev->priv_flags |= IFF_SUPP_NOFCS;
10849 
10850 	/* MTU range: 68 - 9710 */
10851 	netdev->min_mtu = ETH_MIN_MTU;
10852 	netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10853 
10854 #ifdef CONFIG_IXGBE_DCB
10855 	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10856 		netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10857 #endif
10858 
10859 #ifdef IXGBE_FCOE
10860 	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10861 		unsigned int fcoe_l;
10862 
10863 		if (hw->mac.ops.get_device_caps) {
10864 			hw->mac.ops.get_device_caps(hw, &device_caps);
10865 			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10866 				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10867 		}
10868 
10869 
10870 		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10871 		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10872 
10873 		netdev->features |= NETIF_F_FSO |
10874 				    NETIF_F_FCOE_CRC;
10875 
10876 		netdev->vlan_features |= NETIF_F_FSO |
10877 					 NETIF_F_FCOE_CRC |
10878 					 NETIF_F_FCOE_MTU;
10879 	}
10880 #endif /* IXGBE_FCOE */
10881 	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10882 		netdev->hw_features |= NETIF_F_LRO;
10883 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10884 		netdev->features |= NETIF_F_LRO;
10885 
10886 	if (ixgbe_check_fw_error(adapter)) {
10887 		err = -EIO;
10888 		goto err_sw_init;
10889 	}
10890 
10891 	/* make sure the EEPROM is good */
10892 	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10893 		e_dev_err("The EEPROM Checksum Is Not Valid\n");
10894 		err = -EIO;
10895 		goto err_sw_init;
10896 	}
10897 
10898 	eth_platform_get_mac_address(&adapter->pdev->dev,
10899 				     adapter->hw.mac.perm_addr);
10900 
10901 	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
10902 
10903 	if (!is_valid_ether_addr(netdev->dev_addr)) {
10904 		e_dev_err("invalid MAC address\n");
10905 		err = -EIO;
10906 		goto err_sw_init;
10907 	}
10908 
10909 	/* Set hw->mac.addr to permanent MAC address */
10910 	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
10911 	ixgbe_mac_set_default_filter(adapter);
10912 
10913 	timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
10914 
10915 	if (ixgbe_removed(hw->hw_addr)) {
10916 		err = -EIO;
10917 		goto err_sw_init;
10918 	}
10919 	INIT_WORK(&adapter->service_task, ixgbe_service_task);
10920 	set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
10921 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
10922 
10923 	err = ixgbe_init_interrupt_scheme(adapter);
10924 	if (err)
10925 		goto err_sw_init;
10926 
10927 	for (i = 0; i < adapter->num_rx_queues; i++)
10928 		u64_stats_init(&adapter->rx_ring[i]->syncp);
10929 	for (i = 0; i < adapter->num_tx_queues; i++)
10930 		u64_stats_init(&adapter->tx_ring[i]->syncp);
10931 	for (i = 0; i < adapter->num_xdp_queues; i++)
10932 		u64_stats_init(&adapter->xdp_ring[i]->syncp);
10933 
10934 	/* WOL not supported for all devices */
10935 	adapter->wol = 0;
10936 	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
10937 	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
10938 						pdev->subsystem_device);
10939 	if (hw->wol_enabled)
10940 		adapter->wol = IXGBE_WUFC_MAG;
10941 
10942 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
10943 
10944 	/* save off EEPROM version number */
10945 	ixgbe_set_fw_version(adapter);
10946 
10947 	/* pick up the PCI bus settings for reporting later */
10948 	if (ixgbe_pcie_from_parent(hw))
10949 		ixgbe_get_parent_bus_info(adapter);
10950 	else
10951 		 hw->mac.ops.get_bus_info(hw);
10952 
10953 	/* calculate the expected PCIe bandwidth required for optimal
10954 	 * performance. Note that some older parts will never have enough
10955 	 * bandwidth due to being older generation PCIe parts. We clamp these
10956 	 * parts to ensure no warning is displayed if it can't be fixed.
10957 	 */
10958 	switch (hw->mac.type) {
10959 	case ixgbe_mac_82598EB:
10960 		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
10961 		break;
10962 	default:
10963 		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
10964 		break;
10965 	}
10966 
10967 	/* don't check link if we failed to enumerate functions */
10968 	if (expected_gts > 0)
10969 		ixgbe_check_minimum_link(adapter, expected_gts);
10970 
10971 	err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
10972 	if (err)
10973 		strlcpy(part_str, "Unknown", sizeof(part_str));
10974 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
10975 		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
10976 			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
10977 			   part_str);
10978 	else
10979 		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
10980 			   hw->mac.type, hw->phy.type, part_str);
10981 
10982 	e_dev_info("%pM\n", netdev->dev_addr);
10983 
10984 	/* reset the hardware with the new settings */
10985 	err = hw->mac.ops.start_hw(hw);
10986 	if (err == IXGBE_ERR_EEPROM_VERSION) {
10987 		/* We are running on a pre-production device, log a warning */
10988 		e_dev_warn("This device is a pre-production adapter/LOM. "
10989 			   "Please be aware there may be issues associated "
10990 			   "with your hardware.  If you are experiencing "
10991 			   "problems please contact your Intel or hardware "
10992 			   "representative who provided you with this "
10993 			   "hardware.\n");
10994 	}
10995 	strcpy(netdev->name, "eth%d");
10996 	pci_set_drvdata(pdev, adapter);
10997 	err = register_netdev(netdev);
10998 	if (err)
10999 		goto err_register;
11000 
11001 
11002 	/* power down the optics for 82599 SFP+ fiber */
11003 	if (hw->mac.ops.disable_tx_laser)
11004 		hw->mac.ops.disable_tx_laser(hw);
11005 
11006 	/* carrier off reporting is important to ethtool even BEFORE open */
11007 	netif_carrier_off(netdev);
11008 
11009 #ifdef CONFIG_IXGBE_DCA
11010 	if (dca_add_requester(&pdev->dev) == 0) {
11011 		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11012 		ixgbe_setup_dca(adapter);
11013 	}
11014 #endif
11015 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11016 		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11017 		for (i = 0; i < adapter->num_vfs; i++)
11018 			ixgbe_vf_configuration(pdev, (i | 0x10000000));
11019 	}
11020 
11021 	/* firmware requires driver version to be 0xFFFFFFFF
11022 	 * since os does not support feature
11023 	 */
11024 	if (hw->mac.ops.set_fw_drv_ver)
11025 		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11026 					   sizeof(UTS_RELEASE) - 1,
11027 					   UTS_RELEASE);
11028 
11029 	/* add san mac addr to netdev */
11030 	ixgbe_add_sanmac_netdev(netdev);
11031 
11032 	e_dev_info("%s\n", ixgbe_default_device_descr);
11033 
11034 #ifdef CONFIG_IXGBE_HWMON
11035 	if (ixgbe_sysfs_init(adapter))
11036 		e_err(probe, "failed to allocate sysfs resources\n");
11037 #endif /* CONFIG_IXGBE_HWMON */
11038 
11039 	ixgbe_dbg_adapter_init(adapter);
11040 
11041 	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
11042 	if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11043 		hw->mac.ops.setup_link(hw,
11044 			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11045 			true);
11046 
11047 	err = ixgbe_mii_bus_init(hw);
11048 	if (err)
11049 		goto err_netdev;
11050 
11051 	return 0;
11052 
11053 err_netdev:
11054 	unregister_netdev(netdev);
11055 err_register:
11056 	ixgbe_release_hw_control(adapter);
11057 	ixgbe_clear_interrupt_scheme(adapter);
11058 err_sw_init:
11059 	ixgbe_disable_sriov(adapter);
11060 	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11061 	iounmap(adapter->io_addr);
11062 	kfree(adapter->jump_tables[0]);
11063 	kfree(adapter->mac_table);
11064 	kfree(adapter->rss_key);
11065 	bitmap_free(adapter->af_xdp_zc_qps);
11066 err_ioremap:
11067 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11068 	free_netdev(netdev);
11069 err_alloc_etherdev:
11070 	pci_release_mem_regions(pdev);
11071 err_pci_reg:
11072 err_dma:
11073 	if (!adapter || disable_dev)
11074 		pci_disable_device(pdev);
11075 	return err;
11076 }
11077 
11078 /**
11079  * ixgbe_remove - Device Removal Routine
11080  * @pdev: PCI device information struct
11081  *
11082  * ixgbe_remove is called by the PCI subsystem to alert the driver
11083  * that it should release a PCI device.  The could be caused by a
11084  * Hot-Plug event, or because the driver is going to be removed from
11085  * memory.
11086  **/
11087 static void ixgbe_remove(struct pci_dev *pdev)
11088 {
11089 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11090 	struct net_device *netdev;
11091 	bool disable_dev;
11092 	int i;
11093 
11094 	/* if !adapter then we already cleaned up in probe */
11095 	if (!adapter)
11096 		return;
11097 
11098 	netdev  = adapter->netdev;
11099 	ixgbe_dbg_adapter_exit(adapter);
11100 
11101 	set_bit(__IXGBE_REMOVING, &adapter->state);
11102 	cancel_work_sync(&adapter->service_task);
11103 
11104 	if (adapter->mii_bus)
11105 		mdiobus_unregister(adapter->mii_bus);
11106 
11107 #ifdef CONFIG_IXGBE_DCA
11108 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11109 		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11110 		dca_remove_requester(&pdev->dev);
11111 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11112 				IXGBE_DCA_CTRL_DCA_DISABLE);
11113 	}
11114 
11115 #endif
11116 #ifdef CONFIG_IXGBE_HWMON
11117 	ixgbe_sysfs_exit(adapter);
11118 #endif /* CONFIG_IXGBE_HWMON */
11119 
11120 	/* remove the added san mac */
11121 	ixgbe_del_sanmac_netdev(netdev);
11122 
11123 #ifdef CONFIG_PCI_IOV
11124 	ixgbe_disable_sriov(adapter);
11125 #endif
11126 	if (netdev->reg_state == NETREG_REGISTERED)
11127 		unregister_netdev(netdev);
11128 
11129 	ixgbe_stop_ipsec_offload(adapter);
11130 	ixgbe_clear_interrupt_scheme(adapter);
11131 
11132 	ixgbe_release_hw_control(adapter);
11133 
11134 #ifdef CONFIG_DCB
11135 	kfree(adapter->ixgbe_ieee_pfc);
11136 	kfree(adapter->ixgbe_ieee_ets);
11137 
11138 #endif
11139 	iounmap(adapter->io_addr);
11140 	pci_release_mem_regions(pdev);
11141 
11142 	e_dev_info("complete\n");
11143 
11144 	for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11145 		if (adapter->jump_tables[i]) {
11146 			kfree(adapter->jump_tables[i]->input);
11147 			kfree(adapter->jump_tables[i]->mask);
11148 		}
11149 		kfree(adapter->jump_tables[i]);
11150 	}
11151 
11152 	kfree(adapter->mac_table);
11153 	kfree(adapter->rss_key);
11154 	bitmap_free(adapter->af_xdp_zc_qps);
11155 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11156 	free_netdev(netdev);
11157 
11158 	pci_disable_pcie_error_reporting(pdev);
11159 
11160 	if (disable_dev)
11161 		pci_disable_device(pdev);
11162 }
11163 
11164 /**
11165  * ixgbe_io_error_detected - called when PCI error is detected
11166  * @pdev: Pointer to PCI device
11167  * @state: The current pci connection state
11168  *
11169  * This function is called after a PCI bus error affecting
11170  * this device has been detected.
11171  */
11172 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
11173 						pci_channel_state_t state)
11174 {
11175 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11176 	struct net_device *netdev = adapter->netdev;
11177 
11178 #ifdef CONFIG_PCI_IOV
11179 	struct ixgbe_hw *hw = &adapter->hw;
11180 	struct pci_dev *bdev, *vfdev;
11181 	u32 dw0, dw1, dw2, dw3;
11182 	int vf, pos;
11183 	u16 req_id, pf_func;
11184 
11185 	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
11186 	    adapter->num_vfs == 0)
11187 		goto skip_bad_vf_detection;
11188 
11189 	bdev = pdev->bus->self;
11190 	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11191 		bdev = bdev->bus->self;
11192 
11193 	if (!bdev)
11194 		goto skip_bad_vf_detection;
11195 
11196 	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11197 	if (!pos)
11198 		goto skip_bad_vf_detection;
11199 
11200 	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
11201 	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
11202 	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
11203 	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
11204 	if (ixgbe_removed(hw->hw_addr))
11205 		goto skip_bad_vf_detection;
11206 
11207 	req_id = dw1 >> 16;
11208 	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
11209 	if (!(req_id & 0x0080))
11210 		goto skip_bad_vf_detection;
11211 
11212 	pf_func = req_id & 0x01;
11213 	if ((pf_func & 1) == (pdev->devfn & 1)) {
11214 		unsigned int device_id;
11215 
11216 		vf = (req_id & 0x7F) >> 1;
11217 		e_dev_err("VF %d has caused a PCIe error\n", vf);
11218 		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
11219 				"%8.8x\tdw3: %8.8x\n",
11220 		dw0, dw1, dw2, dw3);
11221 		switch (adapter->hw.mac.type) {
11222 		case ixgbe_mac_82599EB:
11223 			device_id = IXGBE_82599_VF_DEVICE_ID;
11224 			break;
11225 		case ixgbe_mac_X540:
11226 			device_id = IXGBE_X540_VF_DEVICE_ID;
11227 			break;
11228 		case ixgbe_mac_X550:
11229 			device_id = IXGBE_DEV_ID_X550_VF;
11230 			break;
11231 		case ixgbe_mac_X550EM_x:
11232 			device_id = IXGBE_DEV_ID_X550EM_X_VF;
11233 			break;
11234 		case ixgbe_mac_x550em_a:
11235 			device_id = IXGBE_DEV_ID_X550EM_A_VF;
11236 			break;
11237 		default:
11238 			device_id = 0;
11239 			break;
11240 		}
11241 
11242 		/* Find the pci device of the offending VF */
11243 		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
11244 		while (vfdev) {
11245 			if (vfdev->devfn == (req_id & 0xFF))
11246 				break;
11247 			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
11248 					       device_id, vfdev);
11249 		}
11250 		/*
11251 		 * There's a slim chance the VF could have been hot plugged,
11252 		 * so if it is no longer present we don't need to issue the
11253 		 * VFLR.  Just clean up the AER in that case.
11254 		 */
11255 		if (vfdev) {
11256 			pcie_flr(vfdev);
11257 			/* Free device reference count */
11258 			pci_dev_put(vfdev);
11259 		}
11260 	}
11261 
11262 	/*
11263 	 * Even though the error may have occurred on the other port
11264 	 * we still need to increment the vf error reference count for
11265 	 * both ports because the I/O resume function will be called
11266 	 * for both of them.
11267 	 */
11268 	adapter->vferr_refcount++;
11269 
11270 	return PCI_ERS_RESULT_RECOVERED;
11271 
11272 skip_bad_vf_detection:
11273 #endif /* CONFIG_PCI_IOV */
11274 	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
11275 		return PCI_ERS_RESULT_DISCONNECT;
11276 
11277 	if (!netif_device_present(netdev))
11278 		return PCI_ERS_RESULT_DISCONNECT;
11279 
11280 	rtnl_lock();
11281 	netif_device_detach(netdev);
11282 
11283 	if (netif_running(netdev))
11284 		ixgbe_close_suspend(adapter);
11285 
11286 	if (state == pci_channel_io_perm_failure) {
11287 		rtnl_unlock();
11288 		return PCI_ERS_RESULT_DISCONNECT;
11289 	}
11290 
11291 	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
11292 		pci_disable_device(pdev);
11293 	rtnl_unlock();
11294 
11295 	/* Request a slot reset. */
11296 	return PCI_ERS_RESULT_NEED_RESET;
11297 }
11298 
11299 /**
11300  * ixgbe_io_slot_reset - called after the pci bus has been reset.
11301  * @pdev: Pointer to PCI device
11302  *
11303  * Restart the card from scratch, as if from a cold-boot.
11304  */
11305 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
11306 {
11307 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11308 	pci_ers_result_t result;
11309 
11310 	if (pci_enable_device_mem(pdev)) {
11311 		e_err(probe, "Cannot re-enable PCI device after reset.\n");
11312 		result = PCI_ERS_RESULT_DISCONNECT;
11313 	} else {
11314 		smp_mb__before_atomic();
11315 		clear_bit(__IXGBE_DISABLED, &adapter->state);
11316 		adapter->hw.hw_addr = adapter->io_addr;
11317 		pci_set_master(pdev);
11318 		pci_restore_state(pdev);
11319 		pci_save_state(pdev);
11320 
11321 		pci_wake_from_d3(pdev, false);
11322 
11323 		ixgbe_reset(adapter);
11324 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
11325 		result = PCI_ERS_RESULT_RECOVERED;
11326 	}
11327 
11328 	return result;
11329 }
11330 
11331 /**
11332  * ixgbe_io_resume - called when traffic can start flowing again.
11333  * @pdev: Pointer to PCI device
11334  *
11335  * This callback is called when the error recovery driver tells us that
11336  * its OK to resume normal operation.
11337  */
11338 static void ixgbe_io_resume(struct pci_dev *pdev)
11339 {
11340 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11341 	struct net_device *netdev = adapter->netdev;
11342 
11343 #ifdef CONFIG_PCI_IOV
11344 	if (adapter->vferr_refcount) {
11345 		e_info(drv, "Resuming after VF err\n");
11346 		adapter->vferr_refcount--;
11347 		return;
11348 	}
11349 
11350 #endif
11351 	rtnl_lock();
11352 	if (netif_running(netdev))
11353 		ixgbe_open(netdev);
11354 
11355 	netif_device_attach(netdev);
11356 	rtnl_unlock();
11357 }
11358 
11359 static const struct pci_error_handlers ixgbe_err_handler = {
11360 	.error_detected = ixgbe_io_error_detected,
11361 	.slot_reset = ixgbe_io_slot_reset,
11362 	.resume = ixgbe_io_resume,
11363 };
11364 
11365 static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
11366 
11367 static struct pci_driver ixgbe_driver = {
11368 	.name      = ixgbe_driver_name,
11369 	.id_table  = ixgbe_pci_tbl,
11370 	.probe     = ixgbe_probe,
11371 	.remove    = ixgbe_remove,
11372 	.driver.pm = &ixgbe_pm_ops,
11373 	.shutdown  = ixgbe_shutdown,
11374 	.sriov_configure = ixgbe_pci_sriov_configure,
11375 	.err_handler = &ixgbe_err_handler
11376 };
11377 
11378 /**
11379  * ixgbe_init_module - Driver Registration Routine
11380  *
11381  * ixgbe_init_module is the first routine called when the driver is
11382  * loaded. All it does is register with the PCI subsystem.
11383  **/
11384 static int __init ixgbe_init_module(void)
11385 {
11386 	int ret;
11387 	pr_info("%s\n", ixgbe_driver_string);
11388 	pr_info("%s\n", ixgbe_copyright);
11389 
11390 	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11391 	if (!ixgbe_wq) {
11392 		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11393 		return -ENOMEM;
11394 	}
11395 
11396 	ixgbe_dbg_init();
11397 
11398 	ret = pci_register_driver(&ixgbe_driver);
11399 	if (ret) {
11400 		destroy_workqueue(ixgbe_wq);
11401 		ixgbe_dbg_exit();
11402 		return ret;
11403 	}
11404 
11405 #ifdef CONFIG_IXGBE_DCA
11406 	dca_register_notify(&dca_notifier);
11407 #endif
11408 
11409 	return 0;
11410 }
11411 
11412 module_init(ixgbe_init_module);
11413 
11414 /**
11415  * ixgbe_exit_module - Driver Exit Cleanup Routine
11416  *
11417  * ixgbe_exit_module is called just before the driver is removed
11418  * from memory.
11419  **/
11420 static void __exit ixgbe_exit_module(void)
11421 {
11422 #ifdef CONFIG_IXGBE_DCA
11423 	dca_unregister_notify(&dca_notifier);
11424 #endif
11425 	pci_unregister_driver(&ixgbe_driver);
11426 
11427 	ixgbe_dbg_exit();
11428 	if (ixgbe_wq) {
11429 		destroy_workqueue(ixgbe_wq);
11430 		ixgbe_wq = NULL;
11431 	}
11432 }
11433 
11434 #ifdef CONFIG_IXGBE_DCA
11435 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11436 			    void *p)
11437 {
11438 	int ret_val;
11439 
11440 	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11441 					 __ixgbe_notify_dca);
11442 
11443 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11444 }
11445 
11446 #endif /* CONFIG_IXGBE_DCA */
11447 
11448 module_exit(ixgbe_exit_module);
11449 
11450 /* ixgbe_main.c */
11451