// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)
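
/* The IGC_XDP_* values are verdict bits OR'ed into a per-poll status
 * word while the XDP program runs on received frames: PASS is zero (no
 * special action), while CONSUMED, TX and REDIRECT are distinct bits so
 * one word can record several outcomes across a single NAPI budget.
 * Illustrative accumulation pattern (a sketch, not code from this file):
 *
 *	xdp_status |= IGC_XDP_TX;
 *	...
 *	if (xdp_status & (IGC_XDP_TX | IGC_XDP_REDIRECT))
 *		// flush the XDP Tx ring and/or xdp_do_flush() once per poll
 */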

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
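	/* Worked example with illustrative values (assuming IGC_PBA_34K
	 * selects a 34 KB Rx packet buffer, MAX_JUMBO_FRAME_SIZE is 9728
	 * bytes and max_frame_size is ~1522 for a 1500-byte MTU):
	 *
	 *	hwm = (34 << 10) - (1522 + 9728) = 34816 - 11250 = 23566
	 *	high_water = 23566 & ~0xF = 23552  (16-byte aligned)
	 *	low_water  = 23552 - 16   = 23536
	 */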

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_offload_apply(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}
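
/* Sketch of the map-side counterpart (the actual mapping happens in the
 * Tx path, e.g. igc_tx_map()): the dma_unmap_addr()/dma_unmap_len()
 * accessors above only work if the buffer was recorded at map time with
 * the matching setters:
 *
 *	dma = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	dma_unmap_addr_set(buf, dma, dma);
 *	dma_unmap_len_set(buf, len, len);
 *
 * Clearing len to 0 after unmapping marks the buffer as having nothing
 * left to unmap for the cleanup loops below.
 */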

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
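	/* Sizing example (assuming the usual 16-byte advanced Tx
	 * descriptor): a 256-entry ring needs 256 * 16 = 4096 bytes and
	 * is already 4K-aligned, while an 80-entry ring would need 1280
	 * bytes and ALIGN() pads it up to a full 4096-byte page.
	 */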

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}
}

static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
{
	struct igc_rx_buffer *bi;
	u16 i;

	for (i = 0; i < ring->count; i++) {
		bi = &ring->rx_buffer_info[i];
		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *ring)
{
	if (ring->xsk_pool)
		igc_clean_rx_ring_xsk_pool(ring);
	else
		igc_clean_rx_ring_page_shared(ring);

	clear_ring_uses_large_buffer(ring);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                                (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}
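
/* A ring only gets a zero-copy pool when both conditions hold: an XDP
 * program is attached (igc_xdp_is_enabled()) and userspace has bound an
 * AF_XDP socket to this queue, which is presumably what sets
 * IGC_RING_FLAG_AF_XDP_ZC from the driver's XDP setup path.
 * Illustrative userspace sequence (libbpf/libxdp, not part of this
 * file):
 *
 *	xsk_umem__create(&umem, bufs, size, &fq, &cq, NULL);
 *	xsk_socket__create(&xsk, "eth0", queue_id, umem, &rx, &tx, NULL);
 */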

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);
	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
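	/* SRRCTL encodes buffer sizes as shifted field values rather than
	 * byte counts. Illustrative math, assuming the usual 1 KB packet
	 * buffer granularity (IGC_SRRCTL_BSIZEPKT_SHIFT == 10): a
	 * 2048-byte buffer encodes as 2048 >> 10 = 2, a 3072-byte buffer
	 * as 3.
	 */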
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
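	/* The formula spreads the indirection table evenly across the
	 * active queues. Worked example, assuming IGC_RETA_SIZE is 128
	 * and rss_queues is 4: entries 0..31 map to queue
	 * (j * 4) / 128 = 0, entries 32..63 to queue 1, 64..95 to
	 * queue 2 and 96..127 to queue 3.
	 */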
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}
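	/* NETIF_F_RXALL is normally toggled from userspace through
	 * ethtool's "rx-all" feature flag, e.g. (illustrative):
	 *
	 *	ethtool -K eth0 rx-all on
	 */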

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
 *         assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));
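	/* The MAC address is stored little-endian across two registers.
	 * Worked example for addr = 00:1b:21:aa:bb:cc:
	 *
	 *	ral = 0xaa211b00  (bytes 0..3, byte 0 in bits 7:0)
	 *	rah = 0x0000ccbb  (bytes 4..5 in the low 16 bits)
	 */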

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 *  igc_write_mc_addr_list - write multicast addresses to MTA
 *  @netdev: network interface device structure
 *
 *  Writes multicast address list to the MTA hash table.
 *  Returns: -ENOMEM on failure
 *           0 on no addresses written
 *           X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8  *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	u32 launchtime;

	/* FIXME: when using ETF together with taprio, we may have a
	 * case where 'delta' is larger than the cycle_time; this may
	 * cause problems if we don't read the current value of
	 * IGC_BASET, as the value written into the launchtime
	 * descriptor field may be misinterpreted.
	 */
	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);

	return cpu_to_le32(launchtime);
}
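
/* Numeric sketch of the computation above (illustrative values): with
 * base_time = 0, cycle_time = 1000000 ns (1 ms) and txtime = 2500000 ns,
 * div_s64_rem() leaves launchtime = 2500000 % 1000000 = 500000, i.e.
 * the offset of the requested Tx time within the current cycle.
 */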

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    struct igc_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid Tx time available. Invalid times
	 * should have been handled by the upper layers.
	 */
	if (tx_ring->launchtime_enable) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
		ktime_t txtime = first->skb->tstamp;

		skb_txtime_consumed(first->skb);
		context_desc->launch_time = igc_tx_launchtime(adapter,
							      txtime);
	} else {
		context_desc->launch_time = 0;
	}
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: ensure the queue stop is visible to the Tx
	 * completion path before we re-check for free descriptors below.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}
11170507ef8aSSasha Neftin 
11180507ef8aSSasha Neftin static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
11190507ef8aSSasha Neftin {
11200507ef8aSSasha Neftin 	if (igc_desc_unused(tx_ring) >= size)
11210507ef8aSSasha Neftin 		return 0;
11220507ef8aSSasha Neftin 	return __igc_maybe_stop_tx(tx_ring, size);
11230507ef8aSSasha Neftin }
11240507ef8aSSasha Neftin 
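/* Illustrative sketch (not part of the driver): the stop/re-check/wake
 * sequence above implements the usual lockless Tx queue flow control
 * pattern, pairing with the completion path roughly as follows:
 *
 *	xmit path (producer)               cleanup path (consumer)
 *	------------------------------     ------------------------------
 *	netif_stop_subqueue()              frees descriptors
 *	smp_mb()                           barrier in netif wake helpers
 *	re-check igc_desc_unused()         sees stopped bit, wakes queue
 *
 * Without the barrier and the second igc_desc_unused() check, the cleanup
 * path could free descriptors and then test a not-yet-visible stopped bit,
 * leaving the queue stopped even though room is available.
 */
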
11252c344ae2SVinicius Costa Gomes #define IGC_SET_FLAG(_input, _flag, _result) \
11262c344ae2SVinicius Costa Gomes 	(((_flag) <= (_result)) ?				\
11272c344ae2SVinicius Costa Gomes 	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
11282c344ae2SVinicius Costa Gomes 	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
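
/* Worked example (hypothetical flag values, for illustration only): with
 * _flag = BIT(1) and _result = BIT(5), (_result / _flag) == 16, so
 *
 *	IGC_SET_FLAG(0x2, BIT(1), BIT(5)) == 0x2 * 16 == 0x20
 *	IGC_SET_FLAG(0x0, BIT(1), BIT(5)) == 0
 *
 * i.e. the macro translates one flag into another without branching; when
 * _result is smaller than _flag, the second arm divides instead of
 * multiplying. igc_tx_olinfo_status() below open-codes the same idiom for
 * the POPTS checksum bits.
 */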
11292c344ae2SVinicius Costa Gomes 
11308d744963SMuhammad Husaini Zulkifli static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
11310507ef8aSSasha Neftin {
11320507ef8aSSasha Neftin 	/* set type for advanced descriptor with frame checksum insertion */
11330507ef8aSSasha Neftin 	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
11340507ef8aSSasha Neftin 		       IGC_ADVTXD_DCMD_DEXT |
11350507ef8aSSasha Neftin 		       IGC_ADVTXD_DCMD_IFCS;
11360507ef8aSSasha Neftin 
11378d744963SMuhammad Husaini Zulkifli 	/* set HW vlan bit if vlan is present */
11388d744963SMuhammad Husaini Zulkifli 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
11398d744963SMuhammad Husaini Zulkifli 				 IGC_ADVTXD_DCMD_VLE);
11408d744963SMuhammad Husaini Zulkifli 
1141f38b782dSSasha Neftin 	/* set segmentation bits for TSO */
1142f38b782dSSasha Neftin 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1143f38b782dSSasha Neftin 				 (IGC_ADVTXD_DCMD_TSE));
1144f38b782dSSasha Neftin 
11452c344ae2SVinicius Costa Gomes 	/* set timestamp bit if present */
11462c344ae2SVinicius Costa Gomes 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
11472c344ae2SVinicius Costa Gomes 				 (IGC_ADVTXD_MAC_TSTAMP));
11482c344ae2SVinicius Costa Gomes 
11498d744963SMuhammad Husaini Zulkifli 	/* clear frame checksum insertion if the skb requests no FCS */
11508d744963SMuhammad Husaini Zulkifli 	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
11518d744963SMuhammad Husaini Zulkifli 
11520507ef8aSSasha Neftin 	return cmd_type;
11530507ef8aSSasha Neftin }
11540507ef8aSSasha Neftin 
11550507ef8aSSasha Neftin static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
11560507ef8aSSasha Neftin 				 union igc_adv_tx_desc *tx_desc,
11570507ef8aSSasha Neftin 				 u32 tx_flags, unsigned int paylen)
11580507ef8aSSasha Neftin {
11590507ef8aSSasha Neftin 	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
11600507ef8aSSasha Neftin 
11610507ef8aSSasha Neftin 	/* insert L4 checksum */
11620507ef8aSSasha Neftin 	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
11630507ef8aSSasha Neftin 			  ((IGC_TXD_POPTS_TXSM << 8) /
11640507ef8aSSasha Neftin 			  IGC_TX_FLAGS_CSUM);
11650507ef8aSSasha Neftin 
11660507ef8aSSasha Neftin 	/* insert IPv4 checksum */
11670507ef8aSSasha Neftin 	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
11680507ef8aSSasha Neftin 			  (((IGC_TXD_POPTS_IXSM << 8)) /
11690507ef8aSSasha Neftin 			  IGC_TX_FLAGS_IPV4);
11700507ef8aSSasha Neftin 
11710507ef8aSSasha Neftin 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
11720507ef8aSSasha Neftin }
11730507ef8aSSasha Neftin 
11740507ef8aSSasha Neftin static int igc_tx_map(struct igc_ring *tx_ring,
11750507ef8aSSasha Neftin 		      struct igc_tx_buffer *first,
11760507ef8aSSasha Neftin 		      const u8 hdr_len)
11770507ef8aSSasha Neftin {
11780507ef8aSSasha Neftin 	struct sk_buff *skb = first->skb;
11790507ef8aSSasha Neftin 	struct igc_tx_buffer *tx_buffer;
11800507ef8aSSasha Neftin 	union igc_adv_tx_desc *tx_desc;
11810507ef8aSSasha Neftin 	u32 tx_flags = first->tx_flags;
1182d7840976SMatthew Wilcox (Oracle) 	skb_frag_t *frag;
11830507ef8aSSasha Neftin 	u16 i = tx_ring->next_to_use;
11840507ef8aSSasha Neftin 	unsigned int data_len, size;
11850507ef8aSSasha Neftin 	dma_addr_t dma;
11868d744963SMuhammad Husaini Zulkifli 	u32 cmd_type;
11870507ef8aSSasha Neftin 
11888d744963SMuhammad Husaini Zulkifli 	cmd_type = igc_tx_cmd_type(skb, tx_flags);
11890507ef8aSSasha Neftin 	tx_desc = IGC_TX_DESC(tx_ring, i);
11900507ef8aSSasha Neftin 
11910507ef8aSSasha Neftin 	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
11920507ef8aSSasha Neftin 
11930507ef8aSSasha Neftin 	size = skb_headlen(skb);
11940507ef8aSSasha Neftin 	data_len = skb->data_len;
11950507ef8aSSasha Neftin 
11960507ef8aSSasha Neftin 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
11970507ef8aSSasha Neftin 
11980507ef8aSSasha Neftin 	tx_buffer = first;
11990507ef8aSSasha Neftin 
12000507ef8aSSasha Neftin 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
12010507ef8aSSasha Neftin 		if (dma_mapping_error(tx_ring->dev, dma))
12020507ef8aSSasha Neftin 			goto dma_error;
12030507ef8aSSasha Neftin 
12040507ef8aSSasha Neftin 		/* record length and DMA address */
12050507ef8aSSasha Neftin 		dma_unmap_len_set(tx_buffer, len, size);
12060507ef8aSSasha Neftin 		dma_unmap_addr_set(tx_buffer, dma, dma);
12070507ef8aSSasha Neftin 
12080507ef8aSSasha Neftin 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
12090507ef8aSSasha Neftin 
12100507ef8aSSasha Neftin 		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
12110507ef8aSSasha Neftin 			tx_desc->read.cmd_type_len =
12120507ef8aSSasha Neftin 				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
12130507ef8aSSasha Neftin 
12140507ef8aSSasha Neftin 			i++;
12150507ef8aSSasha Neftin 			tx_desc++;
12160507ef8aSSasha Neftin 			if (i == tx_ring->count) {
12170507ef8aSSasha Neftin 				tx_desc = IGC_TX_DESC(tx_ring, 0);
12180507ef8aSSasha Neftin 				i = 0;
12190507ef8aSSasha Neftin 			}
12200507ef8aSSasha Neftin 			tx_desc->read.olinfo_status = 0;
12210507ef8aSSasha Neftin 
12220507ef8aSSasha Neftin 			dma += IGC_MAX_DATA_PER_TXD;
12230507ef8aSSasha Neftin 			size -= IGC_MAX_DATA_PER_TXD;
12240507ef8aSSasha Neftin 
12250507ef8aSSasha Neftin 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
12260507ef8aSSasha Neftin 		}
12270507ef8aSSasha Neftin 
12280507ef8aSSasha Neftin 		if (likely(!data_len))
12290507ef8aSSasha Neftin 			break;
12300507ef8aSSasha Neftin 
12310507ef8aSSasha Neftin 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
12320507ef8aSSasha Neftin 
12330507ef8aSSasha Neftin 		i++;
12340507ef8aSSasha Neftin 		tx_desc++;
12350507ef8aSSasha Neftin 		if (i == tx_ring->count) {
12360507ef8aSSasha Neftin 			tx_desc = IGC_TX_DESC(tx_ring, 0);
12370507ef8aSSasha Neftin 			i = 0;
12380507ef8aSSasha Neftin 		}
12390507ef8aSSasha Neftin 		tx_desc->read.olinfo_status = 0;
12400507ef8aSSasha Neftin 
12410507ef8aSSasha Neftin 		size = skb_frag_size(frag);
12420507ef8aSSasha Neftin 		data_len -= size;
12430507ef8aSSasha Neftin 
12440507ef8aSSasha Neftin 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
12450507ef8aSSasha Neftin 				       size, DMA_TO_DEVICE);
12460507ef8aSSasha Neftin 
12470507ef8aSSasha Neftin 		tx_buffer = &tx_ring->tx_buffer_info[i];
12480507ef8aSSasha Neftin 	}
12490507ef8aSSasha Neftin 
12500507ef8aSSasha Neftin 	/* write last descriptor with RS and EOP bits */
12510507ef8aSSasha Neftin 	cmd_type |= size | IGC_TXD_DCMD;
12520507ef8aSSasha Neftin 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
12530507ef8aSSasha Neftin 
12540507ef8aSSasha Neftin 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
12550507ef8aSSasha Neftin 
12560507ef8aSSasha Neftin 	/* set the timestamp */
12570507ef8aSSasha Neftin 	first->time_stamp = jiffies;
12580507ef8aSSasha Neftin 
1259a9e51058SJacob Keller 	skb_tx_timestamp(skb);
1260a9e51058SJacob Keller 
12610507ef8aSSasha Neftin 	/* Force memory writes to complete before letting h/w know there
12620507ef8aSSasha Neftin 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
12630507ef8aSSasha Neftin 	 * memory model archs, such as IA-64).
12640507ef8aSSasha Neftin 	 *
12650507ef8aSSasha Neftin 	 * We also need this memory barrier to make certain all of the
12660507ef8aSSasha Neftin 	 * status bits have been updated before next_to_watch is written.
12670507ef8aSSasha Neftin 	 */
12680507ef8aSSasha Neftin 	wmb();
12690507ef8aSSasha Neftin 
12700507ef8aSSasha Neftin 	/* set next_to_watch value indicating a packet is present */
12710507ef8aSSasha Neftin 	first->next_to_watch = tx_desc;
12720507ef8aSSasha Neftin 
12730507ef8aSSasha Neftin 	i++;
12740507ef8aSSasha Neftin 	if (i == tx_ring->count)
12750507ef8aSSasha Neftin 		i = 0;
12760507ef8aSSasha Neftin 
12770507ef8aSSasha Neftin 	tx_ring->next_to_use = i;
12780507ef8aSSasha Neftin 
12790507ef8aSSasha Neftin 	/* Make sure there is space in the ring for the next send. */
12800507ef8aSSasha Neftin 	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
12810507ef8aSSasha Neftin 
12826b16f9eeSFlorian Westphal 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
12830507ef8aSSasha Neftin 		writel(i, tx_ring->tail);
12850507ef8aSSasha Neftin 
12860507ef8aSSasha Neftin 	return 0;
12870507ef8aSSasha Neftin dma_error:
128825f06effSAndre Guedes 	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
12890507ef8aSSasha Neftin 	tx_buffer = &tx_ring->tx_buffer_info[i];
12900507ef8aSSasha Neftin 
12910507ef8aSSasha Neftin 	/* clear dma mappings for failed tx_buffer_info map */
12920507ef8aSSasha Neftin 	while (tx_buffer != first) {
12930507ef8aSSasha Neftin 		if (dma_unmap_len(tx_buffer, len))
129461234295SAndre Guedes 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
12950507ef8aSSasha Neftin 
12960507ef8aSSasha Neftin 		if (i-- == 0)
12970507ef8aSSasha Neftin 			i += tx_ring->count;
12980507ef8aSSasha Neftin 		tx_buffer = &tx_ring->tx_buffer_info[i];
12990507ef8aSSasha Neftin 	}
13000507ef8aSSasha Neftin 
13010507ef8aSSasha Neftin 	if (dma_unmap_len(tx_buffer, len))
130261234295SAndre Guedes 		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
13030507ef8aSSasha Neftin 
13040507ef8aSSasha Neftin 	dev_kfree_skb_any(tx_buffer->skb);
13050507ef8aSSasha Neftin 	tx_buffer->skb = NULL;
13060507ef8aSSasha Neftin 
13070507ef8aSSasha Neftin 	tx_ring->next_to_use = i;
13080507ef8aSSasha Neftin 
13090507ef8aSSasha Neftin 	return -1;
13100507ef8aSSasha Neftin }
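
/* Sizing illustration (not upstream code): assuming IGC_MAX_DATA_PER_TXD
 * is 32KB as in related Intel drivers, a 40960-byte linear skb head is
 * split by the inner while loop in igc_tx_map() into a 32768-byte
 * descriptor followed by an 8192-byte descriptor, both pointing into the
 * same DMA mapping at increasing offsets; only the frame's last descriptor
 * gets the RS and EOP bits via IGC_TXD_DCMD.
 */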
13110507ef8aSSasha Neftin 
1312f38b782dSSasha Neftin static int igc_tso(struct igc_ring *tx_ring,
1313f38b782dSSasha Neftin 		   struct igc_tx_buffer *first,
1314f38b782dSSasha Neftin 		   u8 *hdr_len)
1315f38b782dSSasha Neftin {
1316f38b782dSSasha Neftin 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1317f38b782dSSasha Neftin 	struct sk_buff *skb = first->skb;
1318f38b782dSSasha Neftin 	union {
1319f38b782dSSasha Neftin 		struct iphdr *v4;
1320f38b782dSSasha Neftin 		struct ipv6hdr *v6;
1321f38b782dSSasha Neftin 		unsigned char *hdr;
1322f38b782dSSasha Neftin 	} ip;
1323f38b782dSSasha Neftin 	union {
1324f38b782dSSasha Neftin 		struct tcphdr *tcp;
1325f38b782dSSasha Neftin 		struct udphdr *udp;
1326f38b782dSSasha Neftin 		unsigned char *hdr;
1327f38b782dSSasha Neftin 	} l4;
1328f38b782dSSasha Neftin 	u32 paylen, l4_offset;
1329f38b782dSSasha Neftin 	int err;
1330f38b782dSSasha Neftin 
1331f38b782dSSasha Neftin 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1332f38b782dSSasha Neftin 		return 0;
1333f38b782dSSasha Neftin 
1334f38b782dSSasha Neftin 	if (!skb_is_gso(skb))
1335f38b782dSSasha Neftin 		return 0;
1336f38b782dSSasha Neftin 
1337f38b782dSSasha Neftin 	err = skb_cow_head(skb, 0);
1338f38b782dSSasha Neftin 	if (err < 0)
1339f38b782dSSasha Neftin 		return err;
1340f38b782dSSasha Neftin 
1341f38b782dSSasha Neftin 	ip.hdr = skb_network_header(skb);
1342f38b782dSSasha Neftin 	l4.hdr = skb_checksum_start(skb);
1343f38b782dSSasha Neftin 
1344f38b782dSSasha Neftin 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1345f38b782dSSasha Neftin 	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1346f38b782dSSasha Neftin 
1347f38b782dSSasha Neftin 	/* initialize outer IP header fields */
1348f38b782dSSasha Neftin 	if (ip.v4->version == 4) {
1349f38b782dSSasha Neftin 		unsigned char *csum_start = skb_checksum_start(skb);
1350f38b782dSSasha Neftin 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1351f38b782dSSasha Neftin 
1352f38b782dSSasha Neftin 		/* IP header will have to cancel out any data that
1353f38b782dSSasha Neftin 		 * is not a part of the outer IP header
1354f38b782dSSasha Neftin 		 */
1355f38b782dSSasha Neftin 		ip.v4->check = csum_fold(csum_partial(trans_start,
1356f38b782dSSasha Neftin 						      csum_start - trans_start,
1357f38b782dSSasha Neftin 						      0));
1358f38b782dSSasha Neftin 		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1359f38b782dSSasha Neftin 
1360f38b782dSSasha Neftin 		ip.v4->tot_len = 0;
1361f38b782dSSasha Neftin 		first->tx_flags |= IGC_TX_FLAGS_TSO |
1362f38b782dSSasha Neftin 				   IGC_TX_FLAGS_CSUM |
1363f38b782dSSasha Neftin 				   IGC_TX_FLAGS_IPV4;
1364f38b782dSSasha Neftin 	} else {
1365f38b782dSSasha Neftin 		ip.v6->payload_len = 0;
1366f38b782dSSasha Neftin 		first->tx_flags |= IGC_TX_FLAGS_TSO |
1367f38b782dSSasha Neftin 				   IGC_TX_FLAGS_CSUM;
1368f38b782dSSasha Neftin 	}
1369f38b782dSSasha Neftin 
1370f38b782dSSasha Neftin 	/* determine offset of inner transport header */
1371f38b782dSSasha Neftin 	l4_offset = l4.hdr - skb->data;
1372f38b782dSSasha Neftin 
1373f38b782dSSasha Neftin 	/* remove payload length from inner checksum */
1374f38b782dSSasha Neftin 	paylen = skb->len - l4_offset;
1375f38b782dSSasha Neftin 	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1376f38b782dSSasha Neftin 		/* compute length of segmentation header */
1377f38b782dSSasha Neftin 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
1378f38b782dSSasha Neftin 		csum_replace_by_diff(&l4.tcp->check,
1379f38b782dSSasha Neftin 				     (__force __wsum)htonl(paylen));
1380f38b782dSSasha Neftin 	} else {
1381f38b782dSSasha Neftin 		/* compute length of segmentation header */
1382f38b782dSSasha Neftin 		*hdr_len = sizeof(*l4.udp) + l4_offset;
1383f38b782dSSasha Neftin 		csum_replace_by_diff(&l4.udp->check,
1384f38b782dSSasha Neftin 				     (__force __wsum)htonl(paylen));
1385f38b782dSSasha Neftin 	}
1386f38b782dSSasha Neftin 
1387f38b782dSSasha Neftin 	/* update gso size and bytecount with header size */
1388f38b782dSSasha Neftin 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1389f38b782dSSasha Neftin 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
1390f38b782dSSasha Neftin 
1391f38b782dSSasha Neftin 	/* MSS L4LEN IDX */
1392f38b782dSSasha Neftin 	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1393f38b782dSSasha Neftin 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1394f38b782dSSasha Neftin 
1395f38b782dSSasha Neftin 	/* VLAN MACLEN IPLEN */
1396f38b782dSSasha Neftin 	vlan_macip_lens = l4.hdr - ip.hdr;
1397f38b782dSSasha Neftin 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1398f38b782dSSasha Neftin 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1399f38b782dSSasha Neftin 
1400f38b782dSSasha Neftin 	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
1401f38b782dSSasha Neftin 			type_tucmd, mss_l4len_idx);
1402f38b782dSSasha Neftin 
1403f38b782dSSasha Neftin 	return 1;
1404f38b782dSSasha Neftin }
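
/* Worked example (shift positions assumed to match the other igb-family
 * drivers: L4LEN at bit 8, MSS at bit 16): for a TCP header without
 * options, *hdr_len - l4_offset == 20, so with gso_size == 1448
 *
 *	mss_l4len_idx == (20 << 8) | (1448 << 16)
 *
 * The payload length is folded out of the pseudo-header checksum with
 * csum_replace_by_diff() because the hardware re-adds the per-segment
 * length when it computes each segment's L4 checksum.
 */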
1405f38b782dSSasha Neftin 
14060507ef8aSSasha Neftin static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
14070507ef8aSSasha Neftin 				       struct igc_ring *tx_ring)
14080507ef8aSSasha Neftin {
14090507ef8aSSasha Neftin 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
14100507ef8aSSasha Neftin 	__be16 protocol = vlan_get_protocol(skb);
14110507ef8aSSasha Neftin 	struct igc_tx_buffer *first;
14120507ef8aSSasha Neftin 	u32 tx_flags = 0;
14130507ef8aSSasha Neftin 	unsigned short f;
14140507ef8aSSasha Neftin 	u8 hdr_len = 0;
1415f38b782dSSasha Neftin 	int tso = 0;
14160507ef8aSSasha Neftin 
14170507ef8aSSasha Neftin 	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
14180507ef8aSSasha Neftin 	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
14190507ef8aSSasha Neftin 	 *	+ 2 desc gap to keep tail from touching head,
14200507ef8aSSasha Neftin 	 *	+ 1 desc for context descriptor,
14210507ef8aSSasha Neftin 	 * otherwise try next time
14220507ef8aSSasha Neftin 	 */
14230507ef8aSSasha Neftin 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1424d7840976SMatthew Wilcox (Oracle) 		count += TXD_USE_COUNT(skb_frag_size(
1425d7840976SMatthew Wilcox (Oracle) 						&skb_shinfo(skb)->frags[f]));
14260507ef8aSSasha Neftin 
14270507ef8aSSasha Neftin 	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
14280507ef8aSSasha Neftin 		/* this is a hard error */
14290507ef8aSSasha Neftin 		return NETDEV_TX_BUSY;
14300507ef8aSSasha Neftin 	}
14310507ef8aSSasha Neftin 
14320507ef8aSSasha Neftin 	/* record the location of the first descriptor for this packet */
14330507ef8aSSasha Neftin 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1434859b4dfaSAndre Guedes 	first->type = IGC_TX_BUFFER_TYPE_SKB;
14350507ef8aSSasha Neftin 	first->skb = skb;
14360507ef8aSSasha Neftin 	first->bytecount = skb->len;
14370507ef8aSSasha Neftin 	first->gso_segs = 1;
14380507ef8aSSasha Neftin 
14392c344ae2SVinicius Costa Gomes 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
14402c344ae2SVinicius Costa Gomes 		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
14412c344ae2SVinicius Costa Gomes 
14422c344ae2SVinicius Costa Gomes 		/* FIXME: add support for retrieving timestamps from
14432c344ae2SVinicius Costa Gomes 		 * the other timer registers before skipping the
14442c344ae2SVinicius Costa Gomes 		 * timestamping request.
14452c344ae2SVinicius Costa Gomes 		 */
14462c344ae2SVinicius Costa Gomes 		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
14472c344ae2SVinicius Costa Gomes 		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
14482c344ae2SVinicius Costa Gomes 					   &adapter->state)) {
14492c344ae2SVinicius Costa Gomes 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
14502c344ae2SVinicius Costa Gomes 			tx_flags |= IGC_TX_FLAGS_TSTAMP;
14512c344ae2SVinicius Costa Gomes 
14522c344ae2SVinicius Costa Gomes 			adapter->ptp_tx_skb = skb_get(skb);
14532c344ae2SVinicius Costa Gomes 			adapter->ptp_tx_start = jiffies;
14542c344ae2SVinicius Costa Gomes 		} else {
14552c344ae2SVinicius Costa Gomes 			adapter->tx_hwtstamp_skipped++;
14562c344ae2SVinicius Costa Gomes 		}
14572c344ae2SVinicius Costa Gomes 	}
14582c344ae2SVinicius Costa Gomes 
14598d744963SMuhammad Husaini Zulkifli 	if (skb_vlan_tag_present(skb)) {
14608d744963SMuhammad Husaini Zulkifli 		tx_flags |= IGC_TX_FLAGS_VLAN;
14618d744963SMuhammad Husaini Zulkifli 		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
14628d744963SMuhammad Husaini Zulkifli 	}
14638d744963SMuhammad Husaini Zulkifli 
14640507ef8aSSasha Neftin 	/* record initial flags and protocol */
14650507ef8aSSasha Neftin 	first->tx_flags = tx_flags;
14660507ef8aSSasha Neftin 	first->protocol = protocol;
14670507ef8aSSasha Neftin 
1468f38b782dSSasha Neftin 	tso = igc_tso(tx_ring, first, &hdr_len);
1469f38b782dSSasha Neftin 	if (tso < 0)
1470f38b782dSSasha Neftin 		goto out_drop;
1471f38b782dSSasha Neftin 	else if (!tso)
14720507ef8aSSasha Neftin 		igc_tx_csum(tx_ring, first);
14730507ef8aSSasha Neftin 
14740507ef8aSSasha Neftin 	igc_tx_map(tx_ring, first, hdr_len);
14750507ef8aSSasha Neftin 
1476c9a11c23SSasha Neftin 	return NETDEV_TX_OK;
1477f38b782dSSasha Neftin 
1478f38b782dSSasha Neftin out_drop:
1479f38b782dSSasha Neftin 	dev_kfree_skb_any(first->skb);
1480f38b782dSSasha Neftin 	first->skb = NULL;
1481f38b782dSSasha Neftin 
1482f38b782dSSasha Neftin 	return NETDEV_TX_OK;
1483c9a11c23SSasha Neftin }
1484c9a11c23SSasha Neftin 
14850507ef8aSSasha Neftin static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
14860507ef8aSSasha Neftin 						    struct sk_buff *skb)
148713b5b7fdSSasha Neftin {
14880507ef8aSSasha Neftin 	unsigned int r_idx = skb->queue_mapping;
14890507ef8aSSasha Neftin 
14900507ef8aSSasha Neftin 	if (r_idx >= adapter->num_tx_queues)
14910507ef8aSSasha Neftin 		r_idx = r_idx % adapter->num_tx_queues;
14920507ef8aSSasha Neftin 
14930507ef8aSSasha Neftin 	return adapter->tx_ring[r_idx];
149413b5b7fdSSasha Neftin }
149513b5b7fdSSasha Neftin 
14960507ef8aSSasha Neftin static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
14970507ef8aSSasha Neftin 				  struct net_device *netdev)
149813b5b7fdSSasha Neftin {
14990507ef8aSSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
150013b5b7fdSSasha Neftin 
15010507ef8aSSasha Neftin 	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
15020507ef8aSSasha Neftin 	 * in order to meet this minimum size requirement.
150313b5b7fdSSasha Neftin 	 */
15040507ef8aSSasha Neftin 	if (skb->len < 17) {
15050507ef8aSSasha Neftin 		if (skb_padto(skb, 17))
15060507ef8aSSasha Neftin 			return NETDEV_TX_OK;
15070507ef8aSSasha Neftin 		skb->len = 17;
15080507ef8aSSasha Neftin 	}
150913b5b7fdSSasha Neftin 
15100507ef8aSSasha Neftin 	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
15110507ef8aSSasha Neftin }
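
/* Example: a 10-byte skb is padded to 17 bytes here because skb_padto()
 * zeroes the tail without growing skb->len, hence the explicit assignment
 * above; with TCTL.PSP (pad short packets) set, the MAC is then expected
 * to pad the frame on the wire up to the Ethernet minimum.
 */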
15120507ef8aSSasha Neftin 
15133bdd7086SSasha Neftin static void igc_rx_checksum(struct igc_ring *ring,
15143bdd7086SSasha Neftin 			    union igc_adv_rx_desc *rx_desc,
15153bdd7086SSasha Neftin 			    struct sk_buff *skb)
15163bdd7086SSasha Neftin {
15173bdd7086SSasha Neftin 	skb_checksum_none_assert(skb);
15183bdd7086SSasha Neftin 
15193bdd7086SSasha Neftin 	/* Ignore Checksum bit is set */
15203bdd7086SSasha Neftin 	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
15213bdd7086SSasha Neftin 		return;
15223bdd7086SSasha Neftin 
15233bdd7086SSasha Neftin 	/* Rx checksum disabled via ethtool */
15243bdd7086SSasha Neftin 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
15253bdd7086SSasha Neftin 		return;
15263bdd7086SSasha Neftin 
15273bdd7086SSasha Neftin 	/* TCP/UDP checksum error bit is set */
15283bdd7086SSasha Neftin 	if (igc_test_staterr(rx_desc,
1529ef8a17a2SAndre Guedes 			     IGC_RXDEXT_STATERR_L4E |
15303bdd7086SSasha Neftin 			     IGC_RXDEXT_STATERR_IPE)) {
15313bdd7086SSasha Neftin 		/* work around errata with sctp packets where the TCPE aka
15323bdd7086SSasha Neftin 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
15333bdd7086SSasha Neftin 		 * packets (aka let the stack check the crc32c)
15343bdd7086SSasha Neftin 		 */
15353bdd7086SSasha Neftin 		if (!(skb->len == 60 &&
15363bdd7086SSasha Neftin 		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
15373bdd7086SSasha Neftin 			u64_stats_update_begin(&ring->rx_syncp);
15383bdd7086SSasha Neftin 			ring->rx_stats.csum_err++;
15393bdd7086SSasha Neftin 			u64_stats_update_end(&ring->rx_syncp);
15403bdd7086SSasha Neftin 		}
15413bdd7086SSasha Neftin 		/* let the stack verify checksum errors */
15423bdd7086SSasha Neftin 		return;
15433bdd7086SSasha Neftin 	}
15443bdd7086SSasha Neftin 	/* It must be a TCP or UDP packet with a valid checksum */
15453bdd7086SSasha Neftin 	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
15463bdd7086SSasha Neftin 				      IGC_RXD_STAT_UDPCS))
15473bdd7086SSasha Neftin 		skb->ip_summed = CHECKSUM_UNNECESSARY;
15483bdd7086SSasha Neftin 
154925f06effSAndre Guedes 	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
15503bdd7086SSasha Neftin 		   le32_to_cpu(rx_desc->wb.upper.status_error));
15513bdd7086SSasha Neftin }
15523bdd7086SSasha Neftin 
15530507ef8aSSasha Neftin static inline void igc_rx_hash(struct igc_ring *ring,
15540507ef8aSSasha Neftin 			       union igc_adv_rx_desc *rx_desc,
15550507ef8aSSasha Neftin 			       struct sk_buff *skb)
15560507ef8aSSasha Neftin {
15570507ef8aSSasha Neftin 	if (ring->netdev->features & NETIF_F_RXHASH)
15580507ef8aSSasha Neftin 		skb_set_hash(skb,
15590507ef8aSSasha Neftin 			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
15600507ef8aSSasha Neftin 			     PKT_HASH_TYPE_L3);
15610507ef8aSSasha Neftin }
15620507ef8aSSasha Neftin 
15638d744963SMuhammad Husaini Zulkifli static void igc_rx_vlan(struct igc_ring *rx_ring,
15648d744963SMuhammad Husaini Zulkifli 			union igc_adv_rx_desc *rx_desc,
15658d744963SMuhammad Husaini Zulkifli 			struct sk_buff *skb)
15668d744963SMuhammad Husaini Zulkifli {
15678d744963SMuhammad Husaini Zulkifli 	struct net_device *dev = rx_ring->netdev;
15688d744963SMuhammad Husaini Zulkifli 	u16 vid;
15698d744963SMuhammad Husaini Zulkifli 
15708d744963SMuhammad Husaini Zulkifli 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
15718d744963SMuhammad Husaini Zulkifli 	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
15728d744963SMuhammad Husaini Zulkifli 		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
15738d744963SMuhammad Husaini Zulkifli 		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
15748d744963SMuhammad Husaini Zulkifli 			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
15758d744963SMuhammad Husaini Zulkifli 		else
15768d744963SMuhammad Husaini Zulkifli 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
15778d744963SMuhammad Husaini Zulkifli 
15788d744963SMuhammad Husaini Zulkifli 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
15798d744963SMuhammad Husaini Zulkifli 	}
15808d744963SMuhammad Husaini Zulkifli }
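
/* Byte-swap illustration: on loopback packets the hardware is reported to
 * write the VLAN field in network (big-endian) order rather than the usual
 * little-endian order, so a tag of 0x0123 would read back as 0x2301; rings
 * flagged with IGC_RING_FLAG_RX_LB_VLAN_BSWAP therefore use be16_to_cpu()
 * to recover the correct VLAN ID.
 */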
15818d744963SMuhammad Husaini Zulkifli 
15820507ef8aSSasha Neftin /**
15830507ef8aSSasha Neftin  * igc_process_skb_fields - Populate skb header fields from Rx descriptor
15840507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring packet is being transacted on
15850507ef8aSSasha Neftin  * @rx_desc: pointer to the EOP Rx descriptor
15860507ef8aSSasha Neftin  * @skb: pointer to current skb being populated
15870507ef8aSSasha Neftin  *
15883a66abe9SAndre Guedes  * This function checks the ring, descriptor, and packet information in order
15893a66abe9SAndre Guedes  * to populate the hash, checksum, VLAN, protocol, and other fields within the
15903a66abe9SAndre Guedes  * skb.
15910507ef8aSSasha Neftin  */
15920507ef8aSSasha Neftin static void igc_process_skb_fields(struct igc_ring *rx_ring,
15930507ef8aSSasha Neftin 				   union igc_adv_rx_desc *rx_desc,
15940507ef8aSSasha Neftin 				   struct sk_buff *skb)
15950507ef8aSSasha Neftin {
15960507ef8aSSasha Neftin 	igc_rx_hash(rx_ring, rx_desc, skb);
15970507ef8aSSasha Neftin 
15983bdd7086SSasha Neftin 	igc_rx_checksum(rx_ring, rx_desc, skb);
15993bdd7086SSasha Neftin 
16008d744963SMuhammad Husaini Zulkifli 	igc_rx_vlan(rx_ring, rx_desc, skb);
16018d744963SMuhammad Husaini Zulkifli 
16020507ef8aSSasha Neftin 	skb_record_rx_queue(skb, rx_ring->queue_index);
16030507ef8aSSasha Neftin 
16040507ef8aSSasha Neftin 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
16050507ef8aSSasha Neftin }
16060507ef8aSSasha Neftin 
16078d744963SMuhammad Husaini Zulkifli static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
16088d744963SMuhammad Husaini Zulkifli {
16098d744963SMuhammad Husaini Zulkifli 	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
16108d744963SMuhammad Husaini Zulkifli 	struct igc_adapter *adapter = netdev_priv(netdev);
16118d744963SMuhammad Husaini Zulkifli 	struct igc_hw *hw = &adapter->hw;
16128d744963SMuhammad Husaini Zulkifli 	u32 ctrl;
16138d744963SMuhammad Husaini Zulkifli 
16148d744963SMuhammad Husaini Zulkifli 	ctrl = rd32(IGC_CTRL);
16158d744963SMuhammad Husaini Zulkifli 
16168d744963SMuhammad Husaini Zulkifli 	if (enable) {
16178d744963SMuhammad Husaini Zulkifli 		/* enable VLAN tag insert/strip */
16188d744963SMuhammad Husaini Zulkifli 		ctrl |= IGC_CTRL_VME;
16198d744963SMuhammad Husaini Zulkifli 	} else {
16208d744963SMuhammad Husaini Zulkifli 		/* disable VLAN tag insert/strip */
16218d744963SMuhammad Husaini Zulkifli 		ctrl &= ~IGC_CTRL_VME;
16228d744963SMuhammad Husaini Zulkifli 	}
16238d744963SMuhammad Husaini Zulkifli 	wr32(IGC_CTRL, ctrl);
16248d744963SMuhammad Husaini Zulkifli }
16258d744963SMuhammad Husaini Zulkifli 
16268d744963SMuhammad Husaini Zulkifli static void igc_restore_vlan(struct igc_adapter *adapter)
16278d744963SMuhammad Husaini Zulkifli {
16288d744963SMuhammad Husaini Zulkifli 	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
16298d744963SMuhammad Husaini Zulkifli }
16308d744963SMuhammad Husaini Zulkifli 
16310507ef8aSSasha Neftin static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
16324ff32036SAndre Guedes 					       const unsigned int size,
16334ff32036SAndre Guedes 					       int *rx_buffer_pgcnt)
16340507ef8aSSasha Neftin {
16350507ef8aSSasha Neftin 	struct igc_rx_buffer *rx_buffer;
16360507ef8aSSasha Neftin 
16370507ef8aSSasha Neftin 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
16384ff32036SAndre Guedes 	*rx_buffer_pgcnt =
16394ff32036SAndre Guedes #if (PAGE_SIZE < 8192)
16404ff32036SAndre Guedes 		page_count(rx_buffer->page);
16414ff32036SAndre Guedes #else
16424ff32036SAndre Guedes 		0;
16434ff32036SAndre Guedes #endif
16440507ef8aSSasha Neftin 	prefetchw(rx_buffer->page);
16450507ef8aSSasha Neftin 
16460507ef8aSSasha Neftin 	/* we are reusing so sync this buffer for CPU use */
16470507ef8aSSasha Neftin 	dma_sync_single_range_for_cpu(rx_ring->dev,
16480507ef8aSSasha Neftin 				      rx_buffer->dma,
16490507ef8aSSasha Neftin 				      rx_buffer->page_offset,
16500507ef8aSSasha Neftin 				      size,
16510507ef8aSSasha Neftin 				      DMA_FROM_DEVICE);
16520507ef8aSSasha Neftin 
16530507ef8aSSasha Neftin 	rx_buffer->pagecnt_bias--;
16540507ef8aSSasha Neftin 
16550507ef8aSSasha Neftin 	return rx_buffer;
16560507ef8aSSasha Neftin }
16570507ef8aSSasha Neftin 
1658613cf199SAndre Guedes static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
1659613cf199SAndre Guedes 			       unsigned int truesize)
1660613cf199SAndre Guedes {
1661613cf199SAndre Guedes #if (PAGE_SIZE < 8192)
1662613cf199SAndre Guedes 	buffer->page_offset ^= truesize;
1663613cf199SAndre Guedes #else
1664613cf199SAndre Guedes 	buffer->page_offset += truesize;
1665613cf199SAndre Guedes #endif
1666613cf199SAndre Guedes }
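
/* Worked example: with 4K pages the Rx buffer is split in half and
 * truesize is 2048, so the XOR above ping-pongs page_offset between the
 * two halves (0 ^ 2048 == 2048, 2048 ^ 2048 == 0), letting the CPU own one
 * half while the NIC DMAs into the other; with larger pages the offset
 * simply advances by truesize.
 */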
1667613cf199SAndre Guedes 
1668a39f5e53SAndre Guedes static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1669a39f5e53SAndre Guedes 					      unsigned int size)
1670a39f5e53SAndre Guedes {
1671a39f5e53SAndre Guedes 	unsigned int truesize;
1672a39f5e53SAndre Guedes 
1673a39f5e53SAndre Guedes #if (PAGE_SIZE < 8192)
1674a39f5e53SAndre Guedes 	truesize = igc_rx_pg_size(ring) / 2;
1675a39f5e53SAndre Guedes #else
1676a39f5e53SAndre Guedes 	truesize = ring_uses_build_skb(ring) ?
1677a39f5e53SAndre Guedes 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1678a39f5e53SAndre Guedes 		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1679a39f5e53SAndre Guedes 		   SKB_DATA_ALIGN(size);
1680a39f5e53SAndre Guedes #endif
1681a39f5e53SAndre Guedes 	return truesize;
1682a39f5e53SAndre Guedes }
1683a39f5e53SAndre Guedes 
16840507ef8aSSasha Neftin /**
16850507ef8aSSasha Neftin  * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
16860507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring to transact packets on
16870507ef8aSSasha Neftin  * @rx_buffer: buffer containing page to add
16880507ef8aSSasha Neftin  * @skb: sk_buff to place the data into
16890507ef8aSSasha Neftin  * @size: size of buffer to be added
16900507ef8aSSasha Neftin  *
16910507ef8aSSasha Neftin  * This function will add the data contained in rx_buffer->page to the skb.
16920507ef8aSSasha Neftin  */
16930507ef8aSSasha Neftin static void igc_add_rx_frag(struct igc_ring *rx_ring,
16940507ef8aSSasha Neftin 			    struct igc_rx_buffer *rx_buffer,
16950507ef8aSSasha Neftin 			    struct sk_buff *skb,
16960507ef8aSSasha Neftin 			    unsigned int size)
16970507ef8aSSasha Neftin {
1698613cf199SAndre Guedes 	unsigned int truesize;
16990507ef8aSSasha Neftin 
1700613cf199SAndre Guedes #if (PAGE_SIZE < 8192)
1701613cf199SAndre Guedes 	truesize = igc_rx_pg_size(rx_ring) / 2;
17020507ef8aSSasha Neftin #else
1703613cf199SAndre Guedes 	truesize = ring_uses_build_skb(rx_ring) ?
17040507ef8aSSasha Neftin 		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
17050507ef8aSSasha Neftin 		   SKB_DATA_ALIGN(size);
1706613cf199SAndre Guedes #endif
17070507ef8aSSasha Neftin 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
17080507ef8aSSasha Neftin 			rx_buffer->page_offset, size, truesize);
1709613cf199SAndre Guedes 
1710613cf199SAndre Guedes 	igc_rx_buffer_flip(rx_buffer, truesize);
17110507ef8aSSasha Neftin }
17120507ef8aSSasha Neftin 
17130507ef8aSSasha Neftin static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
17140507ef8aSSasha Neftin 				     struct igc_rx_buffer *rx_buffer,
17150507ef8aSSasha Neftin 				     union igc_adv_rx_desc *rx_desc,
17160507ef8aSSasha Neftin 				     unsigned int size)
17170507ef8aSSasha Neftin {
17180507ef8aSSasha Neftin 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1719a39f5e53SAndre Guedes 	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
17200507ef8aSSasha Neftin 	struct sk_buff *skb;
17210507ef8aSSasha Neftin 
17220507ef8aSSasha Neftin 	/* prefetch first cache line of first page */
1723f468f21bSTariq Toukan 	net_prefetch(va);
17240507ef8aSSasha Neftin 
17250507ef8aSSasha Neftin 	/* build an skb around the page buffer */
17260507ef8aSSasha Neftin 	skb = build_skb(va - IGC_SKB_PAD, truesize);
17270507ef8aSSasha Neftin 	if (unlikely(!skb))
17280507ef8aSSasha Neftin 		return NULL;
17290507ef8aSSasha Neftin 
17300507ef8aSSasha Neftin 	/* update pointers within the skb to store the data */
17310507ef8aSSasha Neftin 	skb_reserve(skb, IGC_SKB_PAD);
17320507ef8aSSasha Neftin 	__skb_put(skb, size);
17330507ef8aSSasha Neftin 
1734613cf199SAndre Guedes 	igc_rx_buffer_flip(rx_buffer, truesize);
17350507ef8aSSasha Neftin 	return skb;
17360507ef8aSSasha Neftin }
17370507ef8aSSasha Neftin 
17380507ef8aSSasha Neftin static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
17390507ef8aSSasha Neftin 					 struct igc_rx_buffer *rx_buffer,
174026575105SAndre Guedes 					 struct xdp_buff *xdp,
1741e1ed4f92SAndre Guedes 					 ktime_t timestamp)
17420507ef8aSSasha Neftin {
174326575105SAndre Guedes 	unsigned int size = xdp->data_end - xdp->data;
1744a39f5e53SAndre Guedes 	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
174526575105SAndre Guedes 	void *va = xdp->data;
17460507ef8aSSasha Neftin 	unsigned int headlen;
17470507ef8aSSasha Neftin 	struct sk_buff *skb;
17480507ef8aSSasha Neftin 
17490507ef8aSSasha Neftin 	/* prefetch first cache line of first page */
1750f468f21bSTariq Toukan 	net_prefetch(va);
17510507ef8aSSasha Neftin 
17520507ef8aSSasha Neftin 	/* allocate a skb to store the frags */
17530507ef8aSSasha Neftin 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
17540507ef8aSSasha Neftin 	if (unlikely(!skb))
17550507ef8aSSasha Neftin 		return NULL;
17560507ef8aSSasha Neftin 
1757e1ed4f92SAndre Guedes 	if (timestamp)
1758e1ed4f92SAndre Guedes 		skb_hwtstamps(skb)->hwtstamp = timestamp;
175981b05520SVinicius Costa Gomes 
17600507ef8aSSasha Neftin 	/* Determine available headroom for copy */
17610507ef8aSSasha Neftin 	headlen = size;
17620507ef8aSSasha Neftin 	if (headlen > IGC_RX_HDR_LEN)
1763c43f1255SStanislav Fomichev 		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
17640507ef8aSSasha Neftin 
17650507ef8aSSasha Neftin 	/* align pull length to size of long to optimize memcpy performance */
17660507ef8aSSasha Neftin 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
17670507ef8aSSasha Neftin 
17680507ef8aSSasha Neftin 	/* update all of the pointers */
17690507ef8aSSasha Neftin 	size -= headlen;
17700507ef8aSSasha Neftin 	if (size) {
17710507ef8aSSasha Neftin 		skb_add_rx_frag(skb, 0, rx_buffer->page,
17720507ef8aSSasha Neftin 				(va + headlen) - page_address(rx_buffer->page),
17730507ef8aSSasha Neftin 				size, truesize);
1774613cf199SAndre Guedes 		igc_rx_buffer_flip(rx_buffer, truesize);
17750507ef8aSSasha Neftin 	} else {
17760507ef8aSSasha Neftin 		rx_buffer->pagecnt_bias++;
17770507ef8aSSasha Neftin 	}
17780507ef8aSSasha Neftin 
17790507ef8aSSasha Neftin 	return skb;
17800507ef8aSSasha Neftin }
17810507ef8aSSasha Neftin 
17820507ef8aSSasha Neftin /**
17830507ef8aSSasha Neftin  * igc_reuse_rx_page - page flip buffer and store it back on the ring
17840507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring to store buffers on
17850507ef8aSSasha Neftin  * @old_buff: donor buffer to have page reused
17860507ef8aSSasha Neftin  *
17870507ef8aSSasha Neftin  * Synchronizes page for reuse by the adapter
17880507ef8aSSasha Neftin  */
17890507ef8aSSasha Neftin static void igc_reuse_rx_page(struct igc_ring *rx_ring,
17900507ef8aSSasha Neftin 			      struct igc_rx_buffer *old_buff)
17910507ef8aSSasha Neftin {
17920507ef8aSSasha Neftin 	u16 nta = rx_ring->next_to_alloc;
17930507ef8aSSasha Neftin 	struct igc_rx_buffer *new_buff;
17940507ef8aSSasha Neftin 
17950507ef8aSSasha Neftin 	new_buff = &rx_ring->rx_buffer_info[nta];
17960507ef8aSSasha Neftin 
17970507ef8aSSasha Neftin 	/* update, and store next to alloc */
17980507ef8aSSasha Neftin 	nta++;
17990507ef8aSSasha Neftin 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
18000507ef8aSSasha Neftin 
18010507ef8aSSasha Neftin 	/* Transfer page from old buffer to new buffer.
18020507ef8aSSasha Neftin 	 * Move each member individually to avoid possible store
18030507ef8aSSasha Neftin 	 * forwarding stalls.
18040507ef8aSSasha Neftin 	 */
18050507ef8aSSasha Neftin 	new_buff->dma		= old_buff->dma;
18060507ef8aSSasha Neftin 	new_buff->page		= old_buff->page;
18070507ef8aSSasha Neftin 	new_buff->page_offset	= old_buff->page_offset;
18080507ef8aSSasha Neftin 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
18090507ef8aSSasha Neftin }
18100507ef8aSSasha Neftin 
18114ff32036SAndre Guedes static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
18124ff32036SAndre Guedes 				  int rx_buffer_pgcnt)
18130507ef8aSSasha Neftin {
18140507ef8aSSasha Neftin 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
18150507ef8aSSasha Neftin 	struct page *page = rx_buffer->page;
18160507ef8aSSasha Neftin 
1817a79afa78SAlexander Lobakin 	/* avoid re-using remote and pfmemalloc pages */
1818a79afa78SAlexander Lobakin 	if (!dev_page_is_reusable(page))
18190507ef8aSSasha Neftin 		return false;
18200507ef8aSSasha Neftin 
18210507ef8aSSasha Neftin #if (PAGE_SIZE < 8192)
18220507ef8aSSasha Neftin 	/* if we are only owner of page we can reuse it */
18234ff32036SAndre Guedes 	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
18240507ef8aSSasha Neftin 		return false;
18250507ef8aSSasha Neftin #else
18260507ef8aSSasha Neftin #define IGC_LAST_OFFSET \
18270507ef8aSSasha Neftin 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
18280507ef8aSSasha Neftin 
18290507ef8aSSasha Neftin 	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
18300507ef8aSSasha Neftin 		return false;
18310507ef8aSSasha Neftin #endif
18320507ef8aSSasha Neftin 
18330507ef8aSSasha Neftin 	/* If we have drained the page fragment pool we need to update
18340507ef8aSSasha Neftin 	 * the pagecnt_bias and page count so that we fully restock the
18350507ef8aSSasha Neftin 	 * number of references the driver holds.
18360507ef8aSSasha Neftin 	 */
18374ff32036SAndre Guedes 	if (unlikely(pagecnt_bias == 1)) {
18384ff32036SAndre Guedes 		page_ref_add(page, USHRT_MAX - 1);
18390507ef8aSSasha Neftin 		rx_buffer->pagecnt_bias = USHRT_MAX;
18400507ef8aSSasha Neftin 	}
18410507ef8aSSasha Neftin 
18420507ef8aSSasha Neftin 	return true;
18430507ef8aSSasha Neftin }
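
/* Refcount bookkeeping sketch (illustration): igc_alloc_mapped_page()
 * seeds page_count with USHRT_MAX references and sets pagecnt_bias to
 * match, so "page_count - pagecnt_bias" counts buffers still outstanding
 * to the stack: the bias drops when igc_get_rx_buffer() hands a half-page
 * out, and page_count drops when the stack frees the skb. One outstanding
 * user is fine, since the driver flips to the other half of the page, but
 * a difference greater than 1 means both halves are busy and the page
 * cannot be reused yet.
 */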
18440507ef8aSSasha Neftin 
18450507ef8aSSasha Neftin /**
18460507ef8aSSasha Neftin  * igc_is_non_eop - process handling of non-EOP buffers
18470507ef8aSSasha Neftin  * @rx_ring: Rx ring being processed
18480507ef8aSSasha Neftin  * @rx_desc: Rx descriptor for current buffer
18490507ef8aSSasha Neftin  *
18500507ef8aSSasha Neftin  * This function updates next to clean.  If the buffer is an EOP buffer
18510507ef8aSSasha Neftin  * this function exits returning false, otherwise it will place the
18520507ef8aSSasha Neftin  * sk_buff in the next buffer to be chained and return true indicating
18530507ef8aSSasha Neftin  * that this is in fact a non-EOP buffer.
18540507ef8aSSasha Neftin  */
18550507ef8aSSasha Neftin static bool igc_is_non_eop(struct igc_ring *rx_ring,
18560507ef8aSSasha Neftin 			   union igc_adv_rx_desc *rx_desc)
18570507ef8aSSasha Neftin {
18580507ef8aSSasha Neftin 	u32 ntc = rx_ring->next_to_clean + 1;
18590507ef8aSSasha Neftin 
18600507ef8aSSasha Neftin 	/* fetch, update, and store next to clean */
18610507ef8aSSasha Neftin 	ntc = (ntc < rx_ring->count) ? ntc : 0;
18620507ef8aSSasha Neftin 	rx_ring->next_to_clean = ntc;
18630507ef8aSSasha Neftin 
18640507ef8aSSasha Neftin 	prefetch(IGC_RX_DESC(rx_ring, ntc));
18650507ef8aSSasha Neftin 
18660507ef8aSSasha Neftin 	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
18670507ef8aSSasha Neftin 		return false;
18680507ef8aSSasha Neftin 
18690507ef8aSSasha Neftin 	return true;
18700507ef8aSSasha Neftin }
18710507ef8aSSasha Neftin 
18720507ef8aSSasha Neftin /**
18730507ef8aSSasha Neftin  * igc_cleanup_headers - Correct corrupted or empty headers
18740507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring packet is being transacted on
18750507ef8aSSasha Neftin  * @rx_desc: pointer to the EOP Rx descriptor
18760507ef8aSSasha Neftin  * @skb: pointer to current skb being fixed
18770507ef8aSSasha Neftin  *
18780507ef8aSSasha Neftin  * Address the case where we are pulling data in on pages only
18790507ef8aSSasha Neftin  * and as such no data is present in the skb header.
18800507ef8aSSasha Neftin  *
18810507ef8aSSasha Neftin  * In addition if skb is not at least 60 bytes we need to pad it so that
18820507ef8aSSasha Neftin  * it is large enough to qualify as a valid Ethernet frame.
18830507ef8aSSasha Neftin  *
18840507ef8aSSasha Neftin  * Returns true if an error was encountered and skb was freed.
18850507ef8aSSasha Neftin  */
18860507ef8aSSasha Neftin static bool igc_cleanup_headers(struct igc_ring *rx_ring,
18870507ef8aSSasha Neftin 				union igc_adv_rx_desc *rx_desc,
18880507ef8aSSasha Neftin 				struct sk_buff *skb)
18890507ef8aSSasha Neftin {
189026575105SAndre Guedes 	/* XDP packets use error pointer so abort at this point */
189126575105SAndre Guedes 	if (IS_ERR(skb))
189226575105SAndre Guedes 		return true;
189326575105SAndre Guedes 
1894ef8a17a2SAndre Guedes 	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
18950507ef8aSSasha Neftin 		struct net_device *netdev = rx_ring->netdev;
18960507ef8aSSasha Neftin 
18970507ef8aSSasha Neftin 		if (!(netdev->features & NETIF_F_RXALL)) {
18980507ef8aSSasha Neftin 			dev_kfree_skb_any(skb);
18990507ef8aSSasha Neftin 			return true;
19000507ef8aSSasha Neftin 		}
19010507ef8aSSasha Neftin 	}
19020507ef8aSSasha Neftin 
19030507ef8aSSasha Neftin 	/* if eth_skb_pad returns an error the skb was freed */
19040507ef8aSSasha Neftin 	if (eth_skb_pad(skb))
19050507ef8aSSasha Neftin 		return true;
19060507ef8aSSasha Neftin 
190713b5b7fdSSasha Neftin 	return false;
190813b5b7fdSSasha Neftin }
190913b5b7fdSSasha Neftin 
19100507ef8aSSasha Neftin static void igc_put_rx_buffer(struct igc_ring *rx_ring,
19114ff32036SAndre Guedes 			      struct igc_rx_buffer *rx_buffer,
19124ff32036SAndre Guedes 			      int rx_buffer_pgcnt)
19130507ef8aSSasha Neftin {
19144ff32036SAndre Guedes 	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
19150507ef8aSSasha Neftin 		/* hand second half of page back to the ring */
19160507ef8aSSasha Neftin 		igc_reuse_rx_page(rx_ring, rx_buffer);
19170507ef8aSSasha Neftin 	} else {
19180507ef8aSSasha Neftin 		/* We are not reusing the buffer so unmap it and free
19190507ef8aSSasha Neftin 		 * any references we are holding to it
19200507ef8aSSasha Neftin 		 */
19210507ef8aSSasha Neftin 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
19220507ef8aSSasha Neftin 				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
19230507ef8aSSasha Neftin 				     IGC_RX_DMA_ATTR);
19240507ef8aSSasha Neftin 		__page_frag_cache_drain(rx_buffer->page,
19250507ef8aSSasha Neftin 					rx_buffer->pagecnt_bias);
19260507ef8aSSasha Neftin 	}
192713b5b7fdSSasha Neftin 
19280507ef8aSSasha Neftin 	/* clear contents of rx_buffer */
19290507ef8aSSasha Neftin 	rx_buffer->page = NULL;
193013b5b7fdSSasha Neftin }
193113b5b7fdSSasha Neftin 
1932aac8f68cSSasha Neftin static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
1933aac8f68cSSasha Neftin {
193426575105SAndre Guedes 	struct igc_adapter *adapter = rx_ring->q_vector->adapter;
193526575105SAndre Guedes 
193626575105SAndre Guedes 	if (ring_uses_build_skb(rx_ring))
193726575105SAndre Guedes 		return IGC_SKB_PAD;
193826575105SAndre Guedes 	if (igc_xdp_is_enabled(adapter))
193926575105SAndre Guedes 		return XDP_PACKET_HEADROOM;
194026575105SAndre Guedes 
194126575105SAndre Guedes 	return 0;
1942aac8f68cSSasha Neftin }
1943aac8f68cSSasha Neftin 
1944aac8f68cSSasha Neftin static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
1945aac8f68cSSasha Neftin 				  struct igc_rx_buffer *bi)
1946aac8f68cSSasha Neftin {
1947aac8f68cSSasha Neftin 	struct page *page = bi->page;
1948aac8f68cSSasha Neftin 	dma_addr_t dma;
1949aac8f68cSSasha Neftin 
1950aac8f68cSSasha Neftin 	/* since we are recycling buffers we should seldom need to alloc */
1951aac8f68cSSasha Neftin 	if (likely(page))
1952aac8f68cSSasha Neftin 		return true;
1953aac8f68cSSasha Neftin 
1954aac8f68cSSasha Neftin 	/* alloc new page for storage */
1955aac8f68cSSasha Neftin 	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
1956aac8f68cSSasha Neftin 	if (unlikely(!page)) {
1957aac8f68cSSasha Neftin 		rx_ring->rx_stats.alloc_failed++;
1958aac8f68cSSasha Neftin 		return false;
1959aac8f68cSSasha Neftin 	}
1960aac8f68cSSasha Neftin 
1961aac8f68cSSasha Neftin 	/* map page for use */
1962aac8f68cSSasha Neftin 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1963aac8f68cSSasha Neftin 				 igc_rx_pg_size(rx_ring),
1964aac8f68cSSasha Neftin 				 DMA_FROM_DEVICE,
1965aac8f68cSSasha Neftin 				 IGC_RX_DMA_ATTR);
1966aac8f68cSSasha Neftin 
1967aac8f68cSSasha Neftin 	/* if mapping failed free memory back to system since
1968aac8f68cSSasha Neftin 	 * there isn't much point in holding memory we can't use
1969aac8f68cSSasha Neftin 	 */
1970aac8f68cSSasha Neftin 	if (dma_mapping_error(rx_ring->dev, dma)) {
1971aac8f68cSSasha Neftin 		__free_page(page);
1972aac8f68cSSasha Neftin 
1973aac8f68cSSasha Neftin 		rx_ring->rx_stats.alloc_failed++;
1974aac8f68cSSasha Neftin 		return false;
1975aac8f68cSSasha Neftin 	}
1976aac8f68cSSasha Neftin 
1977aac8f68cSSasha Neftin 	bi->dma = dma;
1978aac8f68cSSasha Neftin 	bi->page = page;
1979aac8f68cSSasha Neftin 	bi->page_offset = igc_rx_offset(rx_ring);
19804ff32036SAndre Guedes 	page_ref_add(page, USHRT_MAX - 1);
19814ff32036SAndre Guedes 	bi->pagecnt_bias = USHRT_MAX;
1982aac8f68cSSasha Neftin 
1983aac8f68cSSasha Neftin 	return true;
1984aac8f68cSSasha Neftin }
1985aac8f68cSSasha Neftin 
198613b5b7fdSSasha Neftin /**
198713b5b7fdSSasha Neftin  * igc_alloc_rx_buffers - Replace used receive buffers; packet split
1988085c8589SSasha Neftin  * @rx_ring: rx descriptor ring
1989085c8589SSasha Neftin  * @cleaned_count: number of buffers to clean
199013b5b7fdSSasha Neftin  */
199113b5b7fdSSasha Neftin static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
199213b5b7fdSSasha Neftin {
199313b5b7fdSSasha Neftin 	union igc_adv_rx_desc *rx_desc;
199413b5b7fdSSasha Neftin 	u16 i = rx_ring->next_to_use;
199513b5b7fdSSasha Neftin 	struct igc_rx_buffer *bi;
199613b5b7fdSSasha Neftin 	u16 bufsz;
199713b5b7fdSSasha Neftin 
199813b5b7fdSSasha Neftin 	/* nothing to do */
199913b5b7fdSSasha Neftin 	if (!cleaned_count)
200013b5b7fdSSasha Neftin 		return;
200113b5b7fdSSasha Neftin 
200213b5b7fdSSasha Neftin 	rx_desc = IGC_RX_DESC(rx_ring, i);
200313b5b7fdSSasha Neftin 	bi = &rx_ring->rx_buffer_info[i];
200413b5b7fdSSasha Neftin 	i -= rx_ring->count;
200513b5b7fdSSasha Neftin 
200613b5b7fdSSasha Neftin 	bufsz = igc_rx_bufsz(rx_ring);
200713b5b7fdSSasha Neftin 
200813b5b7fdSSasha Neftin 	do {
200913b5b7fdSSasha Neftin 		if (!igc_alloc_mapped_page(rx_ring, bi))
201013b5b7fdSSasha Neftin 			break;
201113b5b7fdSSasha Neftin 
201213b5b7fdSSasha Neftin 		/* sync the buffer for use by the device */
201313b5b7fdSSasha Neftin 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
201413b5b7fdSSasha Neftin 						 bi->page_offset, bufsz,
201513b5b7fdSSasha Neftin 						 DMA_FROM_DEVICE);
201613b5b7fdSSasha Neftin 
201713b5b7fdSSasha Neftin 		/* Refresh the desc even if buffer_addrs didn't change
201813b5b7fdSSasha Neftin 		 * because each write-back erases this info.
201913b5b7fdSSasha Neftin 		 */
202013b5b7fdSSasha Neftin 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
202113b5b7fdSSasha Neftin 
202213b5b7fdSSasha Neftin 		rx_desc++;
202313b5b7fdSSasha Neftin 		bi++;
202413b5b7fdSSasha Neftin 		i++;
202513b5b7fdSSasha Neftin 		if (unlikely(!i)) {
202613b5b7fdSSasha Neftin 			rx_desc = IGC_RX_DESC(rx_ring, 0);
202713b5b7fdSSasha Neftin 			bi = rx_ring->rx_buffer_info;
202813b5b7fdSSasha Neftin 			i -= rx_ring->count;
202913b5b7fdSSasha Neftin 		}
203013b5b7fdSSasha Neftin 
203113b5b7fdSSasha Neftin 		/* clear the length for the next_to_use descriptor */
203213b5b7fdSSasha Neftin 		rx_desc->wb.upper.length = 0;
203313b5b7fdSSasha Neftin 
203413b5b7fdSSasha Neftin 		cleaned_count--;
203513b5b7fdSSasha Neftin 	} while (cleaned_count);
203613b5b7fdSSasha Neftin 
203713b5b7fdSSasha Neftin 	i += rx_ring->count;
203813b5b7fdSSasha Neftin 
203913b5b7fdSSasha Neftin 	if (rx_ring->next_to_use != i) {
204013b5b7fdSSasha Neftin 		/* record the next descriptor to use */
204113b5b7fdSSasha Neftin 		rx_ring->next_to_use = i;
204213b5b7fdSSasha Neftin 
204313b5b7fdSSasha Neftin 		/* update next to alloc since we have filled the ring */
204413b5b7fdSSasha Neftin 		rx_ring->next_to_alloc = i;
204513b5b7fdSSasha Neftin 
204613b5b7fdSSasha Neftin 		/* Force memory writes to complete before letting h/w
204713b5b7fdSSasha Neftin 		 * know there are new descriptors to fetch.  (Only
204813b5b7fdSSasha Neftin 		 * applicable for weak-ordered memory model archs,
204913b5b7fdSSasha Neftin 		 * such as IA-64).
205013b5b7fdSSasha Neftin 		 */
205113b5b7fdSSasha Neftin 		wmb();
205213b5b7fdSSasha Neftin 		writel(i, rx_ring->tail);
205313b5b7fdSSasha Neftin 	}
205413b5b7fdSSasha Neftin }
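
/* Index idiom illustration: i is biased by -rx_ring->count so the loop
 * runs it from (next_to_use - count) up toward zero and "unlikely(!i)"
 * detects the wrap without a modulo. For example, with count == 256 and
 * next_to_use == 254, i walks -2, -1, then hits 0 and is rewound to -256;
 * the final "i += rx_ring->count" recovers the real ring index.
 */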
205513b5b7fdSSasha Neftin 
2056fc9df2a0SAndre Guedes static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2057fc9df2a0SAndre Guedes {
2058fc9df2a0SAndre Guedes 	union igc_adv_rx_desc *desc;
2059fc9df2a0SAndre Guedes 	u16 i = ring->next_to_use;
2060fc9df2a0SAndre Guedes 	struct igc_rx_buffer *bi;
2061fc9df2a0SAndre Guedes 	dma_addr_t dma;
2062fc9df2a0SAndre Guedes 	bool ok = true;
2063fc9df2a0SAndre Guedes 
2064fc9df2a0SAndre Guedes 	if (!count)
2065fc9df2a0SAndre Guedes 		return ok;
2066fc9df2a0SAndre Guedes 
2067fc9df2a0SAndre Guedes 	desc = IGC_RX_DESC(ring, i);
2068fc9df2a0SAndre Guedes 	bi = &ring->rx_buffer_info[i];
2069fc9df2a0SAndre Guedes 	i -= ring->count;
2070fc9df2a0SAndre Guedes 
2071fc9df2a0SAndre Guedes 	do {
2072fc9df2a0SAndre Guedes 		bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2073fc9df2a0SAndre Guedes 		if (!bi->xdp) {
2074fc9df2a0SAndre Guedes 			ok = false;
2075fc9df2a0SAndre Guedes 			break;
2076fc9df2a0SAndre Guedes 		}
2077fc9df2a0SAndre Guedes 
2078fc9df2a0SAndre Guedes 		dma = xsk_buff_xdp_get_dma(bi->xdp);
2079fc9df2a0SAndre Guedes 		desc->read.pkt_addr = cpu_to_le64(dma);
2080fc9df2a0SAndre Guedes 
2081fc9df2a0SAndre Guedes 		desc++;
2082fc9df2a0SAndre Guedes 		bi++;
2083fc9df2a0SAndre Guedes 		i++;
2084fc9df2a0SAndre Guedes 		if (unlikely(!i)) {
2085fc9df2a0SAndre Guedes 			desc = IGC_RX_DESC(ring, 0);
2086fc9df2a0SAndre Guedes 			bi = ring->rx_buffer_info;
2087fc9df2a0SAndre Guedes 			i -= ring->count;
2088fc9df2a0SAndre Guedes 		}
2089fc9df2a0SAndre Guedes 
2090fc9df2a0SAndre Guedes 		/* Clear the length for the next_to_use descriptor. */
2091fc9df2a0SAndre Guedes 		desc->wb.upper.length = 0;
2092fc9df2a0SAndre Guedes 
2093fc9df2a0SAndre Guedes 		count--;
2094fc9df2a0SAndre Guedes 	} while (count);
2095fc9df2a0SAndre Guedes 
2096fc9df2a0SAndre Guedes 	i += ring->count;
2097fc9df2a0SAndre Guedes 
2098fc9df2a0SAndre Guedes 	if (ring->next_to_use != i) {
2099fc9df2a0SAndre Guedes 		ring->next_to_use = i;
2100fc9df2a0SAndre Guedes 
2101fc9df2a0SAndre Guedes 		/* Force memory writes to complete before letting h/w
2102fc9df2a0SAndre Guedes 		 * know there are new descriptors to fetch.  (Only
2103fc9df2a0SAndre Guedes 		 * applicable for weak-ordered memory model archs,
2104fc9df2a0SAndre Guedes 		 * such as IA-64).
2105fc9df2a0SAndre Guedes 		 */
2106fc9df2a0SAndre Guedes 		wmb();
2107fc9df2a0SAndre Guedes 		writel(i, ring->tail);
2108fc9df2a0SAndre Guedes 	}
2109fc9df2a0SAndre Guedes 
2110fc9df2a0SAndre Guedes 	return ok;
2111fc9df2a0SAndre Guedes }
2112fc9df2a0SAndre Guedes 
211373f1071cSAndre Guedes static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
211473f1071cSAndre Guedes 				  struct xdp_frame *xdpf,
211573f1071cSAndre Guedes 				  struct igc_ring *ring)
211673f1071cSAndre Guedes {
211773f1071cSAndre Guedes 	dma_addr_t dma;
211873f1071cSAndre Guedes 
211973f1071cSAndre Guedes 	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
212073f1071cSAndre Guedes 	if (dma_mapping_error(ring->dev, dma)) {
212173f1071cSAndre Guedes 		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
212273f1071cSAndre Guedes 		return -ENOMEM;
212373f1071cSAndre Guedes 	}
212473f1071cSAndre Guedes 
2125859b4dfaSAndre Guedes 	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
212673f1071cSAndre Guedes 	buffer->xdpf = xdpf;
212773f1071cSAndre Guedes 	buffer->protocol = 0;
212873f1071cSAndre Guedes 	buffer->bytecount = xdpf->len;
212973f1071cSAndre Guedes 	buffer->gso_segs = 1;
213073f1071cSAndre Guedes 	buffer->time_stamp = jiffies;
213173f1071cSAndre Guedes 	dma_unmap_len_set(buffer, len, xdpf->len);
213273f1071cSAndre Guedes 	dma_unmap_addr_set(buffer, dma, dma);
213373f1071cSAndre Guedes 	return 0;
213473f1071cSAndre Guedes }
213573f1071cSAndre Guedes 
213673f1071cSAndre Guedes /* This function requires that __netif_tx_lock be held by the caller. */
213773f1071cSAndre Guedes static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
213873f1071cSAndre Guedes 				      struct xdp_frame *xdpf)
213973f1071cSAndre Guedes {
214073f1071cSAndre Guedes 	struct igc_tx_buffer *buffer;
214173f1071cSAndre Guedes 	union igc_adv_tx_desc *desc;
214273f1071cSAndre Guedes 	u32 cmd_type, olinfo_status;
214373f1071cSAndre Guedes 	int err;
214473f1071cSAndre Guedes 
214573f1071cSAndre Guedes 	if (!igc_desc_unused(ring))
214673f1071cSAndre Guedes 		return -EBUSY;
214773f1071cSAndre Guedes 
214873f1071cSAndre Guedes 	buffer = &ring->tx_buffer_info[ring->next_to_use];
214973f1071cSAndre Guedes 	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
215073f1071cSAndre Guedes 	if (err)
215173f1071cSAndre Guedes 		return err;
215273f1071cSAndre Guedes 
215373f1071cSAndre Guedes 	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
215473f1071cSAndre Guedes 		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
215573f1071cSAndre Guedes 		   buffer->bytecount;
215673f1071cSAndre Guedes 	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
215773f1071cSAndre Guedes 
215873f1071cSAndre Guedes 	desc = IGC_TX_DESC(ring, ring->next_to_use);
215973f1071cSAndre Guedes 	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
216073f1071cSAndre Guedes 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
216173f1071cSAndre Guedes 	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
216273f1071cSAndre Guedes 
216373f1071cSAndre Guedes 	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
216473f1071cSAndre Guedes 
216573f1071cSAndre Guedes 	buffer->next_to_watch = desc;
216673f1071cSAndre Guedes 
216773f1071cSAndre Guedes 	ring->next_to_use++;
216873f1071cSAndre Guedes 	if (ring->next_to_use == ring->count)
216973f1071cSAndre Guedes 		ring->next_to_use = 0;
217073f1071cSAndre Guedes 
217173f1071cSAndre Guedes 	return 0;
217273f1071cSAndre Guedes }
217373f1071cSAndre Guedes 
217473f1071cSAndre Guedes static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
217573f1071cSAndre Guedes 					    int cpu)
217673f1071cSAndre Guedes {
217773f1071cSAndre Guedes 	int index = cpu;
217873f1071cSAndre Guedes 
217973f1071cSAndre Guedes 	if (unlikely(index < 0))
218073f1071cSAndre Guedes 		index = 0;
218173f1071cSAndre Guedes 
218273f1071cSAndre Guedes 	while (index >= adapter->num_tx_queues)
218373f1071cSAndre Guedes 		index -= adapter->num_tx_queues;
218473f1071cSAndre Guedes 
218573f1071cSAndre Guedes 	return adapter->tx_ring[index];
218673f1071cSAndre Guedes }
218773f1071cSAndre Guedes 
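/* Transmit an XDP_TX buffer on the interface it was received on. The
 * xdp_buff must first be converted to an xdp_frame, which can fail,
 * typically when there is not enough headroom for the frame metadata.
 */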
218873f1071cSAndre Guedes static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
218973f1071cSAndre Guedes {
219073f1071cSAndre Guedes 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
219173f1071cSAndre Guedes 	int cpu = smp_processor_id();
219273f1071cSAndre Guedes 	struct netdev_queue *nq;
219373f1071cSAndre Guedes 	struct igc_ring *ring;
219473f1071cSAndre Guedes 	int res;
219573f1071cSAndre Guedes 
219673f1071cSAndre Guedes 	if (unlikely(!xdpf))
219773f1071cSAndre Guedes 		return -EFAULT;
219873f1071cSAndre Guedes 
219973f1071cSAndre Guedes 	ring = igc_xdp_get_tx_ring(adapter, cpu);
220073f1071cSAndre Guedes 	nq = txring_txq(ring);
220173f1071cSAndre Guedes 
220273f1071cSAndre Guedes 	__netif_tx_lock(nq, cpu);
220373f1071cSAndre Guedes 	res = igc_xdp_init_tx_descriptor(ring, xdpf);
220473f1071cSAndre Guedes 	__netif_tx_unlock(nq);
220573f1071cSAndre Guedes 	return res;
220673f1071cSAndre Guedes }
220773f1071cSAndre Guedes 
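/* Run the XDP program and map its verdict onto the IGC_XDP_* status
 * bits. TX and REDIRECT work is only queued here; it is flushed in a
 * batch by igc_finalize_xdp() once the RX loop is done.
 */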
220873a6e372SAndre Guedes /* This function assumes rcu_read_lock() is held by the caller. */
220973a6e372SAndre Guedes static int __igc_xdp_run_prog(struct igc_adapter *adapter,
221073a6e372SAndre Guedes 			      struct bpf_prog *prog,
221173a6e372SAndre Guedes 			      struct xdp_buff *xdp)
221273a6e372SAndre Guedes {
221373a6e372SAndre Guedes 	u32 act = bpf_prog_run_xdp(prog, xdp);
221473a6e372SAndre Guedes 
221573a6e372SAndre Guedes 	switch (act) {
221673a6e372SAndre Guedes 	case XDP_PASS:
221773a6e372SAndre Guedes 		return IGC_XDP_PASS;
221873a6e372SAndre Guedes 	case XDP_TX:
221973f1071cSAndre Guedes 		if (igc_xdp_xmit_back(adapter, xdp) < 0)
222045ce0859SMagnus Karlsson 			goto out_failure;
222112628565SDavid S. Miller 		return IGC_XDP_TX;
222273a6e372SAndre Guedes 	case XDP_REDIRECT:
22234ff32036SAndre Guedes 		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
222445ce0859SMagnus Karlsson 			goto out_failure;
222512628565SDavid S. Miller 		return IGC_XDP_REDIRECT;
222773a6e372SAndre Guedes 	default:
222873a6e372SAndre Guedes 		bpf_warn_invalid_xdp_action(act);
222973a6e372SAndre Guedes 		fallthrough;
223073a6e372SAndre Guedes 	case XDP_ABORTED:
223145ce0859SMagnus Karlsson out_failure:
223273a6e372SAndre Guedes 		trace_xdp_exception(adapter->netdev, prog, act);
223373a6e372SAndre Guedes 		fallthrough;
223473a6e372SAndre Guedes 	case XDP_DROP:
223573a6e372SAndre Guedes 		return IGC_XDP_CONSUMED;
223673a6e372SAndre Guedes 	}
223773a6e372SAndre Guedes }
223873a6e372SAndre Guedes 
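/* Wrapper around __igc_xdp_run_prog() for the regular RX path. The
 * verdict is encoded in the returned pointer: NULL means XDP_PASS (or
 * no program attached), while an ERR_PTR() carries the negated
 * IGC_XDP_* status for the caller to decode via PTR_ERR().
 */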
2239c9a11c23SSasha Neftin static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2240c9a11c23SSasha Neftin 					struct xdp_buff *xdp)
2241c9a11c23SSasha Neftin {
2242c9a11c23SSasha Neftin 	struct bpf_prog *prog;
2243c9a11c23SSasha Neftin 	int res;
224426575105SAndre Guedes 
224526575105SAndre Guedes 	prog = READ_ONCE(adapter->xdp_prog);
224626575105SAndre Guedes 	if (!prog) {
224726575105SAndre Guedes 		res = IGC_XDP_PASS;
224849589b23SToke Høiland-Jørgensen 		goto out;
224926575105SAndre Guedes 	}
225026575105SAndre Guedes 
225173a6e372SAndre Guedes 	res = __igc_xdp_run_prog(adapter, prog, xdp);
225226575105SAndre Guedes 
225349589b23SToke Høiland-Jørgensen out:
225426575105SAndre Guedes 	return ERR_PTR(-res);
225526575105SAndre Guedes }
225626575105SAndre Guedes 
225773f1071cSAndre Guedes /* This function assumes __netif_tx_lock is held by the caller. */
225873f1071cSAndre Guedes static void igc_flush_tx_descriptors(struct igc_ring *ring)
225973f1071cSAndre Guedes {
226073f1071cSAndre Guedes 	/* Once the tail pointer is updated, hardware can fetch the descriptors
226173f1071cSAndre Guedes 	 * at any time, so issue a write memory barrier here to ensure all
226273f1071cSAndre Guedes 	 * memory writes are complete before the tail pointer is updated.
226373f1071cSAndre Guedes 	 */
226473f1071cSAndre Guedes 	wmb();
226573f1071cSAndre Guedes 	writel(ring->next_to_use, ring->tail);
226673f1071cSAndre Guedes }
226773f1071cSAndre Guedes 
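/* Finish deferred XDP work at the end of an RX cleaning cycle: kick the
 * TX tail for frames queued by XDP_TX and flush any pending redirects.
 */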
226873f1071cSAndre Guedes static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
226973f1071cSAndre Guedes {
227073f1071cSAndre Guedes 	int cpu = smp_processor_id();
227173f1071cSAndre Guedes 	struct netdev_queue *nq;
227273f1071cSAndre Guedes 	struct igc_ring *ring;
227373f1071cSAndre Guedes 
227473f1071cSAndre Guedes 	if (status & IGC_XDP_TX) {
227573f1071cSAndre Guedes 		ring = igc_xdp_get_tx_ring(adapter, cpu);
227673f1071cSAndre Guedes 		nq = txring_txq(ring);
227773f1071cSAndre Guedes 
227873f1071cSAndre Guedes 		__netif_tx_lock(nq, cpu);
227973f1071cSAndre Guedes 		igc_flush_tx_descriptors(ring);
228073f1071cSAndre Guedes 		__netif_tx_unlock(nq);
228173f1071cSAndre Guedes 	}
22824ff32036SAndre Guedes 
22834ff32036SAndre Guedes 	if (status & IGC_XDP_REDIRECT)
22844ff32036SAndre Guedes 		xdp_do_flush();
228573f1071cSAndre Guedes }
228673f1071cSAndre Guedes 
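/* Fold the per-poll packet/byte counts into the ring's u64 stats and
 * into the q_vector running totals, which are later consulted by the
 * interrupt moderation logic.
 */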
2287a27e6e73SAndre Guedes static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2288a27e6e73SAndre Guedes 				unsigned int packets, unsigned int bytes)
2289a27e6e73SAndre Guedes {
2290a27e6e73SAndre Guedes 	struct igc_ring *ring = q_vector->rx.ring;
2291a27e6e73SAndre Guedes 
2292a27e6e73SAndre Guedes 	u64_stats_update_begin(&ring->rx_syncp);
2293a27e6e73SAndre Guedes 	ring->rx_stats.packets += packets;
2294a27e6e73SAndre Guedes 	ring->rx_stats.bytes += bytes;
2295a27e6e73SAndre Guedes 	u64_stats_update_end(&ring->rx_syncp);
2296a27e6e73SAndre Guedes 
2297a27e6e73SAndre Guedes 	q_vector->rx.total_packets += packets;
2298a27e6e73SAndre Guedes 	q_vector->rx.total_bytes += bytes;
2299a27e6e73SAndre Guedes }
2300a27e6e73SAndre Guedes 
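/**
 * igc_clean_rx_irq - Reclaim completed descriptors from an RX ring
 * @q_vector: pointer to q_vector containing the RX ring to clean
 * @budget: NAPI budget, i.e. the maximum number of packets to process
 *
 * Return: the number of packets processed.
 */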
23010507ef8aSSasha Neftin static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
23020507ef8aSSasha Neftin {
23030507ef8aSSasha Neftin 	unsigned int total_bytes = 0, total_packets = 0;
230473f1071cSAndre Guedes 	struct igc_adapter *adapter = q_vector->adapter;
23050507ef8aSSasha Neftin 	struct igc_ring *rx_ring = q_vector->rx.ring;
23060507ef8aSSasha Neftin 	struct sk_buff *skb = rx_ring->skb;
23070507ef8aSSasha Neftin 	u16 cleaned_count = igc_desc_unused(rx_ring);
23084ff32036SAndre Guedes 	int xdp_status = 0, rx_buffer_pgcnt;
23090507ef8aSSasha Neftin 
23100507ef8aSSasha Neftin 	while (likely(total_packets < budget)) {
23110507ef8aSSasha Neftin 		union igc_adv_rx_desc *rx_desc;
23120507ef8aSSasha Neftin 		struct igc_rx_buffer *rx_buffer;
231373f1071cSAndre Guedes 		unsigned int size, truesize;
2314e1ed4f92SAndre Guedes 		ktime_t timestamp = 0;
231526575105SAndre Guedes 		struct xdp_buff xdp;
2316e1ed4f92SAndre Guedes 		int pkt_offset = 0;
231726575105SAndre Guedes 		void *pktbuf;
23180507ef8aSSasha Neftin 
23190507ef8aSSasha Neftin 		/* return some buffers to hardware; returning one at a time is too slow */
23200507ef8aSSasha Neftin 		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
23210507ef8aSSasha Neftin 			igc_alloc_rx_buffers(rx_ring, cleaned_count);
23220507ef8aSSasha Neftin 			cleaned_count = 0;
23230507ef8aSSasha Neftin 		}
23240507ef8aSSasha Neftin 
23250507ef8aSSasha Neftin 		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
23260507ef8aSSasha Neftin 		size = le16_to_cpu(rx_desc->wb.upper.length);
23270507ef8aSSasha Neftin 		if (!size)
23280507ef8aSSasha Neftin 			break;
23290507ef8aSSasha Neftin 
23300507ef8aSSasha Neftin 		/* This memory barrier is needed to keep us from reading
23310507ef8aSSasha Neftin 		 * any other fields out of the rx_desc until we know the
23320507ef8aSSasha Neftin 		 * descriptor has been written back
23330507ef8aSSasha Neftin 		 */
23340507ef8aSSasha Neftin 		dma_rmb();
23350507ef8aSSasha Neftin 
23364ff32036SAndre Guedes 		rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
233773f1071cSAndre Guedes 		truesize = igc_get_rx_frame_truesize(rx_ring, size);
23380507ef8aSSasha Neftin 
233926575105SAndre Guedes 		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2340e1ed4f92SAndre Guedes 
234126575105SAndre Guedes 		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2342e1ed4f92SAndre Guedes 			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2343e1ed4f92SAndre Guedes 							pktbuf);
2344e1ed4f92SAndre Guedes 			pkt_offset = IGC_TS_HDR_LEN;
2345e1ed4f92SAndre Guedes 			size -= IGC_TS_HDR_LEN;
2346e1ed4f92SAndre Guedes 		}
2347e1ed4f92SAndre Guedes 
234826575105SAndre Guedes 		if (!skb) {
2349082294f2SMatteo Croce 			xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
2350082294f2SMatteo Croce 			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
2351082294f2SMatteo Croce 					 igc_rx_offset(rx_ring) + pkt_offset, size, false);
235226575105SAndre Guedes 
235326575105SAndre Guedes 			skb = igc_xdp_run_prog(adapter, &xdp);
235426575105SAndre Guedes 		}
235526575105SAndre Guedes 
235626575105SAndre Guedes 		if (IS_ERR(skb)) {
235773f1071cSAndre Guedes 			unsigned int xdp_res = -PTR_ERR(skb);
235873f1071cSAndre Guedes 
235973f1071cSAndre Guedes 			switch (xdp_res) {
236073f1071cSAndre Guedes 			case IGC_XDP_CONSUMED:
236126575105SAndre Guedes 				rx_buffer->pagecnt_bias++;
236273f1071cSAndre Guedes 				break;
236373f1071cSAndre Guedes 			case IGC_XDP_TX:
23644ff32036SAndre Guedes 			case IGC_XDP_REDIRECT:
236573f1071cSAndre Guedes 				igc_rx_buffer_flip(rx_buffer, truesize);
236673f1071cSAndre Guedes 				xdp_status |= xdp_res;
236773f1071cSAndre Guedes 				break;
236873f1071cSAndre Guedes 			}
236973f1071cSAndre Guedes 
237026575105SAndre Guedes 			total_packets++;
237126575105SAndre Guedes 			total_bytes += size;
237226575105SAndre Guedes 		} else if (skb)
23730507ef8aSSasha Neftin 			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
23740507ef8aSSasha Neftin 		else if (ring_uses_build_skb(rx_ring))
23750507ef8aSSasha Neftin 			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
23760507ef8aSSasha Neftin 		else
237726575105SAndre Guedes 			skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
237826575105SAndre Guedes 						timestamp);
23790507ef8aSSasha Neftin 
23800507ef8aSSasha Neftin 		/* exit if we failed to retrieve a buffer */
23810507ef8aSSasha Neftin 		if (!skb) {
23820507ef8aSSasha Neftin 			rx_ring->rx_stats.alloc_failed++;
23830507ef8aSSasha Neftin 			rx_buffer->pagecnt_bias++;
23840507ef8aSSasha Neftin 			break;
23850507ef8aSSasha Neftin 		}
23860507ef8aSSasha Neftin 
23874ff32036SAndre Guedes 		igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
23880507ef8aSSasha Neftin 		cleaned_count++;
23890507ef8aSSasha Neftin 
23900507ef8aSSasha Neftin 		/* fetch next buffer in frame if non-eop */
23910507ef8aSSasha Neftin 		if (igc_is_non_eop(rx_ring, rx_desc))
23920507ef8aSSasha Neftin 			continue;
23930507ef8aSSasha Neftin 
23940507ef8aSSasha Neftin 		/* verify the packet layout is correct */
23950507ef8aSSasha Neftin 		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
23960507ef8aSSasha Neftin 			skb = NULL;
23970507ef8aSSasha Neftin 			continue;
23980507ef8aSSasha Neftin 		}
23990507ef8aSSasha Neftin 
24000507ef8aSSasha Neftin 		/* probably a little skewed due to removing CRC */
24010507ef8aSSasha Neftin 		total_bytes += skb->len;
24020507ef8aSSasha Neftin 
24033a66abe9SAndre Guedes 		/* populate checksum, VLAN, and protocol */
24040507ef8aSSasha Neftin 		igc_process_skb_fields(rx_ring, rx_desc, skb);
24050507ef8aSSasha Neftin 
24060507ef8aSSasha Neftin 		napi_gro_receive(&q_vector->napi, skb);
24070507ef8aSSasha Neftin 
24080507ef8aSSasha Neftin 		/* reset skb pointer */
24090507ef8aSSasha Neftin 		skb = NULL;
24100507ef8aSSasha Neftin 
24110507ef8aSSasha Neftin 		/* update budget accounting */
24120507ef8aSSasha Neftin 		total_packets++;
24130507ef8aSSasha Neftin 	}
24140507ef8aSSasha Neftin 
241573f1071cSAndre Guedes 	if (xdp_status)
241673f1071cSAndre Guedes 		igc_finalize_xdp(adapter, xdp_status);
241773f1071cSAndre Guedes 
24180507ef8aSSasha Neftin 	/* place incomplete frames back on ring for completion */
24190507ef8aSSasha Neftin 	rx_ring->skb = skb;
24200507ef8aSSasha Neftin 
2421a27e6e73SAndre Guedes 	igc_update_rx_stats(q_vector, total_packets, total_bytes);
24220507ef8aSSasha Neftin 
24230507ef8aSSasha Neftin 	if (cleaned_count)
24240507ef8aSSasha Neftin 		igc_alloc_rx_buffers(rx_ring, cleaned_count);
24250507ef8aSSasha Neftin 
24260507ef8aSSasha Neftin 	return total_packets;
24270507ef8aSSasha Neftin }
24280507ef8aSSasha Neftin 
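/* Zero-copy RX buffers belong to the XSK pool and cannot be handed to
 * the stack directly, so frames that receive an XDP_PASS verdict are
 * copied into a freshly allocated skb, metadata included.
 */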
2429fc9df2a0SAndre Guedes static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2430fc9df2a0SAndre Guedes 					    struct xdp_buff *xdp)
2431fc9df2a0SAndre Guedes {
2432fc9df2a0SAndre Guedes 	unsigned int metasize = xdp->data - xdp->data_meta;
2433fc9df2a0SAndre Guedes 	unsigned int datasize = xdp->data_end - xdp->data;
2434fc9df2a0SAndre Guedes 	unsigned int totalsize = metasize + datasize;
2435fc9df2a0SAndre Guedes 	struct sk_buff *skb;
2436fc9df2a0SAndre Guedes 
2437fc9df2a0SAndre Guedes 	skb = __napi_alloc_skb(&ring->q_vector->napi,
2438fc9df2a0SAndre Guedes 			       xdp->data_end - xdp->data_hard_start,
2439fc9df2a0SAndre Guedes 			       GFP_ATOMIC | __GFP_NOWARN);
2440fc9df2a0SAndre Guedes 	if (unlikely(!skb))
2441fc9df2a0SAndre Guedes 		return NULL;
2442fc9df2a0SAndre Guedes 
2443fc9df2a0SAndre Guedes 	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
2444fc9df2a0SAndre Guedes 	memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
2445fc9df2a0SAndre Guedes 	if (metasize)
2446fc9df2a0SAndre Guedes 		skb_metadata_set(skb, metasize);
2447fc9df2a0SAndre Guedes 
2448fc9df2a0SAndre Guedes 	return skb;
2449fc9df2a0SAndre Guedes }
2450fc9df2a0SAndre Guedes 
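/* Copy a zero-copy buffer into an skb, attach the RX timestamp if one
 * was taken, and hand the result to the stack via GRO.
 */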
2451fc9df2a0SAndre Guedes static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2452fc9df2a0SAndre Guedes 				union igc_adv_rx_desc *desc,
2453fc9df2a0SAndre Guedes 				struct xdp_buff *xdp,
2454fc9df2a0SAndre Guedes 				ktime_t timestamp)
2455fc9df2a0SAndre Guedes {
2456fc9df2a0SAndre Guedes 	struct igc_ring *ring = q_vector->rx.ring;
2457fc9df2a0SAndre Guedes 	struct sk_buff *skb;
2458fc9df2a0SAndre Guedes 
2459fc9df2a0SAndre Guedes 	skb = igc_construct_skb_zc(ring, xdp);
2460fc9df2a0SAndre Guedes 	if (!skb) {
2461fc9df2a0SAndre Guedes 		ring->rx_stats.alloc_failed++;
2462fc9df2a0SAndre Guedes 		return;
2463fc9df2a0SAndre Guedes 	}
2464fc9df2a0SAndre Guedes 
2465fc9df2a0SAndre Guedes 	if (timestamp)
2466fc9df2a0SAndre Guedes 		skb_hwtstamps(skb)->hwtstamp = timestamp;
2467fc9df2a0SAndre Guedes 
2468fc9df2a0SAndre Guedes 	if (igc_cleanup_headers(ring, desc, skb))
2469fc9df2a0SAndre Guedes 		return;
2470fc9df2a0SAndre Guedes 
2471fc9df2a0SAndre Guedes 	igc_process_skb_fields(ring, desc, skb);
2472fc9df2a0SAndre Guedes 	napi_gro_receive(&q_vector->napi, skb);
2473fc9df2a0SAndre Guedes }
2474fc9df2a0SAndre Guedes 
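/* Zero-copy (AF_XDP) variant of igc_clean_rx_irq(). Buffers come from
 * the ring's XSK pool, the XDP program is run on every frame, and only
 * XDP_PASS frames are copied into skbs.
 */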
2475fc9df2a0SAndre Guedes static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2476fc9df2a0SAndre Guedes {
2477fc9df2a0SAndre Guedes 	struct igc_adapter *adapter = q_vector->adapter;
2478fc9df2a0SAndre Guedes 	struct igc_ring *ring = q_vector->rx.ring;
2479fc9df2a0SAndre Guedes 	u16 cleaned_count = igc_desc_unused(ring);
2480fc9df2a0SAndre Guedes 	int total_bytes = 0, total_packets = 0;
2481fc9df2a0SAndre Guedes 	u16 ntc = ring->next_to_clean;
2482fc9df2a0SAndre Guedes 	struct bpf_prog *prog;
2483fc9df2a0SAndre Guedes 	bool failure = false;
2484fc9df2a0SAndre Guedes 	int xdp_status = 0;
2485fc9df2a0SAndre Guedes 
2486fc9df2a0SAndre Guedes 	rcu_read_lock();
2487fc9df2a0SAndre Guedes 
2488fc9df2a0SAndre Guedes 	prog = READ_ONCE(adapter->xdp_prog);
2489fc9df2a0SAndre Guedes 
2490fc9df2a0SAndre Guedes 	while (likely(total_packets < budget)) {
2491fc9df2a0SAndre Guedes 		union igc_adv_rx_desc *desc;
2492fc9df2a0SAndre Guedes 		struct igc_rx_buffer *bi;
2493fc9df2a0SAndre Guedes 		ktime_t timestamp = 0;
2494fc9df2a0SAndre Guedes 		unsigned int size;
2495fc9df2a0SAndre Guedes 		int res;
2496fc9df2a0SAndre Guedes 
2497fc9df2a0SAndre Guedes 		desc = IGC_RX_DESC(ring, ntc);
2498fc9df2a0SAndre Guedes 		size = le16_to_cpu(desc->wb.upper.length);
2499fc9df2a0SAndre Guedes 		if (!size)
2500fc9df2a0SAndre Guedes 			break;
2501fc9df2a0SAndre Guedes 
2502fc9df2a0SAndre Guedes 		/* This memory barrier is needed to keep us from reading
2503fc9df2a0SAndre Guedes 		 * any other fields out of the rx_desc until we know the
2504fc9df2a0SAndre Guedes 		 * descriptor has been written back
2505fc9df2a0SAndre Guedes 		 */
2506fc9df2a0SAndre Guedes 		dma_rmb();
2507fc9df2a0SAndre Guedes 
2508fc9df2a0SAndre Guedes 		bi = &ring->rx_buffer_info[ntc];
2509fc9df2a0SAndre Guedes 
2510fc9df2a0SAndre Guedes 		if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2511fc9df2a0SAndre Guedes 			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2512fc9df2a0SAndre Guedes 							bi->xdp->data);
2513fc9df2a0SAndre Guedes 
2514fc9df2a0SAndre Guedes 			bi->xdp->data += IGC_TS_HDR_LEN;
2515fc9df2a0SAndre Guedes 
2516fc9df2a0SAndre Guedes 			/* The HW timestamp has been copied into a local variable, so
2517fc9df2a0SAndre Guedes 			 * the metadata length must be 0 when the XDP program is called.
2518fc9df2a0SAndre Guedes 			 */
2519fc9df2a0SAndre Guedes 			bi->xdp->data_meta += IGC_TS_HDR_LEN;
2520fc9df2a0SAndre Guedes 			size -= IGC_TS_HDR_LEN;
2521fc9df2a0SAndre Guedes 		}
2522fc9df2a0SAndre Guedes 
2523fc9df2a0SAndre Guedes 		bi->xdp->data_end = bi->xdp->data + size;
2524fc9df2a0SAndre Guedes 		xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2525fc9df2a0SAndre Guedes 
2526fc9df2a0SAndre Guedes 		res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2527fc9df2a0SAndre Guedes 		switch (res) {
2528fc9df2a0SAndre Guedes 		case IGC_XDP_PASS:
2529fc9df2a0SAndre Guedes 			igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2530fc9df2a0SAndre Guedes 			fallthrough;
2531fc9df2a0SAndre Guedes 		case IGC_XDP_CONSUMED:
2532fc9df2a0SAndre Guedes 			xsk_buff_free(bi->xdp);
2533fc9df2a0SAndre Guedes 			break;
2534fc9df2a0SAndre Guedes 		case IGC_XDP_TX:
2535fc9df2a0SAndre Guedes 		case IGC_XDP_REDIRECT:
2536fc9df2a0SAndre Guedes 			xdp_status |= res;
2537fc9df2a0SAndre Guedes 			break;
2538fc9df2a0SAndre Guedes 		}
2539fc9df2a0SAndre Guedes 
2540fc9df2a0SAndre Guedes 		bi->xdp = NULL;
2541fc9df2a0SAndre Guedes 		total_bytes += size;
2542fc9df2a0SAndre Guedes 		total_packets++;
2543fc9df2a0SAndre Guedes 		cleaned_count++;
2544fc9df2a0SAndre Guedes 		ntc++;
2545fc9df2a0SAndre Guedes 		if (ntc == ring->count)
2546fc9df2a0SAndre Guedes 			ntc = 0;
2547fc9df2a0SAndre Guedes 	}
2548fc9df2a0SAndre Guedes 
2549fc9df2a0SAndre Guedes 	ring->next_to_clean = ntc;
2550fc9df2a0SAndre Guedes 	rcu_read_unlock();
2551fc9df2a0SAndre Guedes 
2552fc9df2a0SAndre Guedes 	if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2553fc9df2a0SAndre Guedes 		failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2554fc9df2a0SAndre Guedes 
2555fc9df2a0SAndre Guedes 	if (xdp_status)
2556fc9df2a0SAndre Guedes 		igc_finalize_xdp(adapter, xdp_status);
2557fc9df2a0SAndre Guedes 
2558fc9df2a0SAndre Guedes 	igc_update_rx_stats(q_vector, total_packets, total_bytes);
2559fc9df2a0SAndre Guedes 
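	/* With the need_wakeup protocol, tell user space whether it has to
	 * kick the kernel: set the flag when we failed to refill buffers or
	 * cleaned everything available, clear it otherwise.
	 */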
2560fc9df2a0SAndre Guedes 	if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2561fc9df2a0SAndre Guedes 		if (failure || ring->next_to_clean == ring->next_to_use)
2562fc9df2a0SAndre Guedes 			xsk_set_rx_need_wakeup(ring->xsk_pool);
2563fc9df2a0SAndre Guedes 		else
2564fc9df2a0SAndre Guedes 			xsk_clear_rx_need_wakeup(ring->xsk_pool);
2565fc9df2a0SAndre Guedes 		return total_packets;
2566fc9df2a0SAndre Guedes 	}
2567fc9df2a0SAndre Guedes 
2568fc9df2a0SAndre Guedes 	return failure ? budget : total_packets;
2569fc9df2a0SAndre Guedes }
2570fc9df2a0SAndre Guedes 
2571a27e6e73SAndre Guedes static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2572a27e6e73SAndre Guedes 				unsigned int packets, unsigned int bytes)
2573a27e6e73SAndre Guedes {
2574a27e6e73SAndre Guedes 	struct igc_ring *ring = q_vector->tx.ring;
2575a27e6e73SAndre Guedes 
2576a27e6e73SAndre Guedes 	u64_stats_update_begin(&ring->tx_syncp);
2577a27e6e73SAndre Guedes 	ring->tx_stats.bytes += bytes;
2578a27e6e73SAndre Guedes 	ring->tx_stats.packets += packets;
2579a27e6e73SAndre Guedes 	u64_stats_update_end(&ring->tx_syncp);
2580a27e6e73SAndre Guedes 
2581a27e6e73SAndre Guedes 	q_vector->tx.total_bytes += bytes;
2582a27e6e73SAndre Guedes 	q_vector->tx.total_packets += packets;
2583a27e6e73SAndre Guedes }
2584a27e6e73SAndre Guedes 
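/* Transmit frames that user space queued on the AF_XDP TX ring. Called
 * from the TX cleaning path; the tail register is kicked only once per
 * batch, after the last descriptor has been written.
 */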
25859acf59a7SAndre Guedes static void igc_xdp_xmit_zc(struct igc_ring *ring)
25869acf59a7SAndre Guedes {
25879acf59a7SAndre Guedes 	struct xsk_buff_pool *pool = ring->xsk_pool;
25889acf59a7SAndre Guedes 	struct netdev_queue *nq = txring_txq(ring);
25899acf59a7SAndre Guedes 	union igc_adv_tx_desc *tx_desc = NULL;
25909acf59a7SAndre Guedes 	int cpu = smp_processor_id();
25919acf59a7SAndre Guedes 	u16 ntu = ring->next_to_use;
25929acf59a7SAndre Guedes 	struct xdp_desc xdp_desc;
25939acf59a7SAndre Guedes 	u16 budget;
25949acf59a7SAndre Guedes 
25959acf59a7SAndre Guedes 	if (!netif_carrier_ok(ring->netdev))
25969acf59a7SAndre Guedes 		return;
25979acf59a7SAndre Guedes 
25989acf59a7SAndre Guedes 	__netif_tx_lock(nq, cpu);
25999acf59a7SAndre Guedes 
26009acf59a7SAndre Guedes 	budget = igc_desc_unused(ring);
26019acf59a7SAndre Guedes 
26029acf59a7SAndre Guedes 	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
26039acf59a7SAndre Guedes 		u32 cmd_type, olinfo_status;
26049acf59a7SAndre Guedes 		struct igc_tx_buffer *bi;
26059acf59a7SAndre Guedes 		dma_addr_t dma;
26069acf59a7SAndre Guedes 
26079acf59a7SAndre Guedes 		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
26089acf59a7SAndre Guedes 			   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
26099acf59a7SAndre Guedes 			   xdp_desc.len;
26109acf59a7SAndre Guedes 		olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
26119acf59a7SAndre Guedes 
26129acf59a7SAndre Guedes 		dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
26139acf59a7SAndre Guedes 		xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
26149acf59a7SAndre Guedes 
26159acf59a7SAndre Guedes 		tx_desc = IGC_TX_DESC(ring, ntu);
26169acf59a7SAndre Guedes 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
26179acf59a7SAndre Guedes 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
26189acf59a7SAndre Guedes 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
26199acf59a7SAndre Guedes 
26209acf59a7SAndre Guedes 		bi = &ring->tx_buffer_info[ntu];
26219acf59a7SAndre Guedes 		bi->type = IGC_TX_BUFFER_TYPE_XSK;
26229acf59a7SAndre Guedes 		bi->protocol = 0;
26239acf59a7SAndre Guedes 		bi->bytecount = xdp_desc.len;
26249acf59a7SAndre Guedes 		bi->gso_segs = 1;
26259acf59a7SAndre Guedes 		bi->time_stamp = jiffies;
26269acf59a7SAndre Guedes 		bi->next_to_watch = tx_desc;
26279acf59a7SAndre Guedes 
26289acf59a7SAndre Guedes 		netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
26299acf59a7SAndre Guedes 
26309acf59a7SAndre Guedes 		ntu++;
26319acf59a7SAndre Guedes 		if (ntu == ring->count)
26329acf59a7SAndre Guedes 			ntu = 0;
26339acf59a7SAndre Guedes 	}
26349acf59a7SAndre Guedes 
26359acf59a7SAndre Guedes 	ring->next_to_use = ntu;
26369acf59a7SAndre Guedes 	if (tx_desc) {
26379acf59a7SAndre Guedes 		igc_flush_tx_descriptors(ring);
26389acf59a7SAndre Guedes 		xsk_tx_release(pool);
26399acf59a7SAndre Guedes 	}
26409acf59a7SAndre Guedes 
26419acf59a7SAndre Guedes 	__netif_tx_unlock(nq);
26429acf59a7SAndre Guedes }
26439acf59a7SAndre Guedes 
26440507ef8aSSasha Neftin /**
26450507ef8aSSasha Neftin  * igc_clean_tx_irq - Reclaim resources after transmit completes
26460507ef8aSSasha Neftin  * @q_vector: pointer to q_vector containing needed info
26470507ef8aSSasha Neftin  * @napi_budget: Used to determine if we are in netpoll
26480507ef8aSSasha Neftin  *
26490507ef8aSSasha Neftin  * Return: true if the ring is completely cleaned
26500507ef8aSSasha Neftin  */
26510507ef8aSSasha Neftin static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
26520507ef8aSSasha Neftin {
26530507ef8aSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
26540507ef8aSSasha Neftin 	unsigned int total_bytes = 0, total_packets = 0;
26550507ef8aSSasha Neftin 	unsigned int budget = q_vector->tx.work_limit;
26560507ef8aSSasha Neftin 	struct igc_ring *tx_ring = q_vector->tx.ring;
26570507ef8aSSasha Neftin 	unsigned int i = tx_ring->next_to_clean;
26580507ef8aSSasha Neftin 	struct igc_tx_buffer *tx_buffer;
26590507ef8aSSasha Neftin 	union igc_adv_tx_desc *tx_desc;
26609acf59a7SAndre Guedes 	u32 xsk_frames = 0;
26610507ef8aSSasha Neftin 
26620507ef8aSSasha Neftin 	if (test_bit(__IGC_DOWN, &adapter->state))
26630507ef8aSSasha Neftin 		return true;
26640507ef8aSSasha Neftin 
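	/* next_to_clean is tracked as a negative offset from the end of the
	 * ring so that the wrap checks below reduce to testing i for zero.
	 */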
26650507ef8aSSasha Neftin 	tx_buffer = &tx_ring->tx_buffer_info[i];
26660507ef8aSSasha Neftin 	tx_desc = IGC_TX_DESC(tx_ring, i);
26670507ef8aSSasha Neftin 	i -= tx_ring->count;
26680507ef8aSSasha Neftin 
26690507ef8aSSasha Neftin 	do {
26700507ef8aSSasha Neftin 		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
26710507ef8aSSasha Neftin 
26720507ef8aSSasha Neftin 		/* if next_to_watch is not set then there is no work pending */
26730507ef8aSSasha Neftin 		if (!eop_desc)
26740507ef8aSSasha Neftin 			break;
26750507ef8aSSasha Neftin 
26760507ef8aSSasha Neftin 		/* prevent any other reads prior to eop_desc */
26770507ef8aSSasha Neftin 		smp_rmb();
26780507ef8aSSasha Neftin 
26790507ef8aSSasha Neftin 		/* if DD is not set pending work has not been completed */
26800507ef8aSSasha Neftin 		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
26810507ef8aSSasha Neftin 			break;
26820507ef8aSSasha Neftin 
26830507ef8aSSasha Neftin 		/* clear next_to_watch to prevent false hangs */
26840507ef8aSSasha Neftin 		tx_buffer->next_to_watch = NULL;
26850507ef8aSSasha Neftin 
26860507ef8aSSasha Neftin 		/* update the statistics for this packet */
26870507ef8aSSasha Neftin 		total_bytes += tx_buffer->bytecount;
26880507ef8aSSasha Neftin 		total_packets += tx_buffer->gso_segs;
26890507ef8aSSasha Neftin 
2690859b4dfaSAndre Guedes 		switch (tx_buffer->type) {
26919acf59a7SAndre Guedes 		case IGC_TX_BUFFER_TYPE_XSK:
26929acf59a7SAndre Guedes 			xsk_frames++;
26939acf59a7SAndre Guedes 			break;
2694859b4dfaSAndre Guedes 		case IGC_TX_BUFFER_TYPE_XDP:
269573f1071cSAndre Guedes 			xdp_return_frame(tx_buffer->xdpf);
26969acf59a7SAndre Guedes 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2697859b4dfaSAndre Guedes 			break;
2698859b4dfaSAndre Guedes 		case IGC_TX_BUFFER_TYPE_SKB:
26990507ef8aSSasha Neftin 			napi_consume_skb(tx_buffer->skb, napi_budget);
27009acf59a7SAndre Guedes 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2701859b4dfaSAndre Guedes 			break;
2702859b4dfaSAndre Guedes 		default:
2703859b4dfaSAndre Guedes 			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
2704859b4dfaSAndre Guedes 			break;
2705859b4dfaSAndre Guedes 		}
27060507ef8aSSasha Neftin 
27070507ef8aSSasha Neftin 		/* clear last DMA location and unmap remaining buffers */
27080507ef8aSSasha Neftin 		while (tx_desc != eop_desc) {
27090507ef8aSSasha Neftin 			tx_buffer++;
27100507ef8aSSasha Neftin 			tx_desc++;
27110507ef8aSSasha Neftin 			i++;
27120507ef8aSSasha Neftin 			if (unlikely(!i)) {
27130507ef8aSSasha Neftin 				i -= tx_ring->count;
27140507ef8aSSasha Neftin 				tx_buffer = tx_ring->tx_buffer_info;
27150507ef8aSSasha Neftin 				tx_desc = IGC_TX_DESC(tx_ring, 0);
27160507ef8aSSasha Neftin 			}
27170507ef8aSSasha Neftin 
27180507ef8aSSasha Neftin 			/* unmap any remaining paged data */
271961234295SAndre Guedes 			if (dma_unmap_len(tx_buffer, len))
272061234295SAndre Guedes 				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
27210507ef8aSSasha Neftin 		}
27220507ef8aSSasha Neftin 
27230507ef8aSSasha Neftin 		/* move us one more past the eop_desc for start of next pkt */
27240507ef8aSSasha Neftin 		tx_buffer++;
27250507ef8aSSasha Neftin 		tx_desc++;
27260507ef8aSSasha Neftin 		i++;
27270507ef8aSSasha Neftin 		if (unlikely(!i)) {
27280507ef8aSSasha Neftin 			i -= tx_ring->count;
27290507ef8aSSasha Neftin 			tx_buffer = tx_ring->tx_buffer_info;
27300507ef8aSSasha Neftin 			tx_desc = IGC_TX_DESC(tx_ring, 0);
27310507ef8aSSasha Neftin 		}
27320507ef8aSSasha Neftin 
27330507ef8aSSasha Neftin 		/* issue prefetch for next Tx descriptor */
27340507ef8aSSasha Neftin 		prefetch(tx_desc);
27350507ef8aSSasha Neftin 
27360507ef8aSSasha Neftin 		/* update budget accounting */
27370507ef8aSSasha Neftin 		budget--;
27380507ef8aSSasha Neftin 	} while (likely(budget));
27390507ef8aSSasha Neftin 
27400507ef8aSSasha Neftin 	netdev_tx_completed_queue(txring_txq(tx_ring),
27410507ef8aSSasha Neftin 				  total_packets, total_bytes);
27420507ef8aSSasha Neftin 
27430507ef8aSSasha Neftin 	i += tx_ring->count;
27440507ef8aSSasha Neftin 	tx_ring->next_to_clean = i;
2745a27e6e73SAndre Guedes 
2746a27e6e73SAndre Guedes 	igc_update_tx_stats(q_vector, total_packets, total_bytes);
27470507ef8aSSasha Neftin 
27489acf59a7SAndre Guedes 	if (tx_ring->xsk_pool) {
27499acf59a7SAndre Guedes 		if (xsk_frames)
27509acf59a7SAndre Guedes 			xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
27519acf59a7SAndre Guedes 		if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
27529acf59a7SAndre Guedes 			xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
27539acf59a7SAndre Guedes 		igc_xdp_xmit_zc(tx_ring);
27549acf59a7SAndre Guedes 	}
27559acf59a7SAndre Guedes 
27560507ef8aSSasha Neftin 	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
27570507ef8aSSasha Neftin 		struct igc_hw *hw = &adapter->hw;
27580507ef8aSSasha Neftin 
27590507ef8aSSasha Neftin 		/* Detect a transmit hang in hardware; this serializes the
27600507ef8aSSasha Neftin 		 * check with the clearing of time_stamp and the movement of i
27610507ef8aSSasha Neftin 		 */
27620507ef8aSSasha Neftin 		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
27630507ef8aSSasha Neftin 		if (tx_buffer->next_to_watch &&
27640507ef8aSSasha Neftin 		    time_after(jiffies, tx_buffer->time_stamp +
27650507ef8aSSasha Neftin 		    (adapter->tx_timeout_factor * HZ)) &&
27660507ef8aSSasha Neftin 		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
27670507ef8aSSasha Neftin 			/* detected Tx unit hang */
276825f06effSAndre Guedes 			netdev_err(tx_ring->netdev,
27690507ef8aSSasha Neftin 				   "Detected Tx Unit Hang\n"
27700507ef8aSSasha Neftin 				   "  Tx Queue             <%d>\n"
27710507ef8aSSasha Neftin 				   "  TDH                  <%x>\n"
27720507ef8aSSasha Neftin 				   "  TDT                  <%x>\n"
27730507ef8aSSasha Neftin 				   "  next_to_use          <%x>\n"
27740507ef8aSSasha Neftin 				   "  next_to_clean        <%x>\n"
27750507ef8aSSasha Neftin 				   "buffer_info[next_to_clean]\n"
27760507ef8aSSasha Neftin 				   "  time_stamp           <%lx>\n"
27770507ef8aSSasha Neftin 				   "  next_to_watch        <%p>\n"
27780507ef8aSSasha Neftin 				   "  jiffies              <%lx>\n"
27790507ef8aSSasha Neftin 				   "  desc.status          <%x>\n",
27800507ef8aSSasha Neftin 				   tx_ring->queue_index,
27810507ef8aSSasha Neftin 				   rd32(IGC_TDH(tx_ring->reg_idx)),
27820507ef8aSSasha Neftin 				   readl(tx_ring->tail),
27830507ef8aSSasha Neftin 				   tx_ring->next_to_use,
27840507ef8aSSasha Neftin 				   tx_ring->next_to_clean,
27850507ef8aSSasha Neftin 				   tx_buffer->time_stamp,
27860507ef8aSSasha Neftin 				   tx_buffer->next_to_watch,
27870507ef8aSSasha Neftin 				   jiffies,
27880507ef8aSSasha Neftin 				   tx_buffer->next_to_watch->wb.status);
27890507ef8aSSasha Neftin 			netif_stop_subqueue(tx_ring->netdev,
27900507ef8aSSasha Neftin 					    tx_ring->queue_index);
27910507ef8aSSasha Neftin 
27920507ef8aSSasha Neftin 			/* we are about to reset, no point in enabling stuff */
27930507ef8aSSasha Neftin 			return true;
27940507ef8aSSasha Neftin 		}
27950507ef8aSSasha Neftin 	}
27960507ef8aSSasha Neftin 
27970507ef8aSSasha Neftin #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
27980507ef8aSSasha Neftin 	if (unlikely(total_packets &&
27990507ef8aSSasha Neftin 		     netif_carrier_ok(tx_ring->netdev) &&
28000507ef8aSSasha Neftin 		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
28010507ef8aSSasha Neftin 		/* Make sure that anybody stopping the queue after this
28020507ef8aSSasha Neftin 		 * sees the new next_to_clean.
28030507ef8aSSasha Neftin 		 */
28040507ef8aSSasha Neftin 		smp_mb();
28050507ef8aSSasha Neftin 		if (__netif_subqueue_stopped(tx_ring->netdev,
28060507ef8aSSasha Neftin 					     tx_ring->queue_index) &&
28070507ef8aSSasha Neftin 		    !(test_bit(__IGC_DOWN, &adapter->state))) {
28080507ef8aSSasha Neftin 			netif_wake_subqueue(tx_ring->netdev,
28090507ef8aSSasha Neftin 					    tx_ring->queue_index);
28100507ef8aSSasha Neftin 
28110507ef8aSSasha Neftin 			u64_stats_update_begin(&tx_ring->tx_syncp);
28120507ef8aSSasha Neftin 			tx_ring->tx_stats.restart_queue++;
28130507ef8aSSasha Neftin 			u64_stats_update_end(&tx_ring->tx_syncp);
28140507ef8aSSasha Neftin 		}
28150507ef8aSSasha Neftin 	}
28160507ef8aSSasha Neftin 
28170507ef8aSSasha Neftin 	return !!budget;
28180507ef8aSSasha Neftin }
28190507ef8aSSasha Neftin 
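/* Scan the RAL/RAH table for an enabled entry of the given type that
 * matches @addr. Return the table index, or -1 if there is no match.
 */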
2820750433d0SAndre Guedes static int igc_find_mac_filter(struct igc_adapter *adapter,
2821750433d0SAndre Guedes 			       enum igc_mac_filter_type type, const u8 *addr)
282286a4de66SSasha Neftin {
282386a4de66SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
2824d66358caSAndre Guedes 	int max_entries = hw->mac.rar_entry_count;
2825d66358caSAndre Guedes 	u32 ral, rah;
282686a4de66SSasha Neftin 	int i;
282786a4de66SSasha Neftin 
2828794e5bc8SAndre Guedes 	for (i = 0; i < max_entries; i++) {
2829d66358caSAndre Guedes 		ral = rd32(IGC_RAL(i));
2830d66358caSAndre Guedes 		rah = rd32(IGC_RAH(i));
283186a4de66SSasha Neftin 
2832d66358caSAndre Guedes 		if (!(rah & IGC_RAH_AV))
2833794e5bc8SAndre Guedes 			continue;
2834750433d0SAndre Guedes 		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
2835750433d0SAndre Guedes 			continue;
2836d66358caSAndre Guedes 		if ((rah & IGC_RAH_RAH_MASK) !=
2837d66358caSAndre Guedes 		    le16_to_cpup((__le16 *)(addr + 4)))
2838d66358caSAndre Guedes 			continue;
2839d66358caSAndre Guedes 		if (ral != le32_to_cpup((__le32 *)(addr)))
284086a4de66SSasha Neftin 			continue;
284186a4de66SSasha Neftin 
284286a4de66SSasha Neftin 		return i;
284386a4de66SSasha Neftin 	}
284486a4de66SSasha Neftin 
2845794e5bc8SAndre Guedes 	return -1;
284686a4de66SSasha Neftin }
284786a4de66SSasha Neftin 
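/* Return the index of the first RAL/RAH entry whose Address Valid bit
 * is clear, or -1 when the MAC filter table is full.
 */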
2848794e5bc8SAndre Guedes static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
284986a4de66SSasha Neftin {
285086a4de66SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
2851d66358caSAndre Guedes 	int max_entries = hw->mac.rar_entry_count;
2852d66358caSAndre Guedes 	u32 rah;
285386a4de66SSasha Neftin 	int i;
285486a4de66SSasha Neftin 
2855794e5bc8SAndre Guedes 	for (i = 0; i < max_entries; i++) {
2856d66358caSAndre Guedes 		rah = rd32(IGC_RAH(i));
285786a4de66SSasha Neftin 
2858d66358caSAndre Guedes 		if (!(rah & IGC_RAH_AV))
2859794e5bc8SAndre Guedes 			return i;
286086a4de66SSasha Neftin 	}
286186a4de66SSasha Neftin 
2862794e5bc8SAndre Guedes 	return -1;
286386a4de66SSasha Neftin }
286486a4de66SSasha Neftin 
2865e9736fa4SAndre Guedes /**
2866e9736fa4SAndre Guedes  * igc_add_mac_filter() - Add MAC address filter
2867e9736fa4SAndre Guedes  * @adapter: Pointer to adapter where the filter should be added
2868750433d0SAndre Guedes  * @type: MAC address filter type (source or destination)
2869e9736fa4SAndre Guedes  * @addr: MAC address
2870e9736fa4SAndre Guedes  * @queue: If non-negative, queue assignment feature is enabled and frames
2871e9736fa4SAndre Guedes  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
2872e9736fa4SAndre Guedes  *         assignment is disabled.
2873e9736fa4SAndre Guedes  *
2874e9736fa4SAndre Guedes  * Return: 0 in case of success, negative errno code otherwise.
287586a4de66SSasha Neftin  */
287636fa2152SAndre Guedes static int igc_add_mac_filter(struct igc_adapter *adapter,
2877750433d0SAndre Guedes 			      enum igc_mac_filter_type type, const u8 *addr,
2878750433d0SAndre Guedes 			      int queue)
287986a4de66SSasha Neftin {
2880949b922eSAndre Guedes 	struct net_device *dev = adapter->netdev;
2881794e5bc8SAndre Guedes 	int index;
288286a4de66SSasha Neftin 
2883750433d0SAndre Guedes 	index = igc_find_mac_filter(adapter, type, addr);
2884794e5bc8SAndre Guedes 	if (index >= 0)
2885d66358caSAndre Guedes 		goto update_filter;
288686a4de66SSasha Neftin 
2887794e5bc8SAndre Guedes 	index = igc_get_avail_mac_filter_slot(adapter);
2888794e5bc8SAndre Guedes 	if (index < 0)
288986a4de66SSasha Neftin 		return -ENOSPC;
2890794e5bc8SAndre Guedes 
2891750433d0SAndre Guedes 	netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2892750433d0SAndre Guedes 		   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2893750433d0SAndre Guedes 		   addr, queue);
2894949b922eSAndre Guedes 
2895d66358caSAndre Guedes update_filter:
2896750433d0SAndre Guedes 	igc_set_mac_filter_hw(adapter, index, type, addr, queue);
289786a4de66SSasha Neftin 	return 0;
289886a4de66SSasha Neftin }
289986a4de66SSasha Neftin 
2900c6aae591SAndre Guedes /**
2901c6aae591SAndre Guedes  * igc_del_mac_filter() - Delete MAC address filter
2902c6aae591SAndre Guedes  * @adapter: Pointer to adapter where the filter should be deleted from
2903750433d0SAndre Guedes  * @type: MAC address filter type (source or destination)
2904c6aae591SAndre Guedes  * @addr: MAC address
290586a4de66SSasha Neftin  */
2906acda576fSAndre Guedes static void igc_del_mac_filter(struct igc_adapter *adapter,
2907750433d0SAndre Guedes 			       enum igc_mac_filter_type type, const u8 *addr)
290886a4de66SSasha Neftin {
2909949b922eSAndre Guedes 	struct net_device *dev = adapter->netdev;
29105f930713SAndre Guedes 	int index;
291186a4de66SSasha Neftin 
2912750433d0SAndre Guedes 	index = igc_find_mac_filter(adapter, type, addr);
29135f930713SAndre Guedes 	if (index < 0)
2914acda576fSAndre Guedes 		return;
291586a4de66SSasha Neftin 
2916d66358caSAndre Guedes 	if (index == 0) {
29175f930713SAndre Guedes 		/* If this is the default filter, we don't actually delete it;
29185f930713SAndre Guedes 		 * we just reset it to its default value, i.e. disable queue
29195f930713SAndre Guedes 		 * assignment.
29205f930713SAndre Guedes 		 */
2921949b922eSAndre Guedes 		netdev_dbg(dev, "Disable default MAC filter queue assignment\n");
2922949b922eSAndre Guedes 
2923750433d0SAndre Guedes 		igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
292486a4de66SSasha Neftin 	} else {
2925750433d0SAndre Guedes 		netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2926750433d0SAndre Guedes 			   index,
2927750433d0SAndre Guedes 			   type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2928750433d0SAndre Guedes 			   addr);
2929949b922eSAndre Guedes 
29305f930713SAndre Guedes 		igc_clear_mac_filter_hw(adapter, index);
293186a4de66SSasha Neftin 	}
293286a4de66SSasha Neftin }
293386a4de66SSasha Neftin 
293412ddee68SAndre Guedes /**
293512ddee68SAndre Guedes  * igc_add_vlan_prio_filter() - Add VLAN priority filter
293612ddee68SAndre Guedes  * @adapter: Pointer to adapter where the filter should be added
293712ddee68SAndre Guedes  * @prio: VLAN priority value
293812ddee68SAndre Guedes  * @queue: Queue number which matching frames are assigned to
293912ddee68SAndre Guedes  *
294012ddee68SAndre Guedes  * Return: 0 in case of success, negative errno code otherwise.
294112ddee68SAndre Guedes  */
294236fa2152SAndre Guedes static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
294336fa2152SAndre Guedes 				    int queue)
294412ddee68SAndre Guedes {
294512ddee68SAndre Guedes 	struct net_device *dev = adapter->netdev;
294612ddee68SAndre Guedes 	struct igc_hw *hw = &adapter->hw;
294712ddee68SAndre Guedes 	u32 vlanpqf;
294812ddee68SAndre Guedes 
294912ddee68SAndre Guedes 	vlanpqf = rd32(IGC_VLANPQF);
295012ddee68SAndre Guedes 
295112ddee68SAndre Guedes 	if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
295212ddee68SAndre Guedes 		netdev_dbg(dev, "VLAN priority filter already in use\n");
295312ddee68SAndre Guedes 		return -EEXIST;
295412ddee68SAndre Guedes 	}
295512ddee68SAndre Guedes 
295612ddee68SAndre Guedes 	vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
295712ddee68SAndre Guedes 	vlanpqf |= IGC_VLANPQF_VALID(prio);
295812ddee68SAndre Guedes 
295912ddee68SAndre Guedes 	wr32(IGC_VLANPQF, vlanpqf);
296012ddee68SAndre Guedes 
296112ddee68SAndre Guedes 	netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
296212ddee68SAndre Guedes 		   prio, queue);
296312ddee68SAndre Guedes 	return 0;
296412ddee68SAndre Guedes }
296512ddee68SAndre Guedes 
296612ddee68SAndre Guedes /**
296712ddee68SAndre Guedes  * igc_del_vlan_prio_filter() - Delete VLAN priority filter
296812ddee68SAndre Guedes  * @adapter: Pointer to adapter where the filter should be deleted from
296912ddee68SAndre Guedes  * @prio: VLAN priority value
297012ddee68SAndre Guedes  */
297136fa2152SAndre Guedes static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
297212ddee68SAndre Guedes {
297312ddee68SAndre Guedes 	struct igc_hw *hw = &adapter->hw;
297412ddee68SAndre Guedes 	u32 vlanpqf;
297512ddee68SAndre Guedes 
297612ddee68SAndre Guedes 	vlanpqf = rd32(IGC_VLANPQF);
297712ddee68SAndre Guedes 
297812ddee68SAndre Guedes 	vlanpqf &= ~IGC_VLANPQF_VALID(prio);
297912ddee68SAndre Guedes 	vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
298012ddee68SAndre Guedes 
298112ddee68SAndre Guedes 	wr32(IGC_VLANPQF, vlanpqf);
298212ddee68SAndre Guedes 
298312ddee68SAndre Guedes 	netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
298412ddee68SAndre Guedes 		   prio);
298512ddee68SAndre Guedes }
298612ddee68SAndre Guedes 
2987aa7ca726SAndre Guedes static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2988aa7ca726SAndre Guedes {
2989aa7ca726SAndre Guedes 	struct igc_hw *hw = &adapter->hw;
2990aa7ca726SAndre Guedes 	int i;
2991aa7ca726SAndre Guedes 
2992aa7ca726SAndre Guedes 	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2993aa7ca726SAndre Guedes 		u32 etqf = rd32(IGC_ETQF(i));
2994aa7ca726SAndre Guedes 
2995aa7ca726SAndre Guedes 		if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2996aa7ca726SAndre Guedes 			return i;
2997aa7ca726SAndre Guedes 	}
2998aa7ca726SAndre Guedes 
2999aa7ca726SAndre Guedes 	return -1;
3000aa7ca726SAndre Guedes }
3001aa7ca726SAndre Guedes 
3002aa7ca726SAndre Guedes /**
3003aa7ca726SAndre Guedes  * igc_add_etype_filter() - Add ethertype filter
3004aa7ca726SAndre Guedes  * @adapter: Pointer to adapter where the filter should be added
3005aa7ca726SAndre Guedes  * @etype: Ethertype value
3006aa7ca726SAndre Guedes  * @queue: If non-negative, queue assignment feature is enabled and frames
3007aa7ca726SAndre Guedes  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
3008aa7ca726SAndre Guedes  *         assignment is disabled.
3009aa7ca726SAndre Guedes  *
3010aa7ca726SAndre Guedes  * Return: 0 in case of success, negative errno code otherwise.
3011aa7ca726SAndre Guedes  */
301236fa2152SAndre Guedes static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
301336fa2152SAndre Guedes 				int queue)
3014aa7ca726SAndre Guedes {
3015aa7ca726SAndre Guedes 	struct igc_hw *hw = &adapter->hw;
3016aa7ca726SAndre Guedes 	int index;
3017aa7ca726SAndre Guedes 	u32 etqf;
3018aa7ca726SAndre Guedes 
3019aa7ca726SAndre Guedes 	index = igc_get_avail_etype_filter_slot(adapter);
3020aa7ca726SAndre Guedes 	if (index < 0)
3021aa7ca726SAndre Guedes 		return -ENOSPC;
3022aa7ca726SAndre Guedes 
3023aa7ca726SAndre Guedes 	etqf = rd32(IGC_ETQF(index));
3024aa7ca726SAndre Guedes 
3025aa7ca726SAndre Guedes 	etqf &= ~IGC_ETQF_ETYPE_MASK;
3026aa7ca726SAndre Guedes 	etqf |= etype;
3027aa7ca726SAndre Guedes 
3028aa7ca726SAndre Guedes 	if (queue >= 0) {
3029aa7ca726SAndre Guedes 		etqf &= ~IGC_ETQF_QUEUE_MASK;
3030aa7ca726SAndre Guedes 		etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3031aa7ca726SAndre Guedes 		etqf |= IGC_ETQF_QUEUE_ENABLE;
3032aa7ca726SAndre Guedes 	}
3033aa7ca726SAndre Guedes 
3034aa7ca726SAndre Guedes 	etqf |= IGC_ETQF_FILTER_ENABLE;
3035aa7ca726SAndre Guedes 
3036aa7ca726SAndre Guedes 	wr32(IGC_ETQF(index), etqf);
3037aa7ca726SAndre Guedes 
3038aa7ca726SAndre Guedes 	netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3039aa7ca726SAndre Guedes 		   etype, queue);
3040aa7ca726SAndre Guedes 	return 0;
3041aa7ca726SAndre Guedes }
3042aa7ca726SAndre Guedes 
3043aa7ca726SAndre Guedes static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3044aa7ca726SAndre Guedes {
3045aa7ca726SAndre Guedes 	struct igc_hw *hw = &adapter->hw;
3046aa7ca726SAndre Guedes 	int i;
3047aa7ca726SAndre Guedes 
3048aa7ca726SAndre Guedes 	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3049aa7ca726SAndre Guedes 		u32 etqf = rd32(IGC_ETQF(i));
3050aa7ca726SAndre Guedes 
3051aa7ca726SAndre Guedes 		if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3052aa7ca726SAndre Guedes 			return i;
3053aa7ca726SAndre Guedes 	}
3054aa7ca726SAndre Guedes 
3055aa7ca726SAndre Guedes 	return -1;
3056aa7ca726SAndre Guedes }
3057aa7ca726SAndre Guedes 
3058aa7ca726SAndre Guedes /**
3059aa7ca726SAndre Guedes  * igc_del_etype_filter() - Delete ethertype filter
3060aa7ca726SAndre Guedes  * @adapter: Pointer to adapter where the filter should be deleted from
3061aa7ca726SAndre Guedes  * @etype: Ethertype value
3062aa7ca726SAndre Guedes  */
3063acda576fSAndre Guedes static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3064aa7ca726SAndre Guedes {
3065aa7ca726SAndre Guedes 	struct igc_hw *hw = &adapter->hw;
3066aa7ca726SAndre Guedes 	int index;
3067aa7ca726SAndre Guedes 
3068aa7ca726SAndre Guedes 	index = igc_find_etype_filter(adapter, etype);
3069aa7ca726SAndre Guedes 	if (index < 0)
3070acda576fSAndre Guedes 		return;
3071aa7ca726SAndre Guedes 
3072aa7ca726SAndre Guedes 	wr32(IGC_ETQF(index), 0);
3073aa7ca726SAndre Guedes 
3074aa7ca726SAndre Guedes 	netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3075aa7ca726SAndre Guedes 		   etype);
3076aa7ca726SAndre Guedes }
3077aa7ca726SAndre Guedes 
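/* The 32 flex filters are accessed through eight host filter table
 * (FHFT) register windows; IGC_FHFTSL selects which group of eight
 * filters those windows currently address. Pick the right group for
 * @input->index and return the matching window base through @fhft.
 */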
30786574631bSKurt Kanzenbach static int igc_flex_filter_select(struct igc_adapter *adapter,
30796574631bSKurt Kanzenbach 				  struct igc_flex_filter *input,
30806574631bSKurt Kanzenbach 				  u32 *fhft)
30816574631bSKurt Kanzenbach {
30826574631bSKurt Kanzenbach 	struct igc_hw *hw = &adapter->hw;
30836574631bSKurt Kanzenbach 	u8 fhft_index;
30846574631bSKurt Kanzenbach 	u32 fhftsl;
30856574631bSKurt Kanzenbach 
30866574631bSKurt Kanzenbach 	if (input->index >= MAX_FLEX_FILTER) {
30876574631bSKurt Kanzenbach 		dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
30886574631bSKurt Kanzenbach 		return -EINVAL;
30896574631bSKurt Kanzenbach 	}
30906574631bSKurt Kanzenbach 
30916574631bSKurt Kanzenbach 	/* Indirect table select register */
30926574631bSKurt Kanzenbach 	fhftsl = rd32(IGC_FHFTSL);
30936574631bSKurt Kanzenbach 	fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
30946574631bSKurt Kanzenbach 	switch (input->index) {
30956574631bSKurt Kanzenbach 	case 0 ... 7:
30966574631bSKurt Kanzenbach 		fhftsl |= 0x00;
30976574631bSKurt Kanzenbach 		break;
30986574631bSKurt Kanzenbach 	case 8 ... 15:
30996574631bSKurt Kanzenbach 		fhftsl |= 0x01;
31006574631bSKurt Kanzenbach 		break;
31016574631bSKurt Kanzenbach 	case 16 ... 23:
31026574631bSKurt Kanzenbach 		fhftsl |= 0x02;
31036574631bSKurt Kanzenbach 		break;
31046574631bSKurt Kanzenbach 	case 24 ... 31:
31056574631bSKurt Kanzenbach 		fhftsl |= 0x03;
31066574631bSKurt Kanzenbach 		break;
31076574631bSKurt Kanzenbach 	}
31086574631bSKurt Kanzenbach 	wr32(IGC_FHFTSL, fhftsl);
31096574631bSKurt Kanzenbach 
31106574631bSKurt Kanzenbach 	/* Normalize index down to host table register */
31116574631bSKurt Kanzenbach 	fhft_index = input->index % 8;
31126574631bSKurt Kanzenbach 
31136574631bSKurt Kanzenbach 	*fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
31146574631bSKurt Kanzenbach 		IGC_FHFT_EXT(fhft_index - 4);
31156574631bSKurt Kanzenbach 
31166574631bSKurt Kanzenbach 	return 0;
31176574631bSKurt Kanzenbach }
31186574631bSKurt Kanzenbach 
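/* Low-level flex filter write: select the filter window, program the
 * length/queue/priority word, write the 128-byte pattern and its mask,
 * and finally set the enable bit in WUFC or WUFC_EXT.
 */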
31192b477d05SKurt Kanzenbach static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
31206574631bSKurt Kanzenbach 				    struct igc_flex_filter *input)
31216574631bSKurt Kanzenbach {
31226574631bSKurt Kanzenbach 	struct device *dev = &adapter->pdev->dev;
31236574631bSKurt Kanzenbach 	struct igc_hw *hw = &adapter->hw;
31246574631bSKurt Kanzenbach 	u8 *data = input->data;
31256574631bSKurt Kanzenbach 	u8 *mask = input->mask;
31266574631bSKurt Kanzenbach 	u32 queuing;
31276574631bSKurt Kanzenbach 	u32 fhft;
31286574631bSKurt Kanzenbach 	u32 wufc;
31296574631bSKurt Kanzenbach 	int ret;
31306574631bSKurt Kanzenbach 	int i;
31316574631bSKurt Kanzenbach 
31326574631bSKurt Kanzenbach 	/* Length has to be aligned to 8. Otherwise the filter will fail. Bail
31336574631bSKurt Kanzenbach 	 * out early to avoid surprises later.
31346574631bSKurt Kanzenbach 	 */
31356574631bSKurt Kanzenbach 	if (input->length % 8 != 0) {
31366574631bSKurt Kanzenbach 		dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
31376574631bSKurt Kanzenbach 		return -EINVAL;
31386574631bSKurt Kanzenbach 	}
31396574631bSKurt Kanzenbach 
31406574631bSKurt Kanzenbach 	/* Select corresponding flex filter register and get base for host table. */
31416574631bSKurt Kanzenbach 	ret = igc_flex_filter_select(adapter, input, &fhft);
31426574631bSKurt Kanzenbach 	if (ret)
31436574631bSKurt Kanzenbach 		return ret;
31446574631bSKurt Kanzenbach 
31456574631bSKurt Kanzenbach 	/* When adding a filter, globally disable the flex filter feature, as
31466574631bSKurt Kanzenbach 	 * recommended by the datasheet.
31476574631bSKurt Kanzenbach 	 */
31486574631bSKurt Kanzenbach 	wufc = rd32(IGC_WUFC);
31496574631bSKurt Kanzenbach 	wufc &= ~IGC_WUFC_FLEX_HQ;
31506574631bSKurt Kanzenbach 	wr32(IGC_WUFC, wufc);
31516574631bSKurt Kanzenbach 
31526574631bSKurt Kanzenbach 	/* Configure filter */
31536574631bSKurt Kanzenbach 	queuing = input->length & IGC_FHFT_LENGTH_MASK;
31546574631bSKurt Kanzenbach 	queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
31556574631bSKurt Kanzenbach 	queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
31566574631bSKurt Kanzenbach 
31576574631bSKurt Kanzenbach 	if (input->immediate_irq)
31586574631bSKurt Kanzenbach 		queuing |= IGC_FHFT_IMM_INT;
31596574631bSKurt Kanzenbach 
31606574631bSKurt Kanzenbach 	if (input->drop)
31616574631bSKurt Kanzenbach 		queuing |= IGC_FHFT_DROP;
31626574631bSKurt Kanzenbach 
31636574631bSKurt Kanzenbach 	wr32(fhft + 0xFC, queuing);
31646574631bSKurt Kanzenbach 
31656574631bSKurt Kanzenbach 	/* Write data (128 byte) and mask (128 bit) */
31666574631bSKurt Kanzenbach 	for (i = 0; i < 16; ++i) {
31676574631bSKurt Kanzenbach 		const size_t data_idx = i * 8;
31686574631bSKurt Kanzenbach 		const size_t row_idx = i * 16;
31696574631bSKurt Kanzenbach 		u32 dw0 =
31706574631bSKurt Kanzenbach 			(data[data_idx + 0] << 0) |
31716574631bSKurt Kanzenbach 			(data[data_idx + 1] << 8) |
31726574631bSKurt Kanzenbach 			(data[data_idx + 2] << 16) |
31736574631bSKurt Kanzenbach 			(data[data_idx + 3] << 24);
31746574631bSKurt Kanzenbach 		u32 dw1 =
31756574631bSKurt Kanzenbach 			(data[data_idx + 4] << 0) |
31766574631bSKurt Kanzenbach 			(data[data_idx + 5] << 8) |
31776574631bSKurt Kanzenbach 			(data[data_idx + 6] << 16) |
31786574631bSKurt Kanzenbach 			(data[data_idx + 7] << 24);
31796574631bSKurt Kanzenbach 		u32 tmp;
31806574631bSKurt Kanzenbach 
31816574631bSKurt Kanzenbach 		/* Write row: dw0, dw1 and mask */
31826574631bSKurt Kanzenbach 		wr32(fhft + row_idx, dw0);
31836574631bSKurt Kanzenbach 		wr32(fhft + row_idx + 4, dw1);
31846574631bSKurt Kanzenbach 
31856574631bSKurt Kanzenbach 		/* mask is only valid for MASK(7, 0) */
31866574631bSKurt Kanzenbach 		tmp = rd32(fhft + row_idx + 8);
31876574631bSKurt Kanzenbach 		tmp &= ~GENMASK(7, 0);
31886574631bSKurt Kanzenbach 		tmp |= mask[i];
31896574631bSKurt Kanzenbach 		wr32(fhft + row_idx + 8, tmp);
31906574631bSKurt Kanzenbach 	}
31916574631bSKurt Kanzenbach 
31926574631bSKurt Kanzenbach 	/* Enable filter. */
31936574631bSKurt Kanzenbach 	wufc |= IGC_WUFC_FLEX_HQ;
31946574631bSKurt Kanzenbach 	if (input->index >= 8) {
31956574631bSKurt Kanzenbach 		/* Filters 0-7 are enabled via WUFC; filters 8-31 via WUFC_EXT. */
31966574631bSKurt Kanzenbach 		u32 wufc_ext = rd32(IGC_WUFC_EXT);
31976574631bSKurt Kanzenbach 
31986574631bSKurt Kanzenbach 		wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
31996574631bSKurt Kanzenbach 
32006574631bSKurt Kanzenbach 		wr32(IGC_WUFC_EXT, wufc_ext);
32016574631bSKurt Kanzenbach 	} else {
32026574631bSKurt Kanzenbach 		wufc |= (IGC_WUFC_FLX0 << input->index);
32036574631bSKurt Kanzenbach 	}
32046574631bSKurt Kanzenbach 	wr32(IGC_WUFC, wufc);
32056574631bSKurt Kanzenbach 
32066574631bSKurt Kanzenbach 	dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
32076574631bSKurt Kanzenbach 		input->index);
32086574631bSKurt Kanzenbach 
32096574631bSKurt Kanzenbach 	return 0;
32106574631bSKurt Kanzenbach }
32116574631bSKurt Kanzenbach 
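/* Copy @len bytes from @src into the filter pattern at @offset and set
 * the per-byte mask bits. A NULL @mask makes every byte of the field
 * participate in the match; otherwise only bytes with a non-zero mask
 * entry do.
 */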
32122b477d05SKurt Kanzenbach static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
32132b477d05SKurt Kanzenbach 				      const void *src, unsigned int offset,
32142b477d05SKurt Kanzenbach 				      size_t len, const void *mask)
32152b477d05SKurt Kanzenbach {
32162b477d05SKurt Kanzenbach 	int i;
32172b477d05SKurt Kanzenbach 
32182b477d05SKurt Kanzenbach 	/* data */
32192b477d05SKurt Kanzenbach 	memcpy(&flex->data[offset], src, len);
32202b477d05SKurt Kanzenbach 
32212b477d05SKurt Kanzenbach 	/* mask */
32222b477d05SKurt Kanzenbach 	for (i = 0; i < len; ++i) {
32232b477d05SKurt Kanzenbach 		const unsigned int idx = i + offset;
32242b477d05SKurt Kanzenbach 		const u8 *ptr = mask;
32252b477d05SKurt Kanzenbach 
32262b477d05SKurt Kanzenbach 		if (mask) {
32272b477d05SKurt Kanzenbach 			if (ptr[i] & 0xff)
32282b477d05SKurt Kanzenbach 				flex->mask[idx / 8] |= BIT(idx % 8);
32292b477d05SKurt Kanzenbach 
32302b477d05SKurt Kanzenbach 			continue;
32312b477d05SKurt Kanzenbach 		}
32322b477d05SKurt Kanzenbach 
32332b477d05SKurt Kanzenbach 		flex->mask[idx / 8] |= BIT(idx % 8);
32342b477d05SKurt Kanzenbach 	}
32352b477d05SKurt Kanzenbach }
32362b477d05SKurt Kanzenbach 
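/* Find the first flex filter whose enable bit is clear in WUFC (filters
 * 0-7) or WUFC_EXT (filters 8-31). Return -ENOSPC when all are in use.
 */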
32372b477d05SKurt Kanzenbach static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
32382b477d05SKurt Kanzenbach {
32392b477d05SKurt Kanzenbach 	struct igc_hw *hw = &adapter->hw;
32402b477d05SKurt Kanzenbach 	u32 wufc, wufc_ext;
32412b477d05SKurt Kanzenbach 	int i;
32422b477d05SKurt Kanzenbach 
32432b477d05SKurt Kanzenbach 	wufc = rd32(IGC_WUFC);
32442b477d05SKurt Kanzenbach 	wufc_ext = rd32(IGC_WUFC_EXT);
32452b477d05SKurt Kanzenbach 
32462b477d05SKurt Kanzenbach 	for (i = 0; i < MAX_FLEX_FILTER; i++) {
32472b477d05SKurt Kanzenbach 		if (i < 8) {
32482b477d05SKurt Kanzenbach 			if (!(wufc & (IGC_WUFC_FLX0 << i)))
32492b477d05SKurt Kanzenbach 				return i;
32502b477d05SKurt Kanzenbach 		} else {
32512b477d05SKurt Kanzenbach 			if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
32522b477d05SKurt Kanzenbach 				return i;
32532b477d05SKurt Kanzenbach 		}
32542b477d05SKurt Kanzenbach 	}
32552b477d05SKurt Kanzenbach 
32562b477d05SKurt Kanzenbach 	return -ENOSPC;
32572b477d05SKurt Kanzenbach }
32582b477d05SKurt Kanzenbach 
32592b477d05SKurt Kanzenbach static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
32602b477d05SKurt Kanzenbach {
32612b477d05SKurt Kanzenbach 	struct igc_hw *hw = &adapter->hw;
32622b477d05SKurt Kanzenbach 	u32 wufc, wufc_ext;
32632b477d05SKurt Kanzenbach 
32642b477d05SKurt Kanzenbach 	wufc = rd32(IGC_WUFC);
32652b477d05SKurt Kanzenbach 	wufc_ext = rd32(IGC_WUFC_EXT);
32662b477d05SKurt Kanzenbach 
32672b477d05SKurt Kanzenbach 	if (wufc & IGC_WUFC_FILTER_MASK)
32682b477d05SKurt Kanzenbach 		return true;
32692b477d05SKurt Kanzenbach 
32702b477d05SKurt Kanzenbach 	if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
32712b477d05SKurt Kanzenbach 		return true;
32722b477d05SKurt Kanzenbach 
32732b477d05SKurt Kanzenbach 	return false;
32742b477d05SKurt Kanzenbach }
32752b477d05SKurt Kanzenbach 
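/* Translate an ethtool NFC rule into a 32-byte flex filter pattern and
 * program it into the first available filter slot.
 */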
32762b477d05SKurt Kanzenbach static int igc_add_flex_filter(struct igc_adapter *adapter,
32772b477d05SKurt Kanzenbach 			       struct igc_nfc_rule *rule)
32782b477d05SKurt Kanzenbach {
32792b477d05SKurt Kanzenbach 	struct igc_flex_filter flex = { };
32802b477d05SKurt Kanzenbach 	struct igc_nfc_filter *filter = &rule->filter;
32812b477d05SKurt Kanzenbach 	unsigned int eth_offset, user_offset;
32822b477d05SKurt Kanzenbach 	int ret, index;
32832b477d05SKurt Kanzenbach 	bool vlan;
32842b477d05SKurt Kanzenbach 
32852b477d05SKurt Kanzenbach 	index = igc_find_avail_flex_filter_slot(adapter);
32862b477d05SKurt Kanzenbach 	if (index < 0)
32872b477d05SKurt Kanzenbach 		return -ENOSPC;
32882b477d05SKurt Kanzenbach 
32892b477d05SKurt Kanzenbach 	/* Construct the flex filter:
32902b477d05SKurt Kanzenbach 	 *  -> dest_mac [6]
32912b477d05SKurt Kanzenbach 	 *  -> src_mac [6]
32922b477d05SKurt Kanzenbach 	 *  -> tpid [2]
32932b477d05SKurt Kanzenbach 	 *  -> vlan tci [2]
32942b477d05SKurt Kanzenbach 	 *  -> ether type [2]
32952b477d05SKurt Kanzenbach 	 *  -> user data [8]
32962b477d05SKurt Kanzenbach 	 *  => 26 bytes in total, rounded up to a filter length of 32
32972b477d05SKurt Kanzenbach 	 */
32982b477d05SKurt Kanzenbach 	flex.index    = index;
32992b477d05SKurt Kanzenbach 	flex.length   = 32;
33002b477d05SKurt Kanzenbach 	flex.rx_queue = rule->action;
33012b477d05SKurt Kanzenbach 
33022b477d05SKurt Kanzenbach 	vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
33032b477d05SKurt Kanzenbach 	eth_offset = vlan ? 16 : 12;
33042b477d05SKurt Kanzenbach 	user_offset = vlan ? 18 : 14;
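	/* Byte layout implied by the offsets above: without a VLAN tag the
	 * EtherType sits at byte 12 and user data at byte 14; with a tag,
	 * the TPID occupies bytes 12-13, the TCI bytes 14-15, the EtherType
	 * bytes 16-17 and user data starts at byte 18.
	 */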
33052b477d05SKurt Kanzenbach 
33062b477d05SKurt Kanzenbach 	/* Add destination MAC */
33072b477d05SKurt Kanzenbach 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
33082b477d05SKurt Kanzenbach 		igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
33092b477d05SKurt Kanzenbach 					  ETH_ALEN, NULL);
33102b477d05SKurt Kanzenbach 
33112b477d05SKurt Kanzenbach 	/* Add source MAC */
33122b477d05SKurt Kanzenbach 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
33132b477d05SKurt Kanzenbach 		igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
33142b477d05SKurt Kanzenbach 					  ETH_ALEN, NULL);
33152b477d05SKurt Kanzenbach 
33162b477d05SKurt Kanzenbach 	/* Add VLAN etype */
33172b477d05SKurt Kanzenbach 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
33182b477d05SKurt Kanzenbach 		igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
33192b477d05SKurt Kanzenbach 					  sizeof(filter->vlan_etype),
33202b477d05SKurt Kanzenbach 					  NULL);
33212b477d05SKurt Kanzenbach 
33222b477d05SKurt Kanzenbach 	/* Add VLAN TCI */
33232b477d05SKurt Kanzenbach 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
33242b477d05SKurt Kanzenbach 		igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
33252b477d05SKurt Kanzenbach 					  sizeof(filter->vlan_tci), NULL);
33262b477d05SKurt Kanzenbach 
33272b477d05SKurt Kanzenbach 	/* Add Ether type */
33282b477d05SKurt Kanzenbach 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
33292b477d05SKurt Kanzenbach 		__be16 etype = cpu_to_be16(filter->etype);
33302b477d05SKurt Kanzenbach 
33312b477d05SKurt Kanzenbach 		igc_flex_filter_add_field(&flex, &etype, eth_offset,
33322b477d05SKurt Kanzenbach 					  sizeof(etype), NULL);
33332b477d05SKurt Kanzenbach 	}
33342b477d05SKurt Kanzenbach 
33352b477d05SKurt Kanzenbach 	/* Add user data */
33362b477d05SKurt Kanzenbach 	if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
33372b477d05SKurt Kanzenbach 		igc_flex_filter_add_field(&flex, &filter->user_data,
33382b477d05SKurt Kanzenbach 					  user_offset,
33392b477d05SKurt Kanzenbach 					  sizeof(filter->user_data),
33402b477d05SKurt Kanzenbach 					  filter->user_mask);
33412b477d05SKurt Kanzenbach 
33422b477d05SKurt Kanzenbach 	/* Add it down to the hardware and enable it. */
33432b477d05SKurt Kanzenbach 	ret = igc_write_flex_filter_ll(adapter, &flex);
33442b477d05SKurt Kanzenbach 	if (ret)
33452b477d05SKurt Kanzenbach 		return ret;
33462b477d05SKurt Kanzenbach 
33472b477d05SKurt Kanzenbach 	filter->flex_index = index;
33482b477d05SKurt Kanzenbach 
33492b477d05SKurt Kanzenbach 	return 0;
33502b477d05SKurt Kanzenbach }
33512b477d05SKurt Kanzenbach 
33522b477d05SKurt Kanzenbach static void igc_del_flex_filter(struct igc_adapter *adapter,
33532b477d05SKurt Kanzenbach 				u16 reg_index)
33542b477d05SKurt Kanzenbach {
33552b477d05SKurt Kanzenbach 	struct igc_hw *hw = &adapter->hw;
33562b477d05SKurt Kanzenbach 	u32 wufc;
33572b477d05SKurt Kanzenbach 
33582b477d05SKurt Kanzenbach 	/* Only disable the filter; the filter table itself is kept
33592b477d05SKurt Kanzenbach 	 * intact. A subsequent igc_add_flex_filter() simply overwrites
33602b477d05SKurt Kanzenbach 	 * the stale entry.
33612b477d05SKurt Kanzenbach 	 */
33622b477d05SKurt Kanzenbach 	if (reg_index >= 8) {
33632b477d05SKurt Kanzenbach 		u32 wufc_ext = rd32(IGC_WUFC_EXT);
33642b477d05SKurt Kanzenbach 
33652b477d05SKurt Kanzenbach 		wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
33662b477d05SKurt Kanzenbach 		wr32(IGC_WUFC_EXT, wufc_ext);
33672b477d05SKurt Kanzenbach 	} else {
33682b477d05SKurt Kanzenbach 		wufc = rd32(IGC_WUFC);
33692b477d05SKurt Kanzenbach 
33702b477d05SKurt Kanzenbach 		wufc &= ~(IGC_WUFC_FLX0 << reg_index);
33712b477d05SKurt Kanzenbach 		wr32(IGC_WUFC, wufc);
33722b477d05SKurt Kanzenbach 	}
33732b477d05SKurt Kanzenbach 
33742b477d05SKurt Kanzenbach 	if (igc_flex_filter_in_use(adapter))
33752b477d05SKurt Kanzenbach 		return;
33762b477d05SKurt Kanzenbach 
33772b477d05SKurt Kanzenbach 	/* No filters are in use, we may disable flex filters */
33782b477d05SKurt Kanzenbach 	wufc = rd32(IGC_WUFC);
33792b477d05SKurt Kanzenbach 	wufc &= ~IGC_WUFC_FLEX_HQ;
33802b477d05SKurt Kanzenbach 	wr32(IGC_WUFC, wufc);
33812b477d05SKurt Kanzenbach }
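
/* Example: deleting the filter at reg_index 9 clears
 * (IGC_WUFC_EXT_FLX8 << 1) in WUFC_EXT, while reg_index 3 clears
 * (IGC_WUFC_FLX0 << 3) in WUFC. Once no enable bit remains set,
 * IGC_WUFC_FLEX_HQ is cleared as well and flex filtering is
 * switched off entirely.
 */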
33822b477d05SKurt Kanzenbach 
338336fa2152SAndre Guedes static int igc_enable_nfc_rule(struct igc_adapter *adapter,
33842b477d05SKurt Kanzenbach 			       struct igc_nfc_rule *rule)
338536fa2152SAndre Guedes {
338636fa2152SAndre Guedes 	int err;
338736fa2152SAndre Guedes 
338873744262SKurt Kanzenbach 	if (rule->flex)
338973744262SKurt Kanzenbach 		return igc_add_flex_filter(adapter, rule);
33912b477d05SKurt Kanzenbach 
339236fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
339336fa2152SAndre Guedes 		err = igc_add_etype_filter(adapter, rule->filter.etype,
339436fa2152SAndre Guedes 					   rule->action);
339536fa2152SAndre Guedes 		if (err)
339636fa2152SAndre Guedes 			return err;
339736fa2152SAndre Guedes 	}
339836fa2152SAndre Guedes 
339936fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
340036fa2152SAndre Guedes 		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
340136fa2152SAndre Guedes 					 rule->filter.src_addr, rule->action);
340236fa2152SAndre Guedes 		if (err)
340336fa2152SAndre Guedes 			return err;
340436fa2152SAndre Guedes 	}
340536fa2152SAndre Guedes 
340636fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
340736fa2152SAndre Guedes 		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
340836fa2152SAndre Guedes 					 rule->filter.dst_addr, rule->action);
340936fa2152SAndre Guedes 		if (err)
341036fa2152SAndre Guedes 			return err;
341136fa2152SAndre Guedes 	}
341236fa2152SAndre Guedes 
341336fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
341436fa2152SAndre Guedes 		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
341536fa2152SAndre Guedes 			   VLAN_PRIO_SHIFT;
341636fa2152SAndre Guedes 
341736fa2152SAndre Guedes 		err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
341836fa2152SAndre Guedes 		if (err)
341936fa2152SAndre Guedes 			return err;
342036fa2152SAndre Guedes 	}
342136fa2152SAndre Guedes 
342236fa2152SAndre Guedes 	return 0;
342336fa2152SAndre Guedes }
342436fa2152SAndre Guedes 
3425acda576fSAndre Guedes static void igc_disable_nfc_rule(struct igc_adapter *adapter,
342636fa2152SAndre Guedes 				 const struct igc_nfc_rule *rule)
342736fa2152SAndre Guedes {
342873744262SKurt Kanzenbach 	if (rule->flex) {
34292b477d05SKurt Kanzenbach 		igc_del_flex_filter(adapter, rule->filter.flex_index);
343073744262SKurt Kanzenbach 		return;
343173744262SKurt Kanzenbach 	}
34322b477d05SKurt Kanzenbach 
343336fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
343436fa2152SAndre Guedes 		igc_del_etype_filter(adapter, rule->filter.etype);
343536fa2152SAndre Guedes 
343636fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
343736fa2152SAndre Guedes 		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
343836fa2152SAndre Guedes 			   VLAN_PRIO_SHIFT;
343936fa2152SAndre Guedes 
344036fa2152SAndre Guedes 		igc_del_vlan_prio_filter(adapter, prio);
344136fa2152SAndre Guedes 	}
344236fa2152SAndre Guedes 
344336fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
344436fa2152SAndre Guedes 		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
344536fa2152SAndre Guedes 				   rule->filter.src_addr);
344636fa2152SAndre Guedes 
344736fa2152SAndre Guedes 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
344836fa2152SAndre Guedes 		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
344936fa2152SAndre Guedes 				   rule->filter.dst_addr);
345036fa2152SAndre Guedes }
345136fa2152SAndre Guedes 
345236fa2152SAndre Guedes /**
345336fa2152SAndre Guedes  * igc_get_nfc_rule() - Get NFC rule
345436fa2152SAndre Guedes  * @adapter: Pointer to adapter
345536fa2152SAndre Guedes  * @location: Rule location
345636fa2152SAndre Guedes  *
345736fa2152SAndre Guedes  * Context: Expects adapter->nfc_rule_lock to be held by caller.
345836fa2152SAndre Guedes  *
345936fa2152SAndre Guedes  * Return: Pointer to NFC rule at @location. If not found, NULL.
346036fa2152SAndre Guedes  */
346136fa2152SAndre Guedes struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
346236fa2152SAndre Guedes 				      u32 location)
346336fa2152SAndre Guedes {
346436fa2152SAndre Guedes 	struct igc_nfc_rule *rule;
346536fa2152SAndre Guedes 
346636fa2152SAndre Guedes 	list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
346736fa2152SAndre Guedes 		if (rule->location == location)
346836fa2152SAndre Guedes 			return rule;
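		/* the list is kept sorted by location (see
		 * igc_add_nfc_rule()), so we can stop searching early
		 */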
346936fa2152SAndre Guedes 		if (rule->location > location)
347036fa2152SAndre Guedes 			break;
347136fa2152SAndre Guedes 	}
347236fa2152SAndre Guedes 
347336fa2152SAndre Guedes 	return NULL;
347436fa2152SAndre Guedes }
347536fa2152SAndre Guedes 
347636fa2152SAndre Guedes /**
347736fa2152SAndre Guedes  * igc_del_nfc_rule() - Delete NFC rule
347836fa2152SAndre Guedes  * @adapter: Pointer to adapter
347936fa2152SAndre Guedes  * @rule: Pointer to rule to be deleted
348036fa2152SAndre Guedes  *
348136fa2152SAndre Guedes  * Disable NFC rule in hardware and delete it from adapter.
348236fa2152SAndre Guedes  *
348336fa2152SAndre Guedes  * Context: Expects adapter->nfc_rule_lock to be held by caller.
348436fa2152SAndre Guedes  */
348536fa2152SAndre Guedes void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
348636fa2152SAndre Guedes {
348736fa2152SAndre Guedes 	igc_disable_nfc_rule(adapter, rule);
348836fa2152SAndre Guedes 
348936fa2152SAndre Guedes 	list_del(&rule->list);
349036fa2152SAndre Guedes 	adapter->nfc_rule_count--;
349136fa2152SAndre Guedes 
349236fa2152SAndre Guedes 	kfree(rule);
349336fa2152SAndre Guedes }
349436fa2152SAndre Guedes 
3495e256ec83SAndre Guedes static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3496e256ec83SAndre Guedes {
3497e256ec83SAndre Guedes 	struct igc_nfc_rule *rule, *tmp;
3498e256ec83SAndre Guedes 
349942fc5dc0SAndre Guedes 	mutex_lock(&adapter->nfc_rule_lock);
3500e256ec83SAndre Guedes 
3501e256ec83SAndre Guedes 	list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3502e256ec83SAndre Guedes 		igc_del_nfc_rule(adapter, rule);
3503e256ec83SAndre Guedes 
350442fc5dc0SAndre Guedes 	mutex_unlock(&adapter->nfc_rule_lock);
3505e256ec83SAndre Guedes }
3506e256ec83SAndre Guedes 
350736fa2152SAndre Guedes /**
350836fa2152SAndre Guedes  * igc_add_nfc_rule() - Add NFC rule
350936fa2152SAndre Guedes  * @adapter: Pointer to adapter
351036fa2152SAndre Guedes  * @rule: Pointer to rule to be added
351136fa2152SAndre Guedes  *
351236fa2152SAndre Guedes  * Enable NFC rule in hardware and add it to adapter.
351336fa2152SAndre Guedes  *
351436fa2152SAndre Guedes  * Context: Expects adapter->nfc_rule_lock to be held by caller.
351536fa2152SAndre Guedes  *
351636fa2152SAndre Guedes  * Return: 0 on success, negative errno on failure.
351736fa2152SAndre Guedes  */
351836fa2152SAndre Guedes int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
351936fa2152SAndre Guedes {
352036fa2152SAndre Guedes 	struct igc_nfc_rule *pred, *cur;
352136fa2152SAndre Guedes 	int err;
352236fa2152SAndre Guedes 
352336fa2152SAndre Guedes 	err = igc_enable_nfc_rule(adapter, rule);
352436fa2152SAndre Guedes 	if (err)
352536fa2152SAndre Guedes 		return err;
352636fa2152SAndre Guedes 
352736fa2152SAndre Guedes 	pred = NULL;
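	/* find the insertion point that keeps the list sorted by location */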
352836fa2152SAndre Guedes 	list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
352936fa2152SAndre Guedes 		if (cur->location >= rule->location)
353036fa2152SAndre Guedes 			break;
353136fa2152SAndre Guedes 		pred = cur;
353236fa2152SAndre Guedes 	}
353336fa2152SAndre Guedes 
353436fa2152SAndre Guedes 	list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
353536fa2152SAndre Guedes 	adapter->nfc_rule_count++;
353636fa2152SAndre Guedes 	return 0;
353736fa2152SAndre Guedes }
353836fa2152SAndre Guedes 
353936fa2152SAndre Guedes static void igc_restore_nfc_rules(struct igc_adapter *adapter)
354036fa2152SAndre Guedes {
354136fa2152SAndre Guedes 	struct igc_nfc_rule *rule;
354236fa2152SAndre Guedes 
354342fc5dc0SAndre Guedes 	mutex_lock(&adapter->nfc_rule_lock);
354436fa2152SAndre Guedes 
354536fa2152SAndre Guedes 	list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
354636fa2152SAndre Guedes 		igc_enable_nfc_rule(adapter, rule);
354736fa2152SAndre Guedes 
354842fc5dc0SAndre Guedes 	mutex_unlock(&adapter->nfc_rule_lock);
354986a4de66SSasha Neftin }
355086a4de66SSasha Neftin 
355186a4de66SSasha Neftin static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
355286a4de66SSasha Neftin {
355386a4de66SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
355486a4de66SSasha Neftin 
3555750433d0SAndre Guedes 	return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
355686a4de66SSasha Neftin }
355786a4de66SSasha Neftin 
355886a4de66SSasha Neftin static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
355986a4de66SSasha Neftin {
356086a4de66SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
356186a4de66SSasha Neftin 
3562acda576fSAndre Guedes 	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
356386a4de66SSasha Neftin 	return 0;
356486a4de66SSasha Neftin }
356586a4de66SSasha Neftin 
356686a4de66SSasha Neftin /**
356786a4de66SSasha Neftin  * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
356886a4de66SSasha Neftin  * @netdev: network interface device structure
356986a4de66SSasha Neftin  *
357086a4de66SSasha Neftin  * The set_rx_mode entry point is called whenever the unicast or multicast
357186a4de66SSasha Neftin  * address lists or the network interface flags are updated.  This routine is
357286a4de66SSasha Neftin  * responsible for configuring the hardware for proper unicast, multicast,
357386a4de66SSasha Neftin  * promiscuous mode, and all-multi behavior.
357486a4de66SSasha Neftin  */
357586a4de66SSasha Neftin static void igc_set_rx_mode(struct net_device *netdev)
357686a4de66SSasha Neftin {
357786a4de66SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
357886a4de66SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
357986a4de66SSasha Neftin 	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
358086a4de66SSasha Neftin 	int count;
358186a4de66SSasha Neftin 
358286a4de66SSasha Neftin 	/* Check for Promiscuous and All Multicast modes */
358386a4de66SSasha Neftin 	if (netdev->flags & IFF_PROMISC) {
358486a4de66SSasha Neftin 		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
358586a4de66SSasha Neftin 	} else {
358686a4de66SSasha Neftin 		if (netdev->flags & IFF_ALLMULTI) {
358786a4de66SSasha Neftin 			rctl |= IGC_RCTL_MPE;
358886a4de66SSasha Neftin 		} else {
358986a4de66SSasha Neftin 			/* Write addresses to the MTA, if the attempt fails
359086a4de66SSasha Neftin 			 * then we should just turn on promiscuous mode so
359186a4de66SSasha Neftin 			 * that we can at least receive multicast traffic
359286a4de66SSasha Neftin 			 */
359386a4de66SSasha Neftin 			count = igc_write_mc_addr_list(netdev);
359486a4de66SSasha Neftin 			if (count < 0)
359586a4de66SSasha Neftin 				rctl |= IGC_RCTL_MPE;
359686a4de66SSasha Neftin 		}
359786a4de66SSasha Neftin 	}
359886a4de66SSasha Neftin 
359986a4de66SSasha Neftin 	/* Write addresses to available RAR registers, if there is not
360086a4de66SSasha Neftin 	 * sufficient space to store all the addresses then enable
360186a4de66SSasha Neftin 	 * unicast promiscuous mode
360286a4de66SSasha Neftin 	 */
360386a4de66SSasha Neftin 	if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
360486a4de66SSasha Neftin 		rctl |= IGC_RCTL_UPE;
360586a4de66SSasha Neftin 
360686a4de66SSasha Neftin 	/* update state of unicast and multicast */
360786a4de66SSasha Neftin 	rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
360886a4de66SSasha Neftin 	wr32(IGC_RCTL, rctl);
360986a4de66SSasha Neftin 
361086a4de66SSasha Neftin #if (PAGE_SIZE < 8192)
361186a4de66SSasha Neftin 	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
361286a4de66SSasha Neftin 		rlpml = IGC_MAX_FRAME_BUILD_SKB;
361386a4de66SSasha Neftin #endif
361486a4de66SSasha Neftin 	wr32(IGC_RLPML, rlpml);
361586a4de66SSasha Neftin }
361686a4de66SSasha Neftin 
36171a7c0f2eSSasha Neftin /**
36181a7c0f2eSSasha Neftin  * igc_configure - configure the hardware for RX and TX
36191a7c0f2eSSasha Neftin  * @adapter: private board structure
36201a7c0f2eSSasha Neftin  */
36211a7c0f2eSSasha Neftin static void igc_configure(struct igc_adapter *adapter)
36221a7c0f2eSSasha Neftin {
36231a7c0f2eSSasha Neftin 	struct net_device *netdev = adapter->netdev;
36241a7c0f2eSSasha Neftin 	int i = 0;
36251a7c0f2eSSasha Neftin 
36261a7c0f2eSSasha Neftin 	igc_get_hw_control(adapter);
36271a7c0f2eSSasha Neftin 	igc_set_rx_mode(netdev);
36281a7c0f2eSSasha Neftin 
36298d744963SMuhammad Husaini Zulkifli 	igc_restore_vlan(adapter);
36308d744963SMuhammad Husaini Zulkifli 
36311a7c0f2eSSasha Neftin 	igc_setup_tctl(adapter);
36321a7c0f2eSSasha Neftin 	igc_setup_mrqc(adapter);
36331a7c0f2eSSasha Neftin 	igc_setup_rctl(adapter);
36341a7c0f2eSSasha Neftin 
3635ac9156b2SAndre Guedes 	igc_set_default_mac_filter(adapter);
363697700bc8SAndre Guedes 	igc_restore_nfc_rules(adapter);
3637ac9156b2SAndre Guedes 
36381a7c0f2eSSasha Neftin 	igc_configure_tx(adapter);
36391a7c0f2eSSasha Neftin 	igc_configure_rx(adapter);
36401a7c0f2eSSasha Neftin 
36411a7c0f2eSSasha Neftin 	igc_rx_fifo_flush_base(&adapter->hw);
36421a7c0f2eSSasha Neftin 
36431a7c0f2eSSasha Neftin 	/* call igc_desc_unused which always leaves
36441a7c0f2eSSasha Neftin 	 * at least 1 descriptor unused to make sure
36451a7c0f2eSSasha Neftin 	 * next_to_use != next_to_clean
36461a7c0f2eSSasha Neftin 	 */
36471a7c0f2eSSasha Neftin 	for (i = 0; i < adapter->num_rx_queues; i++) {
36481a7c0f2eSSasha Neftin 		struct igc_ring *ring = adapter->rx_ring[i];
36491a7c0f2eSSasha Neftin 
3650fc9df2a0SAndre Guedes 		if (ring->xsk_pool)
3651fc9df2a0SAndre Guedes 			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3652fc9df2a0SAndre Guedes 		else
36531a7c0f2eSSasha Neftin 			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
36541a7c0f2eSSasha Neftin 	}
36551a7c0f2eSSasha Neftin }
36561a7c0f2eSSasha Neftin 
3657c9a11c23SSasha Neftin /**
3658f817fa05SSasha Neftin  * igc_write_ivar - configure ivar for given MSI-X vector
3659f817fa05SSasha Neftin  * @hw: pointer to the HW structure
3660f817fa05SSasha Neftin  * @msix_vector: vector number we are allocating to a given ring
3661f817fa05SSasha Neftin  * @index: row index of IVAR register to write within IVAR table
3662f817fa05SSasha Neftin  * @offset: column offset in IVAR, should be a multiple of 8
3663f817fa05SSasha Neftin  *
3664f817fa05SSasha Neftin  * The IVAR table consists of 2 columns, each containing a cause
3665f817fa05SSasha Neftin  * allocation for an Rx and a Tx ring, and a variable number of rows
3666f817fa05SSasha Neftin  * depending on the number of queues supported.
3667f817fa05SSasha Neftin  */
3668f817fa05SSasha Neftin static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3669f817fa05SSasha Neftin 			   int index, int offset)
3670f817fa05SSasha Neftin {
3671f817fa05SSasha Neftin 	u32 ivar = array_rd32(IGC_IVAR0, index);
3672f817fa05SSasha Neftin 
3673f817fa05SSasha Neftin 	/* clear any bits that are currently set */
3674f817fa05SSasha Neftin 	ivar &= ~((u32)0xFF << offset);
3675f817fa05SSasha Neftin 
3676f817fa05SSasha Neftin 	/* write vector and valid bit */
3677f817fa05SSasha Neftin 	ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3678f817fa05SSasha Neftin 
3679f817fa05SSasha Neftin 	array_wr32(IGC_IVAR0, index, ivar);
3680f817fa05SSasha Neftin }
3681f817fa05SSasha Neftin 
3682f817fa05SSasha Neftin static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3683f817fa05SSasha Neftin {
3684f817fa05SSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
3685f817fa05SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
3686f817fa05SSasha Neftin 	int rx_queue = IGC_N0_QUEUE;
3687f817fa05SSasha Neftin 	int tx_queue = IGC_N0_QUEUE;
3688f817fa05SSasha Neftin 
3689f817fa05SSasha Neftin 	if (q_vector->rx.ring)
3690f817fa05SSasha Neftin 		rx_queue = q_vector->rx.ring->reg_idx;
3691f817fa05SSasha Neftin 	if (q_vector->tx.ring)
3692f817fa05SSasha Neftin 		tx_queue = q_vector->tx.ring->reg_idx;
3693f817fa05SSasha Neftin 
3694f817fa05SSasha Neftin 	switch (hw->mac.type) {
3695f817fa05SSasha Neftin 	case igc_i225:
3696f817fa05SSasha Neftin 		if (rx_queue > IGC_N0_QUEUE)
3697f817fa05SSasha Neftin 			igc_write_ivar(hw, msix_vector,
3698f817fa05SSasha Neftin 				       rx_queue >> 1,
3699f817fa05SSasha Neftin 				       (rx_queue & 0x1) << 4);
3700f817fa05SSasha Neftin 		if (tx_queue > IGC_N0_QUEUE)
3701f817fa05SSasha Neftin 			igc_write_ivar(hw, msix_vector,
3702f817fa05SSasha Neftin 				       tx_queue >> 1,
3703f817fa05SSasha Neftin 				       ((tx_queue & 0x1) << 4) + 8);
3704f817fa05SSasha Neftin 		q_vector->eims_value = BIT(msix_vector);
3705f817fa05SSasha Neftin 		break;
3706f817fa05SSasha Neftin 	default:
3707f817fa05SSasha Neftin 		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
3708f817fa05SSasha Neftin 		break;
3709f817fa05SSasha Neftin 	}
3710f817fa05SSasha Neftin 
3711f817fa05SSasha Neftin 	/* add q_vector eims value to global eims_enable_mask */
3712f817fa05SSasha Neftin 	adapter->eims_enable_mask |= q_vector->eims_value;
3713f817fa05SSasha Neftin 
3714f817fa05SSasha Neftin 	/* configure q_vector to set itr on first interrupt */
3715f817fa05SSasha Neftin 	q_vector->set_itr = 1;
3716f817fa05SSasha Neftin }
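
/* IVAR mapping example, assuming the i225 layout of four 8-bit cause
 * fields per 32-bit IVAR register: Rx queue 3 lands in row 3 >> 1 = 1
 * at bit offset (3 & 1) << 4 = 16, while Tx queue 3 shares that row
 * at offset 16 + 8 = 24.
 */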
3717f817fa05SSasha Neftin 
3718f817fa05SSasha Neftin /**
3719a146ea02SSasha Neftin  * igc_configure_msix - Configure MSI-X hardware
3720a146ea02SSasha Neftin  * @adapter: Pointer to adapter structure
3721a146ea02SSasha Neftin  *
3722a146ea02SSasha Neftin  * igc_configure_msix sets up the hardware to properly
3723a146ea02SSasha Neftin  * generate MSI-X interrupts.
3724a146ea02SSasha Neftin  */
3725a146ea02SSasha Neftin static void igc_configure_msix(struct igc_adapter *adapter)
3726a146ea02SSasha Neftin {
3727a146ea02SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
3728a146ea02SSasha Neftin 	int i, vector = 0;
3729a146ea02SSasha Neftin 	u32 tmp;
3730a146ea02SSasha Neftin 
3731a146ea02SSasha Neftin 	adapter->eims_enable_mask = 0;
3732a146ea02SSasha Neftin 
3733a146ea02SSasha Neftin 	/* set vector for other causes, i.e. link changes */
3734a146ea02SSasha Neftin 	switch (hw->mac.type) {
3735a146ea02SSasha Neftin 	case igc_i225:
3736a146ea02SSasha Neftin 		/* Turn on MSI-X capability first, or our settings
3737a146ea02SSasha Neftin 		 * won't stick.  And it will take days to debug.
3738a146ea02SSasha Neftin 		 */
3739a146ea02SSasha Neftin 		wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
3740a146ea02SSasha Neftin 		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
3741a146ea02SSasha Neftin 		     IGC_GPIE_NSICR);
3742a146ea02SSasha Neftin 
3743a146ea02SSasha Neftin 		/* enable msix_other interrupt */
3744a146ea02SSasha Neftin 		adapter->eims_other = BIT(vector);
3745a146ea02SSasha Neftin 		tmp = (vector++ | IGC_IVAR_VALID) << 8;
3746a146ea02SSasha Neftin 
3747a146ea02SSasha Neftin 		wr32(IGC_IVAR_MISC, tmp);
3748a146ea02SSasha Neftin 		break;
3749a146ea02SSasha Neftin 	default:
3750a146ea02SSasha Neftin 		/* do nothing, since nothing else supports MSI-X */
3751a146ea02SSasha Neftin 		break;
3752a146ea02SSasha Neftin 	} /* switch (hw->mac.type) */
3753a146ea02SSasha Neftin 
3754a146ea02SSasha Neftin 	adapter->eims_enable_mask |= adapter->eims_other;
3755a146ea02SSasha Neftin 
3756a146ea02SSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++)
3757a146ea02SSasha Neftin 		igc_assign_vector(adapter->q_vector[i], vector++);
3758a146ea02SSasha Neftin 
3759a146ea02SSasha Neftin 	wrfl();
3760a146ea02SSasha Neftin }
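
/* Resulting vector layout: vector 0 carries the "other" causes (link
 * status changes and similar), and vectors 1..num_q_vectors service
 * the queue interrupts assigned in igc_assign_vector() above.
 */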
3761a146ea02SSasha Neftin 
3762a146ea02SSasha Neftin /**
3763fccf939eSSasha Neftin  * igc_irq_enable - Enable default interrupt generation settings
3764fccf939eSSasha Neftin  * @adapter: board private structure
3765fccf939eSSasha Neftin  */
3766fccf939eSSasha Neftin static void igc_irq_enable(struct igc_adapter *adapter)
3767fccf939eSSasha Neftin {
3768fccf939eSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
3769fccf939eSSasha Neftin 
3770fccf939eSSasha Neftin 	if (adapter->msix_entries) {
3771fccf939eSSasha Neftin 		u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
3772fccf939eSSasha Neftin 		u32 regval = rd32(IGC_EIAC);
3773fccf939eSSasha Neftin 
3774fccf939eSSasha Neftin 		wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
3775fccf939eSSasha Neftin 		regval = rd32(IGC_EIAM);
3776fccf939eSSasha Neftin 		wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
3777fccf939eSSasha Neftin 		wr32(IGC_EIMS, adapter->eims_enable_mask);
3778fccf939eSSasha Neftin 		wr32(IGC_IMS, ims);
3779fccf939eSSasha Neftin 	} else {
3780fccf939eSSasha Neftin 		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3781fccf939eSSasha Neftin 		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3782fccf939eSSasha Neftin 	}
3783fccf939eSSasha Neftin }
3784fccf939eSSasha Neftin 
3785fccf939eSSasha Neftin /**
378635f9a78aSSasha Neftin  * igc_irq_disable - Mask off interrupt generation on the NIC
378735f9a78aSSasha Neftin  * @adapter: board private structure
378835f9a78aSSasha Neftin  */
378935f9a78aSSasha Neftin static void igc_irq_disable(struct igc_adapter *adapter)
379035f9a78aSSasha Neftin {
379135f9a78aSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
379235f9a78aSSasha Neftin 
379335f9a78aSSasha Neftin 	if (adapter->msix_entries) {
379435f9a78aSSasha Neftin 		u32 regval = rd32(IGC_EIAM);
379535f9a78aSSasha Neftin 
379635f9a78aSSasha Neftin 		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
379735f9a78aSSasha Neftin 		wr32(IGC_EIMC, adapter->eims_enable_mask);
379835f9a78aSSasha Neftin 		regval = rd32(IGC_EIAC);
379935f9a78aSSasha Neftin 		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
380035f9a78aSSasha Neftin 	}
380135f9a78aSSasha Neftin 
380235f9a78aSSasha Neftin 	wr32(IGC_IAM, 0);
380335f9a78aSSasha Neftin 	wr32(IGC_IMC, ~0);
380435f9a78aSSasha Neftin 	wrfl();
380535f9a78aSSasha Neftin 
380635f9a78aSSasha Neftin 	if (adapter->msix_entries) {
380735f9a78aSSasha Neftin 		int vector = 0, i;
380835f9a78aSSasha Neftin 
380935f9a78aSSasha Neftin 		synchronize_irq(adapter->msix_entries[vector++].vector);
381035f9a78aSSasha Neftin 
381135f9a78aSSasha Neftin 		for (i = 0; i < adapter->num_q_vectors; i++)
381235f9a78aSSasha Neftin 			synchronize_irq(adapter->msix_entries[vector++].vector);
381335f9a78aSSasha Neftin 	} else {
381435f9a78aSSasha Neftin 		synchronize_irq(adapter->pdev->irq);
381535f9a78aSSasha Neftin 	}
381635f9a78aSSasha Neftin }
381735f9a78aSSasha Neftin 
381863c92c9dSSasha Neftin void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
381963c92c9dSSasha Neftin 			      const u32 max_rss_queues)
382063c92c9dSSasha Neftin {
382163c92c9dSSasha Neftin 	/* Determine if we need to pair queues: if rss_queues > half of
382263c92c9dSSasha Neftin 	 * max_rss_queues, pair the queues in order to conserve interrupts
382363c92c9dSSasha Neftin 	 * due to limited supply.
382463c92c9dSSasha Neftin 	 */
382563c92c9dSSasha Neftin 	if (adapter->rss_queues > (max_rss_queues / 2))
382663c92c9dSSasha Neftin 		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
382763c92c9dSSasha Neftin 	else
382863c92c9dSSasha Neftin 		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
382963c92c9dSSasha Neftin }
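
/* E.g. with a 4-queue maximum, rss_queues = 3 exceeds half of the
 * supply (2), so Tx and Rx rings are paired onto shared vectors;
 * with rss_queues = 2 each ring type keeps its own vector.
 */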
383063c92c9dSSasha Neftin 
383163c92c9dSSasha Neftin unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
383263c92c9dSSasha Neftin {
38334d0710c2SAndre Guedes 	return IGC_MAX_RX_QUEUES;
383463c92c9dSSasha Neftin }
383563c92c9dSSasha Neftin 
383663c92c9dSSasha Neftin static void igc_init_queue_configuration(struct igc_adapter *adapter)
383763c92c9dSSasha Neftin {
383863c92c9dSSasha Neftin 	u32 max_rss_queues;
383963c92c9dSSasha Neftin 
384063c92c9dSSasha Neftin 	max_rss_queues = igc_get_max_rss_queues(adapter);
384163c92c9dSSasha Neftin 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
384263c92c9dSSasha Neftin 
384363c92c9dSSasha Neftin 	igc_set_flag_queue_pairs(adapter, max_rss_queues);
384463c92c9dSSasha Neftin }
384563c92c9dSSasha Neftin 
384663c92c9dSSasha Neftin /**
384763c92c9dSSasha Neftin  * igc_reset_q_vector - Reset config for interrupt vector
384863c92c9dSSasha Neftin  * @adapter: board private structure to initialize
384963c92c9dSSasha Neftin  * @v_idx: Index of vector to be reset
385063c92c9dSSasha Neftin  *
385163c92c9dSSasha Neftin  * If NAPI is enabled it will delete any references to the
385263c92c9dSSasha Neftin  * NAPI struct. This is preparation for igc_free_q_vector.
385363c92c9dSSasha Neftin  */
385463c92c9dSSasha Neftin static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
385563c92c9dSSasha Neftin {
385663c92c9dSSasha Neftin 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
385763c92c9dSSasha Neftin 
385863c92c9dSSasha Neftin 	/* if we're coming from igc_set_interrupt_capability, the vectors are
385963c92c9dSSasha Neftin 	 * not yet allocated
386063c92c9dSSasha Neftin 	 */
386163c92c9dSSasha Neftin 	if (!q_vector)
386263c92c9dSSasha Neftin 		return;
386363c92c9dSSasha Neftin 
386463c92c9dSSasha Neftin 	if (q_vector->tx.ring)
386563c92c9dSSasha Neftin 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
386663c92c9dSSasha Neftin 
386763c92c9dSSasha Neftin 	if (q_vector->rx.ring)
386863c92c9dSSasha Neftin 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
386963c92c9dSSasha Neftin 
387063c92c9dSSasha Neftin 	netif_napi_del(&q_vector->napi);
387163c92c9dSSasha Neftin }
387263c92c9dSSasha Neftin 
387363c92c9dSSasha Neftin /**
387463c92c9dSSasha Neftin  * igc_free_q_vector - Free memory allocated for specific interrupt vector
387563c92c9dSSasha Neftin  * @adapter: board private structure to initialize
387663c92c9dSSasha Neftin  * @v_idx: Index of vector to be freed
387763c92c9dSSasha Neftin  *
387863c92c9dSSasha Neftin  * This function frees the memory allocated to the q_vector.
387963c92c9dSSasha Neftin  */
388063c92c9dSSasha Neftin static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
388163c92c9dSSasha Neftin {
388263c92c9dSSasha Neftin 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
388363c92c9dSSasha Neftin 
388463c92c9dSSasha Neftin 	adapter->q_vector[v_idx] = NULL;
388563c92c9dSSasha Neftin 
388663c92c9dSSasha Neftin 	/* igc_get_stats64() might access the rings on this vector,
388763c92c9dSSasha Neftin 	 * we must wait a grace period before freeing it.
388863c92c9dSSasha Neftin 	 */
388963c92c9dSSasha Neftin 	if (q_vector)
389063c92c9dSSasha Neftin 		kfree_rcu(q_vector, rcu);
389163c92c9dSSasha Neftin }
389263c92c9dSSasha Neftin 
389363c92c9dSSasha Neftin /**
389463c92c9dSSasha Neftin  * igc_free_q_vectors - Free memory allocated for interrupt vectors
389563c92c9dSSasha Neftin  * @adapter: board private structure to initialize
389663c92c9dSSasha Neftin  *
389763c92c9dSSasha Neftin  * This function frees the memory allocated to the q_vectors.  In addition if
389863c92c9dSSasha Neftin  * NAPI is enabled it will delete any references to the NAPI struct prior
389963c92c9dSSasha Neftin  * to freeing the q_vector.
390063c92c9dSSasha Neftin  */
390163c92c9dSSasha Neftin static void igc_free_q_vectors(struct igc_adapter *adapter)
390263c92c9dSSasha Neftin {
390363c92c9dSSasha Neftin 	int v_idx = adapter->num_q_vectors;
390463c92c9dSSasha Neftin 
390563c92c9dSSasha Neftin 	adapter->num_tx_queues = 0;
390663c92c9dSSasha Neftin 	adapter->num_rx_queues = 0;
390763c92c9dSSasha Neftin 	adapter->num_q_vectors = 0;
390863c92c9dSSasha Neftin 
390963c92c9dSSasha Neftin 	while (v_idx--) {
391063c92c9dSSasha Neftin 		igc_reset_q_vector(adapter, v_idx);
391163c92c9dSSasha Neftin 		igc_free_q_vector(adapter, v_idx);
391263c92c9dSSasha Neftin 	}
391363c92c9dSSasha Neftin }
391463c92c9dSSasha Neftin 
391563c92c9dSSasha Neftin /**
391663c92c9dSSasha Neftin  * igc_update_itr - update the dynamic ITR value based on statistics
391763c92c9dSSasha Neftin  * @q_vector: pointer to q_vector
391863c92c9dSSasha Neftin  * @ring_container: ring info to update the itr for
391963c92c9dSSasha Neftin  *
392063c92c9dSSasha Neftin  * Stores a new ITR value based on packets and byte
392163c92c9dSSasha Neftin  * counts during the last interrupt.  The advantage of per interrupt
392263c92c9dSSasha Neftin  * computation is faster updates and more accurate ITR for the current
392363c92c9dSSasha Neftin  * traffic pattern.  Constants in this function were computed
392463c92c9dSSasha Neftin  * based on theoretical maximum wire speed and thresholds were set based
392563c92c9dSSasha Neftin  * on testing data as well as attempting to minimize response time
392663c92c9dSSasha Neftin  * while increasing bulk throughput.
392763c92c9dSSasha Neftin  * NOTE: These calculations are only valid when operating in a single-
392863c92c9dSSasha Neftin  * queue environment.
392963c92c9dSSasha Neftin  */
393063c92c9dSSasha Neftin static void igc_update_itr(struct igc_q_vector *q_vector,
393163c92c9dSSasha Neftin 			   struct igc_ring_container *ring_container)
393263c92c9dSSasha Neftin {
393363c92c9dSSasha Neftin 	unsigned int packets = ring_container->total_packets;
393463c92c9dSSasha Neftin 	unsigned int bytes = ring_container->total_bytes;
393563c92c9dSSasha Neftin 	u8 itrval = ring_container->itr;
393663c92c9dSSasha Neftin 
393763c92c9dSSasha Neftin 	/* no packets, exit with status unchanged */
393863c92c9dSSasha Neftin 	if (packets == 0)
393963c92c9dSSasha Neftin 		return;
394063c92c9dSSasha Neftin 
394163c92c9dSSasha Neftin 	switch (itrval) {
394263c92c9dSSasha Neftin 	case lowest_latency:
394363c92c9dSSasha Neftin 		/* handle TSO and jumbo frames */
394463c92c9dSSasha Neftin 		if (bytes / packets > 8000)
394563c92c9dSSasha Neftin 			itrval = bulk_latency;
394663c92c9dSSasha Neftin 		else if ((packets < 5) && (bytes > 512))
394763c92c9dSSasha Neftin 			itrval = low_latency;
394863c92c9dSSasha Neftin 		break;
394963c92c9dSSasha Neftin 	case low_latency:  /* 50 usec aka 20000 ints/s */
395063c92c9dSSasha Neftin 		if (bytes > 10000) {
395163c92c9dSSasha Neftin 			/* this if handles the TSO accounting */
395263c92c9dSSasha Neftin 			if (bytes / packets > 8000)
395363c92c9dSSasha Neftin 				itrval = bulk_latency;
395463c92c9dSSasha Neftin 			else if ((packets < 10) || ((bytes / packets) > 1200))
395563c92c9dSSasha Neftin 				itrval = bulk_latency;
395663c92c9dSSasha Neftin 			else if (packets > 35)
395763c92c9dSSasha Neftin 				itrval = lowest_latency;
395863c92c9dSSasha Neftin 		} else if (bytes / packets > 2000) {
395963c92c9dSSasha Neftin 			itrval = bulk_latency;
396063c92c9dSSasha Neftin 		} else if (packets <= 2 && bytes < 512) {
396163c92c9dSSasha Neftin 			itrval = lowest_latency;
396263c92c9dSSasha Neftin 		}
396363c92c9dSSasha Neftin 		break;
396463c92c9dSSasha Neftin 	case bulk_latency: /* 250 usec aka 4000 ints/s */
396563c92c9dSSasha Neftin 		if (bytes > 25000) {
396663c92c9dSSasha Neftin 			if (packets > 35)
396763c92c9dSSasha Neftin 				itrval = low_latency;
396863c92c9dSSasha Neftin 		} else if (bytes < 1500) {
396963c92c9dSSasha Neftin 			itrval = low_latency;
397063c92c9dSSasha Neftin 		}
397163c92c9dSSasha Neftin 		break;
397263c92c9dSSasha Neftin 	}
397363c92c9dSSasha Neftin 
397463c92c9dSSasha Neftin 	/* clear work counters since we have the values we need */
397563c92c9dSSasha Neftin 	ring_container->total_bytes = 0;
397663c92c9dSSasha Neftin 	ring_container->total_packets = 0;
397763c92c9dSSasha Neftin 
397863c92c9dSSasha Neftin 	/* write updated itr to ring container */
397963c92c9dSSasha Neftin 	ring_container->itr = itrval;
398063c92c9dSSasha Neftin }
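
/* Worked example: 20 packets totalling 30000 bytes while in
 * low_latency gives bytes/packets = 1500 > 1200, so the ring is
 * reclassified as bulk_latency; 2 packets totalling under 512 bytes
 * would move it to lowest_latency instead.
 */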
398163c92c9dSSasha Neftin 
398263c92c9dSSasha Neftin static void igc_set_itr(struct igc_q_vector *q_vector)
398363c92c9dSSasha Neftin {
398463c92c9dSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
398563c92c9dSSasha Neftin 	u32 new_itr = q_vector->itr_val;
398663c92c9dSSasha Neftin 	u8 current_itr = 0;
398763c92c9dSSasha Neftin 
398863c92c9dSSasha Neftin 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
398963c92c9dSSasha Neftin 	switch (adapter->link_speed) {
399063c92c9dSSasha Neftin 	case SPEED_10:
399163c92c9dSSasha Neftin 	case SPEED_100:
399263c92c9dSSasha Neftin 		current_itr = 0;
399363c92c9dSSasha Neftin 		new_itr = IGC_4K_ITR;
399463c92c9dSSasha Neftin 		goto set_itr_now;
399563c92c9dSSasha Neftin 	default:
399663c92c9dSSasha Neftin 		break;
399763c92c9dSSasha Neftin 	}
399863c92c9dSSasha Neftin 
399963c92c9dSSasha Neftin 	igc_update_itr(q_vector, &q_vector->tx);
400063c92c9dSSasha Neftin 	igc_update_itr(q_vector, &q_vector->rx);
400163c92c9dSSasha Neftin 
400263c92c9dSSasha Neftin 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
400363c92c9dSSasha Neftin 
400463c92c9dSSasha Neftin 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
400563c92c9dSSasha Neftin 	if (current_itr == lowest_latency &&
400663c92c9dSSasha Neftin 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
400763c92c9dSSasha Neftin 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
400863c92c9dSSasha Neftin 		current_itr = low_latency;
400963c92c9dSSasha Neftin 
401063c92c9dSSasha Neftin 	switch (current_itr) {
401163c92c9dSSasha Neftin 	/* counts and packets in update_itr are dependent on these numbers */
401263c92c9dSSasha Neftin 	case lowest_latency:
401363c92c9dSSasha Neftin 		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
401463c92c9dSSasha Neftin 		break;
401563c92c9dSSasha Neftin 	case low_latency:
401663c92c9dSSasha Neftin 		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
401763c92c9dSSasha Neftin 		break;
401863c92c9dSSasha Neftin 	case bulk_latency:
401963c92c9dSSasha Neftin 		new_itr = IGC_4K_ITR;  /* 4,000 ints/sec */
402063c92c9dSSasha Neftin 		break;
402163c92c9dSSasha Neftin 	default:
402263c92c9dSSasha Neftin 		break;
402363c92c9dSSasha Neftin 	}
402463c92c9dSSasha Neftin 
402563c92c9dSSasha Neftin set_itr_now:
402663c92c9dSSasha Neftin 	if (new_itr != q_vector->itr_val) {
402763c92c9dSSasha Neftin 		/* this attempts to bias the interrupt rate towards Bulk
402863c92c9dSSasha Neftin 		 * by adding intermediate steps when interrupt rate is
402963c92c9dSSasha Neftin 		 * increasing
403063c92c9dSSasha Neftin 		 */
403163c92c9dSSasha Neftin 		new_itr = new_itr > q_vector->itr_val ?
403263c92c9dSSasha Neftin 			  max((new_itr * q_vector->itr_val) /
403363c92c9dSSasha Neftin 			  (new_itr + (q_vector->itr_val >> 2)),
403463c92c9dSSasha Neftin 			  new_itr) : new_itr;
403563c92c9dSSasha Neftin 		/* Don't write the value here; it resets the adapter's
403663c92c9dSSasha Neftin 		 * internal timer, and causes us to delay far longer than
403763c92c9dSSasha Neftin 		 * we should between interrupts.  Instead, we write the ITR
403863c92c9dSSasha Neftin 		 * value at the beginning of the next interrupt so the timing
403963c92c9dSSasha Neftin 		 * ends up being correct.
404063c92c9dSSasha Neftin 		 */
404163c92c9dSSasha Neftin 		q_vector->itr_val = new_itr;
404263c92c9dSSasha Neftin 		q_vector->set_itr = 1;
404363c92c9dSSasha Neftin 	}
404463c92c9dSSasha Neftin }
404563c92c9dSSasha Neftin 
404663c92c9dSSasha Neftin static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
404763c92c9dSSasha Neftin {
404863c92c9dSSasha Neftin 	int v_idx = adapter->num_q_vectors;
404963c92c9dSSasha Neftin 
405063c92c9dSSasha Neftin 	if (adapter->msix_entries) {
405163c92c9dSSasha Neftin 		pci_disable_msix(adapter->pdev);
405263c92c9dSSasha Neftin 		kfree(adapter->msix_entries);
405363c92c9dSSasha Neftin 		adapter->msix_entries = NULL;
405463c92c9dSSasha Neftin 	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
405563c92c9dSSasha Neftin 		pci_disable_msi(adapter->pdev);
405663c92c9dSSasha Neftin 	}
405763c92c9dSSasha Neftin 
405863c92c9dSSasha Neftin 	while (v_idx--)
405963c92c9dSSasha Neftin 		igc_reset_q_vector(adapter, v_idx);
406063c92c9dSSasha Neftin }
406163c92c9dSSasha Neftin 
406263c92c9dSSasha Neftin /**
406363c92c9dSSasha Neftin  * igc_set_interrupt_capability - set MSI or MSI-X if supported
406463c92c9dSSasha Neftin  * @adapter: Pointer to adapter structure
406563c92c9dSSasha Neftin  * @msix: boolean value for MSI-X capability
406663c92c9dSSasha Neftin  *
406763c92c9dSSasha Neftin  * Attempt to configure interrupts using the best available
406863c92c9dSSasha Neftin  * capabilities of the hardware and kernel.
406963c92c9dSSasha Neftin  */
407063c92c9dSSasha Neftin static void igc_set_interrupt_capability(struct igc_adapter *adapter,
407163c92c9dSSasha Neftin 					 bool msix)
407263c92c9dSSasha Neftin {
407363c92c9dSSasha Neftin 	int numvecs, i;
407463c92c9dSSasha Neftin 	int err;
407563c92c9dSSasha Neftin 
407663c92c9dSSasha Neftin 	if (!msix)
407763c92c9dSSasha Neftin 		goto msi_only;
407863c92c9dSSasha Neftin 	adapter->flags |= IGC_FLAG_HAS_MSIX;
407963c92c9dSSasha Neftin 
408063c92c9dSSasha Neftin 	/* Number of supported queues. */
408163c92c9dSSasha Neftin 	adapter->num_rx_queues = adapter->rss_queues;
408263c92c9dSSasha Neftin 
408363c92c9dSSasha Neftin 	adapter->num_tx_queues = adapter->rss_queues;
408463c92c9dSSasha Neftin 
408563c92c9dSSasha Neftin 	/* start with one vector for every Rx queue */
408663c92c9dSSasha Neftin 	numvecs = adapter->num_rx_queues;
408763c92c9dSSasha Neftin 
408863c92c9dSSasha Neftin 	/* if Tx handler is separate add 1 for every Tx queue */
408963c92c9dSSasha Neftin 	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
409063c92c9dSSasha Neftin 		numvecs += adapter->num_tx_queues;
409163c92c9dSSasha Neftin 
409263c92c9dSSasha Neftin 	/* store the number of vectors reserved for queues */
409363c92c9dSSasha Neftin 	adapter->num_q_vectors = numvecs;
409463c92c9dSSasha Neftin 
409563c92c9dSSasha Neftin 	/* add 1 vector for link status interrupts */
409663c92c9dSSasha Neftin 	numvecs++;
409763c92c9dSSasha Neftin 
409863c92c9dSSasha Neftin 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
409963c92c9dSSasha Neftin 					GFP_KERNEL);
410063c92c9dSSasha Neftin 
410163c92c9dSSasha Neftin 	if (!adapter->msix_entries)
410263c92c9dSSasha Neftin 		return;
410363c92c9dSSasha Neftin 
410463c92c9dSSasha Neftin 	/* populate entry values */
410563c92c9dSSasha Neftin 	for (i = 0; i < numvecs; i++)
410663c92c9dSSasha Neftin 		adapter->msix_entries[i].entry = i;
410763c92c9dSSasha Neftin 
410863c92c9dSSasha Neftin 	err = pci_enable_msix_range(adapter->pdev,
410963c92c9dSSasha Neftin 				    adapter->msix_entries,
411063c92c9dSSasha Neftin 				    numvecs,
411163c92c9dSSasha Neftin 				    numvecs);
411263c92c9dSSasha Neftin 	if (err > 0)
411363c92c9dSSasha Neftin 		return;
411463c92c9dSSasha Neftin 
411563c92c9dSSasha Neftin 	kfree(adapter->msix_entries);
411663c92c9dSSasha Neftin 	adapter->msix_entries = NULL;
411763c92c9dSSasha Neftin 
411863c92c9dSSasha Neftin 	igc_reset_interrupt_capability(adapter);
411963c92c9dSSasha Neftin 
412063c92c9dSSasha Neftin msi_only:
412163c92c9dSSasha Neftin 	adapter->flags &= ~IGC_FLAG_HAS_MSIX;
412263c92c9dSSasha Neftin 
412363c92c9dSSasha Neftin 	adapter->rss_queues = 1;
412463c92c9dSSasha Neftin 	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
412563c92c9dSSasha Neftin 	adapter->num_rx_queues = 1;
412663c92c9dSSasha Neftin 	adapter->num_tx_queues = 1;
412763c92c9dSSasha Neftin 	adapter->num_q_vectors = 1;
412863c92c9dSSasha Neftin 	if (!pci_enable_msi(adapter->pdev))
412963c92c9dSSasha Neftin 		adapter->flags |= IGC_FLAG_HAS_MSI;
413063c92c9dSSasha Neftin }
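
/* Vector budget example: 4 Rx + 4 Tx queues need 4 queue vectors when
 * paired (plus 1 for link, i.e. 5 MSI-X entries in total), but
 * 8 + 1 = 9 entries when the Tx handlers are kept separate.
 */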
413163c92c9dSSasha Neftin 
413263c92c9dSSasha Neftin /**
413363c92c9dSSasha Neftin  * igc_update_ring_itr - update the dynamic ITR value based on packet size
413463c92c9dSSasha Neftin  * @q_vector: pointer to q_vector
413563c92c9dSSasha Neftin  *
413663c92c9dSSasha Neftin  * Stores a new ITR value based strictly on packet size.  This
413763c92c9dSSasha Neftin  * algorithm is less sophisticated than that used in igc_update_itr,
413863c92c9dSSasha Neftin  * due to the difficulty of synchronizing statistics across multiple
413963c92c9dSSasha Neftin  * receive rings.  The divisors and thresholds used by this function
414063c92c9dSSasha Neftin  * were determined based on theoretical maximum wire speed and testing
414163c92c9dSSasha Neftin  * data, in order to minimize response time while increasing bulk
414263c92c9dSSasha Neftin  * throughput.
414363c92c9dSSasha Neftin  * NOTE: This function is called only when operating in a multiqueue
414463c92c9dSSasha Neftin  * receive environment.
414563c92c9dSSasha Neftin  */
414663c92c9dSSasha Neftin static void igc_update_ring_itr(struct igc_q_vector *q_vector)
414763c92c9dSSasha Neftin {
414863c92c9dSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
414963c92c9dSSasha Neftin 	int new_val = q_vector->itr_val;
415063c92c9dSSasha Neftin 	int avg_wire_size = 0;
415163c92c9dSSasha Neftin 	unsigned int packets;
415263c92c9dSSasha Neftin 
415363c92c9dSSasha Neftin 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
415463c92c9dSSasha Neftin 	 * ints/sec - ITR timer value of 120 ticks.
415563c92c9dSSasha Neftin 	 */
415663c92c9dSSasha Neftin 	switch (adapter->link_speed) {
415763c92c9dSSasha Neftin 	case SPEED_10:
415863c92c9dSSasha Neftin 	case SPEED_100:
415963c92c9dSSasha Neftin 		new_val = IGC_4K_ITR;
416063c92c9dSSasha Neftin 		goto set_itr_val;
416163c92c9dSSasha Neftin 	default:
416263c92c9dSSasha Neftin 		break;
416363c92c9dSSasha Neftin 	}
416463c92c9dSSasha Neftin 
416563c92c9dSSasha Neftin 	packets = q_vector->rx.total_packets;
416663c92c9dSSasha Neftin 	if (packets)
416763c92c9dSSasha Neftin 		avg_wire_size = q_vector->rx.total_bytes / packets;
416863c92c9dSSasha Neftin 
416963c92c9dSSasha Neftin 	packets = q_vector->tx.total_packets;
417063c92c9dSSasha Neftin 	if (packets)
417163c92c9dSSasha Neftin 		avg_wire_size = max_t(u32, avg_wire_size,
417263c92c9dSSasha Neftin 				      q_vector->tx.total_bytes / packets);
417363c92c9dSSasha Neftin 
417463c92c9dSSasha Neftin 	/* if avg_wire_size isn't set no work was done */
417563c92c9dSSasha Neftin 	if (!avg_wire_size)
417663c92c9dSSasha Neftin 		goto clear_counts;
417763c92c9dSSasha Neftin 
417863c92c9dSSasha Neftin 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
417963c92c9dSSasha Neftin 	avg_wire_size += 24;
418063c92c9dSSasha Neftin 
418163c92c9dSSasha Neftin 	/* Don't starve jumbo frames */
418263c92c9dSSasha Neftin 	avg_wire_size = min(avg_wire_size, 3000);
418363c92c9dSSasha Neftin 
418463c92c9dSSasha Neftin 	/* Give a little boost to mid-size frames */
418563c92c9dSSasha Neftin 	if (avg_wire_size > 300 && avg_wire_size < 1200)
418663c92c9dSSasha Neftin 		new_val = avg_wire_size / 3;
418763c92c9dSSasha Neftin 	else
418863c92c9dSSasha Neftin 		new_val = avg_wire_size / 2;
418963c92c9dSSasha Neftin 
419063c92c9dSSasha Neftin 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
419163c92c9dSSasha Neftin 	if (new_val < IGC_20K_ITR &&
419263c92c9dSSasha Neftin 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
419363c92c9dSSasha Neftin 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
419463c92c9dSSasha Neftin 		new_val = IGC_20K_ITR;
419563c92c9dSSasha Neftin 
419663c92c9dSSasha Neftin set_itr_val:
419763c92c9dSSasha Neftin 	if (new_val != q_vector->itr_val) {
419863c92c9dSSasha Neftin 		q_vector->itr_val = new_val;
419963c92c9dSSasha Neftin 		q_vector->set_itr = 1;
420063c92c9dSSasha Neftin 	}
420163c92c9dSSasha Neftin clear_counts:
420263c92c9dSSasha Neftin 	q_vector->rx.total_bytes = 0;
420363c92c9dSSasha Neftin 	q_vector->rx.total_packets = 0;
420463c92c9dSSasha Neftin 	q_vector->tx.total_bytes = 0;
420563c92c9dSSasha Neftin 	q_vector->tx.total_packets = 0;
420663c92c9dSSasha Neftin }
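
/* Example: an average frame of 600 bytes becomes 624 after the
 * 24-byte CRC/preamble/gap adjustment, falls in the 300-1200
 * "mid-size" window and yields an ITR value of 624 / 3 = 208.
 */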
420763c92c9dSSasha Neftin 
420863c92c9dSSasha Neftin static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
420963c92c9dSSasha Neftin {
421063c92c9dSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
421163c92c9dSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
421263c92c9dSSasha Neftin 
421363c92c9dSSasha Neftin 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
421463c92c9dSSasha Neftin 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
421563c92c9dSSasha Neftin 		if (adapter->num_q_vectors == 1)
421663c92c9dSSasha Neftin 			igc_set_itr(q_vector);
421763c92c9dSSasha Neftin 		else
421863c92c9dSSasha Neftin 			igc_update_ring_itr(q_vector);
421963c92c9dSSasha Neftin 	}
422063c92c9dSSasha Neftin 
422163c92c9dSSasha Neftin 	if (!test_bit(__IGC_DOWN, &adapter->state)) {
422263c92c9dSSasha Neftin 		if (adapter->msix_entries)
422363c92c9dSSasha Neftin 			wr32(IGC_EIMS, q_vector->eims_value);
422463c92c9dSSasha Neftin 		else
422563c92c9dSSasha Neftin 			igc_irq_enable(adapter);
422663c92c9dSSasha Neftin 	}
422763c92c9dSSasha Neftin }
422863c92c9dSSasha Neftin 
422963c92c9dSSasha Neftin static void igc_add_ring(struct igc_ring *ring,
423063c92c9dSSasha Neftin 			 struct igc_ring_container *head)
423163c92c9dSSasha Neftin {
423263c92c9dSSasha Neftin 	head->ring = ring;
423363c92c9dSSasha Neftin 	head->count++;
423463c92c9dSSasha Neftin }
423563c92c9dSSasha Neftin 
423663c92c9dSSasha Neftin /**
423763c92c9dSSasha Neftin  * igc_cache_ring_register - Descriptor ring to register mapping
423863c92c9dSSasha Neftin  * @adapter: board private structure to initialize
423963c92c9dSSasha Neftin  *
424063c92c9dSSasha Neftin  * Once we know the feature-set enabled for the device, we'll cache
424163c92c9dSSasha Neftin  * the register offset the descriptor ring is assigned to.
424263c92c9dSSasha Neftin  */
424363c92c9dSSasha Neftin static void igc_cache_ring_register(struct igc_adapter *adapter)
424463c92c9dSSasha Neftin {
424563c92c9dSSasha Neftin 	int i = 0, j = 0;
424663c92c9dSSasha Neftin 
424763c92c9dSSasha Neftin 	switch (adapter->hw.mac.type) {
424863c92c9dSSasha Neftin 	case igc_i225:
424963c92c9dSSasha Neftin 	default:
425063c92c9dSSasha Neftin 		for (; i < adapter->num_rx_queues; i++)
425163c92c9dSSasha Neftin 			adapter->rx_ring[i]->reg_idx = i;
425263c92c9dSSasha Neftin 		for (; j < adapter->num_tx_queues; j++)
425363c92c9dSSasha Neftin 			adapter->tx_ring[j]->reg_idx = j;
425463c92c9dSSasha Neftin 		break;
425563c92c9dSSasha Neftin 	}
425663c92c9dSSasha Neftin }
425763c92c9dSSasha Neftin 
425863c92c9dSSasha Neftin /**
425963c92c9dSSasha Neftin  * igc_poll - NAPI Rx polling callback
426063c92c9dSSasha Neftin  * @napi: napi polling structure
426163c92c9dSSasha Neftin  * @budget: count of how many packets we should handle
426263c92c9dSSasha Neftin  */
426363c92c9dSSasha Neftin static int igc_poll(struct napi_struct *napi, int budget)
426463c92c9dSSasha Neftin {
426563c92c9dSSasha Neftin 	struct igc_q_vector *q_vector = container_of(napi,
426663c92c9dSSasha Neftin 						     struct igc_q_vector,
426763c92c9dSSasha Neftin 						     napi);
4268fc9df2a0SAndre Guedes 	struct igc_ring *rx_ring = q_vector->rx.ring;
426963c92c9dSSasha Neftin 	bool clean_complete = true;
427063c92c9dSSasha Neftin 	int work_done = 0;
427163c92c9dSSasha Neftin 
427263c92c9dSSasha Neftin 	if (q_vector->tx.ring)
427363c92c9dSSasha Neftin 		clean_complete = igc_clean_tx_irq(q_vector, budget);
427463c92c9dSSasha Neftin 
4275fc9df2a0SAndre Guedes 	if (rx_ring) {
4276fc9df2a0SAndre Guedes 		int cleaned = rx_ring->xsk_pool ?
4277fc9df2a0SAndre Guedes 			      igc_clean_rx_irq_zc(q_vector, budget) :
4278fc9df2a0SAndre Guedes 			      igc_clean_rx_irq(q_vector, budget);
427963c92c9dSSasha Neftin 
428063c92c9dSSasha Neftin 		work_done += cleaned;
428163c92c9dSSasha Neftin 		if (cleaned >= budget)
428263c92c9dSSasha Neftin 			clean_complete = false;
428363c92c9dSSasha Neftin 	}
428463c92c9dSSasha Neftin 
428563c92c9dSSasha Neftin 	/* If all work not completed, return budget and keep polling */
428663c92c9dSSasha Neftin 	if (!clean_complete)
428763c92c9dSSasha Neftin 		return budget;
428863c92c9dSSasha Neftin 
428963c92c9dSSasha Neftin 	/* Exit the polling mode, but don't re-enable interrupts if stack might
429063c92c9dSSasha Neftin 	 * poll us due to busy-polling
429163c92c9dSSasha Neftin 	 */
429263c92c9dSSasha Neftin 	if (likely(napi_complete_done(napi, work_done)))
429363c92c9dSSasha Neftin 		igc_ring_irq_enable(q_vector);
429463c92c9dSSasha Neftin 
429563c92c9dSSasha Neftin 	return min(work_done, budget - 1);
429663c92c9dSSasha Neftin }
429763c92c9dSSasha Neftin 
429863c92c9dSSasha Neftin /**
429963c92c9dSSasha Neftin  * igc_alloc_q_vector - Allocate memory for a single interrupt vector
430063c92c9dSSasha Neftin  * @adapter: board private structure to initialize
430163c92c9dSSasha Neftin  * @v_count: q_vectors allocated on adapter, used for ring interleaving
430263c92c9dSSasha Neftin  * @v_idx: index of vector in adapter struct
430363c92c9dSSasha Neftin  * @txr_count: total number of Tx rings to allocate
430463c92c9dSSasha Neftin  * @txr_idx: index of first Tx ring to allocate
430563c92c9dSSasha Neftin  * @rxr_count: total number of Rx rings to allocate
430663c92c9dSSasha Neftin  * @rxr_idx: index of first Rx ring to allocate
430763c92c9dSSasha Neftin  *
430863c92c9dSSasha Neftin  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
430963c92c9dSSasha Neftin  */
431063c92c9dSSasha Neftin static int igc_alloc_q_vector(struct igc_adapter *adapter,
431163c92c9dSSasha Neftin 			      unsigned int v_count, unsigned int v_idx,
431263c92c9dSSasha Neftin 			      unsigned int txr_count, unsigned int txr_idx,
431363c92c9dSSasha Neftin 			      unsigned int rxr_count, unsigned int rxr_idx)
431463c92c9dSSasha Neftin {
431563c92c9dSSasha Neftin 	struct igc_q_vector *q_vector;
431663c92c9dSSasha Neftin 	struct igc_ring *ring;
431763c92c9dSSasha Neftin 	int ring_count;
431863c92c9dSSasha Neftin 
431963c92c9dSSasha Neftin 	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
432063c92c9dSSasha Neftin 	if (txr_count > 1 || rxr_count > 1)
432163c92c9dSSasha Neftin 		return -ENOMEM;
432263c92c9dSSasha Neftin 
432363c92c9dSSasha Neftin 	ring_count = txr_count + rxr_count;
432463c92c9dSSasha Neftin 
432563c92c9dSSasha Neftin 	/* allocate q_vector and rings */
432663c92c9dSSasha Neftin 	q_vector = adapter->q_vector[v_idx];
432763c92c9dSSasha Neftin 	if (!q_vector)
432863c92c9dSSasha Neftin 		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
432963c92c9dSSasha Neftin 				   GFP_KERNEL);
433063c92c9dSSasha Neftin 	else
433163c92c9dSSasha Neftin 		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
433263c92c9dSSasha Neftin 	if (!q_vector)
433363c92c9dSSasha Neftin 		return -ENOMEM;
433463c92c9dSSasha Neftin 
433563c92c9dSSasha Neftin 	/* initialize NAPI */
433663c92c9dSSasha Neftin 	netif_napi_add(adapter->netdev, &q_vector->napi,
433763c92c9dSSasha Neftin 		       igc_poll, 64);
433863c92c9dSSasha Neftin 
433963c92c9dSSasha Neftin 	/* tie q_vector and adapter together */
434063c92c9dSSasha Neftin 	adapter->q_vector[v_idx] = q_vector;
434163c92c9dSSasha Neftin 	q_vector->adapter = adapter;
434263c92c9dSSasha Neftin 
434363c92c9dSSasha Neftin 	/* initialize work limits */
434463c92c9dSSasha Neftin 	q_vector->tx.work_limit = adapter->tx_work_limit;
434563c92c9dSSasha Neftin 
434663c92c9dSSasha Neftin 	/* initialize ITR configuration */
434763c92c9dSSasha Neftin 	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
434863c92c9dSSasha Neftin 	q_vector->itr_val = IGC_START_ITR;
434963c92c9dSSasha Neftin 
435063c92c9dSSasha Neftin 	/* initialize pointer to rings */
435163c92c9dSSasha Neftin 	ring = q_vector->ring;
435263c92c9dSSasha Neftin 
435363c92c9dSSasha Neftin 	/* initialize ITR */
435463c92c9dSSasha Neftin 	if (rxr_count) {
435563c92c9dSSasha Neftin 		/* rx or rx/tx vector */
435663c92c9dSSasha Neftin 		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
435763c92c9dSSasha Neftin 			q_vector->itr_val = adapter->rx_itr_setting;
435863c92c9dSSasha Neftin 	} else {
435963c92c9dSSasha Neftin 		/* tx only vector */
436063c92c9dSSasha Neftin 		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
436163c92c9dSSasha Neftin 			q_vector->itr_val = adapter->tx_itr_setting;
436263c92c9dSSasha Neftin 	}
436363c92c9dSSasha Neftin 
436463c92c9dSSasha Neftin 	if (txr_count) {
436563c92c9dSSasha Neftin 		/* assign generic ring traits */
436663c92c9dSSasha Neftin 		ring->dev = &adapter->pdev->dev;
436763c92c9dSSasha Neftin 		ring->netdev = adapter->netdev;
436863c92c9dSSasha Neftin 
436963c92c9dSSasha Neftin 		/* configure backlink on ring */
437063c92c9dSSasha Neftin 		ring->q_vector = q_vector;
437163c92c9dSSasha Neftin 
437263c92c9dSSasha Neftin 		/* update q_vector Tx values */
437363c92c9dSSasha Neftin 		igc_add_ring(ring, &q_vector->tx);
437463c92c9dSSasha Neftin 
437563c92c9dSSasha Neftin 		/* apply Tx specific ring traits */
437663c92c9dSSasha Neftin 		ring->count = adapter->tx_ring_count;
437763c92c9dSSasha Neftin 		ring->queue_index = txr_idx;
437863c92c9dSSasha Neftin 
437963c92c9dSSasha Neftin 		/* assign ring to adapter */
438063c92c9dSSasha Neftin 		adapter->tx_ring[txr_idx] = ring;
438163c92c9dSSasha Neftin 
438263c92c9dSSasha Neftin 		/* push pointer to next ring */
438363c92c9dSSasha Neftin 		ring++;
438463c92c9dSSasha Neftin 	}
438563c92c9dSSasha Neftin 
438663c92c9dSSasha Neftin 	if (rxr_count) {
438763c92c9dSSasha Neftin 		/* assign generic ring traits */
438863c92c9dSSasha Neftin 		ring->dev = &adapter->pdev->dev;
438963c92c9dSSasha Neftin 		ring->netdev = adapter->netdev;
439063c92c9dSSasha Neftin 
439163c92c9dSSasha Neftin 		/* configure backlink on ring */
439263c92c9dSSasha Neftin 		ring->q_vector = q_vector;
439363c92c9dSSasha Neftin 
439463c92c9dSSasha Neftin 		/* update q_vector Rx values */
439563c92c9dSSasha Neftin 		igc_add_ring(ring, &q_vector->rx);
439663c92c9dSSasha Neftin 
439763c92c9dSSasha Neftin 		/* apply Rx specific ring traits */
439863c92c9dSSasha Neftin 		ring->count = adapter->rx_ring_count;
439963c92c9dSSasha Neftin 		ring->queue_index = rxr_idx;
440063c92c9dSSasha Neftin 
440163c92c9dSSasha Neftin 		/* assign ring to adapter */
440263c92c9dSSasha Neftin 		adapter->rx_ring[rxr_idx] = ring;
440363c92c9dSSasha Neftin 	}
440463c92c9dSSasha Neftin 
440563c92c9dSSasha Neftin 	return 0;
440663c92c9dSSasha Neftin }
440763c92c9dSSasha Neftin 
440863c92c9dSSasha Neftin /**
440963c92c9dSSasha Neftin  * igc_alloc_q_vectors - Allocate memory for interrupt vectors
441063c92c9dSSasha Neftin  * @adapter: board private structure to initialize
441163c92c9dSSasha Neftin  *
441263c92c9dSSasha Neftin  * We allocate one q_vector per queue interrupt.  If allocation fails we
441363c92c9dSSasha Neftin  * return -ENOMEM.
441463c92c9dSSasha Neftin  */
441563c92c9dSSasha Neftin static int igc_alloc_q_vectors(struct igc_adapter *adapter)
441663c92c9dSSasha Neftin {
441763c92c9dSSasha Neftin 	int rxr_remaining = adapter->num_rx_queues;
441863c92c9dSSasha Neftin 	int txr_remaining = adapter->num_tx_queues;
441963c92c9dSSasha Neftin 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
442063c92c9dSSasha Neftin 	int q_vectors = adapter->num_q_vectors;
442163c92c9dSSasha Neftin 	int err;
442263c92c9dSSasha Neftin 
442363c92c9dSSasha Neftin 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
442463c92c9dSSasha Neftin 		for (; rxr_remaining; v_idx++) {
442563c92c9dSSasha Neftin 			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
442663c92c9dSSasha Neftin 						 0, 0, 1, rxr_idx);
442763c92c9dSSasha Neftin 
442863c92c9dSSasha Neftin 			if (err)
442963c92c9dSSasha Neftin 				goto err_out;
443063c92c9dSSasha Neftin 
443163c92c9dSSasha Neftin 			/* update counts and index */
443263c92c9dSSasha Neftin 			rxr_remaining--;
443363c92c9dSSasha Neftin 			rxr_idx++;
443463c92c9dSSasha Neftin 		}
443563c92c9dSSasha Neftin 	}
443663c92c9dSSasha Neftin 
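	/* Spread the remaining rings across the remaining vectors. Queue
	 * configuration keeps num_q_vectors large enough that each pass
	 * hands out at most one Rx and one Tx ring, which
	 * igc_alloc_q_vector enforces.
	 */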
443763c92c9dSSasha Neftin 	for (; v_idx < q_vectors; v_idx++) {
443863c92c9dSSasha Neftin 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
443963c92c9dSSasha Neftin 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
444063c92c9dSSasha Neftin 
444163c92c9dSSasha Neftin 		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
444263c92c9dSSasha Neftin 					 tqpv, txr_idx, rqpv, rxr_idx);
444363c92c9dSSasha Neftin 
444463c92c9dSSasha Neftin 		if (err)
444563c92c9dSSasha Neftin 			goto err_out;
444663c92c9dSSasha Neftin 
444763c92c9dSSasha Neftin 		/* update counts and index */
444863c92c9dSSasha Neftin 		rxr_remaining -= rqpv;
444963c92c9dSSasha Neftin 		txr_remaining -= tqpv;
445063c92c9dSSasha Neftin 		rxr_idx++;
445163c92c9dSSasha Neftin 		txr_idx++;
445263c92c9dSSasha Neftin 	}
445363c92c9dSSasha Neftin 
445463c92c9dSSasha Neftin 	return 0;
445563c92c9dSSasha Neftin 
445663c92c9dSSasha Neftin err_out:
445763c92c9dSSasha Neftin 	adapter->num_tx_queues = 0;
445863c92c9dSSasha Neftin 	adapter->num_rx_queues = 0;
445963c92c9dSSasha Neftin 	adapter->num_q_vectors = 0;
446063c92c9dSSasha Neftin 
446163c92c9dSSasha Neftin 	while (v_idx--)
446263c92c9dSSasha Neftin 		igc_free_q_vector(adapter, v_idx);
446363c92c9dSSasha Neftin 
446463c92c9dSSasha Neftin 	return -ENOMEM;
446563c92c9dSSasha Neftin }
446663c92c9dSSasha Neftin 
446763c92c9dSSasha Neftin /**
446863c92c9dSSasha Neftin  * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
446963c92c9dSSasha Neftin  * @adapter: Pointer to adapter structure
447063c92c9dSSasha Neftin  * @msix: boolean for MSI-X capability
447163c92c9dSSasha Neftin  *
447263c92c9dSSasha Neftin  * This function initializes the interrupts and allocates all of the queues.
447363c92c9dSSasha Neftin  */
447463c92c9dSSasha Neftin static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
447563c92c9dSSasha Neftin {
447625f06effSAndre Guedes 	struct net_device *dev = adapter->netdev;
447763c92c9dSSasha Neftin 	int err = 0;
447863c92c9dSSasha Neftin 
447963c92c9dSSasha Neftin 	igc_set_interrupt_capability(adapter, msix);
448063c92c9dSSasha Neftin 
448163c92c9dSSasha Neftin 	err = igc_alloc_q_vectors(adapter);
448263c92c9dSSasha Neftin 	if (err) {
448325f06effSAndre Guedes 		netdev_err(dev, "Unable to allocate memory for vectors\n");
448463c92c9dSSasha Neftin 		goto err_alloc_q_vectors;
448563c92c9dSSasha Neftin 	}
448663c92c9dSSasha Neftin 
448763c92c9dSSasha Neftin 	igc_cache_ring_register(adapter);
448863c92c9dSSasha Neftin 
448963c92c9dSSasha Neftin 	return 0;
449063c92c9dSSasha Neftin 
449163c92c9dSSasha Neftin err_alloc_q_vectors:
449263c92c9dSSasha Neftin 	igc_reset_interrupt_capability(adapter);
449363c92c9dSSasha Neftin 	return err;
449463c92c9dSSasha Neftin }
449563c92c9dSSasha Neftin 
449663c92c9dSSasha Neftin /**
449763c92c9dSSasha Neftin  * igc_sw_init - Initialize general software structures (struct igc_adapter)
449863c92c9dSSasha Neftin  * @adapter: board private structure to initialize
449963c92c9dSSasha Neftin  *
450063c92c9dSSasha Neftin  * igc_sw_init initializes the Adapter private data structure.
450163c92c9dSSasha Neftin  * Fields are initialized based on PCI device information and
450263c92c9dSSasha Neftin  * OS network device settings (MTU size).
450363c92c9dSSasha Neftin  */
450463c92c9dSSasha Neftin static int igc_sw_init(struct igc_adapter *adapter)
450563c92c9dSSasha Neftin {
450663c92c9dSSasha Neftin 	struct net_device *netdev = adapter->netdev;
450763c92c9dSSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
450863c92c9dSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
450963c92c9dSSasha Neftin 
451063c92c9dSSasha Neftin 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
451163c92c9dSSasha Neftin 
451263c92c9dSSasha Neftin 	/* set default ring sizes */
451363c92c9dSSasha Neftin 	adapter->tx_ring_count = IGC_DEFAULT_TXD;
451463c92c9dSSasha Neftin 	adapter->rx_ring_count = IGC_DEFAULT_RXD;
451563c92c9dSSasha Neftin 
451663c92c9dSSasha Neftin 	/* set default ITR values */
451763c92c9dSSasha Neftin 	adapter->rx_itr_setting = IGC_DEFAULT_ITR;
451863c92c9dSSasha Neftin 	adapter->tx_itr_setting = IGC_DEFAULT_ITR;
451963c92c9dSSasha Neftin 
452063c92c9dSSasha Neftin 	/* set default work limits */
452163c92c9dSSasha Neftin 	adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
452263c92c9dSSasha Neftin 
452363c92c9dSSasha Neftin 	/* adjust max frame to be at least the size of a standard frame */
452463c92c9dSSasha Neftin 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
452563c92c9dSSasha Neftin 				VLAN_HLEN;
452663c92c9dSSasha Neftin 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
452763c92c9dSSasha Neftin 
452842fc5dc0SAndre Guedes 	mutex_init(&adapter->nfc_rule_lock);
4529d957c601SAndre Guedes 	INIT_LIST_HEAD(&adapter->nfc_rule_list);
4530d957c601SAndre Guedes 	adapter->nfc_rule_count = 0;
4531d957c601SAndre Guedes 
453263c92c9dSSasha Neftin 	spin_lock_init(&adapter->stats64_lock);
453363c92c9dSSasha Neftin 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
453463c92c9dSSasha Neftin 	adapter->flags |= IGC_FLAG_HAS_MSIX;
453563c92c9dSSasha Neftin 
453663c92c9dSSasha Neftin 	igc_init_queue_configuration(adapter);
453763c92c9dSSasha Neftin 
453863c92c9dSSasha Neftin 	/* This call may decrease the number of queues */
453963c92c9dSSasha Neftin 	if (igc_init_interrupt_scheme(adapter, true)) {
454025f06effSAndre Guedes 		netdev_err(netdev, "Unable to allocate memory for queues\n");
454163c92c9dSSasha Neftin 		return -ENOMEM;
454263c92c9dSSasha Neftin 	}
454363c92c9dSSasha Neftin 
454463c92c9dSSasha Neftin 	/* Explicitly disable IRQ since the NIC can be in any state. */
454563c92c9dSSasha Neftin 	igc_irq_disable(adapter);
454663c92c9dSSasha Neftin 
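	/* Mark the adapter down until the interface is brought up */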
454763c92c9dSSasha Neftin 	set_bit(__IGC_DOWN, &adapter->state);
454863c92c9dSSasha Neftin 
454963c92c9dSSasha Neftin 	return 0;
455063c92c9dSSasha Neftin }
455163c92c9dSSasha Neftin 
455235f9a78aSSasha Neftin /**
4553c9a11c23SSasha Neftin  * igc_up - Bring the interface up and prepare it to handle traffic
4554c9a11c23SSasha Neftin  * @adapter: board private structure
4555c9a11c23SSasha Neftin  */
45568c5ad0daSSasha Neftin void igc_up(struct igc_adapter *adapter)
4557c9a11c23SSasha Neftin {
45583df25e4cSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
4559c9a11c23SSasha Neftin 	int i = 0;
4560c9a11c23SSasha Neftin 
4561c9a11c23SSasha Neftin 	/* hardware has been reset, we need to reload some things */
4562c9a11c23SSasha Neftin 	igc_configure(adapter);
4563c9a11c23SSasha Neftin 
4564c9a11c23SSasha Neftin 	clear_bit(__IGC_DOWN, &adapter->state);
4565c9a11c23SSasha Neftin 
4566c9a11c23SSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++)
4567c9a11c23SSasha Neftin 		napi_enable(&adapter->q_vector[i]->napi);
45683df25e4cSSasha Neftin 
45693df25e4cSSasha Neftin 	if (adapter->msix_entries)
45703df25e4cSSasha Neftin 		igc_configure_msix(adapter);
45713df25e4cSSasha Neftin 	else
45723df25e4cSSasha Neftin 		igc_assign_vector(adapter->q_vector[0], 0);
45733df25e4cSSasha Neftin 
45743df25e4cSSasha Neftin 	/* Clear any pending interrupts. */
45753df25e4cSSasha Neftin 	rd32(IGC_ICR);
45763df25e4cSSasha Neftin 	igc_irq_enable(adapter);
457713b5b7fdSSasha Neftin 
457813b5b7fdSSasha Neftin 	netif_tx_start_all_queues(adapter->netdev);
457913b5b7fdSSasha Neftin 
458013b5b7fdSSasha Neftin 	/* start the watchdog. */
4581501f2309SJiapeng Zhong 	hw->mac.get_link_status = true;
4582208983f0SSasha Neftin 	schedule_work(&adapter->watchdog_task);
4583c9a11c23SSasha Neftin }
4584c9a11c23SSasha Neftin 
4585c9a11c23SSasha Neftin /**
4586c9a11c23SSasha Neftin  * igc_update_stats - Update the board statistics counters
4587c9a11c23SSasha Neftin  * @adapter: board private structure
4588c9a11c23SSasha Neftin  */
458936b9fea6SSasha Neftin void igc_update_stats(struct igc_adapter *adapter)
4590c9a11c23SSasha Neftin {
459136b9fea6SSasha Neftin 	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
459236b9fea6SSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
459336b9fea6SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
459436b9fea6SSasha Neftin 	u64 _bytes, _packets;
459536b9fea6SSasha Neftin 	u64 bytes, packets;
459636b9fea6SSasha Neftin 	unsigned int start;
459736b9fea6SSasha Neftin 	u32 mpc;
459836b9fea6SSasha Neftin 	int i;
459936b9fea6SSasha Neftin 
460036b9fea6SSasha Neftin 	/* Prevent stats update while adapter is being reset, or if the pci
460136b9fea6SSasha Neftin 	 * connection is down.
460236b9fea6SSasha Neftin 	 */
460336b9fea6SSasha Neftin 	if (adapter->link_speed == 0)
460436b9fea6SSasha Neftin 		return;
460536b9fea6SSasha Neftin 	if (pci_channel_offline(pdev))
460636b9fea6SSasha Neftin 		return;
460736b9fea6SSasha Neftin 
460836b9fea6SSasha Neftin 	packets = 0;
460936b9fea6SSasha Neftin 	bytes = 0;
461036b9fea6SSasha Neftin 
461136b9fea6SSasha Neftin 	rcu_read_lock();
461236b9fea6SSasha Neftin 	for (i = 0; i < adapter->num_rx_queues; i++) {
461336b9fea6SSasha Neftin 		struct igc_ring *ring = adapter->rx_ring[i];
461436b9fea6SSasha Neftin 		u32 rqdpc = rd32(IGC_RQDPC(i));
461536b9fea6SSasha Neftin 
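		/* RQDPC counts packets dropped because no receive
		 * descriptors were available; clear it after reading so
		 * the next read is a fresh delta.
		 */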
461636b9fea6SSasha Neftin 		if (hw->mac.type >= igc_i225)
461736b9fea6SSasha Neftin 			wr32(IGC_RQDPC(i), 0);
461836b9fea6SSasha Neftin 
461936b9fea6SSasha Neftin 		if (rqdpc) {
462036b9fea6SSasha Neftin 			ring->rx_stats.drops += rqdpc;
462136b9fea6SSasha Neftin 			net_stats->rx_fifo_errors += rqdpc;
462236b9fea6SSasha Neftin 		}
462336b9fea6SSasha Neftin 
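		/* Retry until the snapshot is not torn by a concurrent
		 * writer, keeping the 64-bit byte/packet counters
		 * consistent on 32-bit systems.
		 */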
462436b9fea6SSasha Neftin 		do {
462536b9fea6SSasha Neftin 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
462636b9fea6SSasha Neftin 			_bytes = ring->rx_stats.bytes;
462736b9fea6SSasha Neftin 			_packets = ring->rx_stats.packets;
462836b9fea6SSasha Neftin 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
462936b9fea6SSasha Neftin 		bytes += _bytes;
463036b9fea6SSasha Neftin 		packets += _packets;
463136b9fea6SSasha Neftin 	}
463236b9fea6SSasha Neftin 
463336b9fea6SSasha Neftin 	net_stats->rx_bytes = bytes;
463436b9fea6SSasha Neftin 	net_stats->rx_packets = packets;
463536b9fea6SSasha Neftin 
463636b9fea6SSasha Neftin 	packets = 0;
463736b9fea6SSasha Neftin 	bytes = 0;
463836b9fea6SSasha Neftin 	for (i = 0; i < adapter->num_tx_queues; i++) {
463936b9fea6SSasha Neftin 		struct igc_ring *ring = adapter->tx_ring[i];
464036b9fea6SSasha Neftin 
464136b9fea6SSasha Neftin 		do {
464236b9fea6SSasha Neftin 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
464336b9fea6SSasha Neftin 			_bytes = ring->tx_stats.bytes;
464436b9fea6SSasha Neftin 			_packets = ring->tx_stats.packets;
464536b9fea6SSasha Neftin 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
464636b9fea6SSasha Neftin 		bytes += _bytes;
464736b9fea6SSasha Neftin 		packets += _packets;
464836b9fea6SSasha Neftin 	}
464936b9fea6SSasha Neftin 	net_stats->tx_bytes = bytes;
465036b9fea6SSasha Neftin 	net_stats->tx_packets = packets;
465136b9fea6SSasha Neftin 	rcu_read_unlock();
465236b9fea6SSasha Neftin 
465336b9fea6SSasha Neftin 	/* read stats registers */
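	/* These MAC statistics registers clear on read, so each read
	 * yields a delta that accumulates into the software counters.
	 */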
465436b9fea6SSasha Neftin 	adapter->stats.crcerrs += rd32(IGC_CRCERRS);
465536b9fea6SSasha Neftin 	adapter->stats.gprc += rd32(IGC_GPRC);
465636b9fea6SSasha Neftin 	adapter->stats.gorc += rd32(IGC_GORCL);
465736b9fea6SSasha Neftin 	rd32(IGC_GORCH); /* clear GORCL */
465836b9fea6SSasha Neftin 	adapter->stats.bprc += rd32(IGC_BPRC);
465936b9fea6SSasha Neftin 	adapter->stats.mprc += rd32(IGC_MPRC);
466036b9fea6SSasha Neftin 	adapter->stats.roc += rd32(IGC_ROC);
466136b9fea6SSasha Neftin 
466236b9fea6SSasha Neftin 	adapter->stats.prc64 += rd32(IGC_PRC64);
466336b9fea6SSasha Neftin 	adapter->stats.prc127 += rd32(IGC_PRC127);
466436b9fea6SSasha Neftin 	adapter->stats.prc255 += rd32(IGC_PRC255);
466536b9fea6SSasha Neftin 	adapter->stats.prc511 += rd32(IGC_PRC511);
466636b9fea6SSasha Neftin 	adapter->stats.prc1023 += rd32(IGC_PRC1023);
466736b9fea6SSasha Neftin 	adapter->stats.prc1522 += rd32(IGC_PRC1522);
466840edc734SSasha Neftin 	adapter->stats.tlpic += rd32(IGC_TLPIC);
466940edc734SSasha Neftin 	adapter->stats.rlpic += rd32(IGC_RLPIC);
4670e6529944SSasha Neftin 	adapter->stats.hgptc += rd32(IGC_HGPTC);
467136b9fea6SSasha Neftin 
467236b9fea6SSasha Neftin 	mpc = rd32(IGC_MPC);
467336b9fea6SSasha Neftin 	adapter->stats.mpc += mpc;
467436b9fea6SSasha Neftin 	net_stats->rx_fifo_errors += mpc;
467536b9fea6SSasha Neftin 	adapter->stats.scc += rd32(IGC_SCC);
467636b9fea6SSasha Neftin 	adapter->stats.ecol += rd32(IGC_ECOL);
467736b9fea6SSasha Neftin 	adapter->stats.mcc += rd32(IGC_MCC);
467836b9fea6SSasha Neftin 	adapter->stats.latecol += rd32(IGC_LATECOL);
467936b9fea6SSasha Neftin 	adapter->stats.dc += rd32(IGC_DC);
468036b9fea6SSasha Neftin 	adapter->stats.rlec += rd32(IGC_RLEC);
468136b9fea6SSasha Neftin 	adapter->stats.xonrxc += rd32(IGC_XONRXC);
468236b9fea6SSasha Neftin 	adapter->stats.xontxc += rd32(IGC_XONTXC);
468336b9fea6SSasha Neftin 	adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
468436b9fea6SSasha Neftin 	adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
468536b9fea6SSasha Neftin 	adapter->stats.fcruc += rd32(IGC_FCRUC);
468636b9fea6SSasha Neftin 	adapter->stats.gptc += rd32(IGC_GPTC);
468736b9fea6SSasha Neftin 	adapter->stats.gotc += rd32(IGC_GOTCL);
468836b9fea6SSasha Neftin 	rd32(IGC_GOTCH); /* clear GOTCL */
468936b9fea6SSasha Neftin 	adapter->stats.rnbc += rd32(IGC_RNBC);
469036b9fea6SSasha Neftin 	adapter->stats.ruc += rd32(IGC_RUC);
469136b9fea6SSasha Neftin 	adapter->stats.rfc += rd32(IGC_RFC);
469236b9fea6SSasha Neftin 	adapter->stats.rjc += rd32(IGC_RJC);
469336b9fea6SSasha Neftin 	adapter->stats.tor += rd32(IGC_TORH);
469436b9fea6SSasha Neftin 	adapter->stats.tot += rd32(IGC_TOTH);
469536b9fea6SSasha Neftin 	adapter->stats.tpr += rd32(IGC_TPR);
469636b9fea6SSasha Neftin 
469736b9fea6SSasha Neftin 	adapter->stats.ptc64 += rd32(IGC_PTC64);
469836b9fea6SSasha Neftin 	adapter->stats.ptc127 += rd32(IGC_PTC127);
469936b9fea6SSasha Neftin 	adapter->stats.ptc255 += rd32(IGC_PTC255);
470036b9fea6SSasha Neftin 	adapter->stats.ptc511 += rd32(IGC_PTC511);
470136b9fea6SSasha Neftin 	adapter->stats.ptc1023 += rd32(IGC_PTC1023);
470236b9fea6SSasha Neftin 	adapter->stats.ptc1522 += rd32(IGC_PTC1522);
470336b9fea6SSasha Neftin 
470436b9fea6SSasha Neftin 	adapter->stats.mptc += rd32(IGC_MPTC);
470536b9fea6SSasha Neftin 	adapter->stats.bptc += rd32(IGC_BPTC);
470636b9fea6SSasha Neftin 
470736b9fea6SSasha Neftin 	adapter->stats.tpt += rd32(IGC_TPT);
470836b9fea6SSasha Neftin 	adapter->stats.colc += rd32(IGC_COLC);
470951c657b4SSasha Neftin 	adapter->stats.colc += rd32(IGC_RERC);
471036b9fea6SSasha Neftin 
471136b9fea6SSasha Neftin 	adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
471236b9fea6SSasha Neftin 
471336b9fea6SSasha Neftin 	adapter->stats.tsctc += rd32(IGC_TSCTC);
471436b9fea6SSasha Neftin 
471536b9fea6SSasha Neftin 	adapter->stats.iac += rd32(IGC_IAC);
471636b9fea6SSasha Neftin 
471736b9fea6SSasha Neftin 	/* Fill out the OS statistics structure */
471836b9fea6SSasha Neftin 	net_stats->multicast = adapter->stats.mprc;
471936b9fea6SSasha Neftin 	net_stats->collisions = adapter->stats.colc;
472036b9fea6SSasha Neftin 
472136b9fea6SSasha Neftin 	/* Rx Errors */
472236b9fea6SSasha Neftin 
472336b9fea6SSasha Neftin 	/* RLEC on some newer hardware can be incorrect so build
472436b9fea6SSasha Neftin 	 * our own version based on RUC and ROC
472536b9fea6SSasha Neftin 	 */
472636b9fea6SSasha Neftin 	net_stats->rx_errors = adapter->stats.rxerrc +
472736b9fea6SSasha Neftin 		adapter->stats.crcerrs + adapter->stats.algnerrc +
472836b9fea6SSasha Neftin 		adapter->stats.ruc + adapter->stats.roc +
472936b9fea6SSasha Neftin 		adapter->stats.cexterr;
473036b9fea6SSasha Neftin 	net_stats->rx_length_errors = adapter->stats.ruc +
473136b9fea6SSasha Neftin 				      adapter->stats.roc;
473236b9fea6SSasha Neftin 	net_stats->rx_crc_errors = adapter->stats.crcerrs;
473336b9fea6SSasha Neftin 	net_stats->rx_frame_errors = adapter->stats.algnerrc;
473436b9fea6SSasha Neftin 	net_stats->rx_missed_errors = adapter->stats.mpc;
473536b9fea6SSasha Neftin 
473636b9fea6SSasha Neftin 	/* Tx Errors */
473736b9fea6SSasha Neftin 	net_stats->tx_errors = adapter->stats.ecol +
473836b9fea6SSasha Neftin 			       adapter->stats.latecol;
473936b9fea6SSasha Neftin 	net_stats->tx_aborted_errors = adapter->stats.ecol;
474036b9fea6SSasha Neftin 	net_stats->tx_window_errors = adapter->stats.latecol;
474136b9fea6SSasha Neftin 	net_stats->tx_carrier_errors = adapter->stats.tncrs;
474236b9fea6SSasha Neftin 
474336b9fea6SSasha Neftin 	/* Tx Dropped needs to be maintained elsewhere */
474436b9fea6SSasha Neftin 
474536b9fea6SSasha Neftin 	/* Management Stats */
474636b9fea6SSasha Neftin 	adapter->stats.mgptc += rd32(IGC_MGTPTC);
474736b9fea6SSasha Neftin 	adapter->stats.mgprc += rd32(IGC_MGTPRC);
474836b9fea6SSasha Neftin 	adapter->stats.mgpdc += rd32(IGC_MGTPDC);
4749c9a11c23SSasha Neftin }
4750c9a11c23SSasha Neftin 
4751c9a11c23SSasha Neftin /**
4752c9a11c23SSasha Neftin  * igc_down - Close the interface
4753c9a11c23SSasha Neftin  * @adapter: board private structure
4754c9a11c23SSasha Neftin  */
47558c5ad0daSSasha Neftin void igc_down(struct igc_adapter *adapter)
4756c9a11c23SSasha Neftin {
4757c9a11c23SSasha Neftin 	struct net_device *netdev = adapter->netdev;
47580507ef8aSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
47590507ef8aSSasha Neftin 	u32 tctl, rctl;
4760c9a11c23SSasha Neftin 	int i = 0;
4761c9a11c23SSasha Neftin 
4762c9a11c23SSasha Neftin 	set_bit(__IGC_DOWN, &adapter->state);
4763c9a11c23SSasha Neftin 
4764b03c49cdSVinicius Costa Gomes 	igc_ptp_suspend(adapter);
4765b03c49cdSVinicius Costa Gomes 
47660507ef8aSSasha Neftin 	/* disable receives in the hardware */
47670507ef8aSSasha Neftin 	rctl = rd32(IGC_RCTL);
47680507ef8aSSasha Neftin 	wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
47690507ef8aSSasha Neftin 	/* flush and sleep below */
47700507ef8aSSasha Neftin 
4771c9a11c23SSasha Neftin 	/* set trans_start so we don't get spurious watchdogs during reset */
4772c9a11c23SSasha Neftin 	netif_trans_update(netdev);
4773c9a11c23SSasha Neftin 
4774c9a11c23SSasha Neftin 	netif_carrier_off(netdev);
4775c9a11c23SSasha Neftin 	netif_tx_stop_all_queues(netdev);
4776c9a11c23SSasha Neftin 
47770507ef8aSSasha Neftin 	/* disable transmits in the hardware */
47780507ef8aSSasha Neftin 	tctl = rd32(IGC_TCTL);
47790507ef8aSSasha Neftin 	tctl &= ~IGC_TCTL_EN;
47800507ef8aSSasha Neftin 	wr32(IGC_TCTL, tctl);
47810507ef8aSSasha Neftin 	/* flush both disables and wait for them to finish */
47820507ef8aSSasha Neftin 	wrfl();
47830507ef8aSSasha Neftin 	usleep_range(10000, 20000);
47840507ef8aSSasha Neftin 
47850507ef8aSSasha Neftin 	igc_irq_disable(adapter);
47860507ef8aSSasha Neftin 
47870507ef8aSSasha Neftin 	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
47880507ef8aSSasha Neftin 
47890507ef8aSSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++) {
47900507ef8aSSasha Neftin 		if (adapter->q_vector[i]) {
47910507ef8aSSasha Neftin 			napi_synchronize(&adapter->q_vector[i]->napi);
4792c9a11c23SSasha Neftin 			napi_disable(&adapter->q_vector[i]->napi);
47930507ef8aSSasha Neftin 		}
47940507ef8aSSasha Neftin 	}
47950507ef8aSSasha Neftin 
47960507ef8aSSasha Neftin 	del_timer_sync(&adapter->watchdog_timer);
47970507ef8aSSasha Neftin 	del_timer_sync(&adapter->phy_info_timer);
47980507ef8aSSasha Neftin 
47990507ef8aSSasha Neftin 	/* record the stats before reset */
48000507ef8aSSasha Neftin 	spin_lock(&adapter->stats64_lock);
48010507ef8aSSasha Neftin 	igc_update_stats(adapter);
48020507ef8aSSasha Neftin 	spin_unlock(&adapter->stats64_lock);
4803c9a11c23SSasha Neftin 
4804c9a11c23SSasha Neftin 	adapter->link_speed = 0;
4805c9a11c23SSasha Neftin 	adapter->link_duplex = 0;
48060507ef8aSSasha Neftin 
48070507ef8aSSasha Neftin 	if (!pci_channel_offline(adapter->pdev))
48080507ef8aSSasha Neftin 		igc_reset(adapter);
48090507ef8aSSasha Neftin 
48100507ef8aSSasha Neftin 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
48110507ef8aSSasha Neftin 	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
48120507ef8aSSasha Neftin 
48130507ef8aSSasha Neftin 	igc_clean_all_tx_rings(adapter);
48140507ef8aSSasha Neftin 	igc_clean_all_rx_rings(adapter);
48150507ef8aSSasha Neftin }
48160507ef8aSSasha Neftin 
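/* Bounce the interface to apply new configuration, serializing against
 * any concurrent reset via the __IGC_RESETTING bit.
 */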
48178c5ad0daSSasha Neftin void igc_reinit_locked(struct igc_adapter *adapter)
48180507ef8aSSasha Neftin {
48190507ef8aSSasha Neftin 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
48200507ef8aSSasha Neftin 		usleep_range(1000, 2000);
48210507ef8aSSasha Neftin 	igc_down(adapter);
48220507ef8aSSasha Neftin 	igc_up(adapter);
48230507ef8aSSasha Neftin 	clear_bit(__IGC_RESETTING, &adapter->state);
48240507ef8aSSasha Neftin }
48250507ef8aSSasha Neftin 
48260507ef8aSSasha Neftin static void igc_reset_task(struct work_struct *work)
48270507ef8aSSasha Neftin {
48280507ef8aSSasha Neftin 	struct igc_adapter *adapter;
48290507ef8aSSasha Neftin 
48300507ef8aSSasha Neftin 	adapter = container_of(work, struct igc_adapter, reset_task);
48310507ef8aSSasha Neftin 
48326da26237SSasha Neftin 	rtnl_lock();
48336da26237SSasha Neftin 	/* If we're already down or resetting, just bail */
48346da26237SSasha Neftin 	if (test_bit(__IGC_DOWN, &adapter->state) ||
48356da26237SSasha Neftin 	    test_bit(__IGC_RESETTING, &adapter->state)) {
48366da26237SSasha Neftin 		rtnl_unlock();
48376da26237SSasha Neftin 		return;
48386da26237SSasha Neftin 	}
48396da26237SSasha Neftin 
48409c384ee3SSasha Neftin 	igc_rings_dump(adapter);
48419c384ee3SSasha Neftin 	igc_regs_dump(adapter);
48420507ef8aSSasha Neftin 	netdev_err(adapter->netdev, "Reset adapter\n");
48430507ef8aSSasha Neftin 	igc_reinit_locked(adapter);
48446da26237SSasha Neftin 	rtnl_unlock();
4845c9a11c23SSasha Neftin }
4846c9a11c23SSasha Neftin 
4847c9a11c23SSasha Neftin /**
4848c9a11c23SSasha Neftin  * igc_change_mtu - Change the Maximum Transfer Unit
4849c9a11c23SSasha Neftin  * @netdev: network interface device structure
4850c9a11c23SSasha Neftin  * @new_mtu: new value for maximum frame size
4851c9a11c23SSasha Neftin  *
4852c9a11c23SSasha Neftin  * Returns 0 on success, negative on failure
4853c9a11c23SSasha Neftin  */
4854c9a11c23SSasha Neftin static int igc_change_mtu(struct net_device *netdev, int new_mtu)
4855c9a11c23SSasha Neftin {
4856c9a11c23SSasha Neftin 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4857c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
4858c9a11c23SSasha Neftin 
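	/* XDP frames must fit in a single buffer, so MTUs above the
	 * standard Ethernet payload size are rejected while a program is
	 * attached.
	 */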
485926575105SAndre Guedes 	if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
486026575105SAndre Guedes 		netdev_dbg(netdev, "Jumbo frames not supported with XDP\n");
486126575105SAndre Guedes 		return -EINVAL;
486226575105SAndre Guedes 	}
486326575105SAndre Guedes 
4864c9a11c23SSasha Neftin 	/* adjust max frame to be at least the size of a standard frame */
4865c9a11c23SSasha Neftin 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4866c9a11c23SSasha Neftin 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
4867c9a11c23SSasha Neftin 
4868c9a11c23SSasha Neftin 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4869c9a11c23SSasha Neftin 		usleep_range(1000, 2000);
4870c9a11c23SSasha Neftin 
4871c9a11c23SSasha Neftin 	/* igc_down has a dependency on max_frame_size */
4872c9a11c23SSasha Neftin 	adapter->max_frame_size = max_frame;
4873c9a11c23SSasha Neftin 
4874c9a11c23SSasha Neftin 	if (netif_running(netdev))
4875c9a11c23SSasha Neftin 		igc_down(adapter);
4876c9a11c23SSasha Neftin 
487725f06effSAndre Guedes 	netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4878c9a11c23SSasha Neftin 	netdev->mtu = new_mtu;
4879c9a11c23SSasha Neftin 
4880c9a11c23SSasha Neftin 	if (netif_running(netdev))
4881c9a11c23SSasha Neftin 		igc_up(adapter);
4882c9a11c23SSasha Neftin 	else
4883c9a11c23SSasha Neftin 		igc_reset(adapter);
4884c9a11c23SSasha Neftin 
4885c9a11c23SSasha Neftin 	clear_bit(__IGC_RESETTING, &adapter->state);
4886c9a11c23SSasha Neftin 
4887c9a11c23SSasha Neftin 	return 0;
4888c9a11c23SSasha Neftin }
4889c9a11c23SSasha Neftin 
4890c9a11c23SSasha Neftin /**
48916b7ed22aSVinicius Costa Gomes  * igc_get_stats64 - Get System Network Statistics
4892c9a11c23SSasha Neftin  * @netdev: network interface device structure
48936b7ed22aSVinicius Costa Gomes  * @stats: rtnl_link_stats64 pointer
4894c9a11c23SSasha Neftin  *
4895c9a11c23SSasha Neftin  * Fills @stats with the device's 64-bit statistics.
4896c9a11c23SSasha Neftin  * The statistics are updated here and also from the timer callback.
4897c9a11c23SSasha Neftin  */
48986b7ed22aSVinicius Costa Gomes static void igc_get_stats64(struct net_device *netdev,
48996b7ed22aSVinicius Costa Gomes 			    struct rtnl_link_stats64 *stats)
4900c9a11c23SSasha Neftin {
4901c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
4902c9a11c23SSasha Neftin 
49036b7ed22aSVinicius Costa Gomes 	spin_lock(&adapter->stats64_lock);
4904c9a11c23SSasha Neftin 	if (!test_bit(__IGC_RESETTING, &adapter->state))
4905c9a11c23SSasha Neftin 		igc_update_stats(adapter);
49066b7ed22aSVinicius Costa Gomes 	memcpy(stats, &adapter->stats64, sizeof(*stats));
49076b7ed22aSVinicius Costa Gomes 	spin_unlock(&adapter->stats64_lock);
4908c9a11c23SSasha Neftin }
4909c9a11c23SSasha Neftin 
491065cd3a72SSasha Neftin static netdev_features_t igc_fix_features(struct net_device *netdev,
491165cd3a72SSasha Neftin 					  netdev_features_t features)
491265cd3a72SSasha Neftin {
491365cd3a72SSasha Neftin 	/* Since there is no support for separate Rx/Tx vlan accel
491465cd3a72SSasha Neftin 	 * enable/disable make sure Tx flag is always in same state as Rx.
491565cd3a72SSasha Neftin 	 */
491665cd3a72SSasha Neftin 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
491765cd3a72SSasha Neftin 		features |= NETIF_F_HW_VLAN_CTAG_TX;
491865cd3a72SSasha Neftin 	else
491965cd3a72SSasha Neftin 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
492065cd3a72SSasha Neftin 
492165cd3a72SSasha Neftin 	return features;
492265cd3a72SSasha Neftin }
492365cd3a72SSasha Neftin 
492465cd3a72SSasha Neftin static int igc_set_features(struct net_device *netdev,
492565cd3a72SSasha Neftin 			    netdev_features_t features)
492665cd3a72SSasha Neftin {
492765cd3a72SSasha Neftin 	netdev_features_t changed = netdev->features ^ features;
492865cd3a72SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
492965cd3a72SSasha Neftin 
49308d744963SMuhammad Husaini Zulkifli 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
49318d744963SMuhammad Husaini Zulkifli 		igc_vlan_mode(netdev, features);
49328d744963SMuhammad Husaini Zulkifli 
493365cd3a72SSasha Neftin 	/* Nothing further to do unless RXALL or NTUPLE changed */
493465cd3a72SSasha Neftin 	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
493565cd3a72SSasha Neftin 		return 0;
493665cd3a72SSasha Neftin 
4937e256ec83SAndre Guedes 	if (!(features & NETIF_F_NTUPLE))
4938e256ec83SAndre Guedes 		igc_flush_nfc_rules(adapter);
493965cd3a72SSasha Neftin 
494065cd3a72SSasha Neftin 	netdev->features = features;
494165cd3a72SSasha Neftin 
494265cd3a72SSasha Neftin 	if (netif_running(netdev))
494365cd3a72SSasha Neftin 		igc_reinit_locked(adapter);
494465cd3a72SSasha Neftin 	else
494565cd3a72SSasha Neftin 		igc_reset(adapter);
494665cd3a72SSasha Neftin 
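	/* A positive return tells the stack that netdev->features has
	 * already been updated here.
	 */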
494765cd3a72SSasha Neftin 	return 1;
494865cd3a72SSasha Neftin }
494965cd3a72SSasha Neftin 
495065cd3a72SSasha Neftin static netdev_features_t
495165cd3a72SSasha Neftin igc_features_check(struct sk_buff *skb, struct net_device *dev,
495265cd3a72SSasha Neftin 		   netdev_features_t features)
495365cd3a72SSasha Neftin {
495465cd3a72SSasha Neftin 	unsigned int network_hdr_len, mac_hdr_len;
495565cd3a72SSasha Neftin 
495665cd3a72SSasha Neftin 	/* Make certain the headers can be described by a context descriptor */
495765cd3a72SSasha Neftin 	mac_hdr_len = skb_network_header(skb) - skb->data;
495865cd3a72SSasha Neftin 	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
495965cd3a72SSasha Neftin 		return features & ~(NETIF_F_HW_CSUM |
496065cd3a72SSasha Neftin 				    NETIF_F_SCTP_CRC |
496165cd3a72SSasha Neftin 				    NETIF_F_HW_VLAN_CTAG_TX |
496265cd3a72SSasha Neftin 				    NETIF_F_TSO |
496365cd3a72SSasha Neftin 				    NETIF_F_TSO6);
496465cd3a72SSasha Neftin 
496565cd3a72SSasha Neftin 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
496665cd3a72SSasha Neftin 	if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
496765cd3a72SSasha Neftin 		return features & ~(NETIF_F_HW_CSUM |
496865cd3a72SSasha Neftin 				    NETIF_F_SCTP_CRC |
496965cd3a72SSasha Neftin 				    NETIF_F_TSO |
497065cd3a72SSasha Neftin 				    NETIF_F_TSO6);
497165cd3a72SSasha Neftin 
497265cd3a72SSasha Neftin 	/* We can only support IPv4 TSO in tunnels if we can mangle the
497365cd3a72SSasha Neftin 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
497465cd3a72SSasha Neftin 	 */
497565cd3a72SSasha Neftin 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
497665cd3a72SSasha Neftin 		features &= ~NETIF_F_TSO;
497765cd3a72SSasha Neftin 
497865cd3a72SSasha Neftin 	return features;
497965cd3a72SSasha Neftin }
498065cd3a72SSasha Neftin 
49812c344ae2SVinicius Costa Gomes static void igc_tsync_interrupt(struct igc_adapter *adapter)
49822c344ae2SVinicius Costa Gomes {
498387938851SEderson de Souza 	u32 ack, tsauxc, sec, nsec, tsicr;
49842c344ae2SVinicius Costa Gomes 	struct igc_hw *hw = &adapter->hw;
498564433e5bSEderson de Souza 	struct ptp_clock_event event;
498687938851SEderson de Souza 	struct timespec64 ts;
498787938851SEderson de Souza 
498887938851SEderson de Souza 	tsicr = rd32(IGC_TSICR);
498987938851SEderson de Souza 	ack = 0;
49902c344ae2SVinicius Costa Gomes 
499164433e5bSEderson de Souza 	if (tsicr & IGC_TSICR_SYS_WRAP) {
499264433e5bSEderson de Souza 		event.type = PTP_CLOCK_PPS;
499364433e5bSEderson de Souza 		if (adapter->ptp_caps.pps)
499464433e5bSEderson de Souza 			ptp_clock_event(adapter->ptp_clock, &event);
499564433e5bSEderson de Souza 		ack |= IGC_TSICR_SYS_WRAP;
499664433e5bSEderson de Souza 	}
499764433e5bSEderson de Souza 
49982c344ae2SVinicius Costa Gomes 	if (tsicr & IGC_TSICR_TXTS) {
49992c344ae2SVinicius Costa Gomes 		/* retrieve hardware timestamp */
50002c344ae2SVinicius Costa Gomes 		schedule_work(&adapter->ptp_tx_work);
50012c344ae2SVinicius Costa Gomes 		ack |= IGC_TSICR_TXTS;
50022c344ae2SVinicius Costa Gomes 	}
50032c344ae2SVinicius Costa Gomes 
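	/* A target-time interrupt means SYSTIM passed the programmed
	 * instant: advance the compare value by one period and re-arm so
	 * the periodic output keeps running.
	 */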
500487938851SEderson de Souza 	if (tsicr & IGC_TSICR_TT0) {
500587938851SEderson de Souza 		spin_lock(&adapter->tmreg_lock);
500687938851SEderson de Souza 		ts = timespec64_add(adapter->perout[0].start,
500787938851SEderson de Souza 				    adapter->perout[0].period);
500887938851SEderson de Souza 		wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
500987938851SEderson de Souza 		wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
501087938851SEderson de Souza 		tsauxc = rd32(IGC_TSAUXC);
501187938851SEderson de Souza 		tsauxc |= IGC_TSAUXC_EN_TT0;
501287938851SEderson de Souza 		wr32(IGC_TSAUXC, tsauxc);
501387938851SEderson de Souza 		adapter->perout[0].start = ts;
501487938851SEderson de Souza 		spin_unlock(&adapter->tmreg_lock);
501587938851SEderson de Souza 		ack |= IGC_TSICR_TT0;
501687938851SEderson de Souza 	}
501787938851SEderson de Souza 
501887938851SEderson de Souza 	if (tsicr & IGC_TSICR_TT1) {
501987938851SEderson de Souza 		spin_lock(&adapter->tmreg_lock);
502087938851SEderson de Souza 		ts = timespec64_add(adapter->perout[1].start,
502187938851SEderson de Souza 				    adapter->perout[1].period);
502287938851SEderson de Souza 		wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
502387938851SEderson de Souza 		wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
502487938851SEderson de Souza 		tsauxc = rd32(IGC_TSAUXC);
502587938851SEderson de Souza 		tsauxc |= IGC_TSAUXC_EN_TT1;
502687938851SEderson de Souza 		wr32(IGC_TSAUXC, tsauxc);
502787938851SEderson de Souza 		adapter->perout[1].start = ts;
502887938851SEderson de Souza 		spin_unlock(&adapter->tmreg_lock);
502987938851SEderson de Souza 		ack |= IGC_TSICR_TT1;
503087938851SEderson de Souza 	}
503187938851SEderson de Souza 
503287938851SEderson de Souza 	if (tsicr & IGC_TSICR_AUTT0) {
503387938851SEderson de Souza 		nsec = rd32(IGC_AUXSTMPL0);
503487938851SEderson de Souza 		sec  = rd32(IGC_AUXSTMPH0);
503587938851SEderson de Souza 		event.type = PTP_CLOCK_EXTTS;
503687938851SEderson de Souza 		event.index = 0;
503787938851SEderson de Souza 		event.timestamp = sec * NSEC_PER_SEC + nsec;
503887938851SEderson de Souza 		ptp_clock_event(adapter->ptp_clock, &event);
503987938851SEderson de Souza 		ack |= IGC_TSICR_AUTT0;
504087938851SEderson de Souza 	}
504187938851SEderson de Souza 
504287938851SEderson de Souza 	if (tsicr & IGC_TSICR_AUTT1) {
504387938851SEderson de Souza 		nsec = rd32(IGC_AUXSTMPL1);
504487938851SEderson de Souza 		sec  = rd32(IGC_AUXSTMPH1);
504587938851SEderson de Souza 		event.type = PTP_CLOCK_EXTTS;
504687938851SEderson de Souza 		event.index = 1;
504787938851SEderson de Souza 		event.timestamp = sec * NSEC_PER_SEC + nsec;
504887938851SEderson de Souza 		ptp_clock_event(adapter->ptp_clock, &event);
504987938851SEderson de Souza 		ack |= IGC_TSICR_AUTT1;
505087938851SEderson de Souza 	}
505187938851SEderson de Souza 
50522c344ae2SVinicius Costa Gomes 	/* acknowledge the interrupts */
50532c344ae2SVinicius Costa Gomes 	wr32(IGC_TSICR, ack);
50542c344ae2SVinicius Costa Gomes }
50552c344ae2SVinicius Costa Gomes 
505613b5b7fdSSasha Neftin /**
50573df25e4cSSasha Neftin  * igc_msix_other - msix other interrupt handler
50583df25e4cSSasha Neftin  * @irq: interrupt number
50593df25e4cSSasha Neftin  * @data: pointer to the adapter structure
50603df25e4cSSasha Neftin  */
50613df25e4cSSasha Neftin static irqreturn_t igc_msix_other(int irq, void *data)
50623df25e4cSSasha Neftin {
50633df25e4cSSasha Neftin 	struct igc_adapter *adapter = data;
50643df25e4cSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
50653df25e4cSSasha Neftin 	u32 icr = rd32(IGC_ICR);
50663df25e4cSSasha Neftin 
50673df25e4cSSasha Neftin 	/* reading ICR causes bit 31 of EICR to be cleared */
50683df25e4cSSasha Neftin 	if (icr & IGC_ICR_DRSTA)
50693df25e4cSSasha Neftin 		schedule_work(&adapter->reset_task);
50703df25e4cSSasha Neftin 
50713df25e4cSSasha Neftin 	if (icr & IGC_ICR_DOUTSYNC) {
50723df25e4cSSasha Neftin 		/* HW is reporting DMA is out of sync */
50733df25e4cSSasha Neftin 		adapter->stats.doosync++;
50743df25e4cSSasha Neftin 	}
50753df25e4cSSasha Neftin 
50763df25e4cSSasha Neftin 	if (icr & IGC_ICR_LSC) {
5077501f2309SJiapeng Zhong 		hw->mac.get_link_status = true;
50783df25e4cSSasha Neftin 		/* guard against interrupt when we're going down */
50793df25e4cSSasha Neftin 		if (!test_bit(__IGC_DOWN, &adapter->state))
50803df25e4cSSasha Neftin 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
50813df25e4cSSasha Neftin 	}
50823df25e4cSSasha Neftin 
50832c344ae2SVinicius Costa Gomes 	if (icr & IGC_ICR_TS)
50842c344ae2SVinicius Costa Gomes 		igc_tsync_interrupt(adapter);
50852c344ae2SVinicius Costa Gomes 
50863df25e4cSSasha Neftin 	wr32(IGC_EIMS, adapter->eims_other);
50873df25e4cSSasha Neftin 
50883df25e4cSSasha Neftin 	return IRQ_HANDLED;
50893df25e4cSSasha Neftin }
50903df25e4cSSasha Neftin 
509155cd7386SSasha Neftin static void igc_write_itr(struct igc_q_vector *q_vector)
509255cd7386SSasha Neftin {
509355cd7386SSasha Neftin 	u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
509455cd7386SSasha Neftin 
509555cd7386SSasha Neftin 	if (!q_vector->set_itr)
509655cd7386SSasha Neftin 		return;
509755cd7386SSasha Neftin 
509855cd7386SSasha Neftin 	if (!itr_val)
509955cd7386SSasha Neftin 		itr_val = IGC_ITR_VAL_MASK;
510055cd7386SSasha Neftin 
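	/* Update the interval without resetting the internal counter */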
510155cd7386SSasha Neftin 	itr_val |= IGC_EITR_CNT_IGNR;
510255cd7386SSasha Neftin 
510355cd7386SSasha Neftin 	writel(itr_val, q_vector->itr_register);
510455cd7386SSasha Neftin 	q_vector->set_itr = 0;
510555cd7386SSasha Neftin }
510655cd7386SSasha Neftin 
51073df25e4cSSasha Neftin static irqreturn_t igc_msix_ring(int irq, void *data)
51083df25e4cSSasha Neftin {
51093df25e4cSSasha Neftin 	struct igc_q_vector *q_vector = data;
51103df25e4cSSasha Neftin 
51113df25e4cSSasha Neftin 	/* Write the ITR value calculated from the previous interrupt. */
51123df25e4cSSasha Neftin 	igc_write_itr(q_vector);
51133df25e4cSSasha Neftin 
51143df25e4cSSasha Neftin 	napi_schedule(&q_vector->napi);
51153df25e4cSSasha Neftin 
51163df25e4cSSasha Neftin 	return IRQ_HANDLED;
51173df25e4cSSasha Neftin }
51183df25e4cSSasha Neftin 
51193df25e4cSSasha Neftin /**
51203df25e4cSSasha Neftin  * igc_request_msix - Initialize MSI-X interrupts
51213df25e4cSSasha Neftin  * @adapter: Pointer to adapter structure
51223df25e4cSSasha Neftin  *
51233df25e4cSSasha Neftin  * igc_request_msix allocates MSI-X vectors and requests interrupts from the
51243df25e4cSSasha Neftin  * kernel.
51253df25e4cSSasha Neftin  */
51263df25e4cSSasha Neftin static int igc_request_msix(struct igc_adapter *adapter)
51273df25e4cSSasha Neftin {
5128373e2829SSasha Neftin 	unsigned int num_q_vectors = adapter->num_q_vectors;
51293df25e4cSSasha Neftin 	int i = 0, err = 0, vector = 0, free_vector = 0;
51303df25e4cSSasha Neftin 	struct net_device *netdev = adapter->netdev;
51313df25e4cSSasha Neftin 
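	/* Vector 0 carries link and other non-queue causes; the ring
	 * vectors follow it.
	 */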
51323df25e4cSSasha Neftin 	err = request_irq(adapter->msix_entries[vector].vector,
51333df25e4cSSasha Neftin 			  &igc_msix_other, 0, netdev->name, adapter);
51343df25e4cSSasha Neftin 	if (err)
51353df25e4cSSasha Neftin 		goto err_out;
51363df25e4cSSasha Neftin 
5137373e2829SSasha Neftin 	if (num_q_vectors > MAX_Q_VECTORS) {
5138373e2829SSasha Neftin 		num_q_vectors = MAX_Q_VECTORS;
5139373e2829SSasha Neftin 		dev_warn(&adapter->pdev->dev,
5140373e2829SSasha Neftin 			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
5141373e2829SSasha Neftin 			 adapter->num_q_vectors, MAX_Q_VECTORS);
5142373e2829SSasha Neftin 	}
5143373e2829SSasha Neftin 	for (i = 0; i < num_q_vectors; i++) {
51443df25e4cSSasha Neftin 		struct igc_q_vector *q_vector = adapter->q_vector[i];
51453df25e4cSSasha Neftin 
51463df25e4cSSasha Neftin 		vector++;
51473df25e4cSSasha Neftin 
51483df25e4cSSasha Neftin 		q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
51493df25e4cSSasha Neftin 
51503df25e4cSSasha Neftin 		if (q_vector->rx.ring && q_vector->tx.ring)
51513df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
51523df25e4cSSasha Neftin 				q_vector->rx.ring->queue_index);
51533df25e4cSSasha Neftin 		else if (q_vector->tx.ring)
51543df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
51553df25e4cSSasha Neftin 				q_vector->tx.ring->queue_index);
51563df25e4cSSasha Neftin 		else if (q_vector->rx.ring)
51573df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
51583df25e4cSSasha Neftin 				q_vector->rx.ring->queue_index);
51593df25e4cSSasha Neftin 		else
51603df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-unused", netdev->name);
51613df25e4cSSasha Neftin 
51623df25e4cSSasha Neftin 		err = request_irq(adapter->msix_entries[vector].vector,
51633df25e4cSSasha Neftin 				  igc_msix_ring, 0, q_vector->name,
51643df25e4cSSasha Neftin 				  q_vector);
51653df25e4cSSasha Neftin 		if (err)
51663df25e4cSSasha Neftin 			goto err_free;
51673df25e4cSSasha Neftin 	}
51683df25e4cSSasha Neftin 
51693df25e4cSSasha Neftin 	igc_configure_msix(adapter);
51703df25e4cSSasha Neftin 	return 0;
51713df25e4cSSasha Neftin 
51723df25e4cSSasha Neftin err_free:
51733df25e4cSSasha Neftin 	/* free already assigned IRQs */
51743df25e4cSSasha Neftin 	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
51753df25e4cSSasha Neftin 
51763df25e4cSSasha Neftin 	vector--;
51773df25e4cSSasha Neftin 	for (i = 0; i < vector; i++) {
51783df25e4cSSasha Neftin 		free_irq(adapter->msix_entries[free_vector++].vector,
51793df25e4cSSasha Neftin 			 adapter->q_vector[i]);
51803df25e4cSSasha Neftin 	}
51813df25e4cSSasha Neftin err_out:
51823df25e4cSSasha Neftin 	return err;
51833df25e4cSSasha Neftin }
51843df25e4cSSasha Neftin 
51853df25e4cSSasha Neftin /**
5186a8c4873bSSasha Neftin  * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5187a8c4873bSSasha Neftin  * @adapter: Pointer to adapter structure
5188a8c4873bSSasha Neftin  *
5189a8c4873bSSasha Neftin  * This function resets the device so that it has 0 rx queues, tx queues, and
5190a8c4873bSSasha Neftin  * MSI-X interrupts allocated.
5191a8c4873bSSasha Neftin  */
5192a8c4873bSSasha Neftin static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
5193a8c4873bSSasha Neftin {
5194a8c4873bSSasha Neftin 	igc_free_q_vectors(adapter);
5195a8c4873bSSasha Neftin 	igc_reset_interrupt_capability(adapter);
5196a8c4873bSSasha Neftin }
5197a8c4873bSSasha Neftin 
5198208983f0SSasha Neftin /* Need to wait a few seconds after link up to get diagnostic information from
5199208983f0SSasha Neftin  * the phy
5200208983f0SSasha Neftin  */
5201208983f0SSasha Neftin static void igc_update_phy_info(struct timer_list *t)
5202208983f0SSasha Neftin {
5203208983f0SSasha Neftin 	struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5204208983f0SSasha Neftin 
5205208983f0SSasha Neftin 	igc_get_phy_info(&adapter->hw);
5206208983f0SSasha Neftin }
5207208983f0SSasha Neftin 
5208208983f0SSasha Neftin /**
5209208983f0SSasha Neftin  * igc_has_link - check shared code for link and determine up/down
5210208983f0SSasha Neftin  * @adapter: pointer to driver private info
5211208983f0SSasha Neftin  */
52128c5ad0daSSasha Neftin bool igc_has_link(struct igc_adapter *adapter)
5213208983f0SSasha Neftin {
5214208983f0SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
5215208983f0SSasha Neftin 	bool link_active = false;
5216208983f0SSasha Neftin 
5217208983f0SSasha Neftin 	/* get_link_status is set on LSC (link status) interrupt or
5218208983f0SSasha Neftin 	 * rx sequence error interrupt.  get_link_status will stay
5219208983f0SSasha Neftin 	 * true until igc_check_for_link establishes link
5220208983f0SSasha Neftin 	 * for copper adapters ONLY
5221208983f0SSasha Neftin 	 */
5222208983f0SSasha Neftin 	if (!hw->mac.get_link_status)
5223208983f0SSasha Neftin 		return true;
5224208983f0SSasha Neftin 	hw->mac.ops.check_for_link(hw);
5225208983f0SSasha Neftin 	link_active = !hw->mac.get_link_status;
5226208983f0SSasha Neftin 
52277c496de5SSasha Neftin 	if (hw->mac.type == igc_i225) {
5228208983f0SSasha Neftin 		if (!netif_carrier_ok(adapter->netdev)) {
5229208983f0SSasha Neftin 			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5230208983f0SSasha Neftin 		} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
5231208983f0SSasha Neftin 			adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
5232208983f0SSasha Neftin 			adapter->link_check_timeout = jiffies;
5233208983f0SSasha Neftin 		}
5234208983f0SSasha Neftin 	}
5235208983f0SSasha Neftin 
5236208983f0SSasha Neftin 	return link_active;
5237208983f0SSasha Neftin }
5238208983f0SSasha Neftin 
52393df25e4cSSasha Neftin /**
52400507ef8aSSasha Neftin  * igc_watchdog - Timer Call-back
524186efeccdSSasha Neftin  * @t: timer for the watchdog
52420507ef8aSSasha Neftin  */
52430507ef8aSSasha Neftin static void igc_watchdog(struct timer_list *t)
52440507ef8aSSasha Neftin {
52450507ef8aSSasha Neftin 	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5246208983f0SSasha Neftin 	/* Do the rest outside of interrupt context */
5247208983f0SSasha Neftin 	schedule_work(&adapter->watchdog_task);
5248208983f0SSasha Neftin }
5249208983f0SSasha Neftin 
5250208983f0SSasha Neftin static void igc_watchdog_task(struct work_struct *work)
5251208983f0SSasha Neftin {
5252208983f0SSasha Neftin 	struct igc_adapter *adapter = container_of(work,
5253208983f0SSasha Neftin 						   struct igc_adapter,
5254208983f0SSasha Neftin 						   watchdog_task);
5255208983f0SSasha Neftin 	struct net_device *netdev = adapter->netdev;
5256208983f0SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
5257208983f0SSasha Neftin 	struct igc_phy_info *phy = &hw->phy;
5258208983f0SSasha Neftin 	u16 phy_data, retry_count = 20;
5259208983f0SSasha Neftin 	u32 link;
5260208983f0SSasha Neftin 	int i;
5261208983f0SSasha Neftin 
5262208983f0SSasha Neftin 	link = igc_has_link(adapter);
5263208983f0SSasha Neftin 
5264208983f0SSasha Neftin 	if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
5265208983f0SSasha Neftin 		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5266208983f0SSasha Neftin 			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5267208983f0SSasha Neftin 		else
5268208983f0SSasha Neftin 			link = false;
5269208983f0SSasha Neftin 	}
5270208983f0SSasha Neftin 
5271208983f0SSasha Neftin 	if (link) {
52728594a7f3SSasha Neftin 		/* Cancel scheduled suspend requests. */
52738594a7f3SSasha Neftin 		pm_runtime_resume(netdev->dev.parent);
52748594a7f3SSasha Neftin 
5275208983f0SSasha Neftin 		if (!netif_carrier_ok(netdev)) {
5276208983f0SSasha Neftin 			u32 ctrl;
5277208983f0SSasha Neftin 
5278208983f0SSasha Neftin 			hw->mac.ops.get_speed_and_duplex(hw,
5279208983f0SSasha Neftin 							 &adapter->link_speed,
5280208983f0SSasha Neftin 							 &adapter->link_duplex);
5281208983f0SSasha Neftin 
5282208983f0SSasha Neftin 			ctrl = rd32(IGC_CTRL);
5283208983f0SSasha Neftin 			/* Link status message must follow this format */
5284208983f0SSasha Neftin 			netdev_info(netdev,
528525f06effSAndre Guedes 				    "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5286208983f0SSasha Neftin 				    adapter->link_speed,
5287208983f0SSasha Neftin 				    adapter->link_duplex == FULL_DUPLEX ?
5288208983f0SSasha Neftin 				    "Full" : "Half",
5289208983f0SSasha Neftin 				    (ctrl & IGC_CTRL_TFCE) &&
5290208983f0SSasha Neftin 				    (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
5291208983f0SSasha Neftin 				    (ctrl & IGC_CTRL_RFCE) ?  "RX" :
5292208983f0SSasha Neftin 				    (ctrl & IGC_CTRL_TFCE) ?  "TX" : "None");
5293208983f0SSasha Neftin 
529493ec439aSSasha Neftin 			/* disable EEE if enabled */
529593ec439aSSasha Neftin 			if ((adapter->flags & IGC_FLAG_EEE) &&
529693ec439aSSasha Neftin 			    adapter->link_duplex == HALF_DUPLEX) {
529793ec439aSSasha Neftin 				netdev_info(netdev,
529893ec439aSSasha Neftin 					    "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
529993ec439aSSasha Neftin 				adapter->hw.dev_spec._base.eee_enable = false;
530093ec439aSSasha Neftin 				adapter->flags &= ~IGC_FLAG_EEE;
530193ec439aSSasha Neftin 			}
530293ec439aSSasha Neftin 
5303208983f0SSasha Neftin 			/* check if SmartSpeed worked */
5304208983f0SSasha Neftin 			igc_check_downshift(hw);
5305208983f0SSasha Neftin 			if (phy->speed_downgraded)
5306208983f0SSasha Neftin 				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5307208983f0SSasha Neftin 
5308208983f0SSasha Neftin 			/* adjust timeout factor according to speed/duplex */
5309208983f0SSasha Neftin 			adapter->tx_timeout_factor = 1;
5310208983f0SSasha Neftin 			switch (adapter->link_speed) {
5311208983f0SSasha Neftin 			case SPEED_10:
5312208983f0SSasha Neftin 				adapter->tx_timeout_factor = 14;
5313208983f0SSasha Neftin 				break;
5314208983f0SSasha Neftin 			case SPEED_100:
5315b27b8dc7SMuhammad Husaini Zulkifli 			case SPEED_1000:
5316b27b8dc7SMuhammad Husaini Zulkifli 			case SPEED_2500:
5317b27b8dc7SMuhammad Husaini Zulkifli 				adapter->tx_timeout_factor = 7;
5318208983f0SSasha Neftin 				break;
5319208983f0SSasha Neftin 			}
5320208983f0SSasha Neftin 
5321208983f0SSasha Neftin 			if (adapter->link_speed != SPEED_1000)
5322208983f0SSasha Neftin 				goto no_wait;
5323208983f0SSasha Neftin 
5324208983f0SSasha Neftin 			/* wait for Remote receiver status OK */
5325208983f0SSasha Neftin retry_read_status:
5326208983f0SSasha Neftin 			if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5327208983f0SSasha Neftin 					      &phy_data)) {
5328208983f0SSasha Neftin 				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5329208983f0SSasha Neftin 				    retry_count) {
5330208983f0SSasha Neftin 					msleep(100);
5331208983f0SSasha Neftin 					retry_count--;
5332208983f0SSasha Neftin 					goto retry_read_status;
5333208983f0SSasha Neftin 				} else if (!retry_count) {
533425f06effSAndre Guedes 					netdev_err(netdev, "exceeded max 2 second wait for remote receiver status\n");
5335208983f0SSasha Neftin 				}
5336208983f0SSasha Neftin 			} else {
533725f06effSAndre Guedes 				netdev_err(netdev, "failed to read 1000Base-T Status register\n");
5338208983f0SSasha Neftin 			}
5339208983f0SSasha Neftin no_wait:
5340208983f0SSasha Neftin 			netif_carrier_on(netdev);
5341208983f0SSasha Neftin 
5342208983f0SSasha Neftin 			/* link state has changed, schedule phy info update */
5343208983f0SSasha Neftin 			if (!test_bit(__IGC_DOWN, &adapter->state))
5344208983f0SSasha Neftin 				mod_timer(&adapter->phy_info_timer,
5345208983f0SSasha Neftin 					  round_jiffies(jiffies + 2 * HZ));
5346208983f0SSasha Neftin 		}
5347208983f0SSasha Neftin 	} else {
5348208983f0SSasha Neftin 		if (netif_carrier_ok(netdev)) {
5349208983f0SSasha Neftin 			adapter->link_speed = 0;
5350208983f0SSasha Neftin 			adapter->link_duplex = 0;
5351208983f0SSasha Neftin 
5352208983f0SSasha Neftin 			/* Link status message must follow this format */
535325f06effSAndre Guedes 			netdev_info(netdev, "NIC Link is Down\n");
5354208983f0SSasha Neftin 			netif_carrier_off(netdev);
5355208983f0SSasha Neftin 
5356208983f0SSasha Neftin 			/* link state has changed, schedule phy info update */
5357208983f0SSasha Neftin 			if (!test_bit(__IGC_DOWN, &adapter->state))
5358208983f0SSasha Neftin 				mod_timer(&adapter->phy_info_timer,
5359208983f0SSasha Neftin 					  round_jiffies(jiffies + 2 * HZ));
5360208983f0SSasha Neftin 
5361208983f0SSasha Neftin 			/* link is down, time to check for alternate media */
5362208983f0SSasha Neftin 			if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
5363208983f0SSasha Neftin 				if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5364208983f0SSasha Neftin 					schedule_work(&adapter->reset_task);
5365208983f0SSasha Neftin 					/* return immediately */
5366208983f0SSasha Neftin 					return;
5367208983f0SSasha Neftin 				}
5368208983f0SSasha Neftin 			}
53698594a7f3SSasha Neftin 			pm_schedule_suspend(netdev->dev.parent,
53708594a7f3SSasha Neftin 					    MSEC_PER_SEC * 5);
5371208983f0SSasha Neftin 
5372208983f0SSasha Neftin 		/* also check for alternate media here */
5373208983f0SSasha Neftin 		} else if (!netif_carrier_ok(netdev) &&
5374208983f0SSasha Neftin 			   (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
5375208983f0SSasha Neftin 			if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5376208983f0SSasha Neftin 				schedule_work(&adapter->reset_task);
5377208983f0SSasha Neftin 				/* return immediately */
5378208983f0SSasha Neftin 				return;
5379208983f0SSasha Neftin 			}
5380208983f0SSasha Neftin 		}
5381208983f0SSasha Neftin 	}
5382208983f0SSasha Neftin 
5383208983f0SSasha Neftin 	spin_lock(&adapter->stats64_lock);
5384208983f0SSasha Neftin 	igc_update_stats(adapter);
5385208983f0SSasha Neftin 	spin_unlock(&adapter->stats64_lock);
5386208983f0SSasha Neftin 
5387208983f0SSasha Neftin 	for (i = 0; i < adapter->num_tx_queues; i++) {
5388208983f0SSasha Neftin 		struct igc_ring *tx_ring = adapter->tx_ring[i];
5389208983f0SSasha Neftin 
5390208983f0SSasha Neftin 		if (!netif_carrier_ok(netdev)) {
5391208983f0SSasha Neftin 			/* We've lost link, so the controller stops DMA,
5392208983f0SSasha Neftin 			 * but we've got queued Tx work that's never going
5393208983f0SSasha Neftin 			 * to get done, so reset controller to flush Tx.
5394208983f0SSasha Neftin 			 * (Do the reset outside of interrupt context).
5395208983f0SSasha Neftin 			 */
5396208983f0SSasha Neftin 			if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5397208983f0SSasha Neftin 				adapter->tx_timeout_count++;
5398208983f0SSasha Neftin 				schedule_work(&adapter->reset_task);
5399208983f0SSasha Neftin 				/* return immediately since reset is imminent */
5400208983f0SSasha Neftin 				return;
5401208983f0SSasha Neftin 			}
5402208983f0SSasha Neftin 		}
5403208983f0SSasha Neftin 
5404208983f0SSasha Neftin 		/* Force detection of hung controller every watchdog period */
5405208983f0SSasha Neftin 		set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5406208983f0SSasha Neftin 	}
5407208983f0SSasha Neftin 
5408208983f0SSasha Neftin 	/* Cause software interrupt to ensure Rx ring is cleaned */
5409208983f0SSasha Neftin 	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5410208983f0SSasha Neftin 		u32 eics = 0;
5411208983f0SSasha Neftin 
5412208983f0SSasha Neftin 		for (i = 0; i < adapter->num_q_vectors; i++)
5413208983f0SSasha Neftin 			eics |= adapter->q_vector[i]->eims_value;
5414208983f0SSasha Neftin 		wr32(IGC_EICS, eics);
5415208983f0SSasha Neftin 	} else {
5416208983f0SSasha Neftin 		wr32(IGC_ICS, IGC_ICS_RXDMT0);
5417208983f0SSasha Neftin 	}
5418208983f0SSasha Neftin 
54192c344ae2SVinicius Costa Gomes 	igc_ptp_tx_hang(adapter);
54202c344ae2SVinicius Costa Gomes 
5421208983f0SSasha Neftin 	/* Reset the timer */
5422208983f0SSasha Neftin 	if (!test_bit(__IGC_DOWN, &adapter->state)) {
5423208983f0SSasha Neftin 		if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5424208983f0SSasha Neftin 			mod_timer(&adapter->watchdog_timer,
5425208983f0SSasha Neftin 				  round_jiffies(jiffies + HZ));
5426208983f0SSasha Neftin 		else
5427208983f0SSasha Neftin 			mod_timer(&adapter->watchdog_timer,
5428208983f0SSasha Neftin 				  round_jiffies(jiffies + 2 * HZ));
5429208983f0SSasha Neftin 	}
54300507ef8aSSasha Neftin }
54310507ef8aSSasha Neftin 
54320507ef8aSSasha Neftin /**
543313b5b7fdSSasha Neftin  * igc_intr_msi - MSI Interrupt Handler
543413b5b7fdSSasha Neftin  * @irq: interrupt number
543513b5b7fdSSasha Neftin  * @data: pointer to the adapter structure
543613b5b7fdSSasha Neftin  */
543713b5b7fdSSasha Neftin static irqreturn_t igc_intr_msi(int irq, void *data)
543813b5b7fdSSasha Neftin {
543913b5b7fdSSasha Neftin 	struct igc_adapter *adapter = data;
544013b5b7fdSSasha Neftin 	struct igc_q_vector *q_vector = adapter->q_vector[0];
544113b5b7fdSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
544213b5b7fdSSasha Neftin 	/* reading ICR disables interrupts using IAM */
544313b5b7fdSSasha Neftin 	u32 icr = rd32(IGC_ICR);
544413b5b7fdSSasha Neftin 
544513b5b7fdSSasha Neftin 	igc_write_itr(q_vector);
544613b5b7fdSSasha Neftin 
544713b5b7fdSSasha Neftin 	if (icr & IGC_ICR_DRSTA)
544813b5b7fdSSasha Neftin 		schedule_work(&adapter->reset_task);
544913b5b7fdSSasha Neftin 
545013b5b7fdSSasha Neftin 	if (icr & IGC_ICR_DOUTSYNC) {
545113b5b7fdSSasha Neftin 		/* HW is reporting DMA is out of sync */
545213b5b7fdSSasha Neftin 		adapter->stats.doosync++;
545313b5b7fdSSasha Neftin 	}
545413b5b7fdSSasha Neftin 
545513b5b7fdSSasha Neftin 	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5456501f2309SJiapeng Zhong 		hw->mac.get_link_status = true;
545713b5b7fdSSasha Neftin 		if (!test_bit(__IGC_DOWN, &adapter->state))
545813b5b7fdSSasha Neftin 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
545913b5b7fdSSasha Neftin 	}
546013b5b7fdSSasha Neftin 
546113b5b7fdSSasha Neftin 	napi_schedule(&q_vector->napi);
546213b5b7fdSSasha Neftin 
546313b5b7fdSSasha Neftin 	return IRQ_HANDLED;
546413b5b7fdSSasha Neftin }
546513b5b7fdSSasha Neftin 
546613b5b7fdSSasha Neftin /**
546713b5b7fdSSasha Neftin  * igc_intr - Legacy Interrupt Handler
546813b5b7fdSSasha Neftin  * @irq: interrupt number
546913b5b7fdSSasha Neftin  * @data: pointer to the adapter structure
547013b5b7fdSSasha Neftin  */
547113b5b7fdSSasha Neftin static irqreturn_t igc_intr(int irq, void *data)
547213b5b7fdSSasha Neftin {
547313b5b7fdSSasha Neftin 	struct igc_adapter *adapter = data;
547413b5b7fdSSasha Neftin 	struct igc_q_vector *q_vector = adapter->q_vector[0];
547513b5b7fdSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
547613b5b7fdSSasha Neftin 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
547713b5b7fdSSasha Neftin 	 * need for the IMC write
547813b5b7fdSSasha Neftin 	 */
547913b5b7fdSSasha Neftin 	u32 icr = rd32(IGC_ICR);
548013b5b7fdSSasha Neftin 
548113b5b7fdSSasha Neftin 	/* IMS will not auto-mask unless INT_ASSERTED is set; if INT_ASSERTED
548213b5b7fdSSasha Neftin 	 * is clear, the adapter did not raise this interrupt
548313b5b7fdSSasha Neftin 	 */
548413b5b7fdSSasha Neftin 	if (!(icr & IGC_ICR_INT_ASSERTED))
548513b5b7fdSSasha Neftin 		return IRQ_NONE;
548613b5b7fdSSasha Neftin 
548713b5b7fdSSasha Neftin 	igc_write_itr(q_vector);
548813b5b7fdSSasha Neftin 
548913b5b7fdSSasha Neftin 	if (icr & IGC_ICR_DRSTA)
549013b5b7fdSSasha Neftin 		schedule_work(&adapter->reset_task);
549113b5b7fdSSasha Neftin 
549213b5b7fdSSasha Neftin 	if (icr & IGC_ICR_DOUTSYNC) {
549313b5b7fdSSasha Neftin 		/* HW is reporting DMA is out of sync */
549413b5b7fdSSasha Neftin 		adapter->stats.doosync++;
549513b5b7fdSSasha Neftin 	}
549613b5b7fdSSasha Neftin 
549713b5b7fdSSasha Neftin 	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5498501f2309SJiapeng Zhong 		hw->mac.get_link_status = true;
549913b5b7fdSSasha Neftin 		/* guard against interrupt when we're going down */
550013b5b7fdSSasha Neftin 		if (!test_bit(__IGC_DOWN, &adapter->state))
550113b5b7fdSSasha Neftin 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
550213b5b7fdSSasha Neftin 	}
550313b5b7fdSSasha Neftin 
550413b5b7fdSSasha Neftin 	napi_schedule(&q_vector->napi);
550513b5b7fdSSasha Neftin 
550613b5b7fdSSasha Neftin 	return IRQ_HANDLED;
550713b5b7fdSSasha Neftin }
550813b5b7fdSSasha Neftin 
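/**
 * igc_free_irq - free the IRQ(s) obtained by igc_request_irq()
 * @adapter: Pointer to adapter structure
 *
 * With MSI-X, vector 0 (requested with the adapter itself as the IRQ
 * cookie) is released first, followed by one vector per queue vector;
 * otherwise the single MSI or legacy IRQ is freed.
 */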
55093df25e4cSSasha Neftin static void igc_free_irq(struct igc_adapter *adapter)
55103df25e4cSSasha Neftin {
55113df25e4cSSasha Neftin 	if (adapter->msix_entries) {
55123df25e4cSSasha Neftin 		int vector = 0, i;
55133df25e4cSSasha Neftin 
55143df25e4cSSasha Neftin 		free_irq(adapter->msix_entries[vector++].vector, adapter);
55153df25e4cSSasha Neftin 
55163df25e4cSSasha Neftin 		for (i = 0; i < adapter->num_q_vectors; i++)
55173df25e4cSSasha Neftin 			free_irq(adapter->msix_entries[vector++].vector,
55183df25e4cSSasha Neftin 				 adapter->q_vector[i]);
55193df25e4cSSasha Neftin 	} else {
55203df25e4cSSasha Neftin 		free_irq(adapter->pdev->irq, adapter);
55213df25e4cSSasha Neftin 	}
55223df25e4cSSasha Neftin }
55233df25e4cSSasha Neftin 
55243df25e4cSSasha Neftin /**
55253df25e4cSSasha Neftin  * igc_request_irq - initialize interrupts
55263df25e4cSSasha Neftin  * @adapter: Pointer to adapter structure
55273df25e4cSSasha Neftin  *
55283df25e4cSSasha Neftin  * Attempts to configure interrupts using the best available
55293df25e4cSSasha Neftin  * capabilities of the hardware and kernel.
55303df25e4cSSasha Neftin  */
55313df25e4cSSasha Neftin static int igc_request_irq(struct igc_adapter *adapter)
55323df25e4cSSasha Neftin {
553313b5b7fdSSasha Neftin 	struct net_device *netdev = adapter->netdev;
553413b5b7fdSSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
55353df25e4cSSasha Neftin 	int err = 0;
55363df25e4cSSasha Neftin 
55373df25e4cSSasha Neftin 	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
55383df25e4cSSasha Neftin 		err = igc_request_msix(adapter);
55393df25e4cSSasha Neftin 		if (!err)
55403df25e4cSSasha Neftin 			goto request_done;
55413df25e4cSSasha Neftin 		/* fall back to MSI */
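		/* The queue resources were sized for the MSI-X layout, so
		 * free them and rebuild once the single-vector interrupt
		 * scheme has been set up.
		 */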
554213b5b7fdSSasha Neftin 		igc_free_all_tx_resources(adapter);
554313b5b7fdSSasha Neftin 		igc_free_all_rx_resources(adapter);
55443df25e4cSSasha Neftin 
55453df25e4cSSasha Neftin 		igc_clear_interrupt_scheme(adapter);
55463df25e4cSSasha Neftin 		err = igc_init_interrupt_scheme(adapter, false);
55473df25e4cSSasha Neftin 		if (err)
55483df25e4cSSasha Neftin 			goto request_done;
554913b5b7fdSSasha Neftin 		igc_setup_all_tx_resources(adapter);
555013b5b7fdSSasha Neftin 		igc_setup_all_rx_resources(adapter);
55513df25e4cSSasha Neftin 		igc_configure(adapter);
55523df25e4cSSasha Neftin 	}
55533df25e4cSSasha Neftin 
555413b5b7fdSSasha Neftin 	igc_assign_vector(adapter->q_vector[0], 0);
555513b5b7fdSSasha Neftin 
555613b5b7fdSSasha Neftin 	if (adapter->flags & IGC_FLAG_HAS_MSI) {
555713b5b7fdSSasha Neftin 		err = request_irq(pdev->irq, &igc_intr_msi, 0,
555813b5b7fdSSasha Neftin 				  netdev->name, adapter);
555913b5b7fdSSasha Neftin 		if (!err)
556013b5b7fdSSasha Neftin 			goto request_done;
556113b5b7fdSSasha Neftin 
556213b5b7fdSSasha Neftin 		/* fall back to legacy interrupts */
556313b5b7fdSSasha Neftin 		igc_reset_interrupt_capability(adapter);
556413b5b7fdSSasha Neftin 		adapter->flags &= ~IGC_FLAG_HAS_MSI;
556513b5b7fdSSasha Neftin 	}
556613b5b7fdSSasha Neftin 
556713b5b7fdSSasha Neftin 	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
556813b5b7fdSSasha Neftin 			  netdev->name, adapter);
556913b5b7fdSSasha Neftin 
557013b5b7fdSSasha Neftin 	if (err)
557125f06effSAndre Guedes 		netdev_err(netdev, "Error %d getting interrupt\n", err);
557213b5b7fdSSasha Neftin 
55733df25e4cSSasha Neftin request_done:
55743df25e4cSSasha Neftin 	return err;
55753df25e4cSSasha Neftin }
55763df25e4cSSasha Neftin 
55773df25e4cSSasha Neftin /**
557886efeccdSSasha Neftin  * __igc_open - Called when a network interface is made active
5579c9a11c23SSasha Neftin  * @netdev: network interface device structure
558086efeccdSSasha Neftin  * @resuming: boolean indicating if the device is resuming
5581c9a11c23SSasha Neftin  *
5582c9a11c23SSasha Neftin  * Returns 0 on success, negative value on failure
5583c9a11c23SSasha Neftin  *
5584c9a11c23SSasha Neftin  * The open entry point is called when a network interface is made
5585c9a11c23SSasha Neftin  * active by the system (IFF_UP).  At this point all resources needed
5586c9a11c23SSasha Neftin  * for transmit and receive operations are allocated, the interrupt
5587c9a11c23SSasha Neftin  * handler is registered with the OS, the watchdog timer is started,
5588c9a11c23SSasha Neftin  * and the stack is notified that the interface is ready.
5589c9a11c23SSasha Neftin  */
5590c9a11c23SSasha Neftin static int __igc_open(struct net_device *netdev, bool resuming)
5591c9a11c23SSasha Neftin {
5592c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
55938594a7f3SSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
5594c9a11c23SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
55953df25e4cSSasha Neftin 	int err = 0;
5596c9a11c23SSasha Neftin 	int i = 0;
5597c9a11c23SSasha Neftin 
5598c9a11c23SSasha Neftin 	/* disallow open during test */
5599c9a11c23SSasha Neftin 
5600c9a11c23SSasha Neftin 	if (test_bit(__IGC_TESTING, &adapter->state)) {
5601c9a11c23SSasha Neftin 		WARN_ON(resuming);
5602c9a11c23SSasha Neftin 		return -EBUSY;
5603c9a11c23SSasha Neftin 	}
5604c9a11c23SSasha Neftin 
56058594a7f3SSasha Neftin 	if (!resuming)
56068594a7f3SSasha Neftin 		pm_runtime_get_sync(&pdev->dev);
56078594a7f3SSasha Neftin 
5608c9a11c23SSasha Neftin 	netif_carrier_off(netdev);
5609c9a11c23SSasha Neftin 
561013b5b7fdSSasha Neftin 	/* allocate transmit descriptors */
561113b5b7fdSSasha Neftin 	err = igc_setup_all_tx_resources(adapter);
561213b5b7fdSSasha Neftin 	if (err)
561313b5b7fdSSasha Neftin 		goto err_setup_tx;
561413b5b7fdSSasha Neftin 
561513b5b7fdSSasha Neftin 	/* allocate receive descriptors */
561613b5b7fdSSasha Neftin 	err = igc_setup_all_rx_resources(adapter);
561713b5b7fdSSasha Neftin 	if (err)
561813b5b7fdSSasha Neftin 		goto err_setup_rx;
561913b5b7fdSSasha Neftin 
5620c9a11c23SSasha Neftin 	igc_power_up_link(adapter);
5621c9a11c23SSasha Neftin 
5622c9a11c23SSasha Neftin 	igc_configure(adapter);
5623c9a11c23SSasha Neftin 
56243df25e4cSSasha Neftin 	err = igc_request_irq(adapter);
56253df25e4cSSasha Neftin 	if (err)
56263df25e4cSSasha Neftin 		goto err_req_irq;
56273df25e4cSSasha Neftin 
56283df25e4cSSasha Neftin 	/* Notify the stack of the actual queue counts. */
562914b21cecSColin Ian King 	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
56303df25e4cSSasha Neftin 	if (err)
56313df25e4cSSasha Neftin 		goto err_set_queues;
56323df25e4cSSasha Neftin 
56333df25e4cSSasha Neftin 	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
56343df25e4cSSasha Neftin 	if (err)
56353df25e4cSSasha Neftin 		goto err_set_queues;
56363df25e4cSSasha Neftin 
5637c9a11c23SSasha Neftin 	clear_bit(__IGC_DOWN, &adapter->state);
5638c9a11c23SSasha Neftin 
5639c9a11c23SSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++)
5640c9a11c23SSasha Neftin 		napi_enable(&adapter->q_vector[i]->napi);
5641c9a11c23SSasha Neftin 
56423df25e4cSSasha Neftin 	/* Clear any pending interrupts. */
56433df25e4cSSasha Neftin 	rd32(IGC_ICR);
56443df25e4cSSasha Neftin 	igc_irq_enable(adapter);
56453df25e4cSSasha Neftin 
56468594a7f3SSasha Neftin 	if (!resuming)
56478594a7f3SSasha Neftin 		pm_runtime_put(&pdev->dev);
56488594a7f3SSasha Neftin 
564913b5b7fdSSasha Neftin 	netif_tx_start_all_queues(netdev);
565013b5b7fdSSasha Neftin 
5651c9a11c23SSasha Neftin 	/* start the watchdog. */
5652501f2309SJiapeng Zhong 	hw->mac.get_link_status = true;
5653208983f0SSasha Neftin 	schedule_work(&adapter->watchdog_task);
5654c9a11c23SSasha Neftin 
5655c9a11c23SSasha Neftin 	return IGC_SUCCESS;
56563df25e4cSSasha Neftin 
56573df25e4cSSasha Neftin err_set_queues:
56583df25e4cSSasha Neftin 	igc_free_irq(adapter);
56593df25e4cSSasha Neftin err_req_irq:
56603df25e4cSSasha Neftin 	igc_release_hw_control(adapter);
5661a0beb3c1SSasha Neftin 	igc_power_down_phy_copper_base(&adapter->hw);
566213b5b7fdSSasha Neftin 	igc_free_all_rx_resources(adapter);
566313b5b7fdSSasha Neftin err_setup_rx:
566413b5b7fdSSasha Neftin 	igc_free_all_tx_resources(adapter);
566513b5b7fdSSasha Neftin err_setup_tx:
566613b5b7fdSSasha Neftin 	igc_reset(adapter);
56678594a7f3SSasha Neftin 	if (!resuming)
56688594a7f3SSasha Neftin 		pm_runtime_put(&pdev->dev);
56693df25e4cSSasha Neftin 
56703df25e4cSSasha Neftin 	return err;
5671c9a11c23SSasha Neftin }
5672c9a11c23SSasha Neftin 
5673f026d8caSVitaly Lifshits int igc_open(struct net_device *netdev)
5674c9a11c23SSasha Neftin {
5675c9a11c23SSasha Neftin 	return __igc_open(netdev, false);
5676c9a11c23SSasha Neftin }
5677c9a11c23SSasha Neftin 
5678c9a11c23SSasha Neftin /**
567986efeccdSSasha Neftin  * __igc_close - Disables a network interface
5680c9a11c23SSasha Neftin  * @netdev: network interface device structure
568186efeccdSSasha Neftin  * @suspending: boolean indicating the device is suspending
5682c9a11c23SSasha Neftin  *
5683c9a11c23SSasha Neftin  * Returns 0, this is not allowed to fail
5684c9a11c23SSasha Neftin  *
5685c9a11c23SSasha Neftin  * The close entry point is called when an interface is de-activated
5686c9a11c23SSasha Neftin  * by the OS.  The hardware is still under the driver's control, but
5687c9a11c23SSasha Neftin  * needs to be disabled.  A global MAC reset is issued to stop the
5688c9a11c23SSasha Neftin  * hardware, and all transmit and receive resources are freed.
5689c9a11c23SSasha Neftin  */
5690c9a11c23SSasha Neftin static int __igc_close(struct net_device *netdev, bool suspending)
5691c9a11c23SSasha Neftin {
5692c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
56938594a7f3SSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
5694c9a11c23SSasha Neftin 
5695c9a11c23SSasha Neftin 	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
5696c9a11c23SSasha Neftin 
56978594a7f3SSasha Neftin 	if (!suspending)
56988594a7f3SSasha Neftin 		pm_runtime_get_sync(&pdev->dev);
56998594a7f3SSasha Neftin 
5700c9a11c23SSasha Neftin 	igc_down(adapter);
5701c9a11c23SSasha Neftin 
5702c9a11c23SSasha Neftin 	igc_release_hw_control(adapter);
5703c9a11c23SSasha Neftin 
57043df25e4cSSasha Neftin 	igc_free_irq(adapter);
57053df25e4cSSasha Neftin 
570613b5b7fdSSasha Neftin 	igc_free_all_tx_resources(adapter);
570713b5b7fdSSasha Neftin 	igc_free_all_rx_resources(adapter);
570813b5b7fdSSasha Neftin 
57098594a7f3SSasha Neftin 	if (!suspending)
57108594a7f3SSasha Neftin 		pm_runtime_put_sync(&pdev->dev);
57118594a7f3SSasha Neftin 
5712c9a11c23SSasha Neftin 	return 0;
5713c9a11c23SSasha Neftin }
5714c9a11c23SSasha Neftin 
5715f026d8caSVitaly Lifshits int igc_close(struct net_device *netdev)
5716c9a11c23SSasha Neftin {
5717c9a11c23SSasha Neftin 	if (netif_device_present(netdev) || netdev->dismantle)
5718c9a11c23SSasha Neftin 		return __igc_close(netdev, false);
5719c9a11c23SSasha Neftin 	return 0;
5720c9a11c23SSasha Neftin }
5721c9a11c23SSasha Neftin 
57225f295805SVinicius Costa Gomes /**
57235f295805SVinicius Costa Gomes  * igc_ioctl - Access the hwtstamp interface
57245f295805SVinicius Costa Gomes  * @netdev: network interface device structure
5725b50f7bcaSJesse Brandeburg  * @ifr: interface request data
57265f295805SVinicius Costa Gomes  * @cmd: ioctl command
57275f295805SVinicius Costa Gomes  **/
57285f295805SVinicius Costa Gomes static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
57295f295805SVinicius Costa Gomes {
57305f295805SVinicius Costa Gomes 	switch (cmd) {
57315f295805SVinicius Costa Gomes 	case SIOCGHWTSTAMP:
57325f295805SVinicius Costa Gomes 		return igc_ptp_get_ts_config(netdev, ifr);
57335f295805SVinicius Costa Gomes 	case SIOCSHWTSTAMP:
57345f295805SVinicius Costa Gomes 		return igc_ptp_set_ts_config(netdev, ifr);
57355f295805SVinicius Costa Gomes 	default:
57365f295805SVinicius Costa Gomes 		return -EOPNOTSUPP;
57375f295805SVinicius Costa Gomes 	}
57385f295805SVinicius Costa Gomes }
57395f295805SVinicius Costa Gomes 
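/* If launchtime is enabled while no Qbv schedule is programmed
 * (adapter->base_time == 0), fall back to a default one-second cycle
 * with every queue's gate open for the whole cycle, so that only the
 * per-packet launch times take effect.
 */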
574082faa9b7SVinicius Costa Gomes static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
574182faa9b7SVinicius Costa Gomes 				      bool enable)
574282faa9b7SVinicius Costa Gomes {
574382faa9b7SVinicius Costa Gomes 	struct igc_ring *ring;
574482faa9b7SVinicius Costa Gomes 	int i;
574582faa9b7SVinicius Costa Gomes 
574682faa9b7SVinicius Costa Gomes 	if (queue < 0 || queue >= adapter->num_tx_queues)
574782faa9b7SVinicius Costa Gomes 		return -EINVAL;
574882faa9b7SVinicius Costa Gomes 
574982faa9b7SVinicius Costa Gomes 	ring = adapter->tx_ring[queue];
575082faa9b7SVinicius Costa Gomes 	ring->launchtime_enable = enable;
575182faa9b7SVinicius Costa Gomes 
575282faa9b7SVinicius Costa Gomes 	if (adapter->base_time)
575382faa9b7SVinicius Costa Gomes 		return 0;
575482faa9b7SVinicius Costa Gomes 
575582faa9b7SVinicius Costa Gomes 	adapter->cycle_time = NSEC_PER_SEC;
575682faa9b7SVinicius Costa Gomes 
575782faa9b7SVinicius Costa Gomes 	for (i = 0; i < adapter->num_tx_queues; i++) {
575882faa9b7SVinicius Costa Gomes 		ring = adapter->tx_ring[i];
575982faa9b7SVinicius Costa Gomes 		ring->start_time = 0;
576082faa9b7SVinicius Costa Gomes 		ring->end_time = NSEC_PER_SEC;
576182faa9b7SVinicius Costa Gomes 	}
576282faa9b7SVinicius Costa Gomes 
576382faa9b7SVinicius Costa Gomes 	return 0;
576482faa9b7SVinicius Costa Gomes }
576582faa9b7SVinicius Costa Gomes 
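/* Returns true when @base_time falls strictly before @now, i.e. the
 * schedule's start time is already in the past.
 */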
576658c4ee0eSVinicius Costa Gomes static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
576758c4ee0eSVinicius Costa Gomes {
576858c4ee0eSVinicius Costa Gomes 	struct timespec64 b;
576958c4ee0eSVinicius Costa Gomes 
577058c4ee0eSVinicius Costa Gomes 	b = ktime_to_timespec64(base_time);
577158c4ee0eSVinicius Costa Gomes 
577258c4ee0eSVinicius Costa Gomes 	return timespec64_compare(now, &b) > 0;
577358c4ee0eSVinicius Costa Gomes }
577458c4ee0eSVinicius Costa Gomes 
577558c4ee0eSVinicius Costa Gomes static bool validate_schedule(struct igc_adapter *adapter,
577658c4ee0eSVinicius Costa Gomes 			      const struct tc_taprio_qopt_offload *qopt)
5777ec50a9d4SVinicius Costa Gomes {
5778ec50a9d4SVinicius Costa Gomes 	int queue_uses[IGC_MAX_TX_QUEUES] = { };
577958c4ee0eSVinicius Costa Gomes 	struct timespec64 now;
5780ec50a9d4SVinicius Costa Gomes 	size_t n;
5781ec50a9d4SVinicius Costa Gomes 
5782ec50a9d4SVinicius Costa Gomes 	if (qopt->cycle_time_extension)
5783ec50a9d4SVinicius Costa Gomes 		return false;
5784ec50a9d4SVinicius Costa Gomes 
578558c4ee0eSVinicius Costa Gomes 	igc_ptp_read(adapter, &now);
578658c4ee0eSVinicius Costa Gomes 
578758c4ee0eSVinicius Costa Gomes 	/* If we program the controller's BASET registers with a time
578858c4ee0eSVinicius Costa Gomes 	 * in the future, it will hold all packets until that time,
578958c4ee0eSVinicius Costa Gomes 	 * causing a lot of TX hangs. To avoid that, we reject
579058c4ee0eSVinicius Costa Gomes 	 * schedules that would start in the future.
579158c4ee0eSVinicius Costa Gomes 	 */
579258c4ee0eSVinicius Costa Gomes 	if (!is_base_time_past(qopt->base_time, &now))
579358c4ee0eSVinicius Costa Gomes 		return false;
579458c4ee0eSVinicius Costa Gomes 
5795ec50a9d4SVinicius Costa Gomes 	for (n = 0; n < qopt->num_entries; n++) {
5796ec50a9d4SVinicius Costa Gomes 		const struct tc_taprio_sched_entry *e;
5797ec50a9d4SVinicius Costa Gomes 		int i;
5798ec50a9d4SVinicius Costa Gomes 
5799ec50a9d4SVinicius Costa Gomes 		e = &qopt->entries[n];
5800ec50a9d4SVinicius Costa Gomes 
5801ec50a9d4SVinicius Costa Gomes 		/* i225 only supports "global" frame preemption
5802ec50a9d4SVinicius Costa Gomes 		 * settings.
5803ec50a9d4SVinicius Costa Gomes 		 */
5804ec50a9d4SVinicius Costa Gomes 		if (e->command != TC_TAPRIO_CMD_SET_GATES)
5805ec50a9d4SVinicius Costa Gomes 			return false;
5806ec50a9d4SVinicius Costa Gomes 
5807ec50a9d4SVinicius Costa Gomes 		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
5808ec50a9d4SVinicius Costa Gomes 			if (e->gate_mask & BIT(i))
5809ec50a9d4SVinicius Costa Gomes 				queue_uses[i]++;
5810ec50a9d4SVinicius Costa Gomes 
5811ec50a9d4SVinicius Costa Gomes 			if (queue_uses[i] > 1)
5812ec50a9d4SVinicius Costa Gomes 				return false;
5813ec50a9d4SVinicius Costa Gomes 		}
5814ec50a9d4SVinicius Costa Gomes 	}
5815ec50a9d4SVinicius Costa Gomes 
5816ec50a9d4SVinicius Costa Gomes 	return true;
5817ec50a9d4SVinicius Costa Gomes }
5818ec50a9d4SVinicius Costa Gomes 
581982faa9b7SVinicius Costa Gomes static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
582082faa9b7SVinicius Costa Gomes 				     struct tc_etf_qopt_offload *qopt)
582182faa9b7SVinicius Costa Gomes {
582282faa9b7SVinicius Costa Gomes 	struct igc_hw *hw = &adapter->hw;
582382faa9b7SVinicius Costa Gomes 	int err;
582482faa9b7SVinicius Costa Gomes 
582582faa9b7SVinicius Costa Gomes 	if (hw->mac.type != igc_i225)
582682faa9b7SVinicius Costa Gomes 		return -EOPNOTSUPP;
582782faa9b7SVinicius Costa Gomes 
582882faa9b7SVinicius Costa Gomes 	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
582982faa9b7SVinicius Costa Gomes 	if (err)
583082faa9b7SVinicius Costa Gomes 		return err;
583182faa9b7SVinicius Costa Gomes 
583282faa9b7SVinicius Costa Gomes 	return igc_tsn_offload_apply(adapter);
583382faa9b7SVinicius Costa Gomes }
583482faa9b7SVinicius Costa Gomes 
5835ec50a9d4SVinicius Costa Gomes static int igc_save_qbv_schedule(struct igc_adapter *adapter,
5836ec50a9d4SVinicius Costa Gomes 				 struct tc_taprio_qopt_offload *qopt)
5837ec50a9d4SVinicius Costa Gomes {
5838ec50a9d4SVinicius Costa Gomes 	u32 start_time = 0, end_time = 0;
5839ec50a9d4SVinicius Costa Gomes 	size_t n;
5840ec50a9d4SVinicius Costa Gomes 
5841ec50a9d4SVinicius Costa Gomes 	if (!qopt->enable) {
5842ec50a9d4SVinicius Costa Gomes 		adapter->base_time = 0;
5843ec50a9d4SVinicius Costa Gomes 		return 0;
5844ec50a9d4SVinicius Costa Gomes 	}
5845ec50a9d4SVinicius Costa Gomes 
5846ec50a9d4SVinicius Costa Gomes 	if (adapter->base_time)
5847ec50a9d4SVinicius Costa Gomes 		return -EALREADY;
5848ec50a9d4SVinicius Costa Gomes 
584958c4ee0eSVinicius Costa Gomes 	if (!validate_schedule(adapter, qopt))
5850ec50a9d4SVinicius Costa Gomes 		return -EINVAL;
5851ec50a9d4SVinicius Costa Gomes 
5852ec50a9d4SVinicius Costa Gomes 	adapter->cycle_time = qopt->cycle_time;
5853ec50a9d4SVinicius Costa Gomes 	adapter->base_time = qopt->base_time;
5854ec50a9d4SVinicius Costa Gomes 
5855ec50a9d4SVinicius Costa Gomes 	/* FIXME: be a little smarter about cases when the gate for a
5856ec50a9d4SVinicius Costa Gomes 	 * queue stays open for more than one entry.
5857ec50a9d4SVinicius Costa Gomes 	 */
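	/* Accumulate each entry's interval so that every queue whose gate
	 * bit is set inherits the [start_time, end_time) window of the
	 * entry that opens it.
	 */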
5858ec50a9d4SVinicius Costa Gomes 	for (n = 0; n < qopt->num_entries; n++) {
5859ec50a9d4SVinicius Costa Gomes 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
5860ec50a9d4SVinicius Costa Gomes 		int i;
5861ec50a9d4SVinicius Costa Gomes 
5862ec50a9d4SVinicius Costa Gomes 		end_time += e->interval;
5863ec50a9d4SVinicius Costa Gomes 
5864ec50a9d4SVinicius Costa Gomes 		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
5865ec50a9d4SVinicius Costa Gomes 			struct igc_ring *ring = adapter->tx_ring[i];
5866ec50a9d4SVinicius Costa Gomes 
5867ec50a9d4SVinicius Costa Gomes 			if (!(e->gate_mask & BIT(i)))
5868ec50a9d4SVinicius Costa Gomes 				continue;
5869ec50a9d4SVinicius Costa Gomes 
5870ec50a9d4SVinicius Costa Gomes 			ring->start_time = start_time;
5871ec50a9d4SVinicius Costa Gomes 			ring->end_time = end_time;
5872ec50a9d4SVinicius Costa Gomes 		}
5873ec50a9d4SVinicius Costa Gomes 
5874ec50a9d4SVinicius Costa Gomes 		start_time += e->interval;
5875ec50a9d4SVinicius Costa Gomes 	}
5876ec50a9d4SVinicius Costa Gomes 
5877ec50a9d4SVinicius Costa Gomes 	return 0;
5878ec50a9d4SVinicius Costa Gomes }
5879ec50a9d4SVinicius Costa Gomes 
5880ec50a9d4SVinicius Costa Gomes static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
5881ec50a9d4SVinicius Costa Gomes 					 struct tc_taprio_qopt_offload *qopt)
5882ec50a9d4SVinicius Costa Gomes {
5883ec50a9d4SVinicius Costa Gomes 	struct igc_hw *hw = &adapter->hw;
5884ec50a9d4SVinicius Costa Gomes 	int err;
5885ec50a9d4SVinicius Costa Gomes 
5886ec50a9d4SVinicius Costa Gomes 	if (hw->mac.type != igc_i225)
5887ec50a9d4SVinicius Costa Gomes 		return -EOPNOTSUPP;
5888ec50a9d4SVinicius Costa Gomes 
5889ec50a9d4SVinicius Costa Gomes 	err = igc_save_qbv_schedule(adapter, qopt);
5890ec50a9d4SVinicius Costa Gomes 	if (err)
5891ec50a9d4SVinicius Costa Gomes 		return err;
5892ec50a9d4SVinicius Costa Gomes 
5893ec50a9d4SVinicius Costa Gomes 	return igc_tsn_offload_apply(adapter);
5894ec50a9d4SVinicius Costa Gomes }
5895ec50a9d4SVinicius Costa Gomes 
5896ec50a9d4SVinicius Costa Gomes static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
5897ec50a9d4SVinicius Costa Gomes 			void *type_data)
5898ec50a9d4SVinicius Costa Gomes {
5899ec50a9d4SVinicius Costa Gomes 	struct igc_adapter *adapter = netdev_priv(dev);
5900ec50a9d4SVinicius Costa Gomes 
5901ec50a9d4SVinicius Costa Gomes 	switch (type) {
5902ec50a9d4SVinicius Costa Gomes 	case TC_SETUP_QDISC_TAPRIO:
5903ec50a9d4SVinicius Costa Gomes 		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
5904ec50a9d4SVinicius Costa Gomes 
590582faa9b7SVinicius Costa Gomes 	case TC_SETUP_QDISC_ETF:
590682faa9b7SVinicius Costa Gomes 		return igc_tsn_enable_launchtime(adapter, type_data);
590782faa9b7SVinicius Costa Gomes 
5908ec50a9d4SVinicius Costa Gomes 	default:
5909ec50a9d4SVinicius Costa Gomes 		return -EOPNOTSUPP;
5910ec50a9d4SVinicius Costa Gomes 	}
5911ec50a9d4SVinicius Costa Gomes }
5912ec50a9d4SVinicius Costa Gomes 
591326575105SAndre Guedes static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
591426575105SAndre Guedes {
591526575105SAndre Guedes 	struct igc_adapter *adapter = netdev_priv(dev);
591626575105SAndre Guedes 
591726575105SAndre Guedes 	switch (bpf->command) {
591826575105SAndre Guedes 	case XDP_SETUP_PROG:
591926575105SAndre Guedes 		return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
5920fc9df2a0SAndre Guedes 	case XDP_SETUP_XSK_POOL:
5921fc9df2a0SAndre Guedes 		return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
5922fc9df2a0SAndre Guedes 					  bpf->xsk.queue_id);
592326575105SAndre Guedes 	default:
592426575105SAndre Guedes 		return -EOPNOTSUPP;
592526575105SAndre Guedes 	}
592626575105SAndre Guedes }
592726575105SAndre Guedes 
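/* igc_xdp_xmit - transmit a batch of XDP frames (ndo_xdp_xmit hook)
 *
 * Frames are queued on the Tx ring associated with the current CPU while
 * holding that ring's netdev queue lock; frames that fail descriptor
 * setup are returned to the XDP layer and counted as drops. Returns the
 * number of frames actually placed on the ring.
 */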
59284ff32036SAndre Guedes static int igc_xdp_xmit(struct net_device *dev, int num_frames,
59294ff32036SAndre Guedes 			struct xdp_frame **frames, u32 flags)
59304ff32036SAndre Guedes {
59314ff32036SAndre Guedes 	struct igc_adapter *adapter = netdev_priv(dev);
59324ff32036SAndre Guedes 	int cpu = smp_processor_id();
59334ff32036SAndre Guedes 	struct netdev_queue *nq;
59344ff32036SAndre Guedes 	struct igc_ring *ring;
59354ff32036SAndre Guedes 	int i, drops;
59364ff32036SAndre Guedes 
59374ff32036SAndre Guedes 	if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
59384ff32036SAndre Guedes 		return -ENETDOWN;
59394ff32036SAndre Guedes 
59404ff32036SAndre Guedes 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
59414ff32036SAndre Guedes 		return -EINVAL;
59424ff32036SAndre Guedes 
59434ff32036SAndre Guedes 	ring = igc_xdp_get_tx_ring(adapter, cpu);
59444ff32036SAndre Guedes 	nq = txring_txq(ring);
59454ff32036SAndre Guedes 
59464ff32036SAndre Guedes 	__netif_tx_lock(nq, cpu);
59474ff32036SAndre Guedes 
59484ff32036SAndre Guedes 	drops = 0;
59494ff32036SAndre Guedes 	for (i = 0; i < num_frames; i++) {
59504ff32036SAndre Guedes 		int err;
59514ff32036SAndre Guedes 		struct xdp_frame *xdpf = frames[i];
59524ff32036SAndre Guedes 
59534ff32036SAndre Guedes 		err = igc_xdp_init_tx_descriptor(ring, xdpf);
59544ff32036SAndre Guedes 		if (err) {
59554ff32036SAndre Guedes 			xdp_return_frame_rx_napi(xdpf);
59564ff32036SAndre Guedes 			drops++;
59574ff32036SAndre Guedes 		}
59584ff32036SAndre Guedes 	}
59594ff32036SAndre Guedes 
59604ff32036SAndre Guedes 	if (flags & XDP_XMIT_FLUSH)
59614ff32036SAndre Guedes 		igc_flush_tx_descriptors(ring);
59624ff32036SAndre Guedes 
59634ff32036SAndre Guedes 	__netif_tx_unlock(nq);
59644ff32036SAndre Guedes 
59654ff32036SAndre Guedes 	return num_frames - drops;
59664ff32036SAndre Guedes }
59674ff32036SAndre Guedes 
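/* Software-trigger the interrupt of a single queue vector by writing its
 * EIMS value to the EICS register, forcing the vector's NAPI handler to
 * run.
 */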
5968fc9df2a0SAndre Guedes static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
5969fc9df2a0SAndre Guedes 					struct igc_q_vector *q_vector)
5970fc9df2a0SAndre Guedes {
5971fc9df2a0SAndre Guedes 	struct igc_hw *hw = &adapter->hw;
5972fc9df2a0SAndre Guedes 	u32 eics = 0;
5973fc9df2a0SAndre Guedes 
5974fc9df2a0SAndre Guedes 	eics |= q_vector->eims_value;
5975fc9df2a0SAndre Guedes 	wr32(IGC_EICS, eics);
5976fc9df2a0SAndre Guedes }
5977fc9df2a0SAndre Guedes 
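/* igc_xsk_wakeup - ndo_xsk_wakeup hook for AF_XDP sockets
 *
 * Called when userspace needs the XSK rings serviced (typically via
 * sendto()/poll() on the socket). If the queue's NAPI is not already
 * scheduled, trigger its interrupt so the rings get processed.
 */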
5978fc9df2a0SAndre Guedes int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
5979fc9df2a0SAndre Guedes {
5980fc9df2a0SAndre Guedes 	struct igc_adapter *adapter = netdev_priv(dev);
5981fc9df2a0SAndre Guedes 	struct igc_q_vector *q_vector;
5982fc9df2a0SAndre Guedes 	struct igc_ring *ring;
5983fc9df2a0SAndre Guedes 
5984fc9df2a0SAndre Guedes 	if (test_bit(__IGC_DOWN, &adapter->state))
5985fc9df2a0SAndre Guedes 		return -ENETDOWN;
5986fc9df2a0SAndre Guedes 
5987fc9df2a0SAndre Guedes 	if (!igc_xdp_is_enabled(adapter))
5988fc9df2a0SAndre Guedes 		return -ENXIO;
5989fc9df2a0SAndre Guedes 
5990fc9df2a0SAndre Guedes 	if (queue_id >= adapter->num_rx_queues)
5991fc9df2a0SAndre Guedes 		return -EINVAL;
5992fc9df2a0SAndre Guedes 
5993fc9df2a0SAndre Guedes 	ring = adapter->rx_ring[queue_id];
5994fc9df2a0SAndre Guedes 
5995fc9df2a0SAndre Guedes 	if (!ring->xsk_pool)
5996fc9df2a0SAndre Guedes 		return -ENXIO;
5997fc9df2a0SAndre Guedes 
5998fc9df2a0SAndre Guedes 	q_vector = adapter->q_vector[queue_id];
5999fc9df2a0SAndre Guedes 	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6000fc9df2a0SAndre Guedes 		igc_trigger_rxtxq_interrupt(adapter, q_vector);
6001fc9df2a0SAndre Guedes 
6002fc9df2a0SAndre Guedes 	return 0;
6003fc9df2a0SAndre Guedes }
6004fc9df2a0SAndre Guedes 
6005c9a11c23SSasha Neftin static const struct net_device_ops igc_netdev_ops = {
6006c9a11c23SSasha Neftin 	.ndo_open		= igc_open,
6007c9a11c23SSasha Neftin 	.ndo_stop		= igc_close,
6008c9a11c23SSasha Neftin 	.ndo_start_xmit		= igc_xmit_frame,
60097f839684SSasha Neftin 	.ndo_set_rx_mode	= igc_set_rx_mode,
6010c9a11c23SSasha Neftin 	.ndo_set_mac_address	= igc_set_mac,
6011c9a11c23SSasha Neftin 	.ndo_change_mtu		= igc_change_mtu,
60126b7ed22aSVinicius Costa Gomes 	.ndo_get_stats64	= igc_get_stats64,
601365cd3a72SSasha Neftin 	.ndo_fix_features	= igc_fix_features,
601465cd3a72SSasha Neftin 	.ndo_set_features	= igc_set_features,
601565cd3a72SSasha Neftin 	.ndo_features_check	= igc_features_check,
6016*a7605370SArnd Bergmann 	.ndo_eth_ioctl		= igc_ioctl,
6017ec50a9d4SVinicius Costa Gomes 	.ndo_setup_tc		= igc_setup_tc,
601826575105SAndre Guedes 	.ndo_bpf		= igc_bpf,
60194ff32036SAndre Guedes 	.ndo_xdp_xmit		= igc_xdp_xmit,
6020fc9df2a0SAndre Guedes 	.ndo_xsk_wakeup		= igc_xsk_wakeup,
6021c9a11c23SSasha Neftin };
6022146740f9SSasha Neftin 
6023146740f9SSasha Neftin /* PCIe configuration access */
6024146740f9SSasha Neftin void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6025146740f9SSasha Neftin {
6026146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
6027146740f9SSasha Neftin 
6028146740f9SSasha Neftin 	pci_read_config_word(adapter->pdev, reg, value);
6029146740f9SSasha Neftin }
6030146740f9SSasha Neftin 
6031146740f9SSasha Neftin void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6032146740f9SSasha Neftin {
6033146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
6034146740f9SSasha Neftin 
6035146740f9SSasha Neftin 	pci_write_config_word(adapter->pdev, reg, *value);
6036146740f9SSasha Neftin }
6037146740f9SSasha Neftin 
6038146740f9SSasha Neftin s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6039146740f9SSasha Neftin {
6040146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
6041146740f9SSasha Neftin 
6042a16f6d3aSFrederick Lawler 	if (!pci_is_pcie(adapter->pdev))
6043146740f9SSasha Neftin 		return -IGC_ERR_CONFIG;
6044146740f9SSasha Neftin 
6045a16f6d3aSFrederick Lawler 	pcie_capability_read_word(adapter->pdev, reg, value);
6046146740f9SSasha Neftin 
6047146740f9SSasha Neftin 	return IGC_SUCCESS;
6048146740f9SSasha Neftin }
6049146740f9SSasha Neftin 
6050146740f9SSasha Neftin s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6051146740f9SSasha Neftin {
6052146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
6053146740f9SSasha Neftin 
6054a16f6d3aSFrederick Lawler 	if (!pci_is_pcie(adapter->pdev))
6055146740f9SSasha Neftin 		return -IGC_ERR_CONFIG;
6056146740f9SSasha Neftin 
6057a16f6d3aSFrederick Lawler 	pcie_capability_write_word(adapter->pdev, reg, *value);
6058146740f9SSasha Neftin 
6059146740f9SSasha Neftin 	return IGC_SUCCESS;
6060146740f9SSasha Neftin }
6061146740f9SSasha Neftin 
6062146740f9SSasha Neftin u32 igc_rd32(struct igc_hw *hw, u32 reg)
6063146740f9SSasha Neftin {
6064c9a11c23SSasha Neftin 	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
6065146740f9SSasha Neftin 	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
6066146740f9SSasha Neftin 	u32 value = 0;
6067146740f9SSasha Neftin 
6068146740f9SSasha Neftin 	value = readl(&hw_addr[reg]);
6069146740f9SSasha Neftin 
6070146740f9SSasha Neftin 	/* reads should not return all F's */
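	/* An all-ones value usually means the device has dropped off the
	 * bus; the extra readl() of register 0 distinguishes that case
	 * from a register whose contents are legitimately all ones.
	 */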
6071c9a11c23SSasha Neftin 	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
6072c9a11c23SSasha Neftin 		struct net_device *netdev = igc->netdev;
6073c9a11c23SSasha Neftin 
6074146740f9SSasha Neftin 		hw->hw_addr = NULL;
6075c9a11c23SSasha Neftin 		netif_device_detach(netdev);
6076c9a11c23SSasha Neftin 		netdev_err(netdev, "PCIe link lost, device now detached\n");
607794bc1e52SLyude Paul 		WARN(pci_device_is_present(igc->pdev),
607894bc1e52SLyude Paul 		     "igc: Failed to read reg 0x%x!\n", reg);
6079c9a11c23SSasha Neftin 	}
6080146740f9SSasha Neftin 
6081146740f9SSasha Neftin 	return value;
6082146740f9SSasha Neftin }
6083146740f9SSasha Neftin 
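/* igc_set_spd_dplx - force a speed/duplex combination
 *
 * SPEED_* values are even and DUPLEX_* occupies only the low bit, so
 * spd + dplx uniquely encodes each pairing for the switch below.
 */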
60848c5ad0daSSasha Neftin int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
60858c5ad0daSSasha Neftin {
60868c5ad0daSSasha Neftin 	struct igc_mac_info *mac = &adapter->hw.mac;
60878c5ad0daSSasha Neftin 
6088501f2309SJiapeng Zhong 	mac->autoneg = false;
60898c5ad0daSSasha Neftin 
60908c5ad0daSSasha Neftin 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
60918c5ad0daSSasha Neftin 	 * for the switch() below to work
60928c5ad0daSSasha Neftin 	 */
60938c5ad0daSSasha Neftin 	if ((spd & 1) || (dplx & ~1))
60948c5ad0daSSasha Neftin 		goto err_inval;
60958c5ad0daSSasha Neftin 
60968c5ad0daSSasha Neftin 	switch (spd + dplx) {
60978c5ad0daSSasha Neftin 	case SPEED_10 + DUPLEX_HALF:
60988c5ad0daSSasha Neftin 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
60998c5ad0daSSasha Neftin 		break;
61008c5ad0daSSasha Neftin 	case SPEED_10 + DUPLEX_FULL:
61018c5ad0daSSasha Neftin 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
61028c5ad0daSSasha Neftin 		break;
61038c5ad0daSSasha Neftin 	case SPEED_100 + DUPLEX_HALF:
61048c5ad0daSSasha Neftin 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
61058c5ad0daSSasha Neftin 		break;
61068c5ad0daSSasha Neftin 	case SPEED_100 + DUPLEX_FULL:
61078c5ad0daSSasha Neftin 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
61088c5ad0daSSasha Neftin 		break;
61098c5ad0daSSasha Neftin 	case SPEED_1000 + DUPLEX_FULL:
6110501f2309SJiapeng Zhong 		mac->autoneg = true;
61118c5ad0daSSasha Neftin 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
61128c5ad0daSSasha Neftin 		break;
61138c5ad0daSSasha Neftin 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
61148c5ad0daSSasha Neftin 		goto err_inval;
61158c5ad0daSSasha Neftin 	case SPEED_2500 + DUPLEX_FULL:
6116501f2309SJiapeng Zhong 		mac->autoneg = true;
61178c5ad0daSSasha Neftin 		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
61188c5ad0daSSasha Neftin 		break;
61198c5ad0daSSasha Neftin 	case SPEED_2500 + DUPLEX_HALF: /* not supported */
61208c5ad0daSSasha Neftin 	default:
61218c5ad0daSSasha Neftin 		goto err_inval;
61228c5ad0daSSasha Neftin 	}
61238c5ad0daSSasha Neftin 
61248c5ad0daSSasha Neftin 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
61258c5ad0daSSasha Neftin 	adapter->hw.phy.mdix = AUTO_ALL_MODES;
61268c5ad0daSSasha Neftin 
61278c5ad0daSSasha Neftin 	return 0;
61288c5ad0daSSasha Neftin 
61298c5ad0daSSasha Neftin err_inval:
613025f06effSAndre Guedes 	netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
61318c5ad0daSSasha Neftin 	return -EINVAL;
61328c5ad0daSSasha Neftin }
61338c5ad0daSSasha Neftin 
6134d89f8841SSasha Neftin /**
6135d89f8841SSasha Neftin  * igc_probe - Device Initialization Routine
6136d89f8841SSasha Neftin  * @pdev: PCI device information struct
6137d89f8841SSasha Neftin  * @ent: entry in igc_pci_tbl
6138d89f8841SSasha Neftin  *
6139d89f8841SSasha Neftin  * Returns 0 on success, negative on failure
6140d89f8841SSasha Neftin  *
6141d89f8841SSasha Neftin  * igc_probe initializes an adapter identified by a pci_dev structure.
6142d89f8841SSasha Neftin  * The OS initialization, configuring the adapter private structure,
6143d89f8841SSasha Neftin  * and a hardware reset occur.
6144d89f8841SSasha Neftin  */
6145d89f8841SSasha Neftin static int igc_probe(struct pci_dev *pdev,
6146d89f8841SSasha Neftin 		     const struct pci_device_id *ent)
6147d89f8841SSasha Neftin {
6148146740f9SSasha Neftin 	struct igc_adapter *adapter;
6149c9a11c23SSasha Neftin 	struct net_device *netdev;
6150c9a11c23SSasha Neftin 	struct igc_hw *hw;
6151ab405612SSasha Neftin 	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
615221da01fdSSasha Neftin 	int err, pci_using_dac;
6153d89f8841SSasha Neftin 
6154d89f8841SSasha Neftin 	err = pci_enable_device_mem(pdev);
6155d89f8841SSasha Neftin 	if (err)
6156d89f8841SSasha Neftin 		return err;
6157d89f8841SSasha Neftin 
615821da01fdSSasha Neftin 	pci_using_dac = 0;
615921da01fdSSasha Neftin 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6160d89f8841SSasha Neftin 	if (!err) {
616121da01fdSSasha Neftin 		pci_using_dac = 1;
6162d89f8841SSasha Neftin 	} else {
616321da01fdSSasha Neftin 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6164d89f8841SSasha Neftin 		if (err) {
616521da01fdSSasha Neftin 			dev_err(&pdev->dev,
616621da01fdSSasha Neftin 				"No usable DMA configuration, aborting\n");
6167d89f8841SSasha Neftin 			goto err_dma;
6168d89f8841SSasha Neftin 		}
6169d89f8841SSasha Neftin 	}
6170d89f8841SSasha Neftin 
617121da01fdSSasha Neftin 	err = pci_request_mem_regions(pdev, igc_driver_name);
6172d89f8841SSasha Neftin 	if (err)
6173d89f8841SSasha Neftin 		goto err_pci_reg;
6174d89f8841SSasha Neftin 
6175c9a11c23SSasha Neftin 	pci_enable_pcie_error_reporting(pdev);
6176c9a11c23SSasha Neftin 
6177d89f8841SSasha Neftin 	pci_set_master(pdev);
6178c9a11c23SSasha Neftin 
6179c9a11c23SSasha Neftin 	err = -ENOMEM;
6180c9a11c23SSasha Neftin 	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
6181c9a11c23SSasha Neftin 				   IGC_MAX_TX_QUEUES);
6182c9a11c23SSasha Neftin 
6183c9a11c23SSasha Neftin 	if (!netdev)
6184c9a11c23SSasha Neftin 		goto err_alloc_etherdev;
6185c9a11c23SSasha Neftin 
6186c9a11c23SSasha Neftin 	SET_NETDEV_DEV(netdev, &pdev->dev);
6187c9a11c23SSasha Neftin 
6188c9a11c23SSasha Neftin 	pci_set_drvdata(pdev, netdev);
6189c9a11c23SSasha Neftin 	adapter = netdev_priv(netdev);
6190c9a11c23SSasha Neftin 	adapter->netdev = netdev;
6191c9a11c23SSasha Neftin 	adapter->pdev = pdev;
6192c9a11c23SSasha Neftin 	hw = &adapter->hw;
6193c9a11c23SSasha Neftin 	hw->back = adapter;
6194c9a11c23SSasha Neftin 	adapter->port_num = hw->bus.func;
61958c5ad0daSSasha Neftin 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6196c9a11c23SSasha Neftin 
6197d89f8841SSasha Neftin 	err = pci_save_state(pdev);
6198c9a11c23SSasha Neftin 	if (err)
6199c9a11c23SSasha Neftin 		goto err_ioremap;
6200c9a11c23SSasha Neftin 
6201c9a11c23SSasha Neftin 	err = -EIO;
6202c9a11c23SSasha Neftin 	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
6203c9a11c23SSasha Neftin 				   pci_resource_len(pdev, 0));
6204c9a11c23SSasha Neftin 	if (!adapter->io_addr)
6205c9a11c23SSasha Neftin 		goto err_ioremap;
6206c9a11c23SSasha Neftin 
6207c9a11c23SSasha Neftin 	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
6208c9a11c23SSasha Neftin 	hw->hw_addr = adapter->io_addr;
6209c9a11c23SSasha Neftin 
6210c9a11c23SSasha Neftin 	netdev->netdev_ops = &igc_netdev_ops;
62117df76bd1SAndre Guedes 	igc_ethtool_set_ops(netdev);
6212c9a11c23SSasha Neftin 	netdev->watchdog_timeo = 5 * HZ;
6213c9a11c23SSasha Neftin 
6214c9a11c23SSasha Neftin 	netdev->mem_start = pci_resource_start(pdev, 0);
6215c9a11c23SSasha Neftin 	netdev->mem_end = pci_resource_end(pdev, 0);
6216c9a11c23SSasha Neftin 
6217c9a11c23SSasha Neftin 	/* PCI config space info */
6218c9a11c23SSasha Neftin 	hw->vendor_id = pdev->vendor;
6219c9a11c23SSasha Neftin 	hw->device_id = pdev->device;
6220c9a11c23SSasha Neftin 	hw->revision_id = pdev->revision;
6221c9a11c23SSasha Neftin 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
6222c9a11c23SSasha Neftin 	hw->subsystem_device_id = pdev->subsystem_device;
6223146740f9SSasha Neftin 
6224ab405612SSasha Neftin 	/* Copy the default MAC and PHY function pointers */
6225ab405612SSasha Neftin 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
62265586838fSSasha Neftin 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6227ab405612SSasha Neftin 
6228ab405612SSasha Neftin 	/* Initialize device-specific constants */
6229ab405612SSasha Neftin 	err = ei->get_invariants(hw);
6230ab405612SSasha Neftin 	if (err)
6231ab405612SSasha Neftin 		goto err_sw_init;
6232ab405612SSasha Neftin 
6233d3ae3cfbSSasha Neftin 	/* Add supported features to the features list*/
6234b7b46245SSasha Neftin 	netdev->features |= NETIF_F_SG;
6235f38b782dSSasha Neftin 	netdev->features |= NETIF_F_TSO;
6236f38b782dSSasha Neftin 	netdev->features |= NETIF_F_TSO6;
62378e8204a4SSasha Neftin 	netdev->features |= NETIF_F_TSO_ECN;
62383bdd7086SSasha Neftin 	netdev->features |= NETIF_F_RXCSUM;
6239d3ae3cfbSSasha Neftin 	netdev->features |= NETIF_F_HW_CSUM;
62400ac960a8SSasha Neftin 	netdev->features |= NETIF_F_SCTP_CRC;
6241635071e2SSasha Neftin 	netdev->features |= NETIF_F_HW_TC;
6242d3ae3cfbSSasha Neftin 
624334428dffSSasha Neftin #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
624434428dffSSasha Neftin 				  NETIF_F_GSO_GRE_CSUM | \
624534428dffSSasha Neftin 				  NETIF_F_GSO_IPXIP4 | \
624634428dffSSasha Neftin 				  NETIF_F_GSO_IPXIP6 | \
624734428dffSSasha Neftin 				  NETIF_F_GSO_UDP_TUNNEL | \
624834428dffSSasha Neftin 				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
624934428dffSSasha Neftin 
625034428dffSSasha Neftin 	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
625134428dffSSasha Neftin 	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
6252146740f9SSasha Neftin 
6253146740f9SSasha Neftin 	/* setup the private structure */
6254146740f9SSasha Neftin 	err = igc_sw_init(adapter);
6255146740f9SSasha Neftin 	if (err)
6256146740f9SSasha Neftin 		goto err_sw_init;
6257146740f9SSasha Neftin 
625865cd3a72SSasha Neftin 	/* copy netdev features into list of user selectable features */
625965cd3a72SSasha Neftin 	netdev->hw_features |= NETIF_F_NTUPLE;
62608d744963SMuhammad Husaini Zulkifli 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
62618d744963SMuhammad Husaini Zulkifli 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
6262d3ae3cfbSSasha Neftin 	netdev->hw_features |= netdev->features;
626365cd3a72SSasha Neftin 
62644439dc42SSasha Neftin 	if (pci_using_dac)
62654439dc42SSasha Neftin 		netdev->features |= NETIF_F_HIGHDMA;
62664439dc42SSasha Neftin 
62678d744963SMuhammad Husaini Zulkifli 	netdev->vlan_features |= netdev->features;
62688d744963SMuhammad Husaini Zulkifli 
6269c9a11c23SSasha Neftin 	/* MTU range: 68 - 9216 */
6270c9a11c23SSasha Neftin 	netdev->min_mtu = ETH_MIN_MTU;
6271c9a11c23SSasha Neftin 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
6272c9a11c23SSasha Neftin 
62734eb80801SSasha Neftin 	/* before reading the NVM, reset the controller to put the device in a
62744eb80801SSasha Neftin 	 * known good starting state
62754eb80801SSasha Neftin 	 */
62764eb80801SSasha Neftin 	hw->mac.ops.reset_hw(hw);
62774eb80801SSasha Neftin 
62789b924eddSSasha Neftin 	if (igc_get_flash_presence_i225(hw)) {
62799b924eddSSasha Neftin 		if (hw->nvm.ops.validate(hw) < 0) {
628025f06effSAndre Guedes 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
62819b924eddSSasha Neftin 			err = -EIO;
62829b924eddSSasha Neftin 			goto err_eeprom;
62839b924eddSSasha Neftin 		}
62849b924eddSSasha Neftin 	}
62859b924eddSSasha Neftin 
62864eb80801SSasha Neftin 	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
62874eb80801SSasha Neftin 		/* copy the MAC address out of the NVM */
62884eb80801SSasha Neftin 		if (hw->mac.ops.read_mac_addr(hw))
62894eb80801SSasha Neftin 			dev_err(&pdev->dev, "NVM Read Error\n");
62904eb80801SSasha Neftin 	}
62914eb80801SSasha Neftin 
62924eb80801SSasha Neftin 	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
62934eb80801SSasha Neftin 
62944eb80801SSasha Neftin 	if (!is_valid_ether_addr(netdev->dev_addr)) {
62954eb80801SSasha Neftin 		dev_err(&pdev->dev, "Invalid MAC Address\n");
62964eb80801SSasha Neftin 		err = -EIO;
62974eb80801SSasha Neftin 		goto err_eeprom;
62984eb80801SSasha Neftin 	}
62994eb80801SSasha Neftin 
63000507ef8aSSasha Neftin 	/* configure RXPBSIZE and TXPBSIZE */
63010507ef8aSSasha Neftin 	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
63020507ef8aSSasha Neftin 	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
63030507ef8aSSasha Neftin 
63040507ef8aSSasha Neftin 	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
6305208983f0SSasha Neftin 	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
63060507ef8aSSasha Neftin 
63070507ef8aSSasha Neftin 	INIT_WORK(&adapter->reset_task, igc_reset_task);
6308208983f0SSasha Neftin 	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
63090507ef8aSSasha Neftin 
63104eb80801SSasha Neftin 	/* Initialize link properties that are user-changeable */
63114eb80801SSasha Neftin 	adapter->fc_autoneg = true;
63124eb80801SSasha Neftin 	hw->mac.autoneg = true;
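	/* 0xaf advertises 10/100 half and full duplex plus 1000 and 2500
	 * full duplex; the unsupported half-duplex gigabit rates are left
	 * out.
	 */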
63134eb80801SSasha Neftin 	hw->phy.autoneg_advertised = 0xaf;
63144eb80801SSasha Neftin 
63154eb80801SSasha Neftin 	hw->fc.requested_mode = igc_fc_default;
63164eb80801SSasha Neftin 	hw->fc.current_mode = igc_fc_default;
63174eb80801SSasha Neftin 
6318e055600dSSasha Neftin 	/* By default, support wake on port A */
6319e055600dSSasha Neftin 	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6320e055600dSSasha Neftin 
6321e055600dSSasha Neftin 	/* initialize the WoL settings based on the EEPROM settings */
6322e055600dSSasha Neftin 	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6323e055600dSSasha Neftin 		adapter->wol |= IGC_WUFC_MAG;
6324e055600dSSasha Neftin 
6325e055600dSSasha Neftin 	device_set_wakeup_enable(&adapter->pdev->dev,
6326e055600dSSasha Neftin 				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6327e055600dSSasha Neftin 
63283cda505aSVinicius Costa Gomes 	igc_ptp_init(adapter);
63293cda505aSVinicius Costa Gomes 
6330c9a11c23SSasha Neftin 	/* reset the hardware with the new settings */
6331c9a11c23SSasha Neftin 	igc_reset(adapter);
6332c9a11c23SSasha Neftin 
6333c9a11c23SSasha Neftin 	/* let the f/w know that the h/w is now under the control of the
6334c9a11c23SSasha Neftin 	 * driver.
6335c9a11c23SSasha Neftin 	 */
6336c9a11c23SSasha Neftin 	igc_get_hw_control(adapter);
6337c9a11c23SSasha Neftin 
6338c9a11c23SSasha Neftin 	strncpy(netdev->name, "eth%d", IFNAMSIZ);
6339c9a11c23SSasha Neftin 	err = register_netdev(netdev);
6340c9a11c23SSasha Neftin 	if (err)
6341c9a11c23SSasha Neftin 		goto err_register;
6342c9a11c23SSasha Neftin 
6343c9a11c23SSasha Neftin 	/* carrier off reporting is important to ethtool even BEFORE open */
6344c9a11c23SSasha Neftin 	netif_carrier_off(netdev);
6345c9a11c23SSasha Neftin 
6346ab405612SSasha Neftin 	/* keep a copy of the board-specific device info */
6347ab405612SSasha Neftin 	adapter->ei = *ei;
6348ab405612SSasha Neftin 
6349c9a11c23SSasha Neftin 	/* print pcie link status and MAC address */
6350c9a11c23SSasha Neftin 	pcie_print_link_status(pdev);
6351c9a11c23SSasha Neftin 	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6352c9a11c23SSasha Neftin 
6353e0751556SRafael J. Wysocki 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
635493ec439aSSasha Neftin 	/* Disable EEE for internal PHY devices */
635593ec439aSSasha Neftin 	hw->dev_spec._base.eee_enable = false;
635693ec439aSSasha Neftin 	adapter->flags &= ~IGC_FLAG_EEE;
635793ec439aSSasha Neftin 	igc_set_eee_i225(hw, false, false, false);
63588594a7f3SSasha Neftin 
63598594a7f3SSasha Neftin 	pm_runtime_put_noidle(&pdev->dev);
63608594a7f3SSasha Neftin 
6361d89f8841SSasha Neftin 	return 0;
6362d89f8841SSasha Neftin 
6363c9a11c23SSasha Neftin err_register:
6364c9a11c23SSasha Neftin 	igc_release_hw_control(adapter);
63654eb80801SSasha Neftin err_eeprom:
63664eb80801SSasha Neftin 	if (!igc_check_reset_block(hw))
63674eb80801SSasha Neftin 		igc_reset_phy(hw);
6368146740f9SSasha Neftin err_sw_init:
63693df25e4cSSasha Neftin 	igc_clear_interrupt_scheme(adapter);
63703df25e4cSSasha Neftin 	iounmap(adapter->io_addr);
6371c9a11c23SSasha Neftin err_ioremap:
6372c9a11c23SSasha Neftin 	free_netdev(netdev);
6373c9a11c23SSasha Neftin err_alloc_etherdev:
6374c6bc9e5cSChristophe JAILLET 	pci_disable_pcie_error_reporting(pdev);
6375faf4dd52SSasha Neftin 	pci_release_mem_regions(pdev);
6376d89f8841SSasha Neftin err_pci_reg:
6377d89f8841SSasha Neftin err_dma:
6378d89f8841SSasha Neftin 	pci_disable_device(pdev);
6379d89f8841SSasha Neftin 	return err;
6380d89f8841SSasha Neftin }
6381d89f8841SSasha Neftin 
6382d89f8841SSasha Neftin /**
6383d89f8841SSasha Neftin  * igc_remove - Device Removal Routine
6384d89f8841SSasha Neftin  * @pdev: PCI device information struct
6385d89f8841SSasha Neftin  *
6386d89f8841SSasha Neftin  * igc_remove is called by the PCI subsystem to alert the driver
6387d89f8841SSasha Neftin  * that it should release a PCI device.  This could be caused by a
6388d89f8841SSasha Neftin  * Hot-Plug event, or because the driver is going to be removed from
6389d89f8841SSasha Neftin  * memory.
6390d89f8841SSasha Neftin  */
6391d89f8841SSasha Neftin static void igc_remove(struct pci_dev *pdev)
6392d89f8841SSasha Neftin {
6393c9a11c23SSasha Neftin 	struct net_device *netdev = pci_get_drvdata(pdev);
6394c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
6395c9a11c23SSasha Neftin 
63968594a7f3SSasha Neftin 	pm_runtime_get_noresume(&pdev->dev);
63978594a7f3SSasha Neftin 
6398e256ec83SAndre Guedes 	igc_flush_nfc_rules(adapter);
6399e256ec83SAndre Guedes 
64005f295805SVinicius Costa Gomes 	igc_ptp_stop(adapter);
64015f295805SVinicius Costa Gomes 
6402c9a11c23SSasha Neftin 	set_bit(__IGC_DOWN, &adapter->state);
64030507ef8aSSasha Neftin 
64040507ef8aSSasha Neftin 	del_timer_sync(&adapter->watchdog_timer);
6405208983f0SSasha Neftin 	del_timer_sync(&adapter->phy_info_timer);
64060507ef8aSSasha Neftin 
64070507ef8aSSasha Neftin 	cancel_work_sync(&adapter->reset_task);
6408208983f0SSasha Neftin 	cancel_work_sync(&adapter->watchdog_task);
6409c9a11c23SSasha Neftin 
6410c9a11c23SSasha Neftin 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
6411c9a11c23SSasha Neftin 	 * would have already happened in close and is redundant.
6412c9a11c23SSasha Neftin 	 */
6413c9a11c23SSasha Neftin 	igc_release_hw_control(adapter);
6414c9a11c23SSasha Neftin 	unregister_netdev(netdev);
6415c9a11c23SSasha Neftin 
64160507ef8aSSasha Neftin 	igc_clear_interrupt_scheme(adapter);
64170507ef8aSSasha Neftin 	pci_iounmap(pdev, adapter->io_addr);
64180507ef8aSSasha Neftin 	pci_release_mem_regions(pdev);
6419d89f8841SSasha Neftin 
6420c9a11c23SSasha Neftin 	free_netdev(netdev);
64210507ef8aSSasha Neftin 
64220507ef8aSSasha Neftin 	pci_disable_pcie_error_reporting(pdev);
64230507ef8aSSasha Neftin 
6424d89f8841SSasha Neftin 	pci_disable_device(pdev);
6425d89f8841SSasha Neftin }
6426d89f8841SSasha Neftin 
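/* __igc_shutdown - common system/runtime suspend path
 *
 * Detaches the netdev, programs the requested wake-up filters (WUFC) and
 * reports through @enable_wake whether PME should remain armed.
 */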
64279513d2a5SSasha Neftin static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
64289513d2a5SSasha Neftin 			  bool runtime)
64299513d2a5SSasha Neftin {
64309513d2a5SSasha Neftin 	struct net_device *netdev = pci_get_drvdata(pdev);
64319513d2a5SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
64329513d2a5SSasha Neftin 	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
64339513d2a5SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
64349513d2a5SSasha Neftin 	u32 ctrl, rctl, status;
64359513d2a5SSasha Neftin 	bool wake;
64369513d2a5SSasha Neftin 
64379513d2a5SSasha Neftin 	rtnl_lock();
64389513d2a5SSasha Neftin 	netif_device_detach(netdev);
64399513d2a5SSasha Neftin 
64409513d2a5SSasha Neftin 	if (netif_running(netdev))
64419513d2a5SSasha Neftin 		__igc_close(netdev, true);
64429513d2a5SSasha Neftin 
6443a5136f76SSasha Neftin 	igc_ptp_suspend(adapter);
6444a5136f76SSasha Neftin 
64459513d2a5SSasha Neftin 	igc_clear_interrupt_scheme(adapter);
64469513d2a5SSasha Neftin 	rtnl_unlock();
64479513d2a5SSasha Neftin 
64489513d2a5SSasha Neftin 	status = rd32(IGC_STATUS);
64499513d2a5SSasha Neftin 	if (status & IGC_STATUS_LU)
64509513d2a5SSasha Neftin 		wufc &= ~IGC_WUFC_LNKC;
64519513d2a5SSasha Neftin 
64529513d2a5SSasha Neftin 	if (wufc) {
64539513d2a5SSasha Neftin 		igc_setup_rctl(adapter);
64549513d2a5SSasha Neftin 		igc_set_rx_mode(netdev);
64559513d2a5SSasha Neftin 
64569513d2a5SSasha Neftin 		/* turn on all-multi mode if wake on multicast is enabled */
64579513d2a5SSasha Neftin 		if (wufc & IGC_WUFC_MC) {
64589513d2a5SSasha Neftin 			rctl = rd32(IGC_RCTL);
64599513d2a5SSasha Neftin 			rctl |= IGC_RCTL_MPE;
64609513d2a5SSasha Neftin 			wr32(IGC_RCTL, rctl);
64619513d2a5SSasha Neftin 		}
64629513d2a5SSasha Neftin 
64639513d2a5SSasha Neftin 		ctrl = rd32(IGC_CTRL);
64649513d2a5SSasha Neftin 		ctrl |= IGC_CTRL_ADVD3WUC;
64659513d2a5SSasha Neftin 		wr32(IGC_CTRL, ctrl);
64669513d2a5SSasha Neftin 
64679513d2a5SSasha Neftin 		/* Allow time for pending master requests to run */
64689513d2a5SSasha Neftin 		igc_disable_pcie_master(hw);
64699513d2a5SSasha Neftin 
64709513d2a5SSasha Neftin 		wr32(IGC_WUC, IGC_WUC_PME_EN);
64719513d2a5SSasha Neftin 		wr32(IGC_WUFC, wufc);
64729513d2a5SSasha Neftin 	} else {
64739513d2a5SSasha Neftin 		wr32(IGC_WUC, 0);
64749513d2a5SSasha Neftin 		wr32(IGC_WUFC, 0);
64759513d2a5SSasha Neftin 	}
64769513d2a5SSasha Neftin 
64779513d2a5SSasha Neftin 	wake = wufc || adapter->en_mng_pt;
64789513d2a5SSasha Neftin 	if (!wake)
6479a0beb3c1SSasha Neftin 		igc_power_down_phy_copper_base(&adapter->hw);
64809513d2a5SSasha Neftin 	else
64819513d2a5SSasha Neftin 		igc_power_up_link(adapter);
64829513d2a5SSasha Neftin 
64839513d2a5SSasha Neftin 	if (enable_wake)
64849513d2a5SSasha Neftin 		*enable_wake = wake;
64859513d2a5SSasha Neftin 
64869513d2a5SSasha Neftin 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
64879513d2a5SSasha Neftin 	 * would have already happened in close and is redundant.
64889513d2a5SSasha Neftin 	 */
64899513d2a5SSasha Neftin 	igc_release_hw_control(adapter);
64909513d2a5SSasha Neftin 
64919513d2a5SSasha Neftin 	pci_disable_device(pdev);
64929513d2a5SSasha Neftin 
64939513d2a5SSasha Neftin 	return 0;
64949513d2a5SSasha Neftin }
64959513d2a5SSasha Neftin 
64969513d2a5SSasha Neftin #ifdef CONFIG_PM
64979513d2a5SSasha Neftin static int __maybe_unused igc_runtime_suspend(struct device *dev)
64989513d2a5SSasha Neftin {
64999513d2a5SSasha Neftin 	return __igc_shutdown(to_pci_dev(dev), NULL, true);
65009513d2a5SSasha Neftin }
65019513d2a5SSasha Neftin 
65029513d2a5SSasha Neftin static void igc_deliver_wake_packet(struct net_device *netdev)
65039513d2a5SSasha Neftin {
65049513d2a5SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
65059513d2a5SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
65069513d2a5SSasha Neftin 	struct sk_buff *skb;
65079513d2a5SSasha Neftin 	u32 wupl;
65089513d2a5SSasha Neftin 
65099513d2a5SSasha Neftin 	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
65109513d2a5SSasha Neftin 
65119513d2a5SSasha Neftin 	/* WUPM stores only the first 128 bytes of the wake packet.
65129513d2a5SSasha Neftin 	 * Read the packet only if we have the whole thing.
65139513d2a5SSasha Neftin 	 */
65149513d2a5SSasha Neftin 	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
65159513d2a5SSasha Neftin 		return;
65169513d2a5SSasha Neftin 
65179513d2a5SSasha Neftin 	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
65189513d2a5SSasha Neftin 	if (!skb)
65199513d2a5SSasha Neftin 		return;
65209513d2a5SSasha Neftin 
65219513d2a5SSasha Neftin 	skb_put(skb, wupl);
65229513d2a5SSasha Neftin 
65239513d2a5SSasha Neftin 	/* Round up so WUPM is read in whole 32-bit words */
65249513d2a5SSasha Neftin 	wupl = roundup(wupl, 4);
65259513d2a5SSasha Neftin 
65269513d2a5SSasha Neftin 	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
65279513d2a5SSasha Neftin 
65289513d2a5SSasha Neftin 	skb->protocol = eth_type_trans(skb, netdev);
65299513d2a5SSasha Neftin 	netif_rx(skb);
65309513d2a5SSasha Neftin }
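
/* Sketch (illustrative only): the memcpy_fromio() above is equivalent to
 * reading WUPM one 32-bit word at a time, which is why the length is
 * rounded up to a multiple of 4 first. Assumes IGC_WUPM_REG(0) is the base
 * of the wake packet memory window; the helper name is hypothetical.
 */
#if 0
static void igc_copy_wupm(struct igc_hw *hw, u8 *dst, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		u32 word = readl(hw->hw_addr + IGC_WUPM_REG(0) + i);

		memcpy(dst + i, &word, sizeof(word));
	}
}
#endif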
65319513d2a5SSasha Neftin 
65329513d2a5SSasha Neftin static int __maybe_unused igc_resume(struct device *dev)
65339513d2a5SSasha Neftin {
65349513d2a5SSasha Neftin 	struct pci_dev *pdev = to_pci_dev(dev);
65359513d2a5SSasha Neftin 	struct net_device *netdev = pci_get_drvdata(pdev);
65369513d2a5SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
65379513d2a5SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
65389513d2a5SSasha Neftin 	u32 val;
	int err;
65399513d2a5SSasha Neftin 
65409513d2a5SSasha Neftin 	pci_set_power_state(pdev, PCI_D0);
65419513d2a5SSasha Neftin 	pci_restore_state(pdev);
65429513d2a5SSasha Neftin 	pci_save_state(pdev);
65439513d2a5SSasha Neftin 
65449513d2a5SSasha Neftin 	if (!pci_device_is_present(pdev))
65459513d2a5SSasha Neftin 		return -ENODEV;
65469513d2a5SSasha Neftin 	err = pci_enable_device_mem(pdev);
65479513d2a5SSasha Neftin 	if (err) {
654825f06effSAndre Guedes 		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
65499513d2a5SSasha Neftin 		return err;
65509513d2a5SSasha Neftin 	}
65519513d2a5SSasha Neftin 	pci_set_master(pdev);
65529513d2a5SSasha Neftin 
65539513d2a5SSasha Neftin 	pci_enable_wake(pdev, PCI_D3hot, 0);
65549513d2a5SSasha Neftin 	pci_enable_wake(pdev, PCI_D3cold, 0);
65559513d2a5SSasha Neftin 
65569513d2a5SSasha Neftin 	if (igc_init_interrupt_scheme(adapter, true)) {
655725f06effSAndre Guedes 		netdev_err(netdev, "Unable to allocate memory for queues\n");
65589513d2a5SSasha Neftin 		return -ENOMEM;
65599513d2a5SSasha Neftin 	}
65609513d2a5SSasha Neftin 
65619513d2a5SSasha Neftin 	igc_reset(adapter);
65629513d2a5SSasha Neftin 
65639513d2a5SSasha Neftin 	/* let the f/w know that the h/w is now under the control of the
65649513d2a5SSasha Neftin 	 * driver.
65659513d2a5SSasha Neftin 	 */
65669513d2a5SSasha Neftin 	igc_get_hw_control(adapter);
65679513d2a5SSasha Neftin 
65689513d2a5SSasha Neftin 	val = rd32(IGC_WUS);
65699513d2a5SSasha Neftin 	if (val & WAKE_PKT_WUS)
65709513d2a5SSasha Neftin 		igc_deliver_wake_packet(netdev);
65719513d2a5SSasha Neftin 
65729513d2a5SSasha Neftin 	wr32(IGC_WUS, ~0);
65739513d2a5SSasha Neftin 
65749513d2a5SSasha Neftin 	rtnl_lock();
65759513d2a5SSasha Neftin 	if (!err && netif_running(netdev))
65769513d2a5SSasha Neftin 		err = __igc_open(netdev, true);
65779513d2a5SSasha Neftin 
65789513d2a5SSasha Neftin 	if (!err)
65799513d2a5SSasha Neftin 		netif_device_attach(netdev);
65809513d2a5SSasha Neftin 	rtnl_unlock();
65819513d2a5SSasha Neftin 
65829513d2a5SSasha Neftin 	return err;
65839513d2a5SSasha Neftin }
65849513d2a5SSasha Neftin 
65859513d2a5SSasha Neftin static int __maybe_unused igc_runtime_resume(struct device *dev)
65869513d2a5SSasha Neftin {
65879513d2a5SSasha Neftin 	return igc_resume(dev);
65889513d2a5SSasha Neftin }
65899513d2a5SSasha Neftin 
65909513d2a5SSasha Neftin static int __maybe_unused igc_suspend(struct device *dev)
65919513d2a5SSasha Neftin {
65929513d2a5SSasha Neftin 	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
65939513d2a5SSasha Neftin }
65949513d2a5SSasha Neftin 
65959513d2a5SSasha Neftin static int __maybe_unused igc_runtime_idle(struct device *dev)
65969513d2a5SSasha Neftin {
65979513d2a5SSasha Neftin 	struct net_device *netdev = dev_get_drvdata(dev);
65989513d2a5SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
65999513d2a5SSasha Neftin 
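	/* If the link is down, schedule a delayed runtime suspend; returning
	 * -EBUSY tells the PM core not to suspend the device right away.
	 */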
66009513d2a5SSasha Neftin 	if (!igc_has_link(adapter))
66019513d2a5SSasha Neftin 		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
66029513d2a5SSasha Neftin 
66039513d2a5SSasha Neftin 	return -EBUSY;
66049513d2a5SSasha Neftin }
66059513d2a5SSasha Neftin #endif /* CONFIG_PM */
66069513d2a5SSasha Neftin 
66079513d2a5SSasha Neftin static void igc_shutdown(struct pci_dev *pdev)
66089513d2a5SSasha Neftin {
66099513d2a5SSasha Neftin 	bool wake;
66109513d2a5SSasha Neftin 
66119513d2a5SSasha Neftin 	__igc_shutdown(pdev, &wake, 0);
66129513d2a5SSasha Neftin 
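	/* When the system is actually powering off, arm (or disarm) PME from
	 * D3 according to the wake decision made in __igc_shutdown() and put
	 * the device into D3hot.
	 */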
66139513d2a5SSasha Neftin 	if (system_state == SYSTEM_POWER_OFF) {
66149513d2a5SSasha Neftin 		pci_wake_from_d3(pdev, wake);
66159513d2a5SSasha Neftin 		pci_set_power_state(pdev, PCI_D3hot);
66169513d2a5SSasha Neftin 	}
66179513d2a5SSasha Neftin }
66189513d2a5SSasha Neftin 
6619bc23aa94SSasha Neftin /**
6620bc23aa94SSasha Neftin  *  igc_io_error_detected - called when PCI error is detected
6621bc23aa94SSasha Neftin  *  @pdev: Pointer to PCI device
6622bc23aa94SSasha Neftin  *  @state: The current PCI connection state
6623bc23aa94SSasha Neftin  *
6624bc23aa94SSasha Neftin  *  This function is called after a PCI bus error affecting
6625bc23aa94SSasha Neftin  *  this device has been detected.
6626bc23aa94SSasha Neftin  **/
6627bc23aa94SSasha Neftin static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
6628bc23aa94SSasha Neftin 					      pci_channel_state_t state)
6629bc23aa94SSasha Neftin {
6630bc23aa94SSasha Neftin 	struct net_device *netdev = pci_get_drvdata(pdev);
6631bc23aa94SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
6632bc23aa94SSasha Neftin 
6633bc23aa94SSasha Neftin 	netif_device_detach(netdev);
6634bc23aa94SSasha Neftin 
6635bc23aa94SSasha Neftin 	if (state == pci_channel_io_perm_failure)
6636bc23aa94SSasha Neftin 		return PCI_ERS_RESULT_DISCONNECT;
6637bc23aa94SSasha Neftin 
6638bc23aa94SSasha Neftin 	if (netif_running(netdev))
6639bc23aa94SSasha Neftin 		igc_down(adapter);
6640bc23aa94SSasha Neftin 	pci_disable_device(pdev);
6641bc23aa94SSasha Neftin 
6642bc23aa94SSasha Neftin 	/* Request a slot reset. */
6643bc23aa94SSasha Neftin 	return PCI_ERS_RESULT_NEED_RESET;
6644bc23aa94SSasha Neftin }
6645bc23aa94SSasha Neftin 
6646bc23aa94SSasha Neftin /**
6647bc23aa94SSasha Neftin  *  igc_io_slot_reset - called after the PCI bus has been reset.
6648bc23aa94SSasha Neftin  *  @pdev: Pointer to PCI device
6649bc23aa94SSasha Neftin  *
6650bc23aa94SSasha Neftin  *  Restart the card from scratch, as if from a cold boot. Implementation
6651bc23aa94SSasha Neftin  *  resembles the first half of the igc_resume routine.
6652bc23aa94SSasha Neftin  **/
6653bc23aa94SSasha Neftin static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
6654bc23aa94SSasha Neftin {
6655bc23aa94SSasha Neftin 	struct net_device *netdev = pci_get_drvdata(pdev);
6656bc23aa94SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
6657bc23aa94SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
6658bc23aa94SSasha Neftin 	pci_ers_result_t result;
6659bc23aa94SSasha Neftin 
6660bc23aa94SSasha Neftin 	if (pci_enable_device_mem(pdev)) {
666125f06effSAndre Guedes 		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
6662bc23aa94SSasha Neftin 		result = PCI_ERS_RESULT_DISCONNECT;
6663bc23aa94SSasha Neftin 	} else {
6664bc23aa94SSasha Neftin 		pci_set_master(pdev);
6665bc23aa94SSasha Neftin 		pci_restore_state(pdev);
6666bc23aa94SSasha Neftin 		pci_save_state(pdev);
6667bc23aa94SSasha Neftin 
6668bc23aa94SSasha Neftin 		pci_enable_wake(pdev, PCI_D3hot, 0);
6669bc23aa94SSasha Neftin 		pci_enable_wake(pdev, PCI_D3cold, 0);
6670bc23aa94SSasha Neftin 
6671bc23aa94SSasha Neftin 		/* In case of PCI error, adapter loses its HW address
6672bc23aa94SSasha Neftin 		 * so we should re-assign it here.
6673bc23aa94SSasha Neftin 		 */
6674bc23aa94SSasha Neftin 		hw->hw_addr = adapter->io_addr;
6675bc23aa94SSasha Neftin 
6676bc23aa94SSasha Neftin 		igc_reset(adapter);
6677bc23aa94SSasha Neftin 		wr32(IGC_WUS, ~0);
6678bc23aa94SSasha Neftin 		result = PCI_ERS_RESULT_RECOVERED;
6679bc23aa94SSasha Neftin 	}
6680bc23aa94SSasha Neftin 
6681bc23aa94SSasha Neftin 	return result;
6682bc23aa94SSasha Neftin }
6683bc23aa94SSasha Neftin 
6684bc23aa94SSasha Neftin /**
6685bc23aa94SSasha Neftin  *  igc_io_resume - called when traffic can start to flow again.
6686bc23aa94SSasha Neftin  *  @pdev: Pointer to PCI device
6687bc23aa94SSasha Neftin  *
6688bc23aa94SSasha Neftin  *  This callback is called when the error recovery driver tells us that
6689bc23aa94SSasha Neftin  *  it's OK to resume normal operation. Implementation resembles the
6690bc23aa94SSasha Neftin  *  second half of the igc_resume routine.
6691bc23aa94SSasha Neftin  */
6692bc23aa94SSasha Neftin static void igc_io_resume(struct pci_dev *pdev)
6693bc23aa94SSasha Neftin {
6694bc23aa94SSasha Neftin 	struct net_device *netdev = pci_get_drvdata(pdev);
6695bc23aa94SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
6696bc23aa94SSasha Neftin 
6697bc23aa94SSasha Neftin 	rtnl_lock();
6698bc23aa94SSasha Neftin 	if (netif_running(netdev)) {
6699bc23aa94SSasha Neftin 		if (igc_open(netdev)) {
670025f06effSAndre Guedes 			netdev_err(netdev, "igc_open failed after reset\n");
			/* Don't leak the RTNL lock on the error path */
			rtnl_unlock();
6701bc23aa94SSasha Neftin 			return;
6702bc23aa94SSasha Neftin 		}
6703bc23aa94SSasha Neftin 	}
6704bc23aa94SSasha Neftin 
6705bc23aa94SSasha Neftin 	netif_device_attach(netdev);
6706bc23aa94SSasha Neftin 
6707bc23aa94SSasha Neftin 	/* let the f/w know that the h/w is now under the control of the
6708bc23aa94SSasha Neftin 	 * driver.
6709bc23aa94SSasha Neftin 	 */
6710bc23aa94SSasha Neftin 	igc_get_hw_control(adapter);
6711bc23aa94SSasha Neftin 	rtnl_unlock();
6712bc23aa94SSasha Neftin }
6713bc23aa94SSasha Neftin 
6714bc23aa94SSasha Neftin static const struct pci_error_handlers igc_err_handler = {
6715bc23aa94SSasha Neftin 	.error_detected = igc_io_error_detected,
6716bc23aa94SSasha Neftin 	.slot_reset = igc_io_slot_reset,
6717bc23aa94SSasha Neftin 	.resume = igc_io_resume,
6718bc23aa94SSasha Neftin };
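
/* For reference: on an uncorrectable PCI error, the AER core invokes these
 * callbacks in order: .error_detected to quiesce the device, .slot_reset to
 * reinitialize it after the bus reset, then .resume to restart traffic.
 * The three handlers above intentionally mirror the suspend/resume split.
 */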
6719bc23aa94SSasha Neftin 
67209513d2a5SSasha Neftin #ifdef CONFIG_PM
67219513d2a5SSasha Neftin static const struct dev_pm_ops igc_pm_ops = {
67229513d2a5SSasha Neftin 	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
67239513d2a5SSasha Neftin 	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
67249513d2a5SSasha Neftin 			   igc_runtime_idle)
67259513d2a5SSasha Neftin };
67269513d2a5SSasha Neftin #endif
67279513d2a5SSasha Neftin 
6728d89f8841SSasha Neftin static struct pci_driver igc_driver = {
6729d89f8841SSasha Neftin 	.name     = igc_driver_name,
6730d89f8841SSasha Neftin 	.id_table = igc_pci_tbl,
6731d89f8841SSasha Neftin 	.probe    = igc_probe,
6732d89f8841SSasha Neftin 	.remove   = igc_remove,
67339513d2a5SSasha Neftin #ifdef CONFIG_PM
67349513d2a5SSasha Neftin 	.driver.pm = &igc_pm_ops,
67359513d2a5SSasha Neftin #endif
67369513d2a5SSasha Neftin 	.shutdown = igc_shutdown,
6737bc23aa94SSasha Neftin 	.err_handler = &igc_err_handler,
6738d89f8841SSasha Neftin };
6739d89f8841SSasha Neftin 
6740146740f9SSasha Neftin /**
67418c5ad0daSSasha Neftin  * igc_reinit_queues - reinitialize the adapter's queues
67428c5ad0daSSasha Neftin  * @adapter: pointer to adapter structure
 *
 * Return: 0 on success, negative error code on failure
67438c5ad0daSSasha Neftin  */
67448c5ad0daSSasha Neftin int igc_reinit_queues(struct igc_adapter *adapter)
67458c5ad0daSSasha Neftin {
67468c5ad0daSSasha Neftin 	struct net_device *netdev = adapter->netdev;
67478c5ad0daSSasha Neftin 	int err = 0;
67488c5ad0daSSasha Neftin 
67498c5ad0daSSasha Neftin 	if (netif_running(netdev))
67508c5ad0daSSasha Neftin 		igc_close(netdev);
67518c5ad0daSSasha Neftin 
67528c5ad0daSSasha Neftin 	igc_reset_interrupt_capability(adapter);
67538c5ad0daSSasha Neftin 
67548c5ad0daSSasha Neftin 	if (igc_init_interrupt_scheme(adapter, true)) {
675525f06effSAndre Guedes 		netdev_err(netdev, "Unable to allocate memory for queues\n");
67568c5ad0daSSasha Neftin 		return -ENOMEM;
67578c5ad0daSSasha Neftin 	}
67588c5ad0daSSasha Neftin 
67598c5ad0daSSasha Neftin 	if (netif_running(netdev))
67608c5ad0daSSasha Neftin 		err = igc_open(netdev);
67618c5ad0daSSasha Neftin 
67628c5ad0daSSasha Neftin 	return err;
67638c5ad0daSSasha Neftin }
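
/* Sketch (illustrative only): a typical caller of igc_reinit_queues() is a
 * control path that changes the queue layout, e.g. an ethtool channel-count
 * change. The surrounding helper name is hypothetical.
 */
#if 0
static int igc_example_set_queue_count(struct igc_adapter *adapter,
				       unsigned int count)
{
	adapter->rss_queues = count;

	/* Tear down and rebuild the interrupt scheme and queues */
	return igc_reinit_queues(adapter);
}
#endif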
67648c5ad0daSSasha Neftin 
67658c5ad0daSSasha Neftin /**
6766c0071c7aSSasha Neftin  * igc_get_hw_dev - return device
6767c0071c7aSSasha Neftin  * @hw: pointer to hardware structure
6768c0071c7aSSasha Neftin  *
6769c0071c7aSSasha Neftin  * used by hardware layer to print debugging information
6770c0071c7aSSasha Neftin  */
6771c0071c7aSSasha Neftin struct net_device *igc_get_hw_dev(struct igc_hw *hw)
6772c0071c7aSSasha Neftin {
6773c0071c7aSSasha Neftin 	struct igc_adapter *adapter = hw->back;
6774c0071c7aSSasha Neftin 
6775c0071c7aSSasha Neftin 	return adapter->netdev;
6776c0071c7aSSasha Neftin }
6777c0071c7aSSasha Neftin 
6778fc9df2a0SAndre Guedes static void igc_disable_rx_ring_hw(struct igc_ring *ring)
6779fc9df2a0SAndre Guedes {
6780fc9df2a0SAndre Guedes 	struct igc_hw *hw = &ring->q_vector->adapter->hw;
6781fc9df2a0SAndre Guedes 	u8 idx = ring->reg_idx;
6782fc9df2a0SAndre Guedes 	u32 rxdctl;
6783fc9df2a0SAndre Guedes 
6784fc9df2a0SAndre Guedes 	rxdctl = rd32(IGC_RXDCTL(idx));
6785fc9df2a0SAndre Guedes 	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
6786fc9df2a0SAndre Guedes 	rxdctl |= IGC_RXDCTL_SWFLUSH;
6787fc9df2a0SAndre Guedes 	wr32(IGC_RXDCTL(idx), rxdctl);
6788fc9df2a0SAndre Guedes }
6789fc9df2a0SAndre Guedes 
6790fc9df2a0SAndre Guedes void igc_disable_rx_ring(struct igc_ring *ring)
6791fc9df2a0SAndre Guedes {
6792fc9df2a0SAndre Guedes 	igc_disable_rx_ring_hw(ring);
6793fc9df2a0SAndre Guedes 	igc_clean_rx_ring(ring);
6794fc9df2a0SAndre Guedes }
6795fc9df2a0SAndre Guedes 
6796fc9df2a0SAndre Guedes void igc_enable_rx_ring(struct igc_ring *ring)
6797fc9df2a0SAndre Guedes {
6798fc9df2a0SAndre Guedes 	struct igc_adapter *adapter = ring->q_vector->adapter;
6799fc9df2a0SAndre Guedes 
6800fc9df2a0SAndre Guedes 	igc_configure_rx_ring(adapter, ring);
6801fc9df2a0SAndre Guedes 
6802fc9df2a0SAndre Guedes 	if (ring->xsk_pool)
6803fc9df2a0SAndre Guedes 		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
6804fc9df2a0SAndre Guedes 	else
6805fc9df2a0SAndre Guedes 		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
6806fc9df2a0SAndre Guedes }
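
/* Sketch (illustrative only): how the ring disable/enable helpers are meant
 * to be paired, modeled on the AF_XDP buffer pool setup path. The queue is
 * quiesced, its buffer source swapped, then brought back up. The helper
 * name is hypothetical.
 */
#if 0
static void igc_example_swap_rx_pool(struct igc_adapter *adapter, int qid,
				     struct xsk_buff_pool *pool)
{
	struct igc_ring *ring = adapter->rx_ring[qid];
	bool running = netif_running(adapter->netdev);

	if (running)
		igc_disable_rx_ring(ring);

	ring->xsk_pool = pool;	/* NULL detaches the pool */

	if (running)
		igc_enable_rx_ring(ring);
}
#endif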
6807fc9df2a0SAndre Guedes 
68089acf59a7SAndre Guedes static void igc_disable_tx_ring_hw(struct igc_ring *ring)
68099acf59a7SAndre Guedes {
68109acf59a7SAndre Guedes 	struct igc_hw *hw = &ring->q_vector->adapter->hw;
68119acf59a7SAndre Guedes 	u8 idx = ring->reg_idx;
68129acf59a7SAndre Guedes 	u32 txdctl;
68139acf59a7SAndre Guedes 
68149acf59a7SAndre Guedes 	txdctl = rd32(IGC_TXDCTL(idx));
68159acf59a7SAndre Guedes 	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
68169acf59a7SAndre Guedes 	txdctl |= IGC_TXDCTL_SWFLUSH;
68179acf59a7SAndre Guedes 	wr32(IGC_TXDCTL(idx), txdctl);
68189acf59a7SAndre Guedes }
68199acf59a7SAndre Guedes 
68209acf59a7SAndre Guedes void igc_disable_tx_ring(struct igc_ring *ring)
68219acf59a7SAndre Guedes {
68229acf59a7SAndre Guedes 	igc_disable_tx_ring_hw(ring);
68239acf59a7SAndre Guedes 	igc_clean_tx_ring(ring);
68249acf59a7SAndre Guedes }
68259acf59a7SAndre Guedes 
68269acf59a7SAndre Guedes void igc_enable_tx_ring(struct igc_ring *ring)
68279acf59a7SAndre Guedes {
68289acf59a7SAndre Guedes 	struct igc_adapter *adapter = ring->q_vector->adapter;
68299acf59a7SAndre Guedes 
68309acf59a7SAndre Guedes 	igc_configure_tx_ring(adapter, ring);
68319acf59a7SAndre Guedes }
68329acf59a7SAndre Guedes 
6833c0071c7aSSasha Neftin /**
6834d89f8841SSasha Neftin  * igc_init_module - Driver Registration Routine
6835d89f8841SSasha Neftin  *
6836d89f8841SSasha Neftin  * igc_init_module is the first routine called when the driver is
6837d89f8841SSasha Neftin  * loaded. All it does is register with the PCI subsystem.
6838d89f8841SSasha Neftin  */
6839d89f8841SSasha Neftin static int __init igc_init_module(void)
6840d89f8841SSasha Neftin {
6841d89f8841SSasha Neftin 	int ret;
6842d89f8841SSasha Neftin 
684334a2a3b8SJeff Kirsher 	pr_info("%s\n", igc_driver_string);
6844d89f8841SSasha Neftin 	pr_info("%s\n", igc_copyright);
6845d89f8841SSasha Neftin 
6846d89f8841SSasha Neftin 	ret = pci_register_driver(&igc_driver);
6847d89f8841SSasha Neftin 	return ret;
6848d89f8841SSasha Neftin }
6849d89f8841SSasha Neftin 
6850d89f8841SSasha Neftin module_init(igc_init_module);
6851d89f8841SSasha Neftin 
6852d89f8841SSasha Neftin /**
6853d89f8841SSasha Neftin  * igc_exit_module - Driver Exit Cleanup Routine
6854d89f8841SSasha Neftin  *
6855d89f8841SSasha Neftin  * igc_exit_module is called just before the driver is removed
6856d89f8841SSasha Neftin  * from memory.
6857d89f8841SSasha Neftin  */
6858d89f8841SSasha Neftin static void __exit igc_exit_module(void)
6859d89f8841SSasha Neftin {
6860d89f8841SSasha Neftin 	pci_unregister_driver(&igc_driver);
6861d89f8841SSasha Neftin }
6862d89f8841SSasha Neftin 
6863d89f8841SSasha Neftin module_exit(igc_exit_module);
6864d89f8841SSasha Neftin /* igc_main.c */
6865