1d89f8841SSasha Neftin // SPDX-License-Identifier: GPL-2.0
2d89f8841SSasha Neftin /* Copyright (c)  2018 Intel Corporation */
3d89f8841SSasha Neftin 
4d89f8841SSasha Neftin #include <linux/module.h>
5d89f8841SSasha Neftin #include <linux/types.h>
6c9a11c23SSasha Neftin #include <linux/if_vlan.h>
7c9a11c23SSasha Neftin #include <linux/aer.h>
8d89f8841SSasha Neftin 
9d89f8841SSasha Neftin #include "igc.h"
10d89f8841SSasha Neftin #include "igc_hw.h"
11d89f8841SSasha Neftin 
#define DRV_VERSION	"0.0.1-k"
#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

/* message-level mask module parameter; -1 presumably selects the
 * netif default — TODO confirm where 'debug' is consumed (not visible
 * in this chunk)
 */
static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* driver identification strings; non-static names are referenced from
 * other compilation units of the driver
 */
char igc_driver_name[] = "igc";
char igc_driver_version[] = DRV_VERSION;
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";
29d89f8841SSasha Neftin 
/* per-board info table, indexed by the board_* enumerators used as
 * driver_data in igc_pci_tbl below
 */
static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

/* PCI device IDs this driver binds to; the trailing field selects the
 * matching igc_info_tbl entry
 */
static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
42d89f8841SSasha Neftin 
/* forward declarations for functions defined later in this file */
static void igc_clean_tx_ring(struct igc_ring *tx_ring);
static int igc_sw_init(struct igc_adapter *);
static void igc_configure(struct igc_adapter *adapter);
static void igc_power_down_link(struct igc_adapter *adapter);
static void igc_set_default_mac_filter(struct igc_adapter *adapter);
static void igc_set_rx_mode(struct net_device *netdev);
static void igc_write_itr(struct igc_q_vector *q_vector);
static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix);
static void igc_free_q_vectors(struct igc_adapter *adapter);
static void igc_irq_disable(struct igc_adapter *adapter);
static void igc_irq_enable(struct igc_adapter *adapter);
static void igc_configure_msix(struct igc_adapter *adapter);
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi);

/* latency buckets, presumably for interrupt-moderation (ITR) tuning —
 * the consumer is not visible in this chunk; confirm against the rest
 * of the file
 */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
68c9a11c23SSasha Neftin 
/**
 * igc_reset - reset the hardware into a known good state
 * @adapter: board private structure
 *
 * Resets the MAC via the per-MAC ops and re-runs hardware init.  If the
 * netdev is not running, the link is powered down afterwards.
 */
static void igc_reset(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;

	hw->mac.ops.reset_hw(hw);

	/* init failure is only logged; the reset path continues regardless */
	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (!netif_running(adapter->netdev))
		igc_power_down_link(adapter);
}
82c9a11c23SSasha Neftin 
/**
 * igc_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 *
 * Intentionally empty: PHY/serdes power-up is not implemented yet.
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
}
90c9a11c23SSasha Neftin 
/**
 * igc_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 *
 * Intentionally empty: PHY/serdes power-down is not implemented yet.
 */
static void igc_power_down_link(struct igc_adapter *adapter)
{
}
98c9a11c23SSasha Neftin 
99c9a11c23SSasha Neftin /**
100c9a11c23SSasha Neftin  * igc_release_hw_control - release control of the h/w to f/w
101c9a11c23SSasha Neftin  * @adapter: address of board private structure
102c9a11c23SSasha Neftin  *
103c9a11c23SSasha Neftin  * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
104c9a11c23SSasha Neftin  * For ASF and Pass Through versions of f/w this means that the
105c9a11c23SSasha Neftin  * driver is no longer loaded.
106c9a11c23SSasha Neftin  */
107c9a11c23SSasha Neftin static void igc_release_hw_control(struct igc_adapter *adapter)
108c9a11c23SSasha Neftin {
109c9a11c23SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
110c9a11c23SSasha Neftin 	u32 ctrl_ext;
111c9a11c23SSasha Neftin 
112c9a11c23SSasha Neftin 	/* Let firmware take over control of h/w */
113c9a11c23SSasha Neftin 	ctrl_ext = rd32(IGC_CTRL_EXT);
114c9a11c23SSasha Neftin 	wr32(IGC_CTRL_EXT,
115c9a11c23SSasha Neftin 	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
116c9a11c23SSasha Neftin }
117c9a11c23SSasha Neftin 
118c9a11c23SSasha Neftin /**
119c9a11c23SSasha Neftin  * igc_get_hw_control - get control of the h/w from f/w
120c9a11c23SSasha Neftin  * @adapter: address of board private structure
121c9a11c23SSasha Neftin  *
122c9a11c23SSasha Neftin  * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
123c9a11c23SSasha Neftin  * For ASF and Pass Through versions of f/w this means that
124c9a11c23SSasha Neftin  * the driver is loaded.
125c9a11c23SSasha Neftin  */
126c9a11c23SSasha Neftin static void igc_get_hw_control(struct igc_adapter *adapter)
127c9a11c23SSasha Neftin {
128c9a11c23SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
129c9a11c23SSasha Neftin 	u32 ctrl_ext;
130c9a11c23SSasha Neftin 
131c9a11c23SSasha Neftin 	/* Let firmware know the driver has taken over */
132c9a11c23SSasha Neftin 	ctrl_ext = rd32(IGC_CTRL_EXT);
133c9a11c23SSasha Neftin 	wr32(IGC_CTRL_EXT,
134c9a11c23SSasha Neftin 	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
135c9a11c23SSasha Neftin }
136c9a11c23SSasha Neftin 
137c9a11c23SSasha Neftin /**
13813b5b7fdSSasha Neftin  * igc_free_tx_resources - Free Tx Resources per Queue
13913b5b7fdSSasha Neftin  * @tx_ring: Tx descriptor ring for a specific queue
14013b5b7fdSSasha Neftin  *
14113b5b7fdSSasha Neftin  * Free all transmit software resources
14213b5b7fdSSasha Neftin  */
14313b5b7fdSSasha Neftin static void igc_free_tx_resources(struct igc_ring *tx_ring)
14413b5b7fdSSasha Neftin {
14513b5b7fdSSasha Neftin 	igc_clean_tx_ring(tx_ring);
14613b5b7fdSSasha Neftin 
14713b5b7fdSSasha Neftin 	vfree(tx_ring->tx_buffer_info);
14813b5b7fdSSasha Neftin 	tx_ring->tx_buffer_info = NULL;
14913b5b7fdSSasha Neftin 
15013b5b7fdSSasha Neftin 	/* if not set, then don't free */
15113b5b7fdSSasha Neftin 	if (!tx_ring->desc)
15213b5b7fdSSasha Neftin 		return;
15313b5b7fdSSasha Neftin 
15413b5b7fdSSasha Neftin 	dma_free_coherent(tx_ring->dev, tx_ring->size,
15513b5b7fdSSasha Neftin 			  tx_ring->desc, tx_ring->dma);
15613b5b7fdSSasha Neftin 
15713b5b7fdSSasha Neftin 	tx_ring->desc = NULL;
15813b5b7fdSSasha Neftin }
15913b5b7fdSSasha Neftin 
16013b5b7fdSSasha Neftin /**
16113b5b7fdSSasha Neftin  * igc_free_all_tx_resources - Free Tx Resources for All Queues
16213b5b7fdSSasha Neftin  * @adapter: board private structure
16313b5b7fdSSasha Neftin  *
16413b5b7fdSSasha Neftin  * Free all transmit software resources
16513b5b7fdSSasha Neftin  */
16613b5b7fdSSasha Neftin static void igc_free_all_tx_resources(struct igc_adapter *adapter)
16713b5b7fdSSasha Neftin {
16813b5b7fdSSasha Neftin 	int i;
16913b5b7fdSSasha Neftin 
17013b5b7fdSSasha Neftin 	for (i = 0; i < adapter->num_tx_queues; i++)
17113b5b7fdSSasha Neftin 		igc_free_tx_resources(adapter->tx_ring[i]);
17213b5b7fdSSasha Neftin }
17313b5b7fdSSasha Neftin 
/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 *
 * Walks the ring from next_to_clean up to next_to_use, freeing every
 * pending skb and unmapping its DMA buffers (header plus any paged
 * fragments up to the end-of-packet descriptor), then resets the BQL
 * accounting and the ring indices.
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			/* wrap back to the start of the ring */
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
23413b5b7fdSSasha Neftin 
23513b5b7fdSSasha Neftin /**
2360507ef8aSSasha Neftin  * igc_clean_all_tx_rings - Free Tx Buffers for all queues
2370507ef8aSSasha Neftin  * @adapter: board private structure
2380507ef8aSSasha Neftin  */
2390507ef8aSSasha Neftin static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
2400507ef8aSSasha Neftin {
2410507ef8aSSasha Neftin 	int i;
2420507ef8aSSasha Neftin 
2430507ef8aSSasha Neftin 	for (i = 0; i < adapter->num_tx_queues; i++)
2440507ef8aSSasha Neftin 		if (adapter->tx_ring[i])
2450507ef8aSSasha Neftin 			igc_clean_tx_ring(adapter->tx_ring[i]);
2460507ef8aSSasha Neftin }
2470507ef8aSSasha Neftin 
2480507ef8aSSasha Neftin /**
24913b5b7fdSSasha Neftin  * igc_setup_tx_resources - allocate Tx resources (Descriptors)
25013b5b7fdSSasha Neftin  * @tx_ring: tx descriptor ring (for a specific queue) to setup
25113b5b7fdSSasha Neftin  *
25213b5b7fdSSasha Neftin  * Return 0 on success, negative on failure
25313b5b7fdSSasha Neftin  */
25413b5b7fdSSasha Neftin static int igc_setup_tx_resources(struct igc_ring *tx_ring)
25513b5b7fdSSasha Neftin {
25613b5b7fdSSasha Neftin 	struct device *dev = tx_ring->dev;
25713b5b7fdSSasha Neftin 	int size = 0;
25813b5b7fdSSasha Neftin 
25913b5b7fdSSasha Neftin 	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
26013b5b7fdSSasha Neftin 	tx_ring->tx_buffer_info = vzalloc(size);
26113b5b7fdSSasha Neftin 	if (!tx_ring->tx_buffer_info)
26213b5b7fdSSasha Neftin 		goto err;
26313b5b7fdSSasha Neftin 
26413b5b7fdSSasha Neftin 	/* round up to nearest 4K */
26513b5b7fdSSasha Neftin 	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
26613b5b7fdSSasha Neftin 	tx_ring->size = ALIGN(tx_ring->size, 4096);
26713b5b7fdSSasha Neftin 
26813b5b7fdSSasha Neftin 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
26913b5b7fdSSasha Neftin 					   &tx_ring->dma, GFP_KERNEL);
27013b5b7fdSSasha Neftin 
27113b5b7fdSSasha Neftin 	if (!tx_ring->desc)
27213b5b7fdSSasha Neftin 		goto err;
27313b5b7fdSSasha Neftin 
27413b5b7fdSSasha Neftin 	tx_ring->next_to_use = 0;
27513b5b7fdSSasha Neftin 	tx_ring->next_to_clean = 0;
27613b5b7fdSSasha Neftin 
27713b5b7fdSSasha Neftin 	return 0;
27813b5b7fdSSasha Neftin 
27913b5b7fdSSasha Neftin err:
28013b5b7fdSSasha Neftin 	vfree(tx_ring->tx_buffer_info);
28113b5b7fdSSasha Neftin 	dev_err(dev,
28213b5b7fdSSasha Neftin 		"Unable to allocate memory for the transmit descriptor ring\n");
28313b5b7fdSSasha Neftin 	return -ENOMEM;
28413b5b7fdSSasha Neftin }
28513b5b7fdSSasha Neftin 
28613b5b7fdSSasha Neftin /**
28713b5b7fdSSasha Neftin  * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
28813b5b7fdSSasha Neftin  * @adapter: board private structure
28913b5b7fdSSasha Neftin  *
29013b5b7fdSSasha Neftin  * Return 0 on success, negative on failure
29113b5b7fdSSasha Neftin  */
29213b5b7fdSSasha Neftin static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
29313b5b7fdSSasha Neftin {
29413b5b7fdSSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
29513b5b7fdSSasha Neftin 	int i, err = 0;
29613b5b7fdSSasha Neftin 
29713b5b7fdSSasha Neftin 	for (i = 0; i < adapter->num_tx_queues; i++) {
29813b5b7fdSSasha Neftin 		err = igc_setup_tx_resources(adapter->tx_ring[i]);
29913b5b7fdSSasha Neftin 		if (err) {
30013b5b7fdSSasha Neftin 			dev_err(&pdev->dev,
30113b5b7fdSSasha Neftin 				"Allocation for Tx Queue %u failed\n", i);
30213b5b7fdSSasha Neftin 			for (i--; i >= 0; i--)
30313b5b7fdSSasha Neftin 				igc_free_tx_resources(adapter->tx_ring[i]);
30413b5b7fdSSasha Neftin 			break;
30513b5b7fdSSasha Neftin 		}
30613b5b7fdSSasha Neftin 	}
30713b5b7fdSSasha Neftin 
30813b5b7fdSSasha Neftin 	return err;
30913b5b7fdSSasha Neftin }
31013b5b7fdSSasha Neftin 
/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 *
 * Drops any partially-assembled skb, then walks the ring from
 * next_to_clean to next_to_alloc syncing, unmapping and releasing each
 * receive page, and finally resets the ring indices.
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	/* drop any in-progress packet the ring was assembling */
	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		/* release the page with its remaining reference count */
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
35413b5b7fdSSasha Neftin 
35513b5b7fdSSasha Neftin /**
3560507ef8aSSasha Neftin  * igc_clean_all_rx_rings - Free Rx Buffers for all queues
3570507ef8aSSasha Neftin  * @adapter: board private structure
3580507ef8aSSasha Neftin  */
3590507ef8aSSasha Neftin static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
3600507ef8aSSasha Neftin {
3610507ef8aSSasha Neftin 	int i;
3620507ef8aSSasha Neftin 
3630507ef8aSSasha Neftin 	for (i = 0; i < adapter->num_rx_queues; i++)
3640507ef8aSSasha Neftin 		if (adapter->rx_ring[i])
3650507ef8aSSasha Neftin 			igc_clean_rx_ring(adapter->rx_ring[i]);
3660507ef8aSSasha Neftin }
3670507ef8aSSasha Neftin 
3680507ef8aSSasha Neftin /**
36913b5b7fdSSasha Neftin  * igc_free_rx_resources - Free Rx Resources
37013b5b7fdSSasha Neftin  * @rx_ring: ring to clean the resources from
37113b5b7fdSSasha Neftin  *
37213b5b7fdSSasha Neftin  * Free all receive software resources
37313b5b7fdSSasha Neftin  */
37413b5b7fdSSasha Neftin static void igc_free_rx_resources(struct igc_ring *rx_ring)
37513b5b7fdSSasha Neftin {
37613b5b7fdSSasha Neftin 	igc_clean_rx_ring(rx_ring);
37713b5b7fdSSasha Neftin 
37813b5b7fdSSasha Neftin 	vfree(rx_ring->rx_buffer_info);
37913b5b7fdSSasha Neftin 	rx_ring->rx_buffer_info = NULL;
38013b5b7fdSSasha Neftin 
38113b5b7fdSSasha Neftin 	/* if not set, then don't free */
38213b5b7fdSSasha Neftin 	if (!rx_ring->desc)
38313b5b7fdSSasha Neftin 		return;
38413b5b7fdSSasha Neftin 
38513b5b7fdSSasha Neftin 	dma_free_coherent(rx_ring->dev, rx_ring->size,
38613b5b7fdSSasha Neftin 			  rx_ring->desc, rx_ring->dma);
38713b5b7fdSSasha Neftin 
38813b5b7fdSSasha Neftin 	rx_ring->desc = NULL;
38913b5b7fdSSasha Neftin }
39013b5b7fdSSasha Neftin 
39113b5b7fdSSasha Neftin /**
39213b5b7fdSSasha Neftin  * igc_free_all_rx_resources - Free Rx Resources for All Queues
39313b5b7fdSSasha Neftin  * @adapter: board private structure
39413b5b7fdSSasha Neftin  *
39513b5b7fdSSasha Neftin  * Free all receive software resources
39613b5b7fdSSasha Neftin  */
39713b5b7fdSSasha Neftin static void igc_free_all_rx_resources(struct igc_adapter *adapter)
39813b5b7fdSSasha Neftin {
39913b5b7fdSSasha Neftin 	int i;
40013b5b7fdSSasha Neftin 
40113b5b7fdSSasha Neftin 	for (i = 0; i < adapter->num_rx_queues; i++)
40213b5b7fdSSasha Neftin 		igc_free_rx_resources(adapter->rx_ring[i]);
40313b5b7fdSSasha Neftin }
40413b5b7fdSSasha Neftin 
40513b5b7fdSSasha Neftin /**
40613b5b7fdSSasha Neftin  * igc_setup_rx_resources - allocate Rx resources (Descriptors)
40713b5b7fdSSasha Neftin  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
40813b5b7fdSSasha Neftin  *
40913b5b7fdSSasha Neftin  * Returns 0 on success, negative on failure
41013b5b7fdSSasha Neftin  */
41113b5b7fdSSasha Neftin static int igc_setup_rx_resources(struct igc_ring *rx_ring)
41213b5b7fdSSasha Neftin {
41313b5b7fdSSasha Neftin 	struct device *dev = rx_ring->dev;
41413b5b7fdSSasha Neftin 	int size, desc_len;
41513b5b7fdSSasha Neftin 
41613b5b7fdSSasha Neftin 	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
41713b5b7fdSSasha Neftin 	rx_ring->rx_buffer_info = vzalloc(size);
41813b5b7fdSSasha Neftin 	if (!rx_ring->rx_buffer_info)
41913b5b7fdSSasha Neftin 		goto err;
42013b5b7fdSSasha Neftin 
42113b5b7fdSSasha Neftin 	desc_len = sizeof(union igc_adv_rx_desc);
42213b5b7fdSSasha Neftin 
42313b5b7fdSSasha Neftin 	/* Round up to nearest 4K */
42413b5b7fdSSasha Neftin 	rx_ring->size = rx_ring->count * desc_len;
42513b5b7fdSSasha Neftin 	rx_ring->size = ALIGN(rx_ring->size, 4096);
42613b5b7fdSSasha Neftin 
42713b5b7fdSSasha Neftin 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
42813b5b7fdSSasha Neftin 					   &rx_ring->dma, GFP_KERNEL);
42913b5b7fdSSasha Neftin 
43013b5b7fdSSasha Neftin 	if (!rx_ring->desc)
43113b5b7fdSSasha Neftin 		goto err;
43213b5b7fdSSasha Neftin 
43313b5b7fdSSasha Neftin 	rx_ring->next_to_alloc = 0;
43413b5b7fdSSasha Neftin 	rx_ring->next_to_clean = 0;
43513b5b7fdSSasha Neftin 	rx_ring->next_to_use = 0;
43613b5b7fdSSasha Neftin 
43713b5b7fdSSasha Neftin 	return 0;
43813b5b7fdSSasha Neftin 
43913b5b7fdSSasha Neftin err:
44013b5b7fdSSasha Neftin 	vfree(rx_ring->rx_buffer_info);
44113b5b7fdSSasha Neftin 	rx_ring->rx_buffer_info = NULL;
44213b5b7fdSSasha Neftin 	dev_err(dev,
44313b5b7fdSSasha Neftin 		"Unable to allocate memory for the receive descriptor ring\n");
44413b5b7fdSSasha Neftin 	return -ENOMEM;
44513b5b7fdSSasha Neftin }
44613b5b7fdSSasha Neftin 
44713b5b7fdSSasha Neftin /**
44813b5b7fdSSasha Neftin  * igc_setup_all_rx_resources - wrapper to allocate Rx resources
44913b5b7fdSSasha Neftin  *                                (Descriptors) for all queues
45013b5b7fdSSasha Neftin  * @adapter: board private structure
45113b5b7fdSSasha Neftin  *
45213b5b7fdSSasha Neftin  * Return 0 on success, negative on failure
45313b5b7fdSSasha Neftin  */
45413b5b7fdSSasha Neftin static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
45513b5b7fdSSasha Neftin {
45613b5b7fdSSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
45713b5b7fdSSasha Neftin 	int i, err = 0;
45813b5b7fdSSasha Neftin 
45913b5b7fdSSasha Neftin 	for (i = 0; i < adapter->num_rx_queues; i++) {
46013b5b7fdSSasha Neftin 		err = igc_setup_rx_resources(adapter->rx_ring[i]);
46113b5b7fdSSasha Neftin 		if (err) {
46213b5b7fdSSasha Neftin 			dev_err(&pdev->dev,
46313b5b7fdSSasha Neftin 				"Allocation for Rx Queue %u failed\n", i);
46413b5b7fdSSasha Neftin 			for (i--; i >= 0; i--)
46513b5b7fdSSasha Neftin 				igc_free_rx_resources(adapter->rx_ring[i]);
46613b5b7fdSSasha Neftin 			break;
46713b5b7fdSSasha Neftin 		}
46813b5b7fdSSasha Neftin 	}
46913b5b7fdSSasha Neftin 
47013b5b7fdSSasha Neftin 	return err;
47113b5b7fdSSasha Neftin }
47213b5b7fdSSasha Neftin 
/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset: program the DMA base
 * address and length, reset head/tail, set descriptor/buffer sizing in
 * SRRCTL, and finally enable the queue via RXDCTL.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to- use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* set descriptor configuration: header size plus a packet buffer
	 * size that matches the 3K/2K buffers used by the ring
	 */
	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	/* prefetch/host/write-back thresholds in RXDCTL byte fields */
	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}
53513b5b7fdSSasha Neftin 
53613b5b7fdSSasha Neftin /**
53713b5b7fdSSasha Neftin  * igc_configure_rx - Configure receive Unit after Reset
53813b5b7fdSSasha Neftin  * @adapter: board private structure
53913b5b7fdSSasha Neftin  *
54013b5b7fdSSasha Neftin  * Configure the Rx unit of the MAC after a reset.
54113b5b7fdSSasha Neftin  */
54213b5b7fdSSasha Neftin static void igc_configure_rx(struct igc_adapter *adapter)
54313b5b7fdSSasha Neftin {
54413b5b7fdSSasha Neftin 	int i;
54513b5b7fdSSasha Neftin 
54613b5b7fdSSasha Neftin 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
54713b5b7fdSSasha Neftin 	 * the Base and Length of the Rx Descriptor Ring
54813b5b7fdSSasha Neftin 	 */
54913b5b7fdSSasha Neftin 	for (i = 0; i < adapter->num_rx_queues; i++)
55013b5b7fdSSasha Neftin 		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
55113b5b7fdSSasha Neftin }
55213b5b7fdSSasha Neftin 
/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset: disable the queue, program
 * the DMA base address and length, reset head/tail, then enable the
 * queue with the threshold settings in TXDCTL.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	/* pause after disabling — presumably to let in-flight descriptor
	 * activity quiesce; confirm the required delay against the datasheet
	 */
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	/* reset head and tail; tail is written through the mapped register */
	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	/* prefetch/host/write-back thresholds in TXDCTL byte fields */
	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}
59013b5b7fdSSasha Neftin 
59113b5b7fdSSasha Neftin /**
59213b5b7fdSSasha Neftin  * igc_configure_tx - Configure transmit Unit after Reset
59313b5b7fdSSasha Neftin  * @adapter: board private structure
59413b5b7fdSSasha Neftin  *
59513b5b7fdSSasha Neftin  * Configure the Tx unit of the MAC after a reset.
59613b5b7fdSSasha Neftin  */
59713b5b7fdSSasha Neftin static void igc_configure_tx(struct igc_adapter *adapter)
59813b5b7fdSSasha Neftin {
59913b5b7fdSSasha Neftin 	int i;
60013b5b7fdSSasha Neftin 
60113b5b7fdSSasha Neftin 	for (i = 0; i < adapter->num_tx_queues; i++)
60213b5b7fdSSasha Neftin 		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
60313b5b7fdSSasha Neftin }
60413b5b7fdSSasha Neftin 
/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 *
 * Intentionally empty: multi-queue/RSS configuration is not implemented yet.
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
}
61213b5b7fdSSasha Neftin 
/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 *
 * Programs RCTL: enables reception with broadcast accept and half-ring
 * RDMTS, strips CRC, allows jumbo frames, and, when NETIF_F_RXALL is
 * set, accepts bad/control packets for sniffing.
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	/* clear loopback and multicast-offset bits before reprogramming */
	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}
65913b5b7fdSSasha Neftin 
66013b5b7fdSSasha Neftin /**
66113b5b7fdSSasha Neftin  * igc_setup_tctl - configure the transmit control registers
66213b5b7fdSSasha Neftin  * @adapter: Board private structure
66313b5b7fdSSasha Neftin  */
66413b5b7fdSSasha Neftin static void igc_setup_tctl(struct igc_adapter *adapter)
66513b5b7fdSSasha Neftin {
66613b5b7fdSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
66713b5b7fdSSasha Neftin 	u32 tctl;
66813b5b7fdSSasha Neftin 
66913b5b7fdSSasha Neftin 	/* disable queue 0 which icould be enabled by default */
67013b5b7fdSSasha Neftin 	wr32(IGC_TXDCTL(0), 0);
67113b5b7fdSSasha Neftin 
67213b5b7fdSSasha Neftin 	/* Program the Transmit Control Register */
67313b5b7fdSSasha Neftin 	tctl = rd32(IGC_TCTL);
67413b5b7fdSSasha Neftin 	tctl &= ~IGC_TCTL_CT;
67513b5b7fdSSasha Neftin 	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
67613b5b7fdSSasha Neftin 		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
67713b5b7fdSSasha Neftin 
67813b5b7fdSSasha Neftin 	/* Enable transmits */
67913b5b7fdSSasha Neftin 	tctl |= IGC_TCTL_EN;
68013b5b7fdSSasha Neftin 
68113b5b7fdSSasha Neftin 	wr32(IGC_TCTL, tctl);
68213b5b7fdSSasha Neftin }
68313b5b7fdSSasha Neftin 
68413b5b7fdSSasha Neftin /**
685c9a11c23SSasha Neftin  * igc_set_mac - Change the Ethernet Address of the NIC
686c9a11c23SSasha Neftin  * @netdev: network interface device structure
687c9a11c23SSasha Neftin  * @p: pointer to an address structure
688c9a11c23SSasha Neftin  *
689c9a11c23SSasha Neftin  * Returns 0 on success, negative on failure
690c9a11c23SSasha Neftin  */
691c9a11c23SSasha Neftin static int igc_set_mac(struct net_device *netdev, void *p)
692c9a11c23SSasha Neftin {
693c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
694c9a11c23SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
695c9a11c23SSasha Neftin 	struct sockaddr *addr = p;
696c9a11c23SSasha Neftin 
697c9a11c23SSasha Neftin 	if (!is_valid_ether_addr(addr->sa_data))
698c9a11c23SSasha Neftin 		return -EADDRNOTAVAIL;
699c9a11c23SSasha Neftin 
700c9a11c23SSasha Neftin 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
701c9a11c23SSasha Neftin 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
702c9a11c23SSasha Neftin 
703c9a11c23SSasha Neftin 	/* set the correct pool for the new PF MAC address in entry 0 */
704c9a11c23SSasha Neftin 	igc_set_default_mac_filter(adapter);
705c9a11c23SSasha Neftin 
706c9a11c23SSasha Neftin 	return 0;
707c9a11c23SSasha Neftin }
708c9a11c23SSasha Neftin 
7090507ef8aSSasha Neftin static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
710c9a11c23SSasha Neftin {
7110507ef8aSSasha Neftin }
7120507ef8aSSasha Neftin 
7130507ef8aSSasha Neftin static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
7140507ef8aSSasha Neftin {
7150507ef8aSSasha Neftin 	struct net_device *netdev = tx_ring->netdev;
7160507ef8aSSasha Neftin 
7170507ef8aSSasha Neftin 	netif_stop_subqueue(netdev, tx_ring->queue_index);
7180507ef8aSSasha Neftin 
7190507ef8aSSasha Neftin 	/* memory barriier comment */
7200507ef8aSSasha Neftin 	smp_mb();
7210507ef8aSSasha Neftin 
7220507ef8aSSasha Neftin 	/* We need to check again in a case another CPU has just
7230507ef8aSSasha Neftin 	 * made room available.
7240507ef8aSSasha Neftin 	 */
7250507ef8aSSasha Neftin 	if (igc_desc_unused(tx_ring) < size)
7260507ef8aSSasha Neftin 		return -EBUSY;
7270507ef8aSSasha Neftin 
7280507ef8aSSasha Neftin 	/* A reprieve! */
7290507ef8aSSasha Neftin 	netif_wake_subqueue(netdev, tx_ring->queue_index);
7300507ef8aSSasha Neftin 
7310507ef8aSSasha Neftin 	u64_stats_update_begin(&tx_ring->tx_syncp2);
7320507ef8aSSasha Neftin 	tx_ring->tx_stats.restart_queue2++;
7330507ef8aSSasha Neftin 	u64_stats_update_end(&tx_ring->tx_syncp2);
7340507ef8aSSasha Neftin 
7350507ef8aSSasha Neftin 	return 0;
7360507ef8aSSasha Neftin }
7370507ef8aSSasha Neftin 
7380507ef8aSSasha Neftin static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
7390507ef8aSSasha Neftin {
7400507ef8aSSasha Neftin 	if (igc_desc_unused(tx_ring) >= size)
7410507ef8aSSasha Neftin 		return 0;
7420507ef8aSSasha Neftin 	return __igc_maybe_stop_tx(tx_ring, size);
7430507ef8aSSasha Neftin }
7440507ef8aSSasha Neftin 
7450507ef8aSSasha Neftin static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7460507ef8aSSasha Neftin {
7470507ef8aSSasha Neftin 	/* set type for advanced descriptor with frame checksum insertion */
7480507ef8aSSasha Neftin 	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
7490507ef8aSSasha Neftin 		       IGC_ADVTXD_DCMD_DEXT |
7500507ef8aSSasha Neftin 		       IGC_ADVTXD_DCMD_IFCS;
7510507ef8aSSasha Neftin 
7520507ef8aSSasha Neftin 	return cmd_type;
7530507ef8aSSasha Neftin }
7540507ef8aSSasha Neftin 
7550507ef8aSSasha Neftin static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
7560507ef8aSSasha Neftin 				 union igc_adv_tx_desc *tx_desc,
7570507ef8aSSasha Neftin 				 u32 tx_flags, unsigned int paylen)
7580507ef8aSSasha Neftin {
7590507ef8aSSasha Neftin 	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
7600507ef8aSSasha Neftin 
7610507ef8aSSasha Neftin 	/* insert L4 checksum */
7620507ef8aSSasha Neftin 	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
7630507ef8aSSasha Neftin 			  ((IGC_TXD_POPTS_TXSM << 8) /
7640507ef8aSSasha Neftin 			  IGC_TX_FLAGS_CSUM);
7650507ef8aSSasha Neftin 
7660507ef8aSSasha Neftin 	/* insert IPv4 checksum */
7670507ef8aSSasha Neftin 	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
7680507ef8aSSasha Neftin 			  (((IGC_TXD_POPTS_IXSM << 8)) /
7690507ef8aSSasha Neftin 			  IGC_TX_FLAGS_IPV4);
7700507ef8aSSasha Neftin 
7710507ef8aSSasha Neftin 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7720507ef8aSSasha Neftin }
7730507ef8aSSasha Neftin 
7740507ef8aSSasha Neftin static int igc_tx_map(struct igc_ring *tx_ring,
7750507ef8aSSasha Neftin 		      struct igc_tx_buffer *first,
7760507ef8aSSasha Neftin 		      const u8 hdr_len)
7770507ef8aSSasha Neftin {
7780507ef8aSSasha Neftin 	struct sk_buff *skb = first->skb;
7790507ef8aSSasha Neftin 	struct igc_tx_buffer *tx_buffer;
7800507ef8aSSasha Neftin 	union igc_adv_tx_desc *tx_desc;
7810507ef8aSSasha Neftin 	u32 tx_flags = first->tx_flags;
7820507ef8aSSasha Neftin 	struct skb_frag_struct *frag;
7830507ef8aSSasha Neftin 	u16 i = tx_ring->next_to_use;
7840507ef8aSSasha Neftin 	unsigned int data_len, size;
7850507ef8aSSasha Neftin 	dma_addr_t dma;
7860507ef8aSSasha Neftin 	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
7870507ef8aSSasha Neftin 
7880507ef8aSSasha Neftin 	tx_desc = IGC_TX_DESC(tx_ring, i);
7890507ef8aSSasha Neftin 
7900507ef8aSSasha Neftin 	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
7910507ef8aSSasha Neftin 
7920507ef8aSSasha Neftin 	size = skb_headlen(skb);
7930507ef8aSSasha Neftin 	data_len = skb->data_len;
7940507ef8aSSasha Neftin 
7950507ef8aSSasha Neftin 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7960507ef8aSSasha Neftin 
7970507ef8aSSasha Neftin 	tx_buffer = first;
7980507ef8aSSasha Neftin 
7990507ef8aSSasha Neftin 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8000507ef8aSSasha Neftin 		if (dma_mapping_error(tx_ring->dev, dma))
8010507ef8aSSasha Neftin 			goto dma_error;
8020507ef8aSSasha Neftin 
8030507ef8aSSasha Neftin 		/* record length, and DMA address */
8040507ef8aSSasha Neftin 		dma_unmap_len_set(tx_buffer, len, size);
8050507ef8aSSasha Neftin 		dma_unmap_addr_set(tx_buffer, dma, dma);
8060507ef8aSSasha Neftin 
8070507ef8aSSasha Neftin 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
8080507ef8aSSasha Neftin 
8090507ef8aSSasha Neftin 		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
8100507ef8aSSasha Neftin 			tx_desc->read.cmd_type_len =
8110507ef8aSSasha Neftin 				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
8120507ef8aSSasha Neftin 
8130507ef8aSSasha Neftin 			i++;
8140507ef8aSSasha Neftin 			tx_desc++;
8150507ef8aSSasha Neftin 			if (i == tx_ring->count) {
8160507ef8aSSasha Neftin 				tx_desc = IGC_TX_DESC(tx_ring, 0);
8170507ef8aSSasha Neftin 				i = 0;
8180507ef8aSSasha Neftin 			}
8190507ef8aSSasha Neftin 			tx_desc->read.olinfo_status = 0;
8200507ef8aSSasha Neftin 
8210507ef8aSSasha Neftin 			dma += IGC_MAX_DATA_PER_TXD;
8220507ef8aSSasha Neftin 			size -= IGC_MAX_DATA_PER_TXD;
8230507ef8aSSasha Neftin 
8240507ef8aSSasha Neftin 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
8250507ef8aSSasha Neftin 		}
8260507ef8aSSasha Neftin 
8270507ef8aSSasha Neftin 		if (likely(!data_len))
8280507ef8aSSasha Neftin 			break;
8290507ef8aSSasha Neftin 
8300507ef8aSSasha Neftin 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8310507ef8aSSasha Neftin 
8320507ef8aSSasha Neftin 		i++;
8330507ef8aSSasha Neftin 		tx_desc++;
8340507ef8aSSasha Neftin 		if (i == tx_ring->count) {
8350507ef8aSSasha Neftin 			tx_desc = IGC_TX_DESC(tx_ring, 0);
8360507ef8aSSasha Neftin 			i = 0;
8370507ef8aSSasha Neftin 		}
8380507ef8aSSasha Neftin 		tx_desc->read.olinfo_status = 0;
8390507ef8aSSasha Neftin 
8400507ef8aSSasha Neftin 		size = skb_frag_size(frag);
8410507ef8aSSasha Neftin 		data_len -= size;
8420507ef8aSSasha Neftin 
8430507ef8aSSasha Neftin 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
8440507ef8aSSasha Neftin 				       size, DMA_TO_DEVICE);
8450507ef8aSSasha Neftin 
8460507ef8aSSasha Neftin 		tx_buffer = &tx_ring->tx_buffer_info[i];
8470507ef8aSSasha Neftin 	}
8480507ef8aSSasha Neftin 
8490507ef8aSSasha Neftin 	/* write last descriptor with RS and EOP bits */
8500507ef8aSSasha Neftin 	cmd_type |= size | IGC_TXD_DCMD;
8510507ef8aSSasha Neftin 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8520507ef8aSSasha Neftin 
8530507ef8aSSasha Neftin 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8540507ef8aSSasha Neftin 
8550507ef8aSSasha Neftin 	/* set the timestamp */
8560507ef8aSSasha Neftin 	first->time_stamp = jiffies;
8570507ef8aSSasha Neftin 
8580507ef8aSSasha Neftin 	/* Force memory writes to complete before letting h/w know there
8590507ef8aSSasha Neftin 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
8600507ef8aSSasha Neftin 	 * memory model archs, such as IA-64).
8610507ef8aSSasha Neftin 	 *
8620507ef8aSSasha Neftin 	 * We also need this memory barrier to make certain all of the
8630507ef8aSSasha Neftin 	 * status bits have been updated before next_to_watch is written.
8640507ef8aSSasha Neftin 	 */
8650507ef8aSSasha Neftin 	wmb();
8660507ef8aSSasha Neftin 
8670507ef8aSSasha Neftin 	/* set next_to_watch value indicating a packet is present */
8680507ef8aSSasha Neftin 	first->next_to_watch = tx_desc;
8690507ef8aSSasha Neftin 
8700507ef8aSSasha Neftin 	i++;
8710507ef8aSSasha Neftin 	if (i == tx_ring->count)
8720507ef8aSSasha Neftin 		i = 0;
8730507ef8aSSasha Neftin 
8740507ef8aSSasha Neftin 	tx_ring->next_to_use = i;
8750507ef8aSSasha Neftin 
8760507ef8aSSasha Neftin 	/* Make sure there is space in the ring for the next send. */
8770507ef8aSSasha Neftin 	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
8780507ef8aSSasha Neftin 
8790507ef8aSSasha Neftin 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
8800507ef8aSSasha Neftin 		writel(i, tx_ring->tail);
8810507ef8aSSasha Neftin 
8820507ef8aSSasha Neftin 		/* we need this if more than one processor can write to our tail
8830507ef8aSSasha Neftin 		 * at a time, it synchronizes IO on IA64/Altix systems
8840507ef8aSSasha Neftin 		 */
8850507ef8aSSasha Neftin 		mmiowb();
8860507ef8aSSasha Neftin 	}
8870507ef8aSSasha Neftin 
8880507ef8aSSasha Neftin 	return 0;
8890507ef8aSSasha Neftin dma_error:
8900507ef8aSSasha Neftin 	dev_err(tx_ring->dev, "TX DMA map failed\n");
8910507ef8aSSasha Neftin 	tx_buffer = &tx_ring->tx_buffer_info[i];
8920507ef8aSSasha Neftin 
8930507ef8aSSasha Neftin 	/* clear dma mappings for failed tx_buffer_info map */
8940507ef8aSSasha Neftin 	while (tx_buffer != first) {
8950507ef8aSSasha Neftin 		if (dma_unmap_len(tx_buffer, len))
8960507ef8aSSasha Neftin 			dma_unmap_page(tx_ring->dev,
8970507ef8aSSasha Neftin 				       dma_unmap_addr(tx_buffer, dma),
8980507ef8aSSasha Neftin 				       dma_unmap_len(tx_buffer, len),
8990507ef8aSSasha Neftin 				       DMA_TO_DEVICE);
9000507ef8aSSasha Neftin 		dma_unmap_len_set(tx_buffer, len, 0);
9010507ef8aSSasha Neftin 
9020507ef8aSSasha Neftin 		if (i-- == 0)
9030507ef8aSSasha Neftin 			i += tx_ring->count;
9040507ef8aSSasha Neftin 		tx_buffer = &tx_ring->tx_buffer_info[i];
9050507ef8aSSasha Neftin 	}
9060507ef8aSSasha Neftin 
9070507ef8aSSasha Neftin 	if (dma_unmap_len(tx_buffer, len))
9080507ef8aSSasha Neftin 		dma_unmap_single(tx_ring->dev,
9090507ef8aSSasha Neftin 				 dma_unmap_addr(tx_buffer, dma),
9100507ef8aSSasha Neftin 				 dma_unmap_len(tx_buffer, len),
9110507ef8aSSasha Neftin 				 DMA_TO_DEVICE);
9120507ef8aSSasha Neftin 	dma_unmap_len_set(tx_buffer, len, 0);
9130507ef8aSSasha Neftin 
9140507ef8aSSasha Neftin 	dev_kfree_skb_any(tx_buffer->skb);
9150507ef8aSSasha Neftin 	tx_buffer->skb = NULL;
9160507ef8aSSasha Neftin 
9170507ef8aSSasha Neftin 	tx_ring->next_to_use = i;
9180507ef8aSSasha Neftin 
9190507ef8aSSasha Neftin 	return -1;
9200507ef8aSSasha Neftin }
9210507ef8aSSasha Neftin 
9220507ef8aSSasha Neftin static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
9230507ef8aSSasha Neftin 				       struct igc_ring *tx_ring)
9240507ef8aSSasha Neftin {
9250507ef8aSSasha Neftin 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
9260507ef8aSSasha Neftin 	__be16 protocol = vlan_get_protocol(skb);
9270507ef8aSSasha Neftin 	struct igc_tx_buffer *first;
9280507ef8aSSasha Neftin 	u32 tx_flags = 0;
9290507ef8aSSasha Neftin 	unsigned short f;
9300507ef8aSSasha Neftin 	u8 hdr_len = 0;
9310507ef8aSSasha Neftin 
9320507ef8aSSasha Neftin 	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
9330507ef8aSSasha Neftin 	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
9340507ef8aSSasha Neftin 	 *	+ 2 desc gap to keep tail from touching head,
9350507ef8aSSasha Neftin 	 *	+ 1 desc for context descriptor,
9360507ef8aSSasha Neftin 	 * otherwise try next time
9370507ef8aSSasha Neftin 	 */
9380507ef8aSSasha Neftin 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
9390507ef8aSSasha Neftin 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
9400507ef8aSSasha Neftin 
9410507ef8aSSasha Neftin 	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
9420507ef8aSSasha Neftin 		/* this is a hard error */
9430507ef8aSSasha Neftin 		return NETDEV_TX_BUSY;
9440507ef8aSSasha Neftin 	}
9450507ef8aSSasha Neftin 
9460507ef8aSSasha Neftin 	/* record the location of the first descriptor for this packet */
9470507ef8aSSasha Neftin 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
9480507ef8aSSasha Neftin 	first->skb = skb;
9490507ef8aSSasha Neftin 	first->bytecount = skb->len;
9500507ef8aSSasha Neftin 	first->gso_segs = 1;
9510507ef8aSSasha Neftin 
9520507ef8aSSasha Neftin 	skb_tx_timestamp(skb);
9530507ef8aSSasha Neftin 
9540507ef8aSSasha Neftin 	/* record initial flags and protocol */
9550507ef8aSSasha Neftin 	first->tx_flags = tx_flags;
9560507ef8aSSasha Neftin 	first->protocol = protocol;
9570507ef8aSSasha Neftin 
9580507ef8aSSasha Neftin 	igc_tx_csum(tx_ring, first);
9590507ef8aSSasha Neftin 
9600507ef8aSSasha Neftin 	igc_tx_map(tx_ring, first, hdr_len);
9610507ef8aSSasha Neftin 
962c9a11c23SSasha Neftin 	return NETDEV_TX_OK;
963c9a11c23SSasha Neftin }
964c9a11c23SSasha Neftin 
9650507ef8aSSasha Neftin static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
9660507ef8aSSasha Neftin 						    struct sk_buff *skb)
96713b5b7fdSSasha Neftin {
9680507ef8aSSasha Neftin 	unsigned int r_idx = skb->queue_mapping;
9690507ef8aSSasha Neftin 
9700507ef8aSSasha Neftin 	if (r_idx >= adapter->num_tx_queues)
9710507ef8aSSasha Neftin 		r_idx = r_idx % adapter->num_tx_queues;
9720507ef8aSSasha Neftin 
9730507ef8aSSasha Neftin 	return adapter->tx_ring[r_idx];
97413b5b7fdSSasha Neftin }
97513b5b7fdSSasha Neftin 
9760507ef8aSSasha Neftin static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
9770507ef8aSSasha Neftin 				  struct net_device *netdev)
97813b5b7fdSSasha Neftin {
9790507ef8aSSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
98013b5b7fdSSasha Neftin 
9810507ef8aSSasha Neftin 	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
9820507ef8aSSasha Neftin 	 * in order to meet this minimum size requirement.
98313b5b7fdSSasha Neftin 	 */
9840507ef8aSSasha Neftin 	if (skb->len < 17) {
9850507ef8aSSasha Neftin 		if (skb_padto(skb, 17))
9860507ef8aSSasha Neftin 			return NETDEV_TX_OK;
9870507ef8aSSasha Neftin 		skb->len = 17;
9880507ef8aSSasha Neftin 	}
98913b5b7fdSSasha Neftin 
9900507ef8aSSasha Neftin 	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
9910507ef8aSSasha Neftin }
9920507ef8aSSasha Neftin 
9930507ef8aSSasha Neftin static inline void igc_rx_hash(struct igc_ring *ring,
9940507ef8aSSasha Neftin 			       union igc_adv_rx_desc *rx_desc,
9950507ef8aSSasha Neftin 			       struct sk_buff *skb)
9960507ef8aSSasha Neftin {
9970507ef8aSSasha Neftin 	if (ring->netdev->features & NETIF_F_RXHASH)
9980507ef8aSSasha Neftin 		skb_set_hash(skb,
9990507ef8aSSasha Neftin 			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
10000507ef8aSSasha Neftin 			     PKT_HASH_TYPE_L3);
10010507ef8aSSasha Neftin }
10020507ef8aSSasha Neftin 
10030507ef8aSSasha Neftin /**
10040507ef8aSSasha Neftin  * igc_process_skb_fields - Populate skb header fields from Rx descriptor
10050507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring packet is being transacted on
10060507ef8aSSasha Neftin  * @rx_desc: pointer to the EOP Rx descriptor
10070507ef8aSSasha Neftin  * @skb: pointer to current skb being populated
10080507ef8aSSasha Neftin  *
10090507ef8aSSasha Neftin  * This function checks the ring, descriptor, and packet information in
10100507ef8aSSasha Neftin  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
10110507ef8aSSasha Neftin  * other fields within the skb.
10120507ef8aSSasha Neftin  */
10130507ef8aSSasha Neftin static void igc_process_skb_fields(struct igc_ring *rx_ring,
10140507ef8aSSasha Neftin 				   union igc_adv_rx_desc *rx_desc,
10150507ef8aSSasha Neftin 				   struct sk_buff *skb)
10160507ef8aSSasha Neftin {
10170507ef8aSSasha Neftin 	igc_rx_hash(rx_ring, rx_desc, skb);
10180507ef8aSSasha Neftin 
10190507ef8aSSasha Neftin 	skb_record_rx_queue(skb, rx_ring->queue_index);
10200507ef8aSSasha Neftin 
10210507ef8aSSasha Neftin 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
10220507ef8aSSasha Neftin }
10230507ef8aSSasha Neftin 
10240507ef8aSSasha Neftin static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
10250507ef8aSSasha Neftin 					       const unsigned int size)
10260507ef8aSSasha Neftin {
10270507ef8aSSasha Neftin 	struct igc_rx_buffer *rx_buffer;
10280507ef8aSSasha Neftin 
10290507ef8aSSasha Neftin 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
10300507ef8aSSasha Neftin 	prefetchw(rx_buffer->page);
10310507ef8aSSasha Neftin 
10320507ef8aSSasha Neftin 	/* we are reusing so sync this buffer for CPU use */
10330507ef8aSSasha Neftin 	dma_sync_single_range_for_cpu(rx_ring->dev,
10340507ef8aSSasha Neftin 				      rx_buffer->dma,
10350507ef8aSSasha Neftin 				      rx_buffer->page_offset,
10360507ef8aSSasha Neftin 				      size,
10370507ef8aSSasha Neftin 				      DMA_FROM_DEVICE);
10380507ef8aSSasha Neftin 
10390507ef8aSSasha Neftin 	rx_buffer->pagecnt_bias--;
10400507ef8aSSasha Neftin 
10410507ef8aSSasha Neftin 	return rx_buffer;
10420507ef8aSSasha Neftin }
10430507ef8aSSasha Neftin 
10440507ef8aSSasha Neftin /**
10450507ef8aSSasha Neftin  * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
10460507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring to transact packets on
10470507ef8aSSasha Neftin  * @rx_buffer: buffer containing page to add
10480507ef8aSSasha Neftin  * @skb: sk_buff to place the data into
10490507ef8aSSasha Neftin  * @size: size of buffer to be added
10500507ef8aSSasha Neftin  *
10510507ef8aSSasha Neftin  * This function will add the data contained in rx_buffer->page to the skb.
10520507ef8aSSasha Neftin  */
10530507ef8aSSasha Neftin static void igc_add_rx_frag(struct igc_ring *rx_ring,
10540507ef8aSSasha Neftin 			    struct igc_rx_buffer *rx_buffer,
10550507ef8aSSasha Neftin 			    struct sk_buff *skb,
10560507ef8aSSasha Neftin 			    unsigned int size)
10570507ef8aSSasha Neftin {
10580507ef8aSSasha Neftin #if (PAGE_SIZE < 8192)
10590507ef8aSSasha Neftin 	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
10600507ef8aSSasha Neftin 
10610507ef8aSSasha Neftin 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
10620507ef8aSSasha Neftin 			rx_buffer->page_offset, size, truesize);
10630507ef8aSSasha Neftin 	rx_buffer->page_offset ^= truesize;
10640507ef8aSSasha Neftin #else
10650507ef8aSSasha Neftin 	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
10660507ef8aSSasha Neftin 				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
10670507ef8aSSasha Neftin 				SKB_DATA_ALIGN(size);
10680507ef8aSSasha Neftin 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
10690507ef8aSSasha Neftin 			rx_buffer->page_offset, size, truesize);
10700507ef8aSSasha Neftin 	rx_buffer->page_offset += truesize;
10710507ef8aSSasha Neftin #endif
10720507ef8aSSasha Neftin }
10730507ef8aSSasha Neftin 
10740507ef8aSSasha Neftin static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
10750507ef8aSSasha Neftin 				     struct igc_rx_buffer *rx_buffer,
10760507ef8aSSasha Neftin 				     union igc_adv_rx_desc *rx_desc,
10770507ef8aSSasha Neftin 				     unsigned int size)
10780507ef8aSSasha Neftin {
10790507ef8aSSasha Neftin 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
10800507ef8aSSasha Neftin #if (PAGE_SIZE < 8192)
10810507ef8aSSasha Neftin 	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
10820507ef8aSSasha Neftin #else
10830507ef8aSSasha Neftin 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
10840507ef8aSSasha Neftin 				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
10850507ef8aSSasha Neftin #endif
10860507ef8aSSasha Neftin 	struct sk_buff *skb;
10870507ef8aSSasha Neftin 
10880507ef8aSSasha Neftin 	/* prefetch first cache line of first page */
10890507ef8aSSasha Neftin 	prefetch(va);
10900507ef8aSSasha Neftin #if L1_CACHE_BYTES < 128
10910507ef8aSSasha Neftin 	prefetch(va + L1_CACHE_BYTES);
10920507ef8aSSasha Neftin #endif
10930507ef8aSSasha Neftin 
10940507ef8aSSasha Neftin 	/* build an skb around the page buffer */
10950507ef8aSSasha Neftin 	skb = build_skb(va - IGC_SKB_PAD, truesize);
10960507ef8aSSasha Neftin 	if (unlikely(!skb))
10970507ef8aSSasha Neftin 		return NULL;
10980507ef8aSSasha Neftin 
10990507ef8aSSasha Neftin 	/* update pointers within the skb to store the data */
11000507ef8aSSasha Neftin 	skb_reserve(skb, IGC_SKB_PAD);
11010507ef8aSSasha Neftin 	 __skb_put(skb, size);
11020507ef8aSSasha Neftin 
11030507ef8aSSasha Neftin 	/* update buffer offset */
11040507ef8aSSasha Neftin #if (PAGE_SIZE < 8192)
11050507ef8aSSasha Neftin 	rx_buffer->page_offset ^= truesize;
11060507ef8aSSasha Neftin #else
11070507ef8aSSasha Neftin 	rx_buffer->page_offset += truesize;
11080507ef8aSSasha Neftin #endif
11090507ef8aSSasha Neftin 
11100507ef8aSSasha Neftin 	return skb;
11110507ef8aSSasha Neftin }
11120507ef8aSSasha Neftin 
11130507ef8aSSasha Neftin static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
11140507ef8aSSasha Neftin 					 struct igc_rx_buffer *rx_buffer,
11150507ef8aSSasha Neftin 					 union igc_adv_rx_desc *rx_desc,
11160507ef8aSSasha Neftin 					 unsigned int size)
11170507ef8aSSasha Neftin {
11180507ef8aSSasha Neftin 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
11190507ef8aSSasha Neftin #if (PAGE_SIZE < 8192)
11200507ef8aSSasha Neftin 	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
11210507ef8aSSasha Neftin #else
11220507ef8aSSasha Neftin 	unsigned int truesize = SKB_DATA_ALIGN(size);
11230507ef8aSSasha Neftin #endif
11240507ef8aSSasha Neftin 	unsigned int headlen;
11250507ef8aSSasha Neftin 	struct sk_buff *skb;
11260507ef8aSSasha Neftin 
11270507ef8aSSasha Neftin 	/* prefetch first cache line of first page */
11280507ef8aSSasha Neftin 	prefetch(va);
11290507ef8aSSasha Neftin #if L1_CACHE_BYTES < 128
11300507ef8aSSasha Neftin 	prefetch(va + L1_CACHE_BYTES);
11310507ef8aSSasha Neftin #endif
11320507ef8aSSasha Neftin 
11330507ef8aSSasha Neftin 	/* allocate a skb to store the frags */
11340507ef8aSSasha Neftin 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
11350507ef8aSSasha Neftin 	if (unlikely(!skb))
11360507ef8aSSasha Neftin 		return NULL;
11370507ef8aSSasha Neftin 
11380507ef8aSSasha Neftin 	/* Determine available headroom for copy */
11390507ef8aSSasha Neftin 	headlen = size;
11400507ef8aSSasha Neftin 	if (headlen > IGC_RX_HDR_LEN)
11410507ef8aSSasha Neftin 		headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
11420507ef8aSSasha Neftin 
11430507ef8aSSasha Neftin 	/* align pull length to size of long to optimize memcpy performance */
11440507ef8aSSasha Neftin 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
11450507ef8aSSasha Neftin 
11460507ef8aSSasha Neftin 	/* update all of the pointers */
11470507ef8aSSasha Neftin 	size -= headlen;
11480507ef8aSSasha Neftin 	if (size) {
11490507ef8aSSasha Neftin 		skb_add_rx_frag(skb, 0, rx_buffer->page,
11500507ef8aSSasha Neftin 				(va + headlen) - page_address(rx_buffer->page),
11510507ef8aSSasha Neftin 				size, truesize);
11520507ef8aSSasha Neftin #if (PAGE_SIZE < 8192)
11530507ef8aSSasha Neftin 	rx_buffer->page_offset ^= truesize;
11540507ef8aSSasha Neftin #else
11550507ef8aSSasha Neftin 	rx_buffer->page_offset += truesize;
11560507ef8aSSasha Neftin #endif
11570507ef8aSSasha Neftin 	} else {
11580507ef8aSSasha Neftin 		rx_buffer->pagecnt_bias++;
11590507ef8aSSasha Neftin 	}
11600507ef8aSSasha Neftin 
11610507ef8aSSasha Neftin 	return skb;
11620507ef8aSSasha Neftin }
11630507ef8aSSasha Neftin 
11640507ef8aSSasha Neftin /**
11650507ef8aSSasha Neftin  * igc_reuse_rx_page - page flip buffer and store it back on the ring
11660507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring to store buffers on
11670507ef8aSSasha Neftin  * @old_buff: donor buffer to have page reused
11680507ef8aSSasha Neftin  *
11690507ef8aSSasha Neftin  * Synchronizes page for reuse by the adapter
11700507ef8aSSasha Neftin  */
11710507ef8aSSasha Neftin static void igc_reuse_rx_page(struct igc_ring *rx_ring,
11720507ef8aSSasha Neftin 			      struct igc_rx_buffer *old_buff)
11730507ef8aSSasha Neftin {
11740507ef8aSSasha Neftin 	u16 nta = rx_ring->next_to_alloc;
11750507ef8aSSasha Neftin 	struct igc_rx_buffer *new_buff;
11760507ef8aSSasha Neftin 
11770507ef8aSSasha Neftin 	new_buff = &rx_ring->rx_buffer_info[nta];
11780507ef8aSSasha Neftin 
11790507ef8aSSasha Neftin 	/* update, and store next to alloc */
11800507ef8aSSasha Neftin 	nta++;
11810507ef8aSSasha Neftin 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
11820507ef8aSSasha Neftin 
11830507ef8aSSasha Neftin 	/* Transfer page from old buffer to new buffer.
11840507ef8aSSasha Neftin 	 * Move each member individually to avoid possible store
11850507ef8aSSasha Neftin 	 * forwarding stalls.
11860507ef8aSSasha Neftin 	 */
11870507ef8aSSasha Neftin 	new_buff->dma		= old_buff->dma;
11880507ef8aSSasha Neftin 	new_buff->page		= old_buff->page;
11890507ef8aSSasha Neftin 	new_buff->page_offset	= old_buff->page_offset;
11900507ef8aSSasha Neftin 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
11910507ef8aSSasha Neftin }
11920507ef8aSSasha Neftin 
11930507ef8aSSasha Neftin static inline bool igc_page_is_reserved(struct page *page)
11940507ef8aSSasha Neftin {
11950507ef8aSSasha Neftin 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
11960507ef8aSSasha Neftin }
11970507ef8aSSasha Neftin 
11980507ef8aSSasha Neftin static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
11990507ef8aSSasha Neftin {
12000507ef8aSSasha Neftin 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
12010507ef8aSSasha Neftin 	struct page *page = rx_buffer->page;
12020507ef8aSSasha Neftin 
12030507ef8aSSasha Neftin 	/* avoid re-using remote pages */
12040507ef8aSSasha Neftin 	if (unlikely(igc_page_is_reserved(page)))
12050507ef8aSSasha Neftin 		return false;
12060507ef8aSSasha Neftin 
12070507ef8aSSasha Neftin #if (PAGE_SIZE < 8192)
12080507ef8aSSasha Neftin 	/* if we are only owner of page we can reuse it */
12090507ef8aSSasha Neftin 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
12100507ef8aSSasha Neftin 		return false;
12110507ef8aSSasha Neftin #else
12120507ef8aSSasha Neftin #define IGC_LAST_OFFSET \
12130507ef8aSSasha Neftin 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
12140507ef8aSSasha Neftin 
12150507ef8aSSasha Neftin 	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
12160507ef8aSSasha Neftin 		return false;
12170507ef8aSSasha Neftin #endif
12180507ef8aSSasha Neftin 
12190507ef8aSSasha Neftin 	/* If we have drained the page fragment pool we need to update
12200507ef8aSSasha Neftin 	 * the pagecnt_bias and page count so that we fully restock the
12210507ef8aSSasha Neftin 	 * number of references the driver holds.
12220507ef8aSSasha Neftin 	 */
12230507ef8aSSasha Neftin 	if (unlikely(!pagecnt_bias)) {
12240507ef8aSSasha Neftin 		page_ref_add(page, USHRT_MAX);
12250507ef8aSSasha Neftin 		rx_buffer->pagecnt_bias = USHRT_MAX;
12260507ef8aSSasha Neftin 	}
12270507ef8aSSasha Neftin 
12280507ef8aSSasha Neftin 	return true;
12290507ef8aSSasha Neftin }
12300507ef8aSSasha Neftin 
12310507ef8aSSasha Neftin /**
12320507ef8aSSasha Neftin  * igc_is_non_eop - process handling of non-EOP buffers
12330507ef8aSSasha Neftin  * @rx_ring: Rx ring being processed
12340507ef8aSSasha Neftin  * @rx_desc: Rx descriptor for current buffer
12350507ef8aSSasha Neftin  * @skb: current socket buffer containing buffer in progress
12360507ef8aSSasha Neftin  *
12370507ef8aSSasha Neftin  * This function updates next to clean.  If the buffer is an EOP buffer
12380507ef8aSSasha Neftin  * this function exits returning false, otherwise it will place the
12390507ef8aSSasha Neftin  * sk_buff in the next buffer to be chained and return true indicating
12400507ef8aSSasha Neftin  * that this is in fact a non-EOP buffer.
12410507ef8aSSasha Neftin  */
12420507ef8aSSasha Neftin static bool igc_is_non_eop(struct igc_ring *rx_ring,
12430507ef8aSSasha Neftin 			   union igc_adv_rx_desc *rx_desc)
12440507ef8aSSasha Neftin {
12450507ef8aSSasha Neftin 	u32 ntc = rx_ring->next_to_clean + 1;
12460507ef8aSSasha Neftin 
12470507ef8aSSasha Neftin 	/* fetch, update, and store next to clean */
12480507ef8aSSasha Neftin 	ntc = (ntc < rx_ring->count) ? ntc : 0;
12490507ef8aSSasha Neftin 	rx_ring->next_to_clean = ntc;
12500507ef8aSSasha Neftin 
12510507ef8aSSasha Neftin 	prefetch(IGC_RX_DESC(rx_ring, ntc));
12520507ef8aSSasha Neftin 
12530507ef8aSSasha Neftin 	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
12540507ef8aSSasha Neftin 		return false;
12550507ef8aSSasha Neftin 
12560507ef8aSSasha Neftin 	return true;
12570507ef8aSSasha Neftin }
12580507ef8aSSasha Neftin 
12590507ef8aSSasha Neftin /**
12600507ef8aSSasha Neftin  * igc_cleanup_headers - Correct corrupted or empty headers
12610507ef8aSSasha Neftin  * @rx_ring: rx descriptor ring packet is being transacted on
12620507ef8aSSasha Neftin  * @rx_desc: pointer to the EOP Rx descriptor
12630507ef8aSSasha Neftin  * @skb: pointer to current skb being fixed
12640507ef8aSSasha Neftin  *
12650507ef8aSSasha Neftin  * Address the case where we are pulling data in on pages only
12660507ef8aSSasha Neftin  * and as such no data is present in the skb header.
12670507ef8aSSasha Neftin  *
12680507ef8aSSasha Neftin  * In addition if skb is not at least 60 bytes we need to pad it so that
12690507ef8aSSasha Neftin  * it is large enough to qualify as a valid Ethernet frame.
12700507ef8aSSasha Neftin  *
12710507ef8aSSasha Neftin  * Returns true if an error was encountered and skb was freed.
12720507ef8aSSasha Neftin  */
12730507ef8aSSasha Neftin static bool igc_cleanup_headers(struct igc_ring *rx_ring,
12740507ef8aSSasha Neftin 				union igc_adv_rx_desc *rx_desc,
12750507ef8aSSasha Neftin 				struct sk_buff *skb)
12760507ef8aSSasha Neftin {
12770507ef8aSSasha Neftin 	if (unlikely((igc_test_staterr(rx_desc,
12780507ef8aSSasha Neftin 				       IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
12790507ef8aSSasha Neftin 		struct net_device *netdev = rx_ring->netdev;
12800507ef8aSSasha Neftin 
12810507ef8aSSasha Neftin 		if (!(netdev->features & NETIF_F_RXALL)) {
12820507ef8aSSasha Neftin 			dev_kfree_skb_any(skb);
12830507ef8aSSasha Neftin 			return true;
12840507ef8aSSasha Neftin 		}
12850507ef8aSSasha Neftin 	}
12860507ef8aSSasha Neftin 
12870507ef8aSSasha Neftin 	/* if eth_skb_pad returns an error the skb was freed */
12880507ef8aSSasha Neftin 	if (eth_skb_pad(skb))
12890507ef8aSSasha Neftin 		return true;
12900507ef8aSSasha Neftin 
129113b5b7fdSSasha Neftin 	return false;
129213b5b7fdSSasha Neftin }
129313b5b7fdSSasha Neftin 
12940507ef8aSSasha Neftin static void igc_put_rx_buffer(struct igc_ring *rx_ring,
12950507ef8aSSasha Neftin 			      struct igc_rx_buffer *rx_buffer)
12960507ef8aSSasha Neftin {
12970507ef8aSSasha Neftin 	if (igc_can_reuse_rx_page(rx_buffer)) {
12980507ef8aSSasha Neftin 		/* hand second half of page back to the ring */
12990507ef8aSSasha Neftin 		igc_reuse_rx_page(rx_ring, rx_buffer);
13000507ef8aSSasha Neftin 	} else {
13010507ef8aSSasha Neftin 		/* We are not reusing the buffer so unmap it and free
13020507ef8aSSasha Neftin 		 * any references we are holding to it
13030507ef8aSSasha Neftin 		 */
13040507ef8aSSasha Neftin 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
13050507ef8aSSasha Neftin 				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
13060507ef8aSSasha Neftin 				     IGC_RX_DMA_ATTR);
13070507ef8aSSasha Neftin 		__page_frag_cache_drain(rx_buffer->page,
13080507ef8aSSasha Neftin 					rx_buffer->pagecnt_bias);
13090507ef8aSSasha Neftin 	}
131013b5b7fdSSasha Neftin 
13110507ef8aSSasha Neftin 	/* clear contents of rx_buffer */
13120507ef8aSSasha Neftin 	rx_buffer->page = NULL;
131313b5b7fdSSasha Neftin }
131413b5b7fdSSasha Neftin 
131513b5b7fdSSasha Neftin /**
131613b5b7fdSSasha Neftin  * igc_alloc_rx_buffers - Replace used receive buffers; packet split
131713b5b7fdSSasha Neftin  * @adapter: address of board private structure
131813b5b7fdSSasha Neftin  */
131913b5b7fdSSasha Neftin static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
132013b5b7fdSSasha Neftin {
132113b5b7fdSSasha Neftin 	union igc_adv_rx_desc *rx_desc;
132213b5b7fdSSasha Neftin 	u16 i = rx_ring->next_to_use;
132313b5b7fdSSasha Neftin 	struct igc_rx_buffer *bi;
132413b5b7fdSSasha Neftin 	u16 bufsz;
132513b5b7fdSSasha Neftin 
132613b5b7fdSSasha Neftin 	/* nothing to do */
132713b5b7fdSSasha Neftin 	if (!cleaned_count)
132813b5b7fdSSasha Neftin 		return;
132913b5b7fdSSasha Neftin 
133013b5b7fdSSasha Neftin 	rx_desc = IGC_RX_DESC(rx_ring, i);
133113b5b7fdSSasha Neftin 	bi = &rx_ring->rx_buffer_info[i];
133213b5b7fdSSasha Neftin 	i -= rx_ring->count;
133313b5b7fdSSasha Neftin 
133413b5b7fdSSasha Neftin 	bufsz = igc_rx_bufsz(rx_ring);
133513b5b7fdSSasha Neftin 
133613b5b7fdSSasha Neftin 	do {
133713b5b7fdSSasha Neftin 		if (!igc_alloc_mapped_page(rx_ring, bi))
133813b5b7fdSSasha Neftin 			break;
133913b5b7fdSSasha Neftin 
134013b5b7fdSSasha Neftin 		/* sync the buffer for use by the device */
134113b5b7fdSSasha Neftin 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
134213b5b7fdSSasha Neftin 						 bi->page_offset, bufsz,
134313b5b7fdSSasha Neftin 						 DMA_FROM_DEVICE);
134413b5b7fdSSasha Neftin 
134513b5b7fdSSasha Neftin 		/* Refresh the desc even if buffer_addrs didn't change
134613b5b7fdSSasha Neftin 		 * because each write-back erases this info.
134713b5b7fdSSasha Neftin 		 */
134813b5b7fdSSasha Neftin 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
134913b5b7fdSSasha Neftin 
135013b5b7fdSSasha Neftin 		rx_desc++;
135113b5b7fdSSasha Neftin 		bi++;
135213b5b7fdSSasha Neftin 		i++;
135313b5b7fdSSasha Neftin 		if (unlikely(!i)) {
135413b5b7fdSSasha Neftin 			rx_desc = IGC_RX_DESC(rx_ring, 0);
135513b5b7fdSSasha Neftin 			bi = rx_ring->rx_buffer_info;
135613b5b7fdSSasha Neftin 			i -= rx_ring->count;
135713b5b7fdSSasha Neftin 		}
135813b5b7fdSSasha Neftin 
135913b5b7fdSSasha Neftin 		/* clear the length for the next_to_use descriptor */
136013b5b7fdSSasha Neftin 		rx_desc->wb.upper.length = 0;
136113b5b7fdSSasha Neftin 
136213b5b7fdSSasha Neftin 		cleaned_count--;
136313b5b7fdSSasha Neftin 	} while (cleaned_count);
136413b5b7fdSSasha Neftin 
136513b5b7fdSSasha Neftin 	i += rx_ring->count;
136613b5b7fdSSasha Neftin 
136713b5b7fdSSasha Neftin 	if (rx_ring->next_to_use != i) {
136813b5b7fdSSasha Neftin 		/* record the next descriptor to use */
136913b5b7fdSSasha Neftin 		rx_ring->next_to_use = i;
137013b5b7fdSSasha Neftin 
137113b5b7fdSSasha Neftin 		/* update next to alloc since we have filled the ring */
137213b5b7fdSSasha Neftin 		rx_ring->next_to_alloc = i;
137313b5b7fdSSasha Neftin 
137413b5b7fdSSasha Neftin 		/* Force memory writes to complete before letting h/w
137513b5b7fdSSasha Neftin 		 * know there are new descriptors to fetch.  (Only
137613b5b7fdSSasha Neftin 		 * applicable for weak-ordered memory model archs,
137713b5b7fdSSasha Neftin 		 * such as IA-64).
137813b5b7fdSSasha Neftin 		 */
137913b5b7fdSSasha Neftin 		wmb();
138013b5b7fdSSasha Neftin 		writel(i, rx_ring->tail);
138113b5b7fdSSasha Neftin 	}
138213b5b7fdSSasha Neftin }
138313b5b7fdSSasha Neftin 
13840507ef8aSSasha Neftin static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
13850507ef8aSSasha Neftin {
13860507ef8aSSasha Neftin 	unsigned int total_bytes = 0, total_packets = 0;
13870507ef8aSSasha Neftin 	struct igc_ring *rx_ring = q_vector->rx.ring;
13880507ef8aSSasha Neftin 	struct sk_buff *skb = rx_ring->skb;
13890507ef8aSSasha Neftin 	u16 cleaned_count = igc_desc_unused(rx_ring);
13900507ef8aSSasha Neftin 
13910507ef8aSSasha Neftin 	while (likely(total_packets < budget)) {
13920507ef8aSSasha Neftin 		union igc_adv_rx_desc *rx_desc;
13930507ef8aSSasha Neftin 		struct igc_rx_buffer *rx_buffer;
13940507ef8aSSasha Neftin 		unsigned int size;
13950507ef8aSSasha Neftin 
13960507ef8aSSasha Neftin 		/* return some buffers to hardware, one at a time is too slow */
13970507ef8aSSasha Neftin 		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
13980507ef8aSSasha Neftin 			igc_alloc_rx_buffers(rx_ring, cleaned_count);
13990507ef8aSSasha Neftin 			cleaned_count = 0;
14000507ef8aSSasha Neftin 		}
14010507ef8aSSasha Neftin 
14020507ef8aSSasha Neftin 		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
14030507ef8aSSasha Neftin 		size = le16_to_cpu(rx_desc->wb.upper.length);
14040507ef8aSSasha Neftin 		if (!size)
14050507ef8aSSasha Neftin 			break;
14060507ef8aSSasha Neftin 
14070507ef8aSSasha Neftin 		/* This memory barrier is needed to keep us from reading
14080507ef8aSSasha Neftin 		 * any other fields out of the rx_desc until we know the
14090507ef8aSSasha Neftin 		 * descriptor has been written back
14100507ef8aSSasha Neftin 		 */
14110507ef8aSSasha Neftin 		dma_rmb();
14120507ef8aSSasha Neftin 
14130507ef8aSSasha Neftin 		rx_buffer = igc_get_rx_buffer(rx_ring, size);
14140507ef8aSSasha Neftin 
14150507ef8aSSasha Neftin 		/* retrieve a buffer from the ring */
14160507ef8aSSasha Neftin 		if (skb)
14170507ef8aSSasha Neftin 			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
14180507ef8aSSasha Neftin 		else if (ring_uses_build_skb(rx_ring))
14190507ef8aSSasha Neftin 			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
14200507ef8aSSasha Neftin 		else
14210507ef8aSSasha Neftin 			skb = igc_construct_skb(rx_ring, rx_buffer,
14220507ef8aSSasha Neftin 						rx_desc, size);
14230507ef8aSSasha Neftin 
14240507ef8aSSasha Neftin 		/* exit if we failed to retrieve a buffer */
14250507ef8aSSasha Neftin 		if (!skb) {
14260507ef8aSSasha Neftin 			rx_ring->rx_stats.alloc_failed++;
14270507ef8aSSasha Neftin 			rx_buffer->pagecnt_bias++;
14280507ef8aSSasha Neftin 			break;
14290507ef8aSSasha Neftin 		}
14300507ef8aSSasha Neftin 
14310507ef8aSSasha Neftin 		igc_put_rx_buffer(rx_ring, rx_buffer);
14320507ef8aSSasha Neftin 		cleaned_count++;
14330507ef8aSSasha Neftin 
14340507ef8aSSasha Neftin 		/* fetch next buffer in frame if non-eop */
14350507ef8aSSasha Neftin 		if (igc_is_non_eop(rx_ring, rx_desc))
14360507ef8aSSasha Neftin 			continue;
14370507ef8aSSasha Neftin 
14380507ef8aSSasha Neftin 		/* verify the packet layout is correct */
14390507ef8aSSasha Neftin 		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
14400507ef8aSSasha Neftin 			skb = NULL;
14410507ef8aSSasha Neftin 			continue;
14420507ef8aSSasha Neftin 		}
14430507ef8aSSasha Neftin 
14440507ef8aSSasha Neftin 		/* probably a little skewed due to removing CRC */
14450507ef8aSSasha Neftin 		total_bytes += skb->len;
14460507ef8aSSasha Neftin 
14470507ef8aSSasha Neftin 		/* populate checksum, timestamp, VLAN, and protocol */
14480507ef8aSSasha Neftin 		igc_process_skb_fields(rx_ring, rx_desc, skb);
14490507ef8aSSasha Neftin 
14500507ef8aSSasha Neftin 		napi_gro_receive(&q_vector->napi, skb);
14510507ef8aSSasha Neftin 
14520507ef8aSSasha Neftin 		/* reset skb pointer */
14530507ef8aSSasha Neftin 		skb = NULL;
14540507ef8aSSasha Neftin 
14550507ef8aSSasha Neftin 		/* update budget accounting */
14560507ef8aSSasha Neftin 		total_packets++;
14570507ef8aSSasha Neftin 	}
14580507ef8aSSasha Neftin 
14590507ef8aSSasha Neftin 	/* place incomplete frames back on ring for completion */
14600507ef8aSSasha Neftin 	rx_ring->skb = skb;
14610507ef8aSSasha Neftin 
14620507ef8aSSasha Neftin 	u64_stats_update_begin(&rx_ring->rx_syncp);
14630507ef8aSSasha Neftin 	rx_ring->rx_stats.packets += total_packets;
14640507ef8aSSasha Neftin 	rx_ring->rx_stats.bytes += total_bytes;
14650507ef8aSSasha Neftin 	u64_stats_update_end(&rx_ring->rx_syncp);
14660507ef8aSSasha Neftin 	q_vector->rx.total_packets += total_packets;
14670507ef8aSSasha Neftin 	q_vector->rx.total_bytes += total_bytes;
14680507ef8aSSasha Neftin 
14690507ef8aSSasha Neftin 	if (cleaned_count)
14700507ef8aSSasha Neftin 		igc_alloc_rx_buffers(rx_ring, cleaned_count);
14710507ef8aSSasha Neftin 
14720507ef8aSSasha Neftin 	return total_packets;
14730507ef8aSSasha Neftin }
14740507ef8aSSasha Neftin 
14750507ef8aSSasha Neftin static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
14760507ef8aSSasha Neftin {
14770507ef8aSSasha Neftin 	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
14780507ef8aSSasha Neftin }
14790507ef8aSSasha Neftin 
14800507ef8aSSasha Neftin static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
14810507ef8aSSasha Neftin 				  struct igc_rx_buffer *bi)
14820507ef8aSSasha Neftin {
14830507ef8aSSasha Neftin 	struct page *page = bi->page;
14840507ef8aSSasha Neftin 	dma_addr_t dma;
14850507ef8aSSasha Neftin 
14860507ef8aSSasha Neftin 	/* since we are recycling buffers we should seldom need to alloc */
14870507ef8aSSasha Neftin 	if (likely(page))
14880507ef8aSSasha Neftin 		return true;
14890507ef8aSSasha Neftin 
14900507ef8aSSasha Neftin 	/* alloc new page for storage */
14910507ef8aSSasha Neftin 	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
14920507ef8aSSasha Neftin 	if (unlikely(!page)) {
14930507ef8aSSasha Neftin 		rx_ring->rx_stats.alloc_failed++;
14940507ef8aSSasha Neftin 		return false;
14950507ef8aSSasha Neftin 	}
14960507ef8aSSasha Neftin 
14970507ef8aSSasha Neftin 	/* map page for use */
14980507ef8aSSasha Neftin 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
14990507ef8aSSasha Neftin 				 igc_rx_pg_size(rx_ring),
15000507ef8aSSasha Neftin 				 DMA_FROM_DEVICE,
15010507ef8aSSasha Neftin 				 IGC_RX_DMA_ATTR);
15020507ef8aSSasha Neftin 
15030507ef8aSSasha Neftin 	/* if mapping failed free memory back to system since
15040507ef8aSSasha Neftin 	 * there isn't much point in holding memory we can't use
15050507ef8aSSasha Neftin 	 */
15060507ef8aSSasha Neftin 	if (dma_mapping_error(rx_ring->dev, dma)) {
15070507ef8aSSasha Neftin 		__free_page(page);
15080507ef8aSSasha Neftin 
15090507ef8aSSasha Neftin 		rx_ring->rx_stats.alloc_failed++;
15100507ef8aSSasha Neftin 		return false;
15110507ef8aSSasha Neftin 	}
15120507ef8aSSasha Neftin 
15130507ef8aSSasha Neftin 	bi->dma = dma;
15140507ef8aSSasha Neftin 	bi->page = page;
15150507ef8aSSasha Neftin 	bi->page_offset = igc_rx_offset(rx_ring);
15160507ef8aSSasha Neftin 	bi->pagecnt_bias = 1;
15170507ef8aSSasha Neftin 
15180507ef8aSSasha Neftin 	return true;
15190507ef8aSSasha Neftin }
15200507ef8aSSasha Neftin 
15210507ef8aSSasha Neftin /**
15220507ef8aSSasha Neftin  * igc_clean_tx_irq - Reclaim resources after transmit completes
15230507ef8aSSasha Neftin  * @q_vector: pointer to q_vector containing needed info
15240507ef8aSSasha Neftin  * @napi_budget: Used to determine if we are in netpoll
15250507ef8aSSasha Neftin  *
15260507ef8aSSasha Neftin  * returns true if ring is completely cleaned
15270507ef8aSSasha Neftin  */
15280507ef8aSSasha Neftin static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
15290507ef8aSSasha Neftin {
15300507ef8aSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
15310507ef8aSSasha Neftin 	unsigned int total_bytes = 0, total_packets = 0;
15320507ef8aSSasha Neftin 	unsigned int budget = q_vector->tx.work_limit;
15330507ef8aSSasha Neftin 	struct igc_ring *tx_ring = q_vector->tx.ring;
15340507ef8aSSasha Neftin 	unsigned int i = tx_ring->next_to_clean;
15350507ef8aSSasha Neftin 	struct igc_tx_buffer *tx_buffer;
15360507ef8aSSasha Neftin 	union igc_adv_tx_desc *tx_desc;
15370507ef8aSSasha Neftin 
15380507ef8aSSasha Neftin 	if (test_bit(__IGC_DOWN, &adapter->state))
15390507ef8aSSasha Neftin 		return true;
15400507ef8aSSasha Neftin 
15410507ef8aSSasha Neftin 	tx_buffer = &tx_ring->tx_buffer_info[i];
15420507ef8aSSasha Neftin 	tx_desc = IGC_TX_DESC(tx_ring, i);
15430507ef8aSSasha Neftin 	i -= tx_ring->count;
15440507ef8aSSasha Neftin 
15450507ef8aSSasha Neftin 	do {
15460507ef8aSSasha Neftin 		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
15470507ef8aSSasha Neftin 
15480507ef8aSSasha Neftin 		/* if next_to_watch is not set then there is no work pending */
15490507ef8aSSasha Neftin 		if (!eop_desc)
15500507ef8aSSasha Neftin 			break;
15510507ef8aSSasha Neftin 
15520507ef8aSSasha Neftin 		/* prevent any other reads prior to eop_desc */
15530507ef8aSSasha Neftin 		smp_rmb();
15540507ef8aSSasha Neftin 
15550507ef8aSSasha Neftin 		/* if DD is not set pending work has not been completed */
15560507ef8aSSasha Neftin 		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
15570507ef8aSSasha Neftin 			break;
15580507ef8aSSasha Neftin 
15590507ef8aSSasha Neftin 		/* clear next_to_watch to prevent false hangs */
15600507ef8aSSasha Neftin 		tx_buffer->next_to_watch = NULL;
15610507ef8aSSasha Neftin 
15620507ef8aSSasha Neftin 		/* update the statistics for this packet */
15630507ef8aSSasha Neftin 		total_bytes += tx_buffer->bytecount;
15640507ef8aSSasha Neftin 		total_packets += tx_buffer->gso_segs;
15650507ef8aSSasha Neftin 
15660507ef8aSSasha Neftin 		/* free the skb */
15670507ef8aSSasha Neftin 		napi_consume_skb(tx_buffer->skb, napi_budget);
15680507ef8aSSasha Neftin 
15690507ef8aSSasha Neftin 		/* unmap skb header data */
15700507ef8aSSasha Neftin 		dma_unmap_single(tx_ring->dev,
15710507ef8aSSasha Neftin 				 dma_unmap_addr(tx_buffer, dma),
15720507ef8aSSasha Neftin 				 dma_unmap_len(tx_buffer, len),
15730507ef8aSSasha Neftin 				 DMA_TO_DEVICE);
15740507ef8aSSasha Neftin 
15750507ef8aSSasha Neftin 		/* clear tx_buffer data */
15760507ef8aSSasha Neftin 		dma_unmap_len_set(tx_buffer, len, 0);
15770507ef8aSSasha Neftin 
15780507ef8aSSasha Neftin 		/* clear last DMA location and unmap remaining buffers */
15790507ef8aSSasha Neftin 		while (tx_desc != eop_desc) {
15800507ef8aSSasha Neftin 			tx_buffer++;
15810507ef8aSSasha Neftin 			tx_desc++;
15820507ef8aSSasha Neftin 			i++;
15830507ef8aSSasha Neftin 			if (unlikely(!i)) {
15840507ef8aSSasha Neftin 				i -= tx_ring->count;
15850507ef8aSSasha Neftin 				tx_buffer = tx_ring->tx_buffer_info;
15860507ef8aSSasha Neftin 				tx_desc = IGC_TX_DESC(tx_ring, 0);
15870507ef8aSSasha Neftin 			}
15880507ef8aSSasha Neftin 
15890507ef8aSSasha Neftin 			/* unmap any remaining paged data */
15900507ef8aSSasha Neftin 			if (dma_unmap_len(tx_buffer, len)) {
15910507ef8aSSasha Neftin 				dma_unmap_page(tx_ring->dev,
15920507ef8aSSasha Neftin 					       dma_unmap_addr(tx_buffer, dma),
15930507ef8aSSasha Neftin 					       dma_unmap_len(tx_buffer, len),
15940507ef8aSSasha Neftin 					       DMA_TO_DEVICE);
15950507ef8aSSasha Neftin 				dma_unmap_len_set(tx_buffer, len, 0);
15960507ef8aSSasha Neftin 			}
15970507ef8aSSasha Neftin 		}
15980507ef8aSSasha Neftin 
15990507ef8aSSasha Neftin 		/* move us one more past the eop_desc for start of next pkt */
16000507ef8aSSasha Neftin 		tx_buffer++;
16010507ef8aSSasha Neftin 		tx_desc++;
16020507ef8aSSasha Neftin 		i++;
16030507ef8aSSasha Neftin 		if (unlikely(!i)) {
16040507ef8aSSasha Neftin 			i -= tx_ring->count;
16050507ef8aSSasha Neftin 			tx_buffer = tx_ring->tx_buffer_info;
16060507ef8aSSasha Neftin 			tx_desc = IGC_TX_DESC(tx_ring, 0);
16070507ef8aSSasha Neftin 		}
16080507ef8aSSasha Neftin 
16090507ef8aSSasha Neftin 		/* issue prefetch for next Tx descriptor */
16100507ef8aSSasha Neftin 		prefetch(tx_desc);
16110507ef8aSSasha Neftin 
16120507ef8aSSasha Neftin 		/* update budget accounting */
16130507ef8aSSasha Neftin 		budget--;
16140507ef8aSSasha Neftin 	} while (likely(budget));
16150507ef8aSSasha Neftin 
16160507ef8aSSasha Neftin 	netdev_tx_completed_queue(txring_txq(tx_ring),
16170507ef8aSSasha Neftin 				  total_packets, total_bytes);
16180507ef8aSSasha Neftin 
16190507ef8aSSasha Neftin 	i += tx_ring->count;
16200507ef8aSSasha Neftin 	tx_ring->next_to_clean = i;
16210507ef8aSSasha Neftin 	u64_stats_update_begin(&tx_ring->tx_syncp);
16220507ef8aSSasha Neftin 	tx_ring->tx_stats.bytes += total_bytes;
16230507ef8aSSasha Neftin 	tx_ring->tx_stats.packets += total_packets;
16240507ef8aSSasha Neftin 	u64_stats_update_end(&tx_ring->tx_syncp);
16250507ef8aSSasha Neftin 	q_vector->tx.total_bytes += total_bytes;
16260507ef8aSSasha Neftin 	q_vector->tx.total_packets += total_packets;
16270507ef8aSSasha Neftin 
16280507ef8aSSasha Neftin 	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
16290507ef8aSSasha Neftin 		struct igc_hw *hw = &adapter->hw;
16300507ef8aSSasha Neftin 
16310507ef8aSSasha Neftin 		/* Detect a transmit hang in hardware, this serializes the
16320507ef8aSSasha Neftin 		 * check with the clearing of time_stamp and movement of i
16330507ef8aSSasha Neftin 		 */
16340507ef8aSSasha Neftin 		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
16350507ef8aSSasha Neftin 		if (tx_buffer->next_to_watch &&
16360507ef8aSSasha Neftin 		    time_after(jiffies, tx_buffer->time_stamp +
16370507ef8aSSasha Neftin 		    (adapter->tx_timeout_factor * HZ)) &&
16380507ef8aSSasha Neftin 		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
16390507ef8aSSasha Neftin 			/* detected Tx unit hang */
16400507ef8aSSasha Neftin 			dev_err(tx_ring->dev,
16410507ef8aSSasha Neftin 				"Detected Tx Unit Hang\n"
16420507ef8aSSasha Neftin 				"  Tx Queue             <%d>\n"
16430507ef8aSSasha Neftin 				"  TDH                  <%x>\n"
16440507ef8aSSasha Neftin 				"  TDT                  <%x>\n"
16450507ef8aSSasha Neftin 				"  next_to_use          <%x>\n"
16460507ef8aSSasha Neftin 				"  next_to_clean        <%x>\n"
16470507ef8aSSasha Neftin 				"buffer_info[next_to_clean]\n"
16480507ef8aSSasha Neftin 				"  time_stamp           <%lx>\n"
16490507ef8aSSasha Neftin 				"  next_to_watch        <%p>\n"
16500507ef8aSSasha Neftin 				"  jiffies              <%lx>\n"
16510507ef8aSSasha Neftin 				"  desc.status          <%x>\n",
16520507ef8aSSasha Neftin 				tx_ring->queue_index,
16530507ef8aSSasha Neftin 				rd32(IGC_TDH(tx_ring->reg_idx)),
16540507ef8aSSasha Neftin 				readl(tx_ring->tail),
16550507ef8aSSasha Neftin 				tx_ring->next_to_use,
16560507ef8aSSasha Neftin 				tx_ring->next_to_clean,
16570507ef8aSSasha Neftin 				tx_buffer->time_stamp,
16580507ef8aSSasha Neftin 				tx_buffer->next_to_watch,
16590507ef8aSSasha Neftin 				jiffies,
16600507ef8aSSasha Neftin 				tx_buffer->next_to_watch->wb.status);
16610507ef8aSSasha Neftin 				netif_stop_subqueue(tx_ring->netdev,
16620507ef8aSSasha Neftin 						    tx_ring->queue_index);
16630507ef8aSSasha Neftin 
16640507ef8aSSasha Neftin 			/* we are about to reset, no point in enabling stuff */
16650507ef8aSSasha Neftin 			return true;
16660507ef8aSSasha Neftin 		}
16670507ef8aSSasha Neftin 	}
16680507ef8aSSasha Neftin 
16690507ef8aSSasha Neftin #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
16700507ef8aSSasha Neftin 	if (unlikely(total_packets &&
16710507ef8aSSasha Neftin 		     netif_carrier_ok(tx_ring->netdev) &&
16720507ef8aSSasha Neftin 		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
16730507ef8aSSasha Neftin 		/* Make sure that anybody stopping the queue after this
16740507ef8aSSasha Neftin 		 * sees the new next_to_clean.
16750507ef8aSSasha Neftin 		 */
16760507ef8aSSasha Neftin 		smp_mb();
16770507ef8aSSasha Neftin 		if (__netif_subqueue_stopped(tx_ring->netdev,
16780507ef8aSSasha Neftin 					     tx_ring->queue_index) &&
16790507ef8aSSasha Neftin 		    !(test_bit(__IGC_DOWN, &adapter->state))) {
16800507ef8aSSasha Neftin 			netif_wake_subqueue(tx_ring->netdev,
16810507ef8aSSasha Neftin 					    tx_ring->queue_index);
16820507ef8aSSasha Neftin 
16830507ef8aSSasha Neftin 			u64_stats_update_begin(&tx_ring->tx_syncp);
16840507ef8aSSasha Neftin 			tx_ring->tx_stats.restart_queue++;
16850507ef8aSSasha Neftin 			u64_stats_update_end(&tx_ring->tx_syncp);
16860507ef8aSSasha Neftin 		}
16870507ef8aSSasha Neftin 	}
16880507ef8aSSasha Neftin 
16890507ef8aSSasha Neftin 	return !!budget;
16900507ef8aSSasha Neftin }
16910507ef8aSSasha Neftin 
1692c9a11c23SSasha Neftin /**
1693c9a11c23SSasha Neftin  * igc_ioctl - I/O control method
1694c9a11c23SSasha Neftin  * @netdev: network interface device structure
1695c9a11c23SSasha Neftin  * @ifreq: frequency
1696c9a11c23SSasha Neftin  * @cmd: command
1697c9a11c23SSasha Neftin  */
1698c9a11c23SSasha Neftin static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1699c9a11c23SSasha Neftin {
1700c9a11c23SSasha Neftin 	switch (cmd) {
1701c9a11c23SSasha Neftin 	default:
1702c9a11c23SSasha Neftin 		return -EOPNOTSUPP;
1703c9a11c23SSasha Neftin 	}
1704c9a11c23SSasha Neftin }
1705c9a11c23SSasha Neftin 
1706c9a11c23SSasha Neftin /**
1707c9a11c23SSasha Neftin  * igc_up - Open the interface and prepare it to handle traffic
1708c9a11c23SSasha Neftin  * @adapter: board private structure
1709c9a11c23SSasha Neftin  */
1710c9a11c23SSasha Neftin static void igc_up(struct igc_adapter *adapter)
1711c9a11c23SSasha Neftin {
17123df25e4cSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
1713c9a11c23SSasha Neftin 	int i = 0;
1714c9a11c23SSasha Neftin 
1715c9a11c23SSasha Neftin 	/* hardware has been reset, we need to reload some things */
1716c9a11c23SSasha Neftin 	igc_configure(adapter);
1717c9a11c23SSasha Neftin 
1718c9a11c23SSasha Neftin 	clear_bit(__IGC_DOWN, &adapter->state);
1719c9a11c23SSasha Neftin 
1720c9a11c23SSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++)
1721c9a11c23SSasha Neftin 		napi_enable(&adapter->q_vector[i]->napi);
17223df25e4cSSasha Neftin 
17233df25e4cSSasha Neftin 	if (adapter->msix_entries)
17243df25e4cSSasha Neftin 		igc_configure_msix(adapter);
17253df25e4cSSasha Neftin 	else
17263df25e4cSSasha Neftin 		igc_assign_vector(adapter->q_vector[0], 0);
17273df25e4cSSasha Neftin 
17283df25e4cSSasha Neftin 	/* Clear any pending interrupts. */
17293df25e4cSSasha Neftin 	rd32(IGC_ICR);
17303df25e4cSSasha Neftin 	igc_irq_enable(adapter);
173113b5b7fdSSasha Neftin 
173213b5b7fdSSasha Neftin 	netif_tx_start_all_queues(adapter->netdev);
173313b5b7fdSSasha Neftin 
173413b5b7fdSSasha Neftin 	/* start the watchdog. */
173513b5b7fdSSasha Neftin 	hw->mac.get_link_status = 1;
1736c9a11c23SSasha Neftin }
1737c9a11c23SSasha Neftin 
1738c9a11c23SSasha Neftin /**
1739c9a11c23SSasha Neftin  * igc_update_stats - Update the board statistics counters
1740c9a11c23SSasha Neftin  * @adapter: board private structure
1741c9a11c23SSasha Neftin  */
1742c9a11c23SSasha Neftin static void igc_update_stats(struct igc_adapter *adapter)
1743c9a11c23SSasha Neftin {
1744c9a11c23SSasha Neftin }
1745c9a11c23SSasha Neftin 
17460507ef8aSSasha Neftin static void igc_nfc_filter_exit(struct igc_adapter *adapter)
17470507ef8aSSasha Neftin {
17480507ef8aSSasha Neftin }
17490507ef8aSSasha Neftin 
1750c9a11c23SSasha Neftin /**
1751c9a11c23SSasha Neftin  * igc_down - Close the interface
1752c9a11c23SSasha Neftin  * @adapter: board private structure
1753c9a11c23SSasha Neftin  */
1754c9a11c23SSasha Neftin static void igc_down(struct igc_adapter *adapter)
1755c9a11c23SSasha Neftin {
1756c9a11c23SSasha Neftin 	struct net_device *netdev = adapter->netdev;
17570507ef8aSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
17580507ef8aSSasha Neftin 	u32 tctl, rctl;
1759c9a11c23SSasha Neftin 	int i = 0;
1760c9a11c23SSasha Neftin 
1761c9a11c23SSasha Neftin 	set_bit(__IGC_DOWN, &adapter->state);
1762c9a11c23SSasha Neftin 
17630507ef8aSSasha Neftin 	/* disable receives in the hardware */
17640507ef8aSSasha Neftin 	rctl = rd32(IGC_RCTL);
17650507ef8aSSasha Neftin 	wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
17660507ef8aSSasha Neftin 	/* flush and sleep below */
17670507ef8aSSasha Neftin 
17680507ef8aSSasha Neftin 	igc_nfc_filter_exit(adapter);
17690507ef8aSSasha Neftin 
1770c9a11c23SSasha Neftin 	/* set trans_start so we don't get spurious watchdogs during reset */
1771c9a11c23SSasha Neftin 	netif_trans_update(netdev);
1772c9a11c23SSasha Neftin 
1773c9a11c23SSasha Neftin 	netif_carrier_off(netdev);
1774c9a11c23SSasha Neftin 	netif_tx_stop_all_queues(netdev);
1775c9a11c23SSasha Neftin 
17760507ef8aSSasha Neftin 	/* disable transmits in the hardware */
17770507ef8aSSasha Neftin 	tctl = rd32(IGC_TCTL);
17780507ef8aSSasha Neftin 	tctl &= ~IGC_TCTL_EN;
17790507ef8aSSasha Neftin 	wr32(IGC_TCTL, tctl);
17800507ef8aSSasha Neftin 	/* flush both disables and wait for them to finish */
17810507ef8aSSasha Neftin 	wrfl();
17820507ef8aSSasha Neftin 	usleep_range(10000, 20000);
17830507ef8aSSasha Neftin 
17840507ef8aSSasha Neftin 	igc_irq_disable(adapter);
17850507ef8aSSasha Neftin 
17860507ef8aSSasha Neftin 	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
17870507ef8aSSasha Neftin 
17880507ef8aSSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++) {
17890507ef8aSSasha Neftin 		if (adapter->q_vector[i]) {
17900507ef8aSSasha Neftin 			napi_synchronize(&adapter->q_vector[i]->napi);
1791c9a11c23SSasha Neftin 			napi_disable(&adapter->q_vector[i]->napi);
17920507ef8aSSasha Neftin 		}
17930507ef8aSSasha Neftin 	}
17940507ef8aSSasha Neftin 
17950507ef8aSSasha Neftin 	del_timer_sync(&adapter->watchdog_timer);
17960507ef8aSSasha Neftin 	del_timer_sync(&adapter->phy_info_timer);
17970507ef8aSSasha Neftin 
17980507ef8aSSasha Neftin 	/* record the stats before reset*/
17990507ef8aSSasha Neftin 	spin_lock(&adapter->stats64_lock);
18000507ef8aSSasha Neftin 	igc_update_stats(adapter);
18010507ef8aSSasha Neftin 	spin_unlock(&adapter->stats64_lock);
1802c9a11c23SSasha Neftin 
1803c9a11c23SSasha Neftin 	adapter->link_speed = 0;
1804c9a11c23SSasha Neftin 	adapter->link_duplex = 0;
18050507ef8aSSasha Neftin 
18060507ef8aSSasha Neftin 	if (!pci_channel_offline(adapter->pdev))
18070507ef8aSSasha Neftin 		igc_reset(adapter);
18080507ef8aSSasha Neftin 
18090507ef8aSSasha Neftin 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
18100507ef8aSSasha Neftin 	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
18110507ef8aSSasha Neftin 
18120507ef8aSSasha Neftin 	igc_clean_all_tx_rings(adapter);
18130507ef8aSSasha Neftin 	igc_clean_all_rx_rings(adapter);
18140507ef8aSSasha Neftin }
18150507ef8aSSasha Neftin 
18160507ef8aSSasha Neftin static void igc_reinit_locked(struct igc_adapter *adapter)
18170507ef8aSSasha Neftin {
18180507ef8aSSasha Neftin 	WARN_ON(in_interrupt());
18190507ef8aSSasha Neftin 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
18200507ef8aSSasha Neftin 		usleep_range(1000, 2000);
18210507ef8aSSasha Neftin 	igc_down(adapter);
18220507ef8aSSasha Neftin 	igc_up(adapter);
18230507ef8aSSasha Neftin 	clear_bit(__IGC_RESETTING, &adapter->state);
18240507ef8aSSasha Neftin }
18250507ef8aSSasha Neftin 
18260507ef8aSSasha Neftin static void igc_reset_task(struct work_struct *work)
18270507ef8aSSasha Neftin {
18280507ef8aSSasha Neftin 	struct igc_adapter *adapter;
18290507ef8aSSasha Neftin 
18300507ef8aSSasha Neftin 	adapter = container_of(work, struct igc_adapter, reset_task);
18310507ef8aSSasha Neftin 
18320507ef8aSSasha Neftin 	netdev_err(adapter->netdev, "Reset adapter\n");
18330507ef8aSSasha Neftin 	igc_reinit_locked(adapter);
1834c9a11c23SSasha Neftin }
1835c9a11c23SSasha Neftin 
1836c9a11c23SSasha Neftin /**
1837c9a11c23SSasha Neftin  * igc_change_mtu - Change the Maximum Transfer Unit
1838c9a11c23SSasha Neftin  * @netdev: network interface device structure
1839c9a11c23SSasha Neftin  * @new_mtu: new value for maximum frame size
1840c9a11c23SSasha Neftin  *
1841c9a11c23SSasha Neftin  * Returns 0 on success, negative on failure
1842c9a11c23SSasha Neftin  */
1843c9a11c23SSasha Neftin static int igc_change_mtu(struct net_device *netdev, int new_mtu)
1844c9a11c23SSasha Neftin {
1845c9a11c23SSasha Neftin 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1846c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
1847c9a11c23SSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
1848c9a11c23SSasha Neftin 
1849c9a11c23SSasha Neftin 	/* adjust max frame to be at least the size of a standard frame */
1850c9a11c23SSasha Neftin 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
1851c9a11c23SSasha Neftin 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
1852c9a11c23SSasha Neftin 
1853c9a11c23SSasha Neftin 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
1854c9a11c23SSasha Neftin 		usleep_range(1000, 2000);
1855c9a11c23SSasha Neftin 
1856c9a11c23SSasha Neftin 	/* igc_down has a dependency on max_frame_size */
1857c9a11c23SSasha Neftin 	adapter->max_frame_size = max_frame;
1858c9a11c23SSasha Neftin 
1859c9a11c23SSasha Neftin 	if (netif_running(netdev))
1860c9a11c23SSasha Neftin 		igc_down(adapter);
1861c9a11c23SSasha Neftin 
1862c9a11c23SSasha Neftin 	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
1863c9a11c23SSasha Neftin 		 netdev->mtu, new_mtu);
1864c9a11c23SSasha Neftin 	netdev->mtu = new_mtu;
1865c9a11c23SSasha Neftin 
1866c9a11c23SSasha Neftin 	if (netif_running(netdev))
1867c9a11c23SSasha Neftin 		igc_up(adapter);
1868c9a11c23SSasha Neftin 	else
1869c9a11c23SSasha Neftin 		igc_reset(adapter);
1870c9a11c23SSasha Neftin 
1871c9a11c23SSasha Neftin 	clear_bit(__IGC_RESETTING, &adapter->state);
1872c9a11c23SSasha Neftin 
1873c9a11c23SSasha Neftin 	return 0;
1874c9a11c23SSasha Neftin }
1875c9a11c23SSasha Neftin 
1876c9a11c23SSasha Neftin /**
1877c9a11c23SSasha Neftin  * igc_get_stats - Get System Network Statistics
1878c9a11c23SSasha Neftin  * @netdev: network interface device structure
1879c9a11c23SSasha Neftin  *
1880c9a11c23SSasha Neftin  * Returns the address of the device statistics structure.
1881c9a11c23SSasha Neftin  * The statistics are updated here and also from the timer callback.
1882c9a11c23SSasha Neftin  */
1883c9a11c23SSasha Neftin static struct net_device_stats *igc_get_stats(struct net_device *netdev)
1884c9a11c23SSasha Neftin {
1885c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
1886c9a11c23SSasha Neftin 
1887c9a11c23SSasha Neftin 	if (!test_bit(__IGC_RESETTING, &adapter->state))
1888c9a11c23SSasha Neftin 		igc_update_stats(adapter);
1889c9a11c23SSasha Neftin 
1890c9a11c23SSasha Neftin 	/* only return the current stats */
1891c9a11c23SSasha Neftin 	return &netdev->stats;
1892c9a11c23SSasha Neftin }
1893c9a11c23SSasha Neftin 
1894c9a11c23SSasha Neftin /**
1895c9a11c23SSasha Neftin  * igc_configure - configure the hardware for RX and TX
1896c9a11c23SSasha Neftin  * @adapter: private board structure
1897c9a11c23SSasha Neftin  */
1898c9a11c23SSasha Neftin static void igc_configure(struct igc_adapter *adapter)
1899c9a11c23SSasha Neftin {
190013b5b7fdSSasha Neftin 	struct net_device *netdev = adapter->netdev;
190113b5b7fdSSasha Neftin 	int i = 0;
190213b5b7fdSSasha Neftin 
1903c9a11c23SSasha Neftin 	igc_get_hw_control(adapter);
190413b5b7fdSSasha Neftin 	igc_set_rx_mode(netdev);
190513b5b7fdSSasha Neftin 
190613b5b7fdSSasha Neftin 	igc_setup_tctl(adapter);
190713b5b7fdSSasha Neftin 	igc_setup_mrqc(adapter);
190813b5b7fdSSasha Neftin 	igc_setup_rctl(adapter);
190913b5b7fdSSasha Neftin 
191013b5b7fdSSasha Neftin 	igc_configure_tx(adapter);
191113b5b7fdSSasha Neftin 	igc_configure_rx(adapter);
191213b5b7fdSSasha Neftin 
191313b5b7fdSSasha Neftin 	igc_rx_fifo_flush_base(&adapter->hw);
191413b5b7fdSSasha Neftin 
191513b5b7fdSSasha Neftin 	/* call igc_desc_unused which always leaves
191613b5b7fdSSasha Neftin 	 * at least 1 descriptor unused to make sure
191713b5b7fdSSasha Neftin 	 * next_to_use != next_to_clean
191813b5b7fdSSasha Neftin 	 */
191913b5b7fdSSasha Neftin 	for (i = 0; i < adapter->num_rx_queues; i++) {
192013b5b7fdSSasha Neftin 		struct igc_ring *ring = adapter->rx_ring[i];
192113b5b7fdSSasha Neftin 
192213b5b7fdSSasha Neftin 		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
192313b5b7fdSSasha Neftin 	}
1924c9a11c23SSasha Neftin }
1925c9a11c23SSasha Neftin 
1926c9a11c23SSasha Neftin /**
1927c9a11c23SSasha Neftin  * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
1928c9a11c23SSasha Neftin  * @adapter: Pointer to adapter structure
1929c9a11c23SSasha Neftin  * @index: Index of the RAR entry which need to be synced with MAC table
1930c9a11c23SSasha Neftin  */
1931c9a11c23SSasha Neftin static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
1932c9a11c23SSasha Neftin {
1933c9a11c23SSasha Neftin 	u8 *addr = adapter->mac_table[index].addr;
1934c9a11c23SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
1935c9a11c23SSasha Neftin 	u32 rar_low, rar_high;
1936c9a11c23SSasha Neftin 
1937c9a11c23SSasha Neftin 	/* HW expects these to be in network order when they are plugged
1938c9a11c23SSasha Neftin 	 * into the registers which are little endian.  In order to guarantee
1939c9a11c23SSasha Neftin 	 * that ordering we need to do an leXX_to_cpup here in order to be
1940c9a11c23SSasha Neftin 	 * ready for the byteswap that occurs with writel
1941c9a11c23SSasha Neftin 	 */
1942c9a11c23SSasha Neftin 	rar_low = le32_to_cpup((__le32 *)(addr));
1943c9a11c23SSasha Neftin 	rar_high = le16_to_cpup((__le16 *)(addr + 4));
1944c9a11c23SSasha Neftin 
1945c9a11c23SSasha Neftin 	/* Indicate to hardware the Address is Valid. */
1946c9a11c23SSasha Neftin 	if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
1947c9a11c23SSasha Neftin 		if (is_valid_ether_addr(addr))
1948c9a11c23SSasha Neftin 			rar_high |= IGC_RAH_AV;
1949c9a11c23SSasha Neftin 
1950c9a11c23SSasha Neftin 		rar_high |= IGC_RAH_POOL_1 <<
1951c9a11c23SSasha Neftin 			adapter->mac_table[index].queue;
1952c9a11c23SSasha Neftin 	}
1953c9a11c23SSasha Neftin 
1954c9a11c23SSasha Neftin 	wr32(IGC_RAL(index), rar_low);
1955c9a11c23SSasha Neftin 	wrfl();
1956c9a11c23SSasha Neftin 	wr32(IGC_RAH(index), rar_high);
1957c9a11c23SSasha Neftin 	wrfl();
1958c9a11c23SSasha Neftin }
1959c9a11c23SSasha Neftin 
1960c9a11c23SSasha Neftin /* Set default MAC address for the PF in the first RAR entry */
1961c9a11c23SSasha Neftin static void igc_set_default_mac_filter(struct igc_adapter *adapter)
1962c9a11c23SSasha Neftin {
1963c9a11c23SSasha Neftin 	struct igc_mac_addr *mac_table = &adapter->mac_table[0];
1964c9a11c23SSasha Neftin 
1965c9a11c23SSasha Neftin 	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
1966c9a11c23SSasha Neftin 	mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
1967c9a11c23SSasha Neftin 
1968c9a11c23SSasha Neftin 	igc_rar_set_index(adapter, 0);
1969c9a11c23SSasha Neftin }
1970c9a11c23SSasha Neftin 
1971c9a11c23SSasha Neftin /**
197213b5b7fdSSasha Neftin  * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
197313b5b7fdSSasha Neftin  * @netdev: network interface device structure
197413b5b7fdSSasha Neftin  *
197513b5b7fdSSasha Neftin  * The set_rx_mode entry point is called whenever the unicast or multicast
197613b5b7fdSSasha Neftin  * address lists or the network interface flags are updated.  This routine is
197713b5b7fdSSasha Neftin  * responsible for configuring the hardware for proper unicast, multicast,
197813b5b7fdSSasha Neftin  * promiscuous mode, and all-multi behavior.
197913b5b7fdSSasha Neftin  */
198013b5b7fdSSasha Neftin static void igc_set_rx_mode(struct net_device *netdev)
198113b5b7fdSSasha Neftin {
198213b5b7fdSSasha Neftin }
198313b5b7fdSSasha Neftin 
198413b5b7fdSSasha Neftin /**
19853df25e4cSSasha Neftin  * igc_msix_other - msix other interrupt handler
19863df25e4cSSasha Neftin  * @irq: interrupt number
19873df25e4cSSasha Neftin  * @data: pointer to a q_vector
19883df25e4cSSasha Neftin  */
19893df25e4cSSasha Neftin static irqreturn_t igc_msix_other(int irq, void *data)
19903df25e4cSSasha Neftin {
19913df25e4cSSasha Neftin 	struct igc_adapter *adapter = data;
19923df25e4cSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
19933df25e4cSSasha Neftin 	u32 icr = rd32(IGC_ICR);
19943df25e4cSSasha Neftin 
19953df25e4cSSasha Neftin 	/* reading ICR causes bit 31 of EICR to be cleared */
19963df25e4cSSasha Neftin 	if (icr & IGC_ICR_DRSTA)
19973df25e4cSSasha Neftin 		schedule_work(&adapter->reset_task);
19983df25e4cSSasha Neftin 
19993df25e4cSSasha Neftin 	if (icr & IGC_ICR_DOUTSYNC) {
20003df25e4cSSasha Neftin 		/* HW is reporting DMA is out of sync */
20013df25e4cSSasha Neftin 		adapter->stats.doosync++;
20023df25e4cSSasha Neftin 	}
20033df25e4cSSasha Neftin 
20043df25e4cSSasha Neftin 	if (icr & IGC_ICR_LSC) {
20053df25e4cSSasha Neftin 		hw->mac.get_link_status = 1;
20063df25e4cSSasha Neftin 		/* guard against interrupt when we're going down */
20073df25e4cSSasha Neftin 		if (!test_bit(__IGC_DOWN, &adapter->state))
20083df25e4cSSasha Neftin 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
20093df25e4cSSasha Neftin 	}
20103df25e4cSSasha Neftin 
20113df25e4cSSasha Neftin 	wr32(IGC_EIMS, adapter->eims_other);
20123df25e4cSSasha Neftin 
20133df25e4cSSasha Neftin 	return IRQ_HANDLED;
20143df25e4cSSasha Neftin }
20153df25e4cSSasha Neftin 
20163df25e4cSSasha Neftin /**
20173df25e4cSSasha Neftin  * igc_write_ivar - configure ivar for given MSI-X vector
20183df25e4cSSasha Neftin  * @hw: pointer to the HW structure
20193df25e4cSSasha Neftin  * @msix_vector: vector number we are allocating to a given ring
20203df25e4cSSasha Neftin  * @index: row index of IVAR register to write within IVAR table
20213df25e4cSSasha Neftin  * @offset: column offset of in IVAR, should be multiple of 8
20223df25e4cSSasha Neftin  *
20233df25e4cSSasha Neftin  * The IVAR table consists of 2 columns,
20243df25e4cSSasha Neftin  * each containing an cause allocation for an Rx and Tx ring, and a
20253df25e4cSSasha Neftin  * variable number of rows depending on the number of queues supported.
20263df25e4cSSasha Neftin  */
20273df25e4cSSasha Neftin static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
20283df25e4cSSasha Neftin 			   int index, int offset)
20293df25e4cSSasha Neftin {
20303df25e4cSSasha Neftin 	u32 ivar = array_rd32(IGC_IVAR0, index);
20313df25e4cSSasha Neftin 
20323df25e4cSSasha Neftin 	/* clear any bits that are currently set */
20333df25e4cSSasha Neftin 	ivar &= ~((u32)0xFF << offset);
20343df25e4cSSasha Neftin 
20353df25e4cSSasha Neftin 	/* write vector and valid bit */
20363df25e4cSSasha Neftin 	ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
20373df25e4cSSasha Neftin 
20383df25e4cSSasha Neftin 	array_wr32(IGC_IVAR0, index, ivar);
20393df25e4cSSasha Neftin }
20403df25e4cSSasha Neftin 
20413df25e4cSSasha Neftin static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
20423df25e4cSSasha Neftin {
20433df25e4cSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
20443df25e4cSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
20453df25e4cSSasha Neftin 	int rx_queue = IGC_N0_QUEUE;
20463df25e4cSSasha Neftin 	int tx_queue = IGC_N0_QUEUE;
20473df25e4cSSasha Neftin 
20483df25e4cSSasha Neftin 	if (q_vector->rx.ring)
20493df25e4cSSasha Neftin 		rx_queue = q_vector->rx.ring->reg_idx;
20503df25e4cSSasha Neftin 	if (q_vector->tx.ring)
20513df25e4cSSasha Neftin 		tx_queue = q_vector->tx.ring->reg_idx;
20523df25e4cSSasha Neftin 
20533df25e4cSSasha Neftin 	switch (hw->mac.type) {
20543df25e4cSSasha Neftin 	case igc_i225:
20553df25e4cSSasha Neftin 		if (rx_queue > IGC_N0_QUEUE)
20563df25e4cSSasha Neftin 			igc_write_ivar(hw, msix_vector,
20573df25e4cSSasha Neftin 				       rx_queue >> 1,
20583df25e4cSSasha Neftin 				       (rx_queue & 0x1) << 4);
20593df25e4cSSasha Neftin 		if (tx_queue > IGC_N0_QUEUE)
20603df25e4cSSasha Neftin 			igc_write_ivar(hw, msix_vector,
20613df25e4cSSasha Neftin 				       tx_queue >> 1,
20623df25e4cSSasha Neftin 				       ((tx_queue & 0x1) << 4) + 8);
20633df25e4cSSasha Neftin 		q_vector->eims_value = BIT(msix_vector);
20643df25e4cSSasha Neftin 		break;
20653df25e4cSSasha Neftin 	default:
20663df25e4cSSasha Neftin 		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
20673df25e4cSSasha Neftin 		break;
20683df25e4cSSasha Neftin 	}
20693df25e4cSSasha Neftin 
20703df25e4cSSasha Neftin 	/* add q_vector eims value to global eims_enable_mask */
20713df25e4cSSasha Neftin 	adapter->eims_enable_mask |= q_vector->eims_value;
20723df25e4cSSasha Neftin 
20733df25e4cSSasha Neftin 	/* configure q_vector to set itr on first interrupt */
20743df25e4cSSasha Neftin 	q_vector->set_itr = 1;
20753df25e4cSSasha Neftin }
20763df25e4cSSasha Neftin 
20773df25e4cSSasha Neftin /**
20783df25e4cSSasha Neftin  * igc_configure_msix - Configure MSI-X hardware
20793df25e4cSSasha Neftin  * @adapter: Pointer to adapter structure
20803df25e4cSSasha Neftin  *
20813df25e4cSSasha Neftin  * igc_configure_msix sets up the hardware to properly
20823df25e4cSSasha Neftin  * generate MSI-X interrupts.
20833df25e4cSSasha Neftin  */
20843df25e4cSSasha Neftin static void igc_configure_msix(struct igc_adapter *adapter)
20853df25e4cSSasha Neftin {
20863df25e4cSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
20873df25e4cSSasha Neftin 	int i, vector = 0;
20883df25e4cSSasha Neftin 	u32 tmp;
20893df25e4cSSasha Neftin 
20903df25e4cSSasha Neftin 	adapter->eims_enable_mask = 0;
20913df25e4cSSasha Neftin 
20923df25e4cSSasha Neftin 	/* set vector for other causes, i.e. link changes */
20933df25e4cSSasha Neftin 	switch (hw->mac.type) {
20943df25e4cSSasha Neftin 	case igc_i225:
20953df25e4cSSasha Neftin 		/* Turn on MSI-X capability first, or our settings
20963df25e4cSSasha Neftin 		 * won't stick.  And it will take days to debug.
20973df25e4cSSasha Neftin 		 */
20983df25e4cSSasha Neftin 		wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
20993df25e4cSSasha Neftin 		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
21003df25e4cSSasha Neftin 		     IGC_GPIE_NSICR);
21013df25e4cSSasha Neftin 
21023df25e4cSSasha Neftin 		/* enable msix_other interrupt */
21033df25e4cSSasha Neftin 		adapter->eims_other = BIT(vector);
21043df25e4cSSasha Neftin 		tmp = (vector++ | IGC_IVAR_VALID) << 8;
21053df25e4cSSasha Neftin 
21063df25e4cSSasha Neftin 		wr32(IGC_IVAR_MISC, tmp);
21073df25e4cSSasha Neftin 		break;
21083df25e4cSSasha Neftin 	default:
21093df25e4cSSasha Neftin 		/* do nothing, since nothing else supports MSI-X */
21103df25e4cSSasha Neftin 		break;
21113df25e4cSSasha Neftin 	} /* switch (hw->mac.type) */
21123df25e4cSSasha Neftin 
21133df25e4cSSasha Neftin 	adapter->eims_enable_mask |= adapter->eims_other;
21143df25e4cSSasha Neftin 
21153df25e4cSSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++)
21163df25e4cSSasha Neftin 		igc_assign_vector(adapter->q_vector[i], vector++);
21173df25e4cSSasha Neftin 
21183df25e4cSSasha Neftin 	wrfl();
21193df25e4cSSasha Neftin }
21203df25e4cSSasha Neftin 
21213df25e4cSSasha Neftin static irqreturn_t igc_msix_ring(int irq, void *data)
21223df25e4cSSasha Neftin {
21233df25e4cSSasha Neftin 	struct igc_q_vector *q_vector = data;
21243df25e4cSSasha Neftin 
21253df25e4cSSasha Neftin 	/* Write the ITR value calculated from the previous interrupt. */
21263df25e4cSSasha Neftin 	igc_write_itr(q_vector);
21273df25e4cSSasha Neftin 
21283df25e4cSSasha Neftin 	napi_schedule(&q_vector->napi);
21293df25e4cSSasha Neftin 
21303df25e4cSSasha Neftin 	return IRQ_HANDLED;
21313df25e4cSSasha Neftin }
21323df25e4cSSasha Neftin 
21333df25e4cSSasha Neftin /**
21343df25e4cSSasha Neftin  * igc_request_msix - Initialize MSI-X interrupts
21353df25e4cSSasha Neftin  * @adapter: Pointer to adapter structure
21363df25e4cSSasha Neftin  *
21373df25e4cSSasha Neftin  * igc_request_msix allocates MSI-X vectors and requests interrupts from the
21383df25e4cSSasha Neftin  * kernel.
21393df25e4cSSasha Neftin  */
21403df25e4cSSasha Neftin static int igc_request_msix(struct igc_adapter *adapter)
21413df25e4cSSasha Neftin {
21423df25e4cSSasha Neftin 	int i = 0, err = 0, vector = 0, free_vector = 0;
21433df25e4cSSasha Neftin 	struct net_device *netdev = adapter->netdev;
21443df25e4cSSasha Neftin 
21453df25e4cSSasha Neftin 	err = request_irq(adapter->msix_entries[vector].vector,
21463df25e4cSSasha Neftin 			  &igc_msix_other, 0, netdev->name, adapter);
21473df25e4cSSasha Neftin 	if (err)
21483df25e4cSSasha Neftin 		goto err_out;
21493df25e4cSSasha Neftin 
21503df25e4cSSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++) {
21513df25e4cSSasha Neftin 		struct igc_q_vector *q_vector = adapter->q_vector[i];
21523df25e4cSSasha Neftin 
21533df25e4cSSasha Neftin 		vector++;
21543df25e4cSSasha Neftin 
21553df25e4cSSasha Neftin 		q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
21563df25e4cSSasha Neftin 
21573df25e4cSSasha Neftin 		if (q_vector->rx.ring && q_vector->tx.ring)
21583df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
21593df25e4cSSasha Neftin 				q_vector->rx.ring->queue_index);
21603df25e4cSSasha Neftin 		else if (q_vector->tx.ring)
21613df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
21623df25e4cSSasha Neftin 				q_vector->tx.ring->queue_index);
21633df25e4cSSasha Neftin 		else if (q_vector->rx.ring)
21643df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
21653df25e4cSSasha Neftin 				q_vector->rx.ring->queue_index);
21663df25e4cSSasha Neftin 		else
21673df25e4cSSasha Neftin 			sprintf(q_vector->name, "%s-unused", netdev->name);
21683df25e4cSSasha Neftin 
21693df25e4cSSasha Neftin 		err = request_irq(adapter->msix_entries[vector].vector,
21703df25e4cSSasha Neftin 				  igc_msix_ring, 0, q_vector->name,
21713df25e4cSSasha Neftin 				  q_vector);
21723df25e4cSSasha Neftin 		if (err)
21733df25e4cSSasha Neftin 			goto err_free;
21743df25e4cSSasha Neftin 	}
21753df25e4cSSasha Neftin 
21763df25e4cSSasha Neftin 	igc_configure_msix(adapter);
21773df25e4cSSasha Neftin 	return 0;
21783df25e4cSSasha Neftin 
21793df25e4cSSasha Neftin err_free:
21803df25e4cSSasha Neftin 	/* free already assigned IRQs */
21813df25e4cSSasha Neftin 	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
21823df25e4cSSasha Neftin 
21833df25e4cSSasha Neftin 	vector--;
21843df25e4cSSasha Neftin 	for (i = 0; i < vector; i++) {
21853df25e4cSSasha Neftin 		free_irq(adapter->msix_entries[free_vector++].vector,
21863df25e4cSSasha Neftin 			 adapter->q_vector[i]);
21873df25e4cSSasha Neftin 	}
21883df25e4cSSasha Neftin err_out:
21893df25e4cSSasha Neftin 	return err;
21903df25e4cSSasha Neftin }
21913df25e4cSSasha Neftin 
21923df25e4cSSasha Neftin /**
21933df25e4cSSasha Neftin  * igc_reset_q_vector - Reset config for interrupt vector
21943df25e4cSSasha Neftin  * @adapter: board private structure to initialize
21953df25e4cSSasha Neftin  * @v_idx: Index of vector to be reset
21963df25e4cSSasha Neftin  *
21973df25e4cSSasha Neftin  * If NAPI is enabled it will delete any references to the
21983df25e4cSSasha Neftin  * NAPI struct. This is preparation for igc_free_q_vector.
21993df25e4cSSasha Neftin  */
22003df25e4cSSasha Neftin static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
22013df25e4cSSasha Neftin {
22023df25e4cSSasha Neftin 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
22033df25e4cSSasha Neftin 
22043df25e4cSSasha Neftin 	/* if we're coming from igc_set_interrupt_capability, the vectors are
22053df25e4cSSasha Neftin 	 * not yet allocated
22063df25e4cSSasha Neftin 	 */
22073df25e4cSSasha Neftin 	if (!q_vector)
22083df25e4cSSasha Neftin 		return;
22093df25e4cSSasha Neftin 
22103df25e4cSSasha Neftin 	if (q_vector->tx.ring)
22113df25e4cSSasha Neftin 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
22123df25e4cSSasha Neftin 
22133df25e4cSSasha Neftin 	if (q_vector->rx.ring)
22143df25e4cSSasha Neftin 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
22153df25e4cSSasha Neftin 
22163df25e4cSSasha Neftin 	netif_napi_del(&q_vector->napi);
22173df25e4cSSasha Neftin }
22183df25e4cSSasha Neftin 
22193df25e4cSSasha Neftin static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
22203df25e4cSSasha Neftin {
22213df25e4cSSasha Neftin 	int v_idx = adapter->num_q_vectors;
22223df25e4cSSasha Neftin 
22233df25e4cSSasha Neftin 	if (adapter->msix_entries) {
22243df25e4cSSasha Neftin 		pci_disable_msix(adapter->pdev);
22253df25e4cSSasha Neftin 		kfree(adapter->msix_entries);
22263df25e4cSSasha Neftin 		adapter->msix_entries = NULL;
22273df25e4cSSasha Neftin 	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
22283df25e4cSSasha Neftin 		pci_disable_msi(adapter->pdev);
22293df25e4cSSasha Neftin 	}
22303df25e4cSSasha Neftin 
22313df25e4cSSasha Neftin 	while (v_idx--)
22323df25e4cSSasha Neftin 		igc_reset_q_vector(adapter, v_idx);
22333df25e4cSSasha Neftin }
22343df25e4cSSasha Neftin 
22353df25e4cSSasha Neftin /**
22363df25e4cSSasha Neftin  * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
22373df25e4cSSasha Neftin  * @adapter: Pointer to adapter structure
22383df25e4cSSasha Neftin  *
22393df25e4cSSasha Neftin  * This function resets the device so that it has 0 rx queues, tx queues, and
22403df25e4cSSasha Neftin  * MSI-X interrupts allocated.
22413df25e4cSSasha Neftin  */
22423df25e4cSSasha Neftin static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
22433df25e4cSSasha Neftin {
22443df25e4cSSasha Neftin 	igc_free_q_vectors(adapter);
22453df25e4cSSasha Neftin 	igc_reset_interrupt_capability(adapter);
22463df25e4cSSasha Neftin }
22473df25e4cSSasha Neftin 
22483df25e4cSSasha Neftin /**
22493df25e4cSSasha Neftin  * igc_free_q_vectors - Free memory allocated for interrupt vectors
22503df25e4cSSasha Neftin  * @adapter: board private structure to initialize
22513df25e4cSSasha Neftin  *
22523df25e4cSSasha Neftin  * This function frees the memory allocated to the q_vectors.  In addition if
22533df25e4cSSasha Neftin  * NAPI is enabled it will delete any references to the NAPI struct prior
22543df25e4cSSasha Neftin  * to freeing the q_vector.
22553df25e4cSSasha Neftin  */
22563df25e4cSSasha Neftin static void igc_free_q_vectors(struct igc_adapter *adapter)
22573df25e4cSSasha Neftin {
22583df25e4cSSasha Neftin 	int v_idx = adapter->num_q_vectors;
22593df25e4cSSasha Neftin 
22603df25e4cSSasha Neftin 	adapter->num_tx_queues = 0;
22613df25e4cSSasha Neftin 	adapter->num_rx_queues = 0;
22623df25e4cSSasha Neftin 	adapter->num_q_vectors = 0;
22633df25e4cSSasha Neftin 
22643df25e4cSSasha Neftin 	while (v_idx--) {
22653df25e4cSSasha Neftin 		igc_reset_q_vector(adapter, v_idx);
22663df25e4cSSasha Neftin 		igc_free_q_vector(adapter, v_idx);
22673df25e4cSSasha Neftin 	}
22683df25e4cSSasha Neftin }
22693df25e4cSSasha Neftin 
22703df25e4cSSasha Neftin /**
22713df25e4cSSasha Neftin  * igc_free_q_vector - Free memory allocated for specific interrupt vector
22723df25e4cSSasha Neftin  * @adapter: board private structure to initialize
22733df25e4cSSasha Neftin  * @v_idx: Index of vector to be freed
22743df25e4cSSasha Neftin  *
22753df25e4cSSasha Neftin  * This function frees the memory allocated to the q_vector.
22763df25e4cSSasha Neftin  */
22773df25e4cSSasha Neftin static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
22783df25e4cSSasha Neftin {
22793df25e4cSSasha Neftin 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
22803df25e4cSSasha Neftin 
22813df25e4cSSasha Neftin 	adapter->q_vector[v_idx] = NULL;
22823df25e4cSSasha Neftin 
22833df25e4cSSasha Neftin 	/* igc_get_stats64() might access the rings on this vector,
22843df25e4cSSasha Neftin 	 * we must wait a grace period before freeing it.
22853df25e4cSSasha Neftin 	 */
22863df25e4cSSasha Neftin 	if (q_vector)
22873df25e4cSSasha Neftin 		kfree_rcu(q_vector, rcu);
22883df25e4cSSasha Neftin }
22893df25e4cSSasha Neftin 
22903df25e4cSSasha Neftin /**
22910507ef8aSSasha Neftin  * igc_watchdog - Timer Call-back
22920507ef8aSSasha Neftin  * @data: pointer to adapter cast into an unsigned long
22930507ef8aSSasha Neftin  */
22940507ef8aSSasha Neftin static void igc_watchdog(struct timer_list *t)
22950507ef8aSSasha Neftin {
22960507ef8aSSasha Neftin 	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
22970507ef8aSSasha Neftin }
22980507ef8aSSasha Neftin 
22990507ef8aSSasha Neftin /**
23003df25e4cSSasha Neftin  * igc_update_ring_itr - update the dynamic ITR value based on packet size
23013df25e4cSSasha Neftin  * @q_vector: pointer to q_vector
23023df25e4cSSasha Neftin  *
23033df25e4cSSasha Neftin  * Stores a new ITR value based on strictly on packet size.  This
23043df25e4cSSasha Neftin  * algorithm is less sophisticated than that used in igc_update_itr,
23053df25e4cSSasha Neftin  * due to the difficulty of synchronizing statistics across multiple
23063df25e4cSSasha Neftin  * receive rings.  The divisors and thresholds used by this function
23073df25e4cSSasha Neftin  * were determined based on theoretical maximum wire speed and testing
23083df25e4cSSasha Neftin  * data, in order to minimize response time while increasing bulk
23093df25e4cSSasha Neftin  * throughput.
23103df25e4cSSasha Neftin  * NOTE: This function is called only when operating in a multiqueue
23113df25e4cSSasha Neftin  * receive environment.
23123df25e4cSSasha Neftin  */
23133df25e4cSSasha Neftin static void igc_update_ring_itr(struct igc_q_vector *q_vector)
23143df25e4cSSasha Neftin {
23153df25e4cSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
23163df25e4cSSasha Neftin 	int new_val = q_vector->itr_val;
23173df25e4cSSasha Neftin 	int avg_wire_size = 0;
23183df25e4cSSasha Neftin 	unsigned int packets;
23193df25e4cSSasha Neftin 
23203df25e4cSSasha Neftin 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
23213df25e4cSSasha Neftin 	 * ints/sec - ITR timer value of 120 ticks.
23223df25e4cSSasha Neftin 	 */
23233df25e4cSSasha Neftin 	switch (adapter->link_speed) {
23243df25e4cSSasha Neftin 	case SPEED_10:
23253df25e4cSSasha Neftin 	case SPEED_100:
23263df25e4cSSasha Neftin 		new_val = IGC_4K_ITR;
23273df25e4cSSasha Neftin 		goto set_itr_val;
23283df25e4cSSasha Neftin 	default:
23293df25e4cSSasha Neftin 		break;
23303df25e4cSSasha Neftin 	}
23313df25e4cSSasha Neftin 
23323df25e4cSSasha Neftin 	packets = q_vector->rx.total_packets;
23333df25e4cSSasha Neftin 	if (packets)
23343df25e4cSSasha Neftin 		avg_wire_size = q_vector->rx.total_bytes / packets;
23353df25e4cSSasha Neftin 
23363df25e4cSSasha Neftin 	packets = q_vector->tx.total_packets;
23373df25e4cSSasha Neftin 	if (packets)
23383df25e4cSSasha Neftin 		avg_wire_size = max_t(u32, avg_wire_size,
23393df25e4cSSasha Neftin 				      q_vector->tx.total_bytes / packets);
23403df25e4cSSasha Neftin 
23413df25e4cSSasha Neftin 	/* if avg_wire_size isn't set no work was done */
23423df25e4cSSasha Neftin 	if (!avg_wire_size)
23433df25e4cSSasha Neftin 		goto clear_counts;
23443df25e4cSSasha Neftin 
23453df25e4cSSasha Neftin 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
23463df25e4cSSasha Neftin 	avg_wire_size += 24;
23473df25e4cSSasha Neftin 
23483df25e4cSSasha Neftin 	/* Don't starve jumbo frames */
23493df25e4cSSasha Neftin 	avg_wire_size = min(avg_wire_size, 3000);
23503df25e4cSSasha Neftin 
23513df25e4cSSasha Neftin 	/* Give a little boost to mid-size frames */
23523df25e4cSSasha Neftin 	if (avg_wire_size > 300 && avg_wire_size < 1200)
23533df25e4cSSasha Neftin 		new_val = avg_wire_size / 3;
23543df25e4cSSasha Neftin 	else
23553df25e4cSSasha Neftin 		new_val = avg_wire_size / 2;
23563df25e4cSSasha Neftin 
23573df25e4cSSasha Neftin 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
23583df25e4cSSasha Neftin 	if (new_val < IGC_20K_ITR &&
23593df25e4cSSasha Neftin 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
23603df25e4cSSasha Neftin 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
23613df25e4cSSasha Neftin 		new_val = IGC_20K_ITR;
23623df25e4cSSasha Neftin 
23633df25e4cSSasha Neftin set_itr_val:
23643df25e4cSSasha Neftin 	if (new_val != q_vector->itr_val) {
23653df25e4cSSasha Neftin 		q_vector->itr_val = new_val;
23663df25e4cSSasha Neftin 		q_vector->set_itr = 1;
23673df25e4cSSasha Neftin 	}
23683df25e4cSSasha Neftin clear_counts:
23693df25e4cSSasha Neftin 	q_vector->rx.total_bytes = 0;
23703df25e4cSSasha Neftin 	q_vector->rx.total_packets = 0;
23713df25e4cSSasha Neftin 	q_vector->tx.total_bytes = 0;
23723df25e4cSSasha Neftin 	q_vector->tx.total_packets = 0;
23733df25e4cSSasha Neftin }
23743df25e4cSSasha Neftin 
23753df25e4cSSasha Neftin /**
23763df25e4cSSasha Neftin  * igc_update_itr - update the dynamic ITR value based on statistics
23773df25e4cSSasha Neftin  * @q_vector: pointer to q_vector
23783df25e4cSSasha Neftin  * @ring_container: ring info to update the itr for
23793df25e4cSSasha Neftin  *
23803df25e4cSSasha Neftin  * Stores a new ITR value based on packets and byte
23813df25e4cSSasha Neftin  * counts during the last interrupt.  The advantage of per interrupt
23823df25e4cSSasha Neftin  * computation is faster updates and more accurate ITR for the current
23833df25e4cSSasha Neftin  * traffic pattern.  Constants in this function were computed
23843df25e4cSSasha Neftin  * based on theoretical maximum wire speed and thresholds were set based
23853df25e4cSSasha Neftin  * on testing data as well as attempting to minimize response time
23863df25e4cSSasha Neftin  * while increasing bulk throughput.
23873df25e4cSSasha Neftin  * NOTE: These calculations are only valid when operating in a single-
23883df25e4cSSasha Neftin  * queue environment.
23893df25e4cSSasha Neftin  */
23903df25e4cSSasha Neftin static void igc_update_itr(struct igc_q_vector *q_vector,
23913df25e4cSSasha Neftin 			   struct igc_ring_container *ring_container)
23923df25e4cSSasha Neftin {
23933df25e4cSSasha Neftin 	unsigned int packets = ring_container->total_packets;
23943df25e4cSSasha Neftin 	unsigned int bytes = ring_container->total_bytes;
23953df25e4cSSasha Neftin 	u8 itrval = ring_container->itr;
23963df25e4cSSasha Neftin 
23973df25e4cSSasha Neftin 	/* no packets, exit with status unchanged */
23983df25e4cSSasha Neftin 	if (packets == 0)
23993df25e4cSSasha Neftin 		return;
24003df25e4cSSasha Neftin 
24013df25e4cSSasha Neftin 	switch (itrval) {
24023df25e4cSSasha Neftin 	case lowest_latency:
24033df25e4cSSasha Neftin 		/* handle TSO and jumbo frames */
24043df25e4cSSasha Neftin 		if (bytes / packets > 8000)
24053df25e4cSSasha Neftin 			itrval = bulk_latency;
24063df25e4cSSasha Neftin 		else if ((packets < 5) && (bytes > 512))
24073df25e4cSSasha Neftin 			itrval = low_latency;
24083df25e4cSSasha Neftin 		break;
24093df25e4cSSasha Neftin 	case low_latency:  /* 50 usec aka 20000 ints/s */
24103df25e4cSSasha Neftin 		if (bytes > 10000) {
24113df25e4cSSasha Neftin 			/* this if handles the TSO accounting */
24123df25e4cSSasha Neftin 			if (bytes / packets > 8000)
24133df25e4cSSasha Neftin 				itrval = bulk_latency;
24143df25e4cSSasha Neftin 			else if ((packets < 10) || ((bytes / packets) > 1200))
24153df25e4cSSasha Neftin 				itrval = bulk_latency;
24163df25e4cSSasha Neftin 			else if ((packets > 35))
24173df25e4cSSasha Neftin 				itrval = lowest_latency;
24183df25e4cSSasha Neftin 		} else if (bytes / packets > 2000) {
24193df25e4cSSasha Neftin 			itrval = bulk_latency;
24203df25e4cSSasha Neftin 		} else if (packets <= 2 && bytes < 512) {
24213df25e4cSSasha Neftin 			itrval = lowest_latency;
24223df25e4cSSasha Neftin 		}
24233df25e4cSSasha Neftin 		break;
24243df25e4cSSasha Neftin 	case bulk_latency: /* 250 usec aka 4000 ints/s */
24253df25e4cSSasha Neftin 		if (bytes > 25000) {
24263df25e4cSSasha Neftin 			if (packets > 35)
24273df25e4cSSasha Neftin 				itrval = low_latency;
24283df25e4cSSasha Neftin 		} else if (bytes < 1500) {
24293df25e4cSSasha Neftin 			itrval = low_latency;
24303df25e4cSSasha Neftin 		}
24313df25e4cSSasha Neftin 		break;
24323df25e4cSSasha Neftin 	}
24333df25e4cSSasha Neftin 
24343df25e4cSSasha Neftin 	/* clear work counters since we have the values we need */
24353df25e4cSSasha Neftin 	ring_container->total_bytes = 0;
24363df25e4cSSasha Neftin 	ring_container->total_packets = 0;
24373df25e4cSSasha Neftin 
24383df25e4cSSasha Neftin 	/* write updated itr to ring container */
24393df25e4cSSasha Neftin 	ring_container->itr = itrval;
24403df25e4cSSasha Neftin }
24413df25e4cSSasha Neftin 
244213b5b7fdSSasha Neftin /**
244313b5b7fdSSasha Neftin  * igc_intr_msi - Interrupt Handler
244413b5b7fdSSasha Neftin  * @irq: interrupt number
244513b5b7fdSSasha Neftin  * @data: pointer to a network interface device structure
244613b5b7fdSSasha Neftin  */
244713b5b7fdSSasha Neftin static irqreturn_t igc_intr_msi(int irq, void *data)
244813b5b7fdSSasha Neftin {
244913b5b7fdSSasha Neftin 	struct igc_adapter *adapter = data;
245013b5b7fdSSasha Neftin 	struct igc_q_vector *q_vector = adapter->q_vector[0];
245113b5b7fdSSasha Neftin 	struct igc_hw *hw = &adapter->hw;
245213b5b7fdSSasha Neftin 	/* read ICR disables interrupts using IAM */
245313b5b7fdSSasha Neftin 	u32 icr = rd32(IGC_ICR);
245413b5b7fdSSasha Neftin 
245513b5b7fdSSasha Neftin 	igc_write_itr(q_vector);
245613b5b7fdSSasha Neftin 
245713b5b7fdSSasha Neftin 	if (icr & IGC_ICR_DRSTA)
245813b5b7fdSSasha Neftin 		schedule_work(&adapter->reset_task);
245913b5b7fdSSasha Neftin 
246013b5b7fdSSasha Neftin 	if (icr & IGC_ICR_DOUTSYNC) {
246113b5b7fdSSasha Neftin 		/* HW is reporting DMA is out of sync */
246213b5b7fdSSasha Neftin 		adapter->stats.doosync++;
246313b5b7fdSSasha Neftin 	}
246413b5b7fdSSasha Neftin 
246513b5b7fdSSasha Neftin 	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
246613b5b7fdSSasha Neftin 		hw->mac.get_link_status = 1;
246713b5b7fdSSasha Neftin 		if (!test_bit(__IGC_DOWN, &adapter->state))
246813b5b7fdSSasha Neftin 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
246913b5b7fdSSasha Neftin 	}
247013b5b7fdSSasha Neftin 
247113b5b7fdSSasha Neftin 	napi_schedule(&q_vector->napi);
247213b5b7fdSSasha Neftin 
247313b5b7fdSSasha Neftin 	return IRQ_HANDLED;
247413b5b7fdSSasha Neftin }
247513b5b7fdSSasha Neftin 
/**
 * igc_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to the adapter, registered as the IRQ cookie
 *
 * Legacy (INTx) interrupt lines may be shared with other devices, so
 * the handler first verifies that this adapter actually asserted the
 * interrupt.  Returns IRQ_NONE when the interrupt was not ours,
 * IRQ_HANDLED otherwise.
 */
static irqreturn_t igc_intr(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_q_vector *q_vector = adapter->q_vector[0];
	struct igc_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write
	 */
	u32 icr = rd32(IGC_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & IGC_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* flush any ITR update deferred from the previous poll cycle */
	igc_write_itr(q_vector);

	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
251813b5b7fdSSasha Neftin 
25193df25e4cSSasha Neftin static void igc_set_itr(struct igc_q_vector *q_vector)
25203df25e4cSSasha Neftin {
25213df25e4cSSasha Neftin 	struct igc_adapter *adapter = q_vector->adapter;
25223df25e4cSSasha Neftin 	u32 new_itr = q_vector->itr_val;
25233df25e4cSSasha Neftin 	u8 current_itr = 0;
25243df25e4cSSasha Neftin 
25253df25e4cSSasha Neftin 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
25263df25e4cSSasha Neftin 	switch (adapter->link_speed) {
25273df25e4cSSasha Neftin 	case SPEED_10:
25283df25e4cSSasha Neftin 	case SPEED_100:
25293df25e4cSSasha Neftin 		current_itr = 0;
25303df25e4cSSasha Neftin 		new_itr = IGC_4K_ITR;
25313df25e4cSSasha Neftin 		goto set_itr_now;
25323df25e4cSSasha Neftin 	default:
25333df25e4cSSasha Neftin 		break;
25343df25e4cSSasha Neftin 	}
25353df25e4cSSasha Neftin 
25363df25e4cSSasha Neftin 	igc_update_itr(q_vector, &q_vector->tx);
25373df25e4cSSasha Neftin 	igc_update_itr(q_vector, &q_vector->rx);
25383df25e4cSSasha Neftin 
25393df25e4cSSasha Neftin 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
25403df25e4cSSasha Neftin 
25413df25e4cSSasha Neftin 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
25423df25e4cSSasha Neftin 	if (current_itr == lowest_latency &&
25433df25e4cSSasha Neftin 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
25443df25e4cSSasha Neftin 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
25453df25e4cSSasha Neftin 		current_itr = low_latency;
25463df25e4cSSasha Neftin 
25473df25e4cSSasha Neftin 	switch (current_itr) {
25483df25e4cSSasha Neftin 	/* counts and packets in update_itr are dependent on these numbers */
25493df25e4cSSasha Neftin 	case lowest_latency:
25503df25e4cSSasha Neftin 		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
25513df25e4cSSasha Neftin 		break;
25523df25e4cSSasha Neftin 	case low_latency:
25533df25e4cSSasha Neftin 		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
25543df25e4cSSasha Neftin 		break;
25553df25e4cSSasha Neftin 	case bulk_latency:
25563df25e4cSSasha Neftin 		new_itr = IGC_4K_ITR;  /* 4,000 ints/sec */
25573df25e4cSSasha Neftin 		break;
25583df25e4cSSasha Neftin 	default:
25593df25e4cSSasha Neftin 		break;
25603df25e4cSSasha Neftin 	}
25613df25e4cSSasha Neftin 
25623df25e4cSSasha Neftin set_itr_now:
25633df25e4cSSasha Neftin 	if (new_itr != q_vector->itr_val) {
25643df25e4cSSasha Neftin 		/* this attempts to bias the interrupt rate towards Bulk
25653df25e4cSSasha Neftin 		 * by adding intermediate steps when interrupt rate is
25663df25e4cSSasha Neftin 		 * increasing
25673df25e4cSSasha Neftin 		 */
25683df25e4cSSasha Neftin 		new_itr = new_itr > q_vector->itr_val ?
25693df25e4cSSasha Neftin 			  max((new_itr * q_vector->itr_val) /
25703df25e4cSSasha Neftin 			  (new_itr + (q_vector->itr_val >> 2)),
25713df25e4cSSasha Neftin 			  new_itr) : new_itr;
25723df25e4cSSasha Neftin 		/* Don't write the value here; it resets the adapter's
25733df25e4cSSasha Neftin 		 * internal timer, and causes us to delay far longer than
25743df25e4cSSasha Neftin 		 * we should between interrupts.  Instead, we write the ITR
25753df25e4cSSasha Neftin 		 * value at the beginning of the next interrupt so the timing
25763df25e4cSSasha Neftin 		 * ends up being correct.
25773df25e4cSSasha Neftin 		 */
25783df25e4cSSasha Neftin 		q_vector->itr_val = new_itr;
25793df25e4cSSasha Neftin 		q_vector->set_itr = 1;
25803df25e4cSSasha Neftin 	}
25813df25e4cSSasha Neftin }
25823df25e4cSSasha Neftin 
/* igc_ring_irq_enable - re-arm the interrupt for a queue vector
 * @q_vector: vector whose interrupt is being re-enabled after NAPI poll
 *
 * Recomputes the ITR value first when the relevant itr_setting has its
 * low bits set (presumably the dynamic moderation modes 1-3 -- confirm
 * against ethtool coalesce handling), then unmasks the vector: its EIMS
 * bit in MSI-X mode, or the global interrupt enable otherwise.
 */
static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		/* with a single vector, Tx and Rx share one ITR */
		if (adapter->num_q_vectors == 1)
			igc_set_itr(q_vector);
		else
			igc_update_ring_itr(q_vector);
	}

	/* don't re-enable interrupts if the adapter is going down */
	if (!test_bit(__IGC_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(IGC_EIMS, q_vector->eims_value);
		else
			igc_irq_enable(adapter);
	}
}
26033df25e4cSSasha Neftin 
26043df25e4cSSasha Neftin /**
26053df25e4cSSasha Neftin  * igc_poll - NAPI Rx polling callback
26063df25e4cSSasha Neftin  * @napi: napi polling structure
26073df25e4cSSasha Neftin  * @budget: count of how many packets we should handle
26083df25e4cSSasha Neftin  */
26093df25e4cSSasha Neftin static int igc_poll(struct napi_struct *napi, int budget)
26103df25e4cSSasha Neftin {
26113df25e4cSSasha Neftin 	struct igc_q_vector *q_vector = container_of(napi,
26123df25e4cSSasha Neftin 						     struct igc_q_vector,
26133df25e4cSSasha Neftin 						     napi);
26143df25e4cSSasha Neftin 	bool clean_complete = true;
26153df25e4cSSasha Neftin 	int work_done = 0;
26160507ef8aSSasha Neftin 
26170507ef8aSSasha Neftin 	if (q_vector->tx.ring)
26180507ef8aSSasha Neftin 		clean_complete = igc_clean_tx_irq(q_vector, budget);
26193df25e4cSSasha Neftin 
26203df25e4cSSasha Neftin 	if (q_vector->rx.ring) {
26210507ef8aSSasha Neftin 		int cleaned = igc_clean_rx_irq(q_vector, budget);
26220507ef8aSSasha Neftin 
26233df25e4cSSasha Neftin 		work_done += cleaned;
26243df25e4cSSasha Neftin 		if (cleaned >= budget)
26253df25e4cSSasha Neftin 			clean_complete = false;
26263df25e4cSSasha Neftin 	}
26273df25e4cSSasha Neftin 
26283df25e4cSSasha Neftin 	/* If all work not completed, return budget and keep polling */
26293df25e4cSSasha Neftin 	if (!clean_complete)
26303df25e4cSSasha Neftin 		return budget;
26313df25e4cSSasha Neftin 
26323df25e4cSSasha Neftin 	/* If not enough Rx work done, exit the polling mode */
26333df25e4cSSasha Neftin 	napi_complete_done(napi, work_done);
26343df25e4cSSasha Neftin 	igc_ring_irq_enable(q_vector);
26353df25e4cSSasha Neftin 
26363df25e4cSSasha Neftin 	return 0;
26373df25e4cSSasha Neftin }
26383df25e4cSSasha Neftin 
/**
 * igc_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: Pointer to adapter structure
 * @msix: true to attempt MSI-X, false to force the MSI/legacy path
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.  When MSI-X cannot be set
 * up, falls back to a single queue pair with MSI (or legacy INTx if
 * pci_enable_msi() also fails).
 */
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix)
{
	int numvecs, i;
	int err;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;

	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;

	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	/* NOTE(review): on allocation failure we return with
	 * IGC_FLAG_HAS_MSIX still set; callers appear to key off
	 * msix_entries being NULL -- confirm
	 */
	if (!adapter->msix_entries)
		return;

	/* populate entry values */
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	/* min == max == numvecs: all-or-nothing request, so a positive
	 * return means every vector was granted
	 */
	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	/* MSI-X failed: release the vector table and fall back */
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;

	igc_reset_interrupt_capability(adapter);

msi_only:
	adapter->flags &= ~IGC_FLAG_HAS_MSIX;

	/* single paired queue, single vector */
	adapter->rss_queues = 1;
	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGC_FLAG_HAS_MSI;
}
27073df25e4cSSasha Neftin 
27083df25e4cSSasha Neftin static void igc_add_ring(struct igc_ring *ring,
27093df25e4cSSasha Neftin 			 struct igc_ring_container *head)
27103df25e4cSSasha Neftin {
27113df25e4cSSasha Neftin 	head->ring = ring;
27123df25e4cSSasha Neftin 	head->count++;
27133df25e4cSSasha Neftin }
27143df25e4cSSasha Neftin 
/**
 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 * The q_vector and its ring structures come from a single allocation:
 * the rings live in memory immediately after the q_vector and are
 * addressed through q_vector->ring.
 */
static int igc_alloc_q_vector(struct igc_adapter *adapter,
			      unsigned int v_count, unsigned int v_idx,
			      unsigned int txr_count, unsigned int txr_idx,
			      unsigned int rxr_count, unsigned int rxr_idx)
{
	struct igc_q_vector *q_vector;
	struct igc_ring *ring;
	int ring_count, size;

	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	/* size the allocation for the q_vector plus its trailing rings */
	ring_count = txr_count + rxr_count;
	size = sizeof(struct igc_q_vector) +
		(sizeof(struct igc_ring) * ring_count);

	/* allocate q_vector and rings; reuse (and zero) a previous
	 * allocation if one already exists at this index
	 */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	else
		memset(q_vector, 0, size);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igc_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
	q_vector->itr_val = IGC_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR: a setting of 0 or an explicit interval (> 3)
	 * overrides the IGC_START_ITR default; presumably values 1-3
	 * select dynamic moderation -- TODO confirm against ethtool
	 * coalesce handling
	 */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igc_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igc_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}
28253df25e4cSSasha Neftin 
28263df25e4cSSasha Neftin /**
28273df25e4cSSasha Neftin  * igc_alloc_q_vectors - Allocate memory for interrupt vectors
28283df25e4cSSasha Neftin  * @adapter: board private structure to initialize
28293df25e4cSSasha Neftin  *
28303df25e4cSSasha Neftin  * We allocate one q_vector per queue interrupt.  If allocation fails we
28313df25e4cSSasha Neftin  * return -ENOMEM.
28323df25e4cSSasha Neftin  */
28333df25e4cSSasha Neftin static int igc_alloc_q_vectors(struct igc_adapter *adapter)
28343df25e4cSSasha Neftin {
28353df25e4cSSasha Neftin 	int rxr_remaining = adapter->num_rx_queues;
28363df25e4cSSasha Neftin 	int txr_remaining = adapter->num_tx_queues;
28373df25e4cSSasha Neftin 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
28383df25e4cSSasha Neftin 	int q_vectors = adapter->num_q_vectors;
28393df25e4cSSasha Neftin 	int err;
28403df25e4cSSasha Neftin 
28413df25e4cSSasha Neftin 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
28423df25e4cSSasha Neftin 		for (; rxr_remaining; v_idx++) {
28433df25e4cSSasha Neftin 			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
28443df25e4cSSasha Neftin 						 0, 0, 1, rxr_idx);
28453df25e4cSSasha Neftin 
28463df25e4cSSasha Neftin 			if (err)
28473df25e4cSSasha Neftin 				goto err_out;
28483df25e4cSSasha Neftin 
28493df25e4cSSasha Neftin 			/* update counts and index */
28503df25e4cSSasha Neftin 			rxr_remaining--;
28513df25e4cSSasha Neftin 			rxr_idx++;
28523df25e4cSSasha Neftin 		}
28533df25e4cSSasha Neftin 	}
28543df25e4cSSasha Neftin 
28553df25e4cSSasha Neftin 	for (; v_idx < q_vectors; v_idx++) {
28563df25e4cSSasha Neftin 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
28573df25e4cSSasha Neftin 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
28583df25e4cSSasha Neftin 
28593df25e4cSSasha Neftin 		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
28603df25e4cSSasha Neftin 					 tqpv, txr_idx, rqpv, rxr_idx);
28613df25e4cSSasha Neftin 
28623df25e4cSSasha Neftin 		if (err)
28633df25e4cSSasha Neftin 			goto err_out;
28643df25e4cSSasha Neftin 
28653df25e4cSSasha Neftin 		/* update counts and index */
28663df25e4cSSasha Neftin 		rxr_remaining -= rqpv;
28673df25e4cSSasha Neftin 		txr_remaining -= tqpv;
28683df25e4cSSasha Neftin 		rxr_idx++;
28693df25e4cSSasha Neftin 		txr_idx++;
28703df25e4cSSasha Neftin 	}
28713df25e4cSSasha Neftin 
28723df25e4cSSasha Neftin 	return 0;
28733df25e4cSSasha Neftin 
28743df25e4cSSasha Neftin err_out:
28753df25e4cSSasha Neftin 	adapter->num_tx_queues = 0;
28763df25e4cSSasha Neftin 	adapter->num_rx_queues = 0;
28773df25e4cSSasha Neftin 	adapter->num_q_vectors = 0;
28783df25e4cSSasha Neftin 
28793df25e4cSSasha Neftin 	while (v_idx--)
28803df25e4cSSasha Neftin 		igc_free_q_vector(adapter, v_idx);
28813df25e4cSSasha Neftin 
28823df25e4cSSasha Neftin 	return -ENOMEM;
28833df25e4cSSasha Neftin }
28843df25e4cSSasha Neftin 
28853df25e4cSSasha Neftin /**
288613b5b7fdSSasha Neftin  * igc_cache_ring_register - Descriptor ring to register mapping
288713b5b7fdSSasha Neftin  * @adapter: board private structure to initialize
288813b5b7fdSSasha Neftin  *
288913b5b7fdSSasha Neftin  * Once we know the feature-set enabled for the device, we'll cache
289013b5b7fdSSasha Neftin  * the register offset the descriptor ring is assigned to.
289113b5b7fdSSasha Neftin  */
289213b5b7fdSSasha Neftin static void igc_cache_ring_register(struct igc_adapter *adapter)
289313b5b7fdSSasha Neftin {
289413b5b7fdSSasha Neftin 	int i = 0, j = 0;
289513b5b7fdSSasha Neftin 
289613b5b7fdSSasha Neftin 	switch (adapter->hw.mac.type) {
289713b5b7fdSSasha Neftin 	case igc_i225:
289813b5b7fdSSasha Neftin 	/* Fall through */
289913b5b7fdSSasha Neftin 	default:
290013b5b7fdSSasha Neftin 		for (; i < adapter->num_rx_queues; i++)
290113b5b7fdSSasha Neftin 			adapter->rx_ring[i]->reg_idx = i;
290213b5b7fdSSasha Neftin 		for (; j < adapter->num_tx_queues; j++)
290313b5b7fdSSasha Neftin 			adapter->tx_ring[j]->reg_idx = j;
290413b5b7fdSSasha Neftin 		break;
290513b5b7fdSSasha Neftin 	}
290613b5b7fdSSasha Neftin }
290713b5b7fdSSasha Neftin 
290813b5b7fdSSasha Neftin /**
29093df25e4cSSasha Neftin  * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
29103df25e4cSSasha Neftin  * @adapter: Pointer to adapter structure
29113df25e4cSSasha Neftin  *
29123df25e4cSSasha Neftin  * This function initializes the interrupts and allocates all of the queues.
29133df25e4cSSasha Neftin  */
29143df25e4cSSasha Neftin static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
29153df25e4cSSasha Neftin {
29163df25e4cSSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
29173df25e4cSSasha Neftin 	int err = 0;
29183df25e4cSSasha Neftin 
29193df25e4cSSasha Neftin 	igc_set_interrupt_capability(adapter, msix);
29203df25e4cSSasha Neftin 
29213df25e4cSSasha Neftin 	err = igc_alloc_q_vectors(adapter);
29223df25e4cSSasha Neftin 	if (err) {
29233df25e4cSSasha Neftin 		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
29243df25e4cSSasha Neftin 		goto err_alloc_q_vectors;
29253df25e4cSSasha Neftin 	}
29263df25e4cSSasha Neftin 
292713b5b7fdSSasha Neftin 	igc_cache_ring_register(adapter);
292813b5b7fdSSasha Neftin 
29293df25e4cSSasha Neftin 	return 0;
29303df25e4cSSasha Neftin 
29313df25e4cSSasha Neftin err_alloc_q_vectors:
29323df25e4cSSasha Neftin 	igc_reset_interrupt_capability(adapter);
29333df25e4cSSasha Neftin 	return err;
29343df25e4cSSasha Neftin }
29353df25e4cSSasha Neftin 
29363df25e4cSSasha Neftin static void igc_free_irq(struct igc_adapter *adapter)
29373df25e4cSSasha Neftin {
29383df25e4cSSasha Neftin 	if (adapter->msix_entries) {
29393df25e4cSSasha Neftin 		int vector = 0, i;
29403df25e4cSSasha Neftin 
29413df25e4cSSasha Neftin 		free_irq(adapter->msix_entries[vector++].vector, adapter);
29423df25e4cSSasha Neftin 
29433df25e4cSSasha Neftin 		for (i = 0; i < adapter->num_q_vectors; i++)
29443df25e4cSSasha Neftin 			free_irq(adapter->msix_entries[vector++].vector,
29453df25e4cSSasha Neftin 				 adapter->q_vector[i]);
29463df25e4cSSasha Neftin 	} else {
29473df25e4cSSasha Neftin 		free_irq(adapter->pdev->irq, adapter);
29483df25e4cSSasha Neftin 	}
29493df25e4cSSasha Neftin }
29503df25e4cSSasha Neftin 
/**
 * igc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks the extended (MSI-X) causes and the legacy causes, flushes the
 * posted writes, then waits for any in-flight handlers to finish via
 * synchronize_irq().
 */
static void igc_irq_disable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(IGC_EIAM);

		/* remove our vectors from the auto-mask and auto-clear
		 * sets, and mask them in EIMC
		 */
		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(IGC_EIMC, adapter->eims_enable_mask);
		regval = rd32(IGC_EIAC);
		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
	}

	/* mask all legacy causes and flush the posted writes */
	wr32(IGC_IAM, 0);
	wr32(IGC_IMC, ~0);
	wrfl();

	if (adapter->msix_entries) {
		int vector = 0, i;

		/* entry 0 is the link/other interrupt */
		synchronize_irq(adapter->msix_entries[vector++].vector);

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
29833df25e4cSSasha Neftin 
/**
 * igc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * In MSI-X mode the queue vectors are enabled through the extended
 * registers (auto-clear, auto-mask, then unmask) and only the link,
 * DMA-out-of-sync and device-reset causes go through IMS; otherwise
 * the full legacy mask is enabled with auto-mask (IAM).
 */
static void igc_irq_enable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
		u32 regval = rd32(IGC_EIAC);

		wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(IGC_EIAM);
		wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
		wr32(IGC_EIMS, adapter->eims_enable_mask);
		wr32(IGC_IMS, ims);
	} else {
		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
	}
}
30063df25e4cSSasha Neftin 
/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel: MSI-X first, then MSI,
 * and finally a shared legacy interrupt.  Returns 0 on success,
 * negative errno otherwise.
 */
static int igc_request_irq(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		err = igc_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI: the queue counts change under the new
		 * interrupt scheme, so the descriptor ring resources must
		 * be freed and re-allocated
		 */
		igc_free_all_tx_resources(adapter);
		igc_free_all_rx_resources(adapter);

		igc_clear_interrupt_scheme(adapter);
		err = igc_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;
		/* NOTE(review): the setup calls below can fail but their
		 * return values are ignored here -- confirm intended
		 */
		igc_setup_all_tx_resources(adapter);
		igc_setup_all_rx_resources(adapter);
		igc_configure(adapter);
	}

	/* single-vector mode: map queue 0 onto vector 0 */
	igc_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGC_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, &igc_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igc_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGC_FLAG_HAS_MSI;
	}

	/* legacy INTx may be shared with other devices */
	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
30603df25e4cSSasha Neftin 
30613df25e4cSSasha Neftin static void igc_write_itr(struct igc_q_vector *q_vector)
30623df25e4cSSasha Neftin {
30633df25e4cSSasha Neftin 	u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
30643df25e4cSSasha Neftin 
30653df25e4cSSasha Neftin 	if (!q_vector->set_itr)
30663df25e4cSSasha Neftin 		return;
30673df25e4cSSasha Neftin 
30683df25e4cSSasha Neftin 	if (!itr_val)
30693df25e4cSSasha Neftin 		itr_val = IGC_ITR_VAL_MASK;
30703df25e4cSSasha Neftin 
30713df25e4cSSasha Neftin 	itr_val |= IGC_EITR_CNT_IGNR;
30723df25e4cSSasha Neftin 
30733df25e4cSSasha Neftin 	writel(itr_val, q_vector->itr_register);
30743df25e4cSSasha Neftin 	q_vector->set_itr = 0;
30753df25e4cSSasha Neftin }
30763df25e4cSSasha Neftin 
30773df25e4cSSasha Neftin /**
3078c9a11c23SSasha Neftin  * igc_open - Called when a network interface is made active
3079c9a11c23SSasha Neftin  * @netdev: network interface device structure
3080c9a11c23SSasha Neftin  *
3081c9a11c23SSasha Neftin  * Returns 0 on success, negative value on failure
3082c9a11c23SSasha Neftin  *
3083c9a11c23SSasha Neftin  * The open entry point is called when a network interface is made
3084c9a11c23SSasha Neftin  * active by the system (IFF_UP).  At this point all resources needed
3085c9a11c23SSasha Neftin  * for transmit and receive operations are allocated, the interrupt
3086c9a11c23SSasha Neftin  * handler is registered with the OS, the watchdog timer is started,
3087c9a11c23SSasha Neftin  * and the stack is notified that the interface is ready.
3088c9a11c23SSasha Neftin  */
3089c9a11c23SSasha Neftin static int __igc_open(struct net_device *netdev, bool resuming)
3090c9a11c23SSasha Neftin {
3091c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
3092c9a11c23SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
30933df25e4cSSasha Neftin 	int err = 0;
3094c9a11c23SSasha Neftin 	int i = 0;
3095c9a11c23SSasha Neftin 
3096c9a11c23SSasha Neftin 	/* disallow open during test */
3097c9a11c23SSasha Neftin 
3098c9a11c23SSasha Neftin 	if (test_bit(__IGC_TESTING, &adapter->state)) {
3099c9a11c23SSasha Neftin 		WARN_ON(resuming);
3100c9a11c23SSasha Neftin 		return -EBUSY;
3101c9a11c23SSasha Neftin 	}
3102c9a11c23SSasha Neftin 
3103c9a11c23SSasha Neftin 	netif_carrier_off(netdev);
3104c9a11c23SSasha Neftin 
310513b5b7fdSSasha Neftin 	/* allocate transmit descriptors */
310613b5b7fdSSasha Neftin 	err = igc_setup_all_tx_resources(adapter);
310713b5b7fdSSasha Neftin 	if (err)
310813b5b7fdSSasha Neftin 		goto err_setup_tx;
310913b5b7fdSSasha Neftin 
311013b5b7fdSSasha Neftin 	/* allocate receive descriptors */
311113b5b7fdSSasha Neftin 	err = igc_setup_all_rx_resources(adapter);
311213b5b7fdSSasha Neftin 	if (err)
311313b5b7fdSSasha Neftin 		goto err_setup_rx;
311413b5b7fdSSasha Neftin 
3115c9a11c23SSasha Neftin 	igc_power_up_link(adapter);
3116c9a11c23SSasha Neftin 
3117c9a11c23SSasha Neftin 	igc_configure(adapter);
3118c9a11c23SSasha Neftin 
31193df25e4cSSasha Neftin 	err = igc_request_irq(adapter);
31203df25e4cSSasha Neftin 	if (err)
31213df25e4cSSasha Neftin 		goto err_req_irq;
31223df25e4cSSasha Neftin 
31233df25e4cSSasha Neftin 	/* Notify the stack of the actual queue counts. */
31243df25e4cSSasha Neftin 	netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
31253df25e4cSSasha Neftin 	if (err)
31263df25e4cSSasha Neftin 		goto err_set_queues;
31273df25e4cSSasha Neftin 
31283df25e4cSSasha Neftin 	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
31293df25e4cSSasha Neftin 	if (err)
31303df25e4cSSasha Neftin 		goto err_set_queues;
31313df25e4cSSasha Neftin 
3132c9a11c23SSasha Neftin 	clear_bit(__IGC_DOWN, &adapter->state);
3133c9a11c23SSasha Neftin 
3134c9a11c23SSasha Neftin 	for (i = 0; i < adapter->num_q_vectors; i++)
3135c9a11c23SSasha Neftin 		napi_enable(&adapter->q_vector[i]->napi);
3136c9a11c23SSasha Neftin 
31373df25e4cSSasha Neftin 	/* Clear any pending interrupts. */
31383df25e4cSSasha Neftin 	rd32(IGC_ICR);
31393df25e4cSSasha Neftin 	igc_irq_enable(adapter);
31403df25e4cSSasha Neftin 
314113b5b7fdSSasha Neftin 	netif_tx_start_all_queues(netdev);
314213b5b7fdSSasha Neftin 
3143c9a11c23SSasha Neftin 	/* start the watchdog. */
3144c9a11c23SSasha Neftin 	hw->mac.get_link_status = 1;
3145c9a11c23SSasha Neftin 
3146c9a11c23SSasha Neftin 	return IGC_SUCCESS;
31473df25e4cSSasha Neftin 
31483df25e4cSSasha Neftin err_set_queues:
31493df25e4cSSasha Neftin 	igc_free_irq(adapter);
31503df25e4cSSasha Neftin err_req_irq:
31513df25e4cSSasha Neftin 	igc_release_hw_control(adapter);
31523df25e4cSSasha Neftin 	igc_power_down_link(adapter);
315313b5b7fdSSasha Neftin 	igc_free_all_rx_resources(adapter);
315413b5b7fdSSasha Neftin err_setup_rx:
315513b5b7fdSSasha Neftin 	igc_free_all_tx_resources(adapter);
315613b5b7fdSSasha Neftin err_setup_tx:
315713b5b7fdSSasha Neftin 	igc_reset(adapter);
31583df25e4cSSasha Neftin 
31593df25e4cSSasha Neftin 	return err;
3160c9a11c23SSasha Neftin }
3161c9a11c23SSasha Neftin 
3162c9a11c23SSasha Neftin static int igc_open(struct net_device *netdev)
3163c9a11c23SSasha Neftin {
3164c9a11c23SSasha Neftin 	return __igc_open(netdev, false);
3165c9a11c23SSasha Neftin }
3166c9a11c23SSasha Neftin 
3167c9a11c23SSasha Neftin /**
3168c9a11c23SSasha Neftin  * igc_close - Disables a network interface
3169c9a11c23SSasha Neftin  * @netdev: network interface device structure
3170c9a11c23SSasha Neftin  *
3171c9a11c23SSasha Neftin  * Returns 0, this is not allowed to fail
3172c9a11c23SSasha Neftin  *
3173c9a11c23SSasha Neftin  * The close entry point is called when an interface is de-activated
3174c9a11c23SSasha Neftin  * by the OS.  The hardware is still under the driver's control, but
3175c9a11c23SSasha Neftin  * needs to be disabled.  A global MAC reset is issued to stop the
3176c9a11c23SSasha Neftin  * hardware, and all transmit and receive resources are freed.
3177c9a11c23SSasha Neftin  */
3178c9a11c23SSasha Neftin static int __igc_close(struct net_device *netdev, bool suspending)
3179c9a11c23SSasha Neftin {
3180c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
3181c9a11c23SSasha Neftin 
3182c9a11c23SSasha Neftin 	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
3183c9a11c23SSasha Neftin 
3184c9a11c23SSasha Neftin 	igc_down(adapter);
3185c9a11c23SSasha Neftin 
3186c9a11c23SSasha Neftin 	igc_release_hw_control(adapter);
3187c9a11c23SSasha Neftin 
31883df25e4cSSasha Neftin 	igc_free_irq(adapter);
31893df25e4cSSasha Neftin 
319013b5b7fdSSasha Neftin 	igc_free_all_tx_resources(adapter);
319113b5b7fdSSasha Neftin 	igc_free_all_rx_resources(adapter);
319213b5b7fdSSasha Neftin 
3193c9a11c23SSasha Neftin 	return 0;
3194c9a11c23SSasha Neftin }
3195c9a11c23SSasha Neftin 
3196c9a11c23SSasha Neftin static int igc_close(struct net_device *netdev)
3197c9a11c23SSasha Neftin {
3198c9a11c23SSasha Neftin 	if (netif_device_present(netdev) || netdev->dismantle)
3199c9a11c23SSasha Neftin 		return __igc_close(netdev, false);
3200c9a11c23SSasha Neftin 	return 0;
3201c9a11c23SSasha Neftin }
3202c9a11c23SSasha Neftin 
3203c9a11c23SSasha Neftin static const struct net_device_ops igc_netdev_ops = {
3204c9a11c23SSasha Neftin 	.ndo_open		= igc_open,
3205c9a11c23SSasha Neftin 	.ndo_stop		= igc_close,
3206c9a11c23SSasha Neftin 	.ndo_start_xmit		= igc_xmit_frame,
3207c9a11c23SSasha Neftin 	.ndo_set_mac_address	= igc_set_mac,
3208c9a11c23SSasha Neftin 	.ndo_change_mtu		= igc_change_mtu,
3209c9a11c23SSasha Neftin 	.ndo_get_stats		= igc_get_stats,
3210c9a11c23SSasha Neftin 	.ndo_do_ioctl		= igc_ioctl,
3211c9a11c23SSasha Neftin };
3212146740f9SSasha Neftin 
3213146740f9SSasha Neftin /* PCIe configuration access */
3214146740f9SSasha Neftin void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
3215146740f9SSasha Neftin {
3216146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
3217146740f9SSasha Neftin 
3218146740f9SSasha Neftin 	pci_read_config_word(adapter->pdev, reg, value);
3219146740f9SSasha Neftin }
3220146740f9SSasha Neftin 
3221146740f9SSasha Neftin void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
3222146740f9SSasha Neftin {
3223146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
3224146740f9SSasha Neftin 
3225146740f9SSasha Neftin 	pci_write_config_word(adapter->pdev, reg, *value);
3226146740f9SSasha Neftin }
3227146740f9SSasha Neftin 
3228146740f9SSasha Neftin s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
3229146740f9SSasha Neftin {
3230146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
3231146740f9SSasha Neftin 	u16 cap_offset;
3232146740f9SSasha Neftin 
3233146740f9SSasha Neftin 	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3234146740f9SSasha Neftin 	if (!cap_offset)
3235146740f9SSasha Neftin 		return -IGC_ERR_CONFIG;
3236146740f9SSasha Neftin 
3237146740f9SSasha Neftin 	pci_read_config_word(adapter->pdev, cap_offset + reg, value);
3238146740f9SSasha Neftin 
3239146740f9SSasha Neftin 	return IGC_SUCCESS;
3240146740f9SSasha Neftin }
3241146740f9SSasha Neftin 
3242146740f9SSasha Neftin s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
3243146740f9SSasha Neftin {
3244146740f9SSasha Neftin 	struct igc_adapter *adapter = hw->back;
3245146740f9SSasha Neftin 	u16 cap_offset;
3246146740f9SSasha Neftin 
3247146740f9SSasha Neftin 	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3248146740f9SSasha Neftin 	if (!cap_offset)
3249146740f9SSasha Neftin 		return -IGC_ERR_CONFIG;
3250146740f9SSasha Neftin 
3251146740f9SSasha Neftin 	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
3252146740f9SSasha Neftin 
3253146740f9SSasha Neftin 	return IGC_SUCCESS;
3254146740f9SSasha Neftin }
3255146740f9SSasha Neftin 
3256146740f9SSasha Neftin u32 igc_rd32(struct igc_hw *hw, u32 reg)
3257146740f9SSasha Neftin {
3258c9a11c23SSasha Neftin 	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
3259146740f9SSasha Neftin 	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
3260146740f9SSasha Neftin 	u32 value = 0;
3261146740f9SSasha Neftin 
3262146740f9SSasha Neftin 	if (IGC_REMOVED(hw_addr))
3263146740f9SSasha Neftin 		return ~value;
3264146740f9SSasha Neftin 
3265146740f9SSasha Neftin 	value = readl(&hw_addr[reg]);
3266146740f9SSasha Neftin 
3267146740f9SSasha Neftin 	/* reads should not return all F's */
3268c9a11c23SSasha Neftin 	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
3269c9a11c23SSasha Neftin 		struct net_device *netdev = igc->netdev;
3270c9a11c23SSasha Neftin 
3271146740f9SSasha Neftin 		hw->hw_addr = NULL;
3272c9a11c23SSasha Neftin 		netif_device_detach(netdev);
3273c9a11c23SSasha Neftin 		netdev_err(netdev, "PCIe link lost, device now detached\n");
3274c9a11c23SSasha Neftin 	}
3275146740f9SSasha Neftin 
3276146740f9SSasha Neftin 	return value;
3277146740f9SSasha Neftin }
3278146740f9SSasha Neftin 
3279d89f8841SSasha Neftin /**
3280d89f8841SSasha Neftin  * igc_probe - Device Initialization Routine
3281d89f8841SSasha Neftin  * @pdev: PCI device information struct
3282d89f8841SSasha Neftin  * @ent: entry in igc_pci_tbl
3283d89f8841SSasha Neftin  *
3284d89f8841SSasha Neftin  * Returns 0 on success, negative on failure
3285d89f8841SSasha Neftin  *
3286d89f8841SSasha Neftin  * igc_probe initializes an adapter identified by a pci_dev structure.
3287d89f8841SSasha Neftin  * The OS initialization, configuring the adapter private structure,
3288d89f8841SSasha Neftin  * and a hardware reset occur.
3289d89f8841SSasha Neftin  */
3290d89f8841SSasha Neftin static int igc_probe(struct pci_dev *pdev,
3291d89f8841SSasha Neftin 		     const struct pci_device_id *ent)
3292d89f8841SSasha Neftin {
3293146740f9SSasha Neftin 	struct igc_adapter *adapter;
3294c9a11c23SSasha Neftin 	struct net_device *netdev;
3295c9a11c23SSasha Neftin 	struct igc_hw *hw;
3296ab405612SSasha Neftin 	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
3297d89f8841SSasha Neftin 	int err, pci_using_dac;
3298d89f8841SSasha Neftin 
3299d89f8841SSasha Neftin 	err = pci_enable_device_mem(pdev);
3300d89f8841SSasha Neftin 	if (err)
3301d89f8841SSasha Neftin 		return err;
3302d89f8841SSasha Neftin 
3303d89f8841SSasha Neftin 	pci_using_dac = 0;
3304d89f8841SSasha Neftin 	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3305d89f8841SSasha Neftin 	if (!err) {
3306d89f8841SSasha Neftin 		err = dma_set_coherent_mask(&pdev->dev,
3307d89f8841SSasha Neftin 					    DMA_BIT_MASK(64));
3308d89f8841SSasha Neftin 		if (!err)
3309d89f8841SSasha Neftin 			pci_using_dac = 1;
3310d89f8841SSasha Neftin 	} else {
3311d89f8841SSasha Neftin 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3312d89f8841SSasha Neftin 		if (err) {
3313d89f8841SSasha Neftin 			err = dma_set_coherent_mask(&pdev->dev,
3314d89f8841SSasha Neftin 						    DMA_BIT_MASK(32));
3315d89f8841SSasha Neftin 			if (err) {
3316d89f8841SSasha Neftin 				IGC_ERR("Wrong DMA configuration, aborting\n");
3317d89f8841SSasha Neftin 				goto err_dma;
3318d89f8841SSasha Neftin 			}
3319d89f8841SSasha Neftin 		}
3320d89f8841SSasha Neftin 	}
3321d89f8841SSasha Neftin 
3322d89f8841SSasha Neftin 	err = pci_request_selected_regions(pdev,
3323d89f8841SSasha Neftin 					   pci_select_bars(pdev,
3324d89f8841SSasha Neftin 							   IORESOURCE_MEM),
3325d89f8841SSasha Neftin 					   igc_driver_name);
3326d89f8841SSasha Neftin 	if (err)
3327d89f8841SSasha Neftin 		goto err_pci_reg;
3328d89f8841SSasha Neftin 
3329c9a11c23SSasha Neftin 	pci_enable_pcie_error_reporting(pdev);
3330c9a11c23SSasha Neftin 
3331d89f8841SSasha Neftin 	pci_set_master(pdev);
3332c9a11c23SSasha Neftin 
3333c9a11c23SSasha Neftin 	err = -ENOMEM;
3334c9a11c23SSasha Neftin 	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
3335c9a11c23SSasha Neftin 				   IGC_MAX_TX_QUEUES);
3336c9a11c23SSasha Neftin 
3337c9a11c23SSasha Neftin 	if (!netdev)
3338c9a11c23SSasha Neftin 		goto err_alloc_etherdev;
3339c9a11c23SSasha Neftin 
3340c9a11c23SSasha Neftin 	SET_NETDEV_DEV(netdev, &pdev->dev);
3341c9a11c23SSasha Neftin 
3342c9a11c23SSasha Neftin 	pci_set_drvdata(pdev, netdev);
3343c9a11c23SSasha Neftin 	adapter = netdev_priv(netdev);
3344c9a11c23SSasha Neftin 	adapter->netdev = netdev;
3345c9a11c23SSasha Neftin 	adapter->pdev = pdev;
3346c9a11c23SSasha Neftin 	hw = &adapter->hw;
3347c9a11c23SSasha Neftin 	hw->back = adapter;
3348c9a11c23SSasha Neftin 	adapter->port_num = hw->bus.func;
3349c9a11c23SSasha Neftin 	adapter->msg_enable = GENMASK(debug - 1, 0);
3350c9a11c23SSasha Neftin 
3351d89f8841SSasha Neftin 	err = pci_save_state(pdev);
3352c9a11c23SSasha Neftin 	if (err)
3353c9a11c23SSasha Neftin 		goto err_ioremap;
3354c9a11c23SSasha Neftin 
3355c9a11c23SSasha Neftin 	err = -EIO;
3356c9a11c23SSasha Neftin 	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
3357c9a11c23SSasha Neftin 				   pci_resource_len(pdev, 0));
3358c9a11c23SSasha Neftin 	if (!adapter->io_addr)
3359c9a11c23SSasha Neftin 		goto err_ioremap;
3360c9a11c23SSasha Neftin 
3361c9a11c23SSasha Neftin 	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
3362c9a11c23SSasha Neftin 	hw->hw_addr = adapter->io_addr;
3363c9a11c23SSasha Neftin 
3364c9a11c23SSasha Neftin 	netdev->netdev_ops = &igc_netdev_ops;
3365c9a11c23SSasha Neftin 
3366c9a11c23SSasha Neftin 	netdev->watchdog_timeo = 5 * HZ;
3367c9a11c23SSasha Neftin 
3368c9a11c23SSasha Neftin 	netdev->mem_start = pci_resource_start(pdev, 0);
3369c9a11c23SSasha Neftin 	netdev->mem_end = pci_resource_end(pdev, 0);
3370c9a11c23SSasha Neftin 
3371c9a11c23SSasha Neftin 	/* PCI config space info */
3372c9a11c23SSasha Neftin 	hw->vendor_id = pdev->vendor;
3373c9a11c23SSasha Neftin 	hw->device_id = pdev->device;
3374c9a11c23SSasha Neftin 	hw->revision_id = pdev->revision;
3375c9a11c23SSasha Neftin 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
3376c9a11c23SSasha Neftin 	hw->subsystem_device_id = pdev->subsystem_device;
3377146740f9SSasha Neftin 
3378ab405612SSasha Neftin 	/* Copy the default MAC and PHY function pointers */
3379ab405612SSasha Neftin 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3380ab405612SSasha Neftin 
3381ab405612SSasha Neftin 	/* Initialize skew-specific constants */
3382ab405612SSasha Neftin 	err = ei->get_invariants(hw);
3383ab405612SSasha Neftin 	if (err)
3384ab405612SSasha Neftin 		goto err_sw_init;
3385ab405612SSasha Neftin 
3386146740f9SSasha Neftin 	/* setup the private structure */
3387146740f9SSasha Neftin 	err = igc_sw_init(adapter);
3388146740f9SSasha Neftin 	if (err)
3389146740f9SSasha Neftin 		goto err_sw_init;
3390146740f9SSasha Neftin 
3391c9a11c23SSasha Neftin 	/* MTU range: 68 - 9216 */
3392c9a11c23SSasha Neftin 	netdev->min_mtu = ETH_MIN_MTU;
3393c9a11c23SSasha Neftin 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3394c9a11c23SSasha Neftin 
33950507ef8aSSasha Neftin 	/* configure RXPBSIZE and TXPBSIZE */
33960507ef8aSSasha Neftin 	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
33970507ef8aSSasha Neftin 	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
33980507ef8aSSasha Neftin 
33990507ef8aSSasha Neftin 	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
34000507ef8aSSasha Neftin 
34010507ef8aSSasha Neftin 	INIT_WORK(&adapter->reset_task, igc_reset_task);
34020507ef8aSSasha Neftin 
3403c9a11c23SSasha Neftin 	/* reset the hardware with the new settings */
3404c9a11c23SSasha Neftin 	igc_reset(adapter);
3405c9a11c23SSasha Neftin 
3406c9a11c23SSasha Neftin 	/* let the f/w know that the h/w is now under the control of the
3407c9a11c23SSasha Neftin 	 * driver.
3408c9a11c23SSasha Neftin 	 */
3409c9a11c23SSasha Neftin 	igc_get_hw_control(adapter);
3410c9a11c23SSasha Neftin 
3411c9a11c23SSasha Neftin 	strncpy(netdev->name, "eth%d", IFNAMSIZ);
3412c9a11c23SSasha Neftin 	err = register_netdev(netdev);
3413c9a11c23SSasha Neftin 	if (err)
3414c9a11c23SSasha Neftin 		goto err_register;
3415c9a11c23SSasha Neftin 
3416c9a11c23SSasha Neftin 	 /* carrier off reporting is important to ethtool even BEFORE open */
3417c9a11c23SSasha Neftin 	netif_carrier_off(netdev);
3418c9a11c23SSasha Neftin 
3419ab405612SSasha Neftin 	/* Check if Media Autosense is enabled */
3420ab405612SSasha Neftin 	adapter->ei = *ei;
3421ab405612SSasha Neftin 
3422c9a11c23SSasha Neftin 	/* print pcie link status and MAC address */
3423c9a11c23SSasha Neftin 	pcie_print_link_status(pdev);
3424c9a11c23SSasha Neftin 	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
3425c9a11c23SSasha Neftin 
3426d89f8841SSasha Neftin 	return 0;
3427d89f8841SSasha Neftin 
3428c9a11c23SSasha Neftin err_register:
3429c9a11c23SSasha Neftin 	igc_release_hw_control(adapter);
3430146740f9SSasha Neftin err_sw_init:
34313df25e4cSSasha Neftin 	igc_clear_interrupt_scheme(adapter);
34323df25e4cSSasha Neftin 	iounmap(adapter->io_addr);
3433c9a11c23SSasha Neftin err_ioremap:
3434c9a11c23SSasha Neftin 	free_netdev(netdev);
3435c9a11c23SSasha Neftin err_alloc_etherdev:
3436c9a11c23SSasha Neftin 	pci_release_selected_regions(pdev,
3437c9a11c23SSasha Neftin 				     pci_select_bars(pdev, IORESOURCE_MEM));
3438d89f8841SSasha Neftin err_pci_reg:
3439d89f8841SSasha Neftin err_dma:
3440d89f8841SSasha Neftin 	pci_disable_device(pdev);
3441d89f8841SSasha Neftin 	return err;
3442d89f8841SSasha Neftin }
3443d89f8841SSasha Neftin 
3444d89f8841SSasha Neftin /**
3445d89f8841SSasha Neftin  * igc_remove - Device Removal Routine
3446d89f8841SSasha Neftin  * @pdev: PCI device information struct
3447d89f8841SSasha Neftin  *
3448d89f8841SSasha Neftin  * igc_remove is called by the PCI subsystem to alert the driver
3449d89f8841SSasha Neftin  * that it should release a PCI device.  This could be caused by a
3450d89f8841SSasha Neftin  * Hot-Plug event, or because the driver is going to be removed from
3451d89f8841SSasha Neftin  * memory.
3452d89f8841SSasha Neftin  */
3453d89f8841SSasha Neftin static void igc_remove(struct pci_dev *pdev)
3454d89f8841SSasha Neftin {
3455c9a11c23SSasha Neftin 	struct net_device *netdev = pci_get_drvdata(pdev);
3456c9a11c23SSasha Neftin 	struct igc_adapter *adapter = netdev_priv(netdev);
3457c9a11c23SSasha Neftin 
3458c9a11c23SSasha Neftin 	set_bit(__IGC_DOWN, &adapter->state);
34590507ef8aSSasha Neftin 
34600507ef8aSSasha Neftin 	del_timer_sync(&adapter->watchdog_timer);
34610507ef8aSSasha Neftin 
34620507ef8aSSasha Neftin 	cancel_work_sync(&adapter->reset_task);
3463c9a11c23SSasha Neftin 
3464c9a11c23SSasha Neftin 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
3465c9a11c23SSasha Neftin 	 * would have already happened in close and is redundant.
3466c9a11c23SSasha Neftin 	 */
3467c9a11c23SSasha Neftin 	igc_release_hw_control(adapter);
3468c9a11c23SSasha Neftin 	unregister_netdev(netdev);
3469c9a11c23SSasha Neftin 
34700507ef8aSSasha Neftin 	igc_clear_interrupt_scheme(adapter);
34710507ef8aSSasha Neftin 	pci_iounmap(pdev, adapter->io_addr);
34720507ef8aSSasha Neftin 	pci_release_mem_regions(pdev);
3473d89f8841SSasha Neftin 
34740507ef8aSSasha Neftin 	kfree(adapter->mac_table);
34750507ef8aSSasha Neftin 	kfree(adapter->shadow_vfta);
3476c9a11c23SSasha Neftin 	free_netdev(netdev);
34770507ef8aSSasha Neftin 
34780507ef8aSSasha Neftin 	pci_disable_pcie_error_reporting(pdev);
34790507ef8aSSasha Neftin 
3480d89f8841SSasha Neftin 	pci_disable_device(pdev);
3481d89f8841SSasha Neftin }
3482d89f8841SSasha Neftin 
3483d89f8841SSasha Neftin static struct pci_driver igc_driver = {
3484d89f8841SSasha Neftin 	.name     = igc_driver_name,
3485d89f8841SSasha Neftin 	.id_table = igc_pci_tbl,
3486d89f8841SSasha Neftin 	.probe    = igc_probe,
3487d89f8841SSasha Neftin 	.remove   = igc_remove,
3488d89f8841SSasha Neftin };
3489d89f8841SSasha Neftin 
34900507ef8aSSasha Neftin static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
34910507ef8aSSasha Neftin 				     const u32 max_rss_queues)
34920507ef8aSSasha Neftin {
34930507ef8aSSasha Neftin 	/* Determine if we need to pair queues. */
34940507ef8aSSasha Neftin 	/* If rss_queues > half of max_rss_queues, pair the queues in
34950507ef8aSSasha Neftin 	 * order to conserve interrupts due to limited supply.
34960507ef8aSSasha Neftin 	 */
34970507ef8aSSasha Neftin 	if (adapter->rss_queues > (max_rss_queues / 2))
34980507ef8aSSasha Neftin 		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
34990507ef8aSSasha Neftin 	else
35000507ef8aSSasha Neftin 		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
35010507ef8aSSasha Neftin }
35020507ef8aSSasha Neftin 
35030507ef8aSSasha Neftin static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
35040507ef8aSSasha Neftin {
35050507ef8aSSasha Neftin 	unsigned int max_rss_queues;
35060507ef8aSSasha Neftin 
35070507ef8aSSasha Neftin 	/* Determine the maximum number of RSS queues supported. */
35080507ef8aSSasha Neftin 	max_rss_queues = IGC_MAX_RX_QUEUES;
35090507ef8aSSasha Neftin 
35100507ef8aSSasha Neftin 	return max_rss_queues;
35110507ef8aSSasha Neftin }
35120507ef8aSSasha Neftin 
35130507ef8aSSasha Neftin static void igc_init_queue_configuration(struct igc_adapter *adapter)
35140507ef8aSSasha Neftin {
35150507ef8aSSasha Neftin 	u32 max_rss_queues;
35160507ef8aSSasha Neftin 
35170507ef8aSSasha Neftin 	max_rss_queues = igc_get_max_rss_queues(adapter);
35180507ef8aSSasha Neftin 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
35190507ef8aSSasha Neftin 
35200507ef8aSSasha Neftin 	igc_set_flag_queue_pairs(adapter, max_rss_queues);
35210507ef8aSSasha Neftin }
35220507ef8aSSasha Neftin 
3523d89f8841SSasha Neftin /**
3524146740f9SSasha Neftin  * igc_sw_init - Initialize general software structures (struct igc_adapter)
3525146740f9SSasha Neftin  * @adapter: board private structure to initialize
3526146740f9SSasha Neftin  *
3527146740f9SSasha Neftin  * igc_sw_init initializes the Adapter private data structure.
3528146740f9SSasha Neftin  * Fields are initialized based on PCI device information and
3529146740f9SSasha Neftin  * OS network device settings (MTU size).
3530146740f9SSasha Neftin  */
3531146740f9SSasha Neftin static int igc_sw_init(struct igc_adapter *adapter)
3532146740f9SSasha Neftin {
3533c9a11c23SSasha Neftin 	struct net_device *netdev = adapter->netdev;
3534146740f9SSasha Neftin 	struct pci_dev *pdev = adapter->pdev;
3535146740f9SSasha Neftin 	struct igc_hw *hw = &adapter->hw;
3536146740f9SSasha Neftin 
35370507ef8aSSasha Neftin 	int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
3538146740f9SSasha Neftin 
3539146740f9SSasha Neftin 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3540146740f9SSasha Neftin 
35410507ef8aSSasha Neftin 	/* set default ring sizes */
35420507ef8aSSasha Neftin 	adapter->tx_ring_count = IGC_DEFAULT_TXD;
35430507ef8aSSasha Neftin 	adapter->rx_ring_count = IGC_DEFAULT_RXD;
35440507ef8aSSasha Neftin 
35450507ef8aSSasha Neftin 	/* set default ITR values */
35460507ef8aSSasha Neftin 	adapter->rx_itr_setting = IGC_DEFAULT_ITR;
35470507ef8aSSasha Neftin 	adapter->tx_itr_setting = IGC_DEFAULT_ITR;
35480507ef8aSSasha Neftin 
35490507ef8aSSasha Neftin 	/* set default work limits */
35500507ef8aSSasha Neftin 	adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
35510507ef8aSSasha Neftin 
3552c9a11c23SSasha Neftin 	/* adjust max frame to be at least the size of a standard frame */
3553c9a11c23SSasha Neftin 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3554c9a11c23SSasha Neftin 				VLAN_HLEN;
35550507ef8aSSasha Neftin 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3556c9a11c23SSasha Neftin 
35570507ef8aSSasha Neftin 	spin_lock_init(&adapter->nfc_lock);
35580507ef8aSSasha Neftin 	spin_lock_init(&adapter->stats64_lock);
35590507ef8aSSasha Neftin 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
35600507ef8aSSasha Neftin 	adapter->flags |= IGC_FLAG_HAS_MSIX;
35610507ef8aSSasha Neftin 
35620507ef8aSSasha Neftin 	adapter->mac_table = kzalloc(size, GFP_ATOMIC);
35630507ef8aSSasha Neftin 	if (!adapter->mac_table)
35640507ef8aSSasha Neftin 		return -ENOMEM;
35650507ef8aSSasha Neftin 
35660507ef8aSSasha Neftin 	igc_init_queue_configuration(adapter);
35670507ef8aSSasha Neftin 
35680507ef8aSSasha Neftin 	/* This call may decrease the number of queues */
35693df25e4cSSasha Neftin 	if (igc_init_interrupt_scheme(adapter, true)) {
35703df25e4cSSasha Neftin 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
35713df25e4cSSasha Neftin 		return -ENOMEM;
35723df25e4cSSasha Neftin 	}
35733df25e4cSSasha Neftin 
35743df25e4cSSasha Neftin 	/* Explicitly disable IRQ since the NIC can be in any state. */
35753df25e4cSSasha Neftin 	igc_irq_disable(adapter);
35763df25e4cSSasha Neftin 
3577c9a11c23SSasha Neftin 	set_bit(__IGC_DOWN, &adapter->state);
3578c9a11c23SSasha Neftin 
3579146740f9SSasha Neftin 	return 0;
3580146740f9SSasha Neftin }
3581146740f9SSasha Neftin 
3582146740f9SSasha Neftin /**
3583c0071c7aSSasha Neftin  * igc_get_hw_dev - return device
3584c0071c7aSSasha Neftin  * @hw: pointer to hardware structure
3585c0071c7aSSasha Neftin  *
3586c0071c7aSSasha Neftin  * used by hardware layer to print debugging information
3587c0071c7aSSasha Neftin  */
3588c0071c7aSSasha Neftin struct net_device *igc_get_hw_dev(struct igc_hw *hw)
3589c0071c7aSSasha Neftin {
3590c0071c7aSSasha Neftin 	struct igc_adapter *adapter = hw->back;
3591c0071c7aSSasha Neftin 
3592c0071c7aSSasha Neftin 	return adapter->netdev;
3593c0071c7aSSasha Neftin }
3594c0071c7aSSasha Neftin 
3595c0071c7aSSasha Neftin /**
3596d89f8841SSasha Neftin  * igc_init_module - Driver Registration Routine
3597d89f8841SSasha Neftin  *
3598d89f8841SSasha Neftin  * igc_init_module is the first routine called when the driver is
3599d89f8841SSasha Neftin  * loaded. All it does is register with the PCI subsystem.
3600d89f8841SSasha Neftin  */
3601d89f8841SSasha Neftin static int __init igc_init_module(void)
3602d89f8841SSasha Neftin {
3603d89f8841SSasha Neftin 	int ret;
3604d89f8841SSasha Neftin 
3605d89f8841SSasha Neftin 	pr_info("%s - version %s\n",
3606d89f8841SSasha Neftin 		igc_driver_string, igc_driver_version);
3607d89f8841SSasha Neftin 
3608d89f8841SSasha Neftin 	pr_info("%s\n", igc_copyright);
3609d89f8841SSasha Neftin 
3610d89f8841SSasha Neftin 	ret = pci_register_driver(&igc_driver);
3611d89f8841SSasha Neftin 	return ret;
3612d89f8841SSasha Neftin }
3613d89f8841SSasha Neftin 
3614d89f8841SSasha Neftin module_init(igc_init_module);
3615d89f8841SSasha Neftin 
3616d89f8841SSasha Neftin /**
3617d89f8841SSasha Neftin  * igc_exit_module - Driver Exit Cleanup Routine
3618d89f8841SSasha Neftin  *
3619d89f8841SSasha Neftin  * igc_exit_module is called just before the driver is removed
3620d89f8841SSasha Neftin  * from memory.
3621d89f8841SSasha Neftin  */
3622d89f8841SSasha Neftin static void __exit igc_exit_module(void)
3623d89f8841SSasha Neftin {
3624d89f8841SSasha Neftin 	pci_unregister_driver(&igc_driver);
3625d89f8841SSasha Neftin }
3626d89f8841SSasha Neftin 
3627d89f8841SSasha Neftin module_exit(igc_exit_module);
3628d89f8841SSasha Neftin /* igc_main.c */
3629