// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <net/xfrm.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2018 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
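/* The "debug" parameter is presumably fed to netif_msg_init() during probe;
 * the default of -1 is out of range there, so the driver is assumed to fall
 * back to the DEFAULT_MSG_ENABLE mask defined above.
 */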

static struct workqueue_struct *ixgbevf_wq;

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
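		/* As the index math below implies, each VTIVAR register
		 * appears to pack four 8-bit entries for a pair of queues:
		 * the even queue's Rx (direction 0) cause in bits 7:0, its
		 * Tx in 15:8, and the odd queue's Rx/Tx in 23:16 and 31:24.
		 * For example, queue 3 / Tx lands at 16 * 1 + 8 * 1 = 24.
		 */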
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

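	/* Report how many descriptors the hardware has not yet processed,
	 * allowing for head/tail wraparound; e.g. with count = 512,
	 * head = 500 and tail = 10 there are 10 + 512 - 500 = 22 pending.
	 */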
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: transmit queue hanging (unused)
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang%s\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       ring_is_xdp(tx_ring) ? " XDP" : "",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
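	/* Only restart the queue once at least twice the descriptors a
	 * worst-case frame may need are free again; the gap is assumed to
	 * provide enough hysteresis that the queue is not stopped and woken
	 * on every reclaimed packet.
	 */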
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

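/* RSS hash types that cover L4 ports; packets hashed with one of these are
 * reported to the stack as PKT_HASH_TYPE_L4 below, everything else as L3.
 */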
#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static
struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
						const unsigned int size)
{
	struct ixgbevf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *rx_buffer,
				  struct sk_buff *skb)
{
	if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		if (IS_ERR(skb))
			/* We are not reusing the buffer so unmap it and free
			 * any references we are holding to it
			 */
			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
					     ixgbevf_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE,
					     IXGBEVF_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 ixgbevf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ixgbevf_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;
	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ixgbevf_rx_bufsz(rx_ring),
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right. These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

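	/* pagecnt_bias counts the page references the driver still owns.
	 * Comparing it with page_ref_count() below is a cheap way to see
	 * whether anyone outside the driver still holds onto part of this
	 * page, avoiding an atomic page ref update for every received frame.
	 */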
	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IXGBEVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)

	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
		return false;

#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				struct sk_buff *skb,
				unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
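	/* With small pages each buffer is half a page, so the offset XOR
	 * below simply flips between the two halves; with larger pages the
	 * offset walks forward through the page until
	 * ixgbevf_can_reuse_rx_page() decides there is no room left.
	 */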
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static
struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *rx_buffer,
				      struct xdp_buff *xdp,
				      union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 * ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to stack via ixgbevf_build_skb().
	 *
	 * For ixgbevf_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever change
	 * in the future for legacy-rx mode on, then let's also
	 * add xdp->data_meta handling here.
	 */

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IXGBEVF_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data,
					  IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(xdp->data + headlen) -
					page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
					 struct ixgbevf_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to xdp->data, otherwise, we likely
	 * have a consumer accessing first few bytes of meta data,
	 * and then actual data.
	 */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

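/* Verdicts from ixgbevf_run_xdp(), which reports them to its caller encoded
 * as ERR_PTR(-verdict): XDP_PASS maps to a NULL "skb" so the normal skb
 * build path runs, while an IS_ERR() pointer seen later (e.g. in
 * ixgbevf_put_rx_buffer() and ixgbevf_cleanup_headers() above) indicates the
 * frame was already consumed by XDP (dropped or queued on the XDP Tx ring).
 */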
967c7aec596STony Nguyen #define IXGBEVF_XDP_PASS 0
968c7aec596STony Nguyen #define IXGBEVF_XDP_CONSUMED 1
96921092e9cSTony Nguyen #define IXGBEVF_XDP_TX 2
970c7aec596STony Nguyen
ixgbevf_xmit_xdp_ring(struct ixgbevf_ring * ring,struct xdp_buff * xdp)97121092e9cSTony Nguyen static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
97221092e9cSTony Nguyen struct xdp_buff *xdp)
97321092e9cSTony Nguyen {
97421092e9cSTony Nguyen struct ixgbevf_tx_buffer *tx_buffer;
97521092e9cSTony Nguyen union ixgbe_adv_tx_desc *tx_desc;
97621092e9cSTony Nguyen u32 len, cmd_type;
97721092e9cSTony Nguyen dma_addr_t dma;
97821092e9cSTony Nguyen u16 i;
97921092e9cSTony Nguyen
98021092e9cSTony Nguyen len = xdp->data_end - xdp->data;
98121092e9cSTony Nguyen
98221092e9cSTony Nguyen if (unlikely(!ixgbevf_desc_unused(ring)))
98321092e9cSTony Nguyen return IXGBEVF_XDP_CONSUMED;
98421092e9cSTony Nguyen
98521092e9cSTony Nguyen dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
98621092e9cSTony Nguyen if (dma_mapping_error(ring->dev, dma))
98721092e9cSTony Nguyen return IXGBEVF_XDP_CONSUMED;
98821092e9cSTony Nguyen
98921092e9cSTony Nguyen /* record the location of the first descriptor for this packet */
99021092e9cSTony Nguyen i = ring->next_to_use;
9914be87727SAlexander Duyck tx_buffer = &ring->tx_buffer_info[i];
99221092e9cSTony Nguyen
99321092e9cSTony Nguyen dma_unmap_len_set(tx_buffer, len, len);
99421092e9cSTony Nguyen dma_unmap_addr_set(tx_buffer, dma, dma);
99521092e9cSTony Nguyen tx_buffer->data = xdp->data;
9964be87727SAlexander Duyck tx_buffer->bytecount = len;
9974be87727SAlexander Duyck tx_buffer->gso_segs = 1;
9984be87727SAlexander Duyck tx_buffer->protocol = 0;
9994be87727SAlexander Duyck
10004be87727SAlexander Duyck /* Populate minimal context descriptor that will provide for the
10014be87727SAlexander Duyck * fact that we are expected to process Ethernet frames.
10024be87727SAlexander Duyck */
10034be87727SAlexander Duyck if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
10044be87727SAlexander Duyck struct ixgbe_adv_tx_context_desc *context_desc;
10054be87727SAlexander Duyck
10064be87727SAlexander Duyck set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
10074be87727SAlexander Duyck
10084be87727SAlexander Duyck context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
10094be87727SAlexander Duyck context_desc->vlan_macip_lens =
10104be87727SAlexander Duyck cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
10117f68d430SShannon Nelson context_desc->fceof_saidx = 0;
10124be87727SAlexander Duyck context_desc->type_tucmd_mlhl =
10134be87727SAlexander Duyck cpu_to_le32(IXGBE_TXD_CMD_DEXT |
10144be87727SAlexander Duyck IXGBE_ADVTXD_DTYP_CTXT);
10154be87727SAlexander Duyck context_desc->mss_l4len_idx = 0;
10164be87727SAlexander Duyck
10174be87727SAlexander Duyck i = 1;
10184be87727SAlexander Duyck }
101921092e9cSTony Nguyen
102021092e9cSTony Nguyen /* put descriptor type bits */
102121092e9cSTony Nguyen cmd_type = IXGBE_ADVTXD_DTYP_DATA |
102221092e9cSTony Nguyen IXGBE_ADVTXD_DCMD_DEXT |
102321092e9cSTony Nguyen IXGBE_ADVTXD_DCMD_IFCS;
102421092e9cSTony Nguyen cmd_type |= len | IXGBE_TXD_CMD;
10254be87727SAlexander Duyck
10264be87727SAlexander Duyck tx_desc = IXGBEVF_TX_DESC(ring, i);
10274be87727SAlexander Duyck tx_desc->read.buffer_addr = cpu_to_le64(dma);
10284be87727SAlexander Duyck
102921092e9cSTony Nguyen tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
103021092e9cSTony Nguyen tx_desc->read.olinfo_status =
103121092e9cSTony Nguyen cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
103221092e9cSTony Nguyen IXGBE_ADVTXD_CC);
103321092e9cSTony Nguyen
1034efecfd5fSTony Nguyen /* Avoid any potential race with cleanup */
1035efecfd5fSTony Nguyen smp_wmb();
103621092e9cSTony Nguyen
103721092e9cSTony Nguyen /* set next_to_watch value indicating a packet is present */
103821092e9cSTony Nguyen i++;
103921092e9cSTony Nguyen if (i == ring->count)
104021092e9cSTony Nguyen i = 0;
104121092e9cSTony Nguyen
104221092e9cSTony Nguyen tx_buffer->next_to_watch = tx_desc;
104321092e9cSTony Nguyen ring->next_to_use = i;
104421092e9cSTony Nguyen
104521092e9cSTony Nguyen return IXGBEVF_XDP_TX;
104621092e9cSTony Nguyen }
104721092e9cSTony Nguyen
104821092e9cSTony Nguyen static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
104921092e9cSTony Nguyen struct ixgbevf_ring *rx_ring,
1050c7aec596STony Nguyen struct xdp_buff *xdp)
1051c7aec596STony Nguyen {
1052c7aec596STony Nguyen int result = IXGBEVF_XDP_PASS;
105321092e9cSTony Nguyen struct ixgbevf_ring *xdp_ring;
1054c7aec596STony Nguyen struct bpf_prog *xdp_prog;
1055c7aec596STony Nguyen u32 act;
1056c7aec596STony Nguyen
1057c7aec596STony Nguyen xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1058c7aec596STony Nguyen
1059c7aec596STony Nguyen if (!xdp_prog)
1060c7aec596STony Nguyen goto xdp_out;
1061c7aec596STony Nguyen
1062c7aec596STony Nguyen act = bpf_prog_run_xdp(xdp_prog, xdp);
1063c7aec596STony Nguyen switch (act) {
1064c7aec596STony Nguyen case XDP_PASS:
1065c7aec596STony Nguyen break;
106621092e9cSTony Nguyen case XDP_TX:
106721092e9cSTony Nguyen xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
106821092e9cSTony Nguyen result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1069faae8142SMagnus Karlsson if (result == IXGBEVF_XDP_CONSUMED)
1070faae8142SMagnus Karlsson goto out_failure;
107121092e9cSTony Nguyen break;
1072c7aec596STony Nguyen default:
1073c8064e5bSPaolo Abeni bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
10745463fce6SJeff Kirsher fallthrough;
1075c7aec596STony Nguyen case XDP_ABORTED:
1076faae8142SMagnus Karlsson out_failure:
1077c7aec596STony Nguyen trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
10785463fce6SJeff Kirsher fallthrough; /* handle aborts by dropping packet */
1079c7aec596STony Nguyen case XDP_DROP:
1080c7aec596STony Nguyen result = IXGBEVF_XDP_CONSUMED;
1081c7aec596STony Nguyen break;
1082c7aec596STony Nguyen }
1083c7aec596STony Nguyen xdp_out:
1084c7aec596STony Nguyen return ERR_PTR(-result);
1085c7aec596STony Nguyen }
1086c7aec596STony Nguyen
108781f3c628SJesper Dangaard Brouer static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
108881f3c628SJesper Dangaard Brouer unsigned int size)
108981f3c628SJesper Dangaard Brouer {
109081f3c628SJesper Dangaard Brouer unsigned int truesize;
109181f3c628SJesper Dangaard Brouer
109281f3c628SJesper Dangaard Brouer #if (PAGE_SIZE < 8192)
109381f3c628SJesper Dangaard Brouer truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
109481f3c628SJesper Dangaard Brouer #else
109581f3c628SJesper Dangaard Brouer truesize = ring_uses_build_skb(rx_ring) ?
109681f3c628SJesper Dangaard Brouer SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
109781f3c628SJesper Dangaard Brouer SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
109881f3c628SJesper Dangaard Brouer SKB_DATA_ALIGN(size);
109981f3c628SJesper Dangaard Brouer #endif
110081f3c628SJesper Dangaard Brouer return truesize;
110181f3c628SJesper Dangaard Brouer }
110281f3c628SJesper Dangaard Brouer
110321092e9cSTony Nguyen static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
110421092e9cSTony Nguyen struct ixgbevf_rx_buffer *rx_buffer,
110521092e9cSTony Nguyen unsigned int size)
110621092e9cSTony Nguyen {
110781f3c628SJesper Dangaard Brouer unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
110821092e9cSTony Nguyen
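/* order-0 pages are split in half and we toggle between the two halves;
 * larger pages are consumed by advancing the offset through the page
 */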
110981f3c628SJesper Dangaard Brouer #if (PAGE_SIZE < 8192)
111021092e9cSTony Nguyen rx_buffer->page_offset ^= truesize;
111121092e9cSTony Nguyen #else
111221092e9cSTony Nguyen rx_buffer->page_offset += truesize;
111321092e9cSTony Nguyen #endif
111421092e9cSTony Nguyen }
111521092e9cSTony Nguyen
111608e50a20SJacob Keller static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1117dee1ad47SJeff Kirsher struct ixgbevf_ring *rx_ring,
1118fa71ae27SAlexander Duyck int budget)
1119dee1ad47SJeff Kirsher {
112043b5169dSLorenzo Bianconi unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
112121092e9cSTony Nguyen struct ixgbevf_adapter *adapter = q_vector->adapter;
1122bafa578fSEmil Tantilov u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1123bad17234SEmil Tantilov struct sk_buff *skb = rx_ring->skb;
1124efecfd5fSTony Nguyen bool xdp_xmit = false;
1125c7aec596STony Nguyen struct xdp_buff xdp;
1126c7aec596STony Nguyen
112781f3c628SJesper Dangaard Brouer /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
112881f3c628SJesper Dangaard Brouer #if (PAGE_SIZE < 8192)
112943b5169dSLorenzo Bianconi frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
113081f3c628SJesper Dangaard Brouer #endif
113143b5169dSLorenzo Bianconi xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
113281f3c628SJesper Dangaard Brouer
11336622402aSEmil Tantilov while (likely(total_rx_packets < budget)) {
1134925f5690SEmil Tantilov struct ixgbevf_rx_buffer *rx_buffer;
1135c7aec596STony Nguyen union ixgbe_adv_rx_desc *rx_desc;
1136925f5690SEmil Tantilov unsigned int size;
1137b97fe3b1SEmil Tantilov
11380579eefcSEmil Tantilov /* return some buffers to hardware; one at a time is too slow */
11390579eefcSEmil Tantilov if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
11400579eefcSEmil Tantilov ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
11410579eefcSEmil Tantilov cleaned_count = 0;
11420579eefcSEmil Tantilov }
11430579eefcSEmil Tantilov
1144bad17234SEmil Tantilov rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1145925f5690SEmil Tantilov size = le16_to_cpu(rx_desc->wb.upper.length);
1146925f5690SEmil Tantilov if (!size)
1147dee1ad47SJeff Kirsher break;
1148dee1ad47SJeff Kirsher
11490579eefcSEmil Tantilov /* This memory barrier is needed to keep us from reading
11500579eefcSEmil Tantilov * any other fields out of the rx_desc until we know the
11510579eefcSEmil Tantilov * RXD_STAT_DD bit is set
11520579eefcSEmil Tantilov */
11530579eefcSEmil Tantilov rmb();
1154ec62fe26SEmil Tantilov
1155925f5690SEmil Tantilov rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1156925f5690SEmil Tantilov
1157bad17234SEmil Tantilov /* retrieve a buffer from the ring */
1158c7aec596STony Nguyen if (!skb) {
1159be9df4afSLorenzo Bianconi unsigned int offset = ixgbevf_rx_offset(rx_ring);
1160be9df4afSLorenzo Bianconi unsigned char *hard_start;
1161be9df4afSLorenzo Bianconi
1162be9df4afSLorenzo Bianconi hard_start = page_address(rx_buffer->page) +
1163be9df4afSLorenzo Bianconi rx_buffer->page_offset - offset;
1164be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, hard_start, offset, size, true);
116581f3c628SJesper Dangaard Brouer #if (PAGE_SIZE > 4096)
116681f3c628SJesper Dangaard Brouer /* At larger PAGE_SIZE, frame_sz depends on the frame length */
116781f3c628SJesper Dangaard Brouer xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
116881f3c628SJesper Dangaard Brouer #endif
116921092e9cSTony Nguyen skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1170c7aec596STony Nguyen }
1171c7aec596STony Nguyen
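/* an ERR_PTR verdict means XDP either transmitted or dropped the
 * frame, so no skb will be built for it
 */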
1172c7aec596STony Nguyen if (IS_ERR(skb)) {
1173efecfd5fSTony Nguyen if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1174efecfd5fSTony Nguyen xdp_xmit = true;
117521092e9cSTony Nguyen ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
117621092e9cSTony Nguyen size);
1177efecfd5fSTony Nguyen } else {
117821092e9cSTony Nguyen rx_buffer->pagecnt_bias++;
1179efecfd5fSTony Nguyen }
1180c7aec596STony Nguyen total_rx_packets++;
1181c7aec596STony Nguyen total_rx_bytes += size;
1182c7aec596STony Nguyen } else if (skb) {
1183925f5690SEmil Tantilov ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1184c7aec596STony Nguyen } else if (ring_uses_build_skb(rx_ring)) {
11856d9c0217SEmil Tantilov skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1186c7aec596STony Nguyen &xdp, rx_desc);
1187c7aec596STony Nguyen } else {
1188925f5690SEmil Tantilov skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1189c7aec596STony Nguyen &xdp, rx_desc);
1190c7aec596STony Nguyen }
1191dee1ad47SJeff Kirsher
1192bad17234SEmil Tantilov /* exit if we failed to retrieve a buffer */
11932a35efe5SEmil Tantilov if (!skb) {
11942a35efe5SEmil Tantilov rx_ring->rx_stats.alloc_rx_buff_failed++;
1195925f5690SEmil Tantilov rx_buffer->pagecnt_bias++;
1196bad17234SEmil Tantilov break;
11972a35efe5SEmil Tantilov }
1198dee1ad47SJeff Kirsher
1199c7aec596STony Nguyen ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1200b97fe3b1SEmil Tantilov cleaned_count++;
1201b97fe3b1SEmil Tantilov
1202bad17234SEmil Tantilov /* fetch next buffer in frame if non-eop */
1203bad17234SEmil Tantilov if (ixgbevf_is_non_eop(rx_ring, rx_desc))
12040579eefcSEmil Tantilov continue;
1205dee1ad47SJeff Kirsher
1206bad17234SEmil Tantilov /* verify the packet layout is correct */
1207bad17234SEmil Tantilov if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1208bad17234SEmil Tantilov skb = NULL;
12090579eefcSEmil Tantilov continue;
1210dee1ad47SJeff Kirsher }
1211dee1ad47SJeff Kirsher
1212dee1ad47SJeff Kirsher /* probably a little skewed due to removing CRC */
1213dee1ad47SJeff Kirsher total_rx_bytes += skb->len;
1214dee1ad47SJeff Kirsher
1215815cccbfSJohn Fastabend /* Workaround hardware that can't do proper VEPA multicast
1216815cccbfSJohn Fastabend * source pruning.
1217815cccbfSJohn Fastabend */
1218bd9d5592SFlorian Fainelli if ((skb->pkt_type == PACKET_BROADCAST ||
1219bd9d5592SFlorian Fainelli skb->pkt_type == PACKET_MULTICAST) &&
1220095e2617SEmil Tantilov ether_addr_equal(rx_ring->netdev->dev_addr,
12217367d0b5SJoe Perches eth_hdr(skb)->h_source)) {
1222815cccbfSJohn Fastabend dev_kfree_skb_irq(skb);
12230579eefcSEmil Tantilov continue;
1224815cccbfSJohn Fastabend }
1225815cccbfSJohn Fastabend
1226dff80520SEmil Tantilov /* populate checksum, VLAN, and protocol */
1227dff80520SEmil Tantilov ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1228dff80520SEmil Tantilov
1229dff80520SEmil Tantilov ixgbevf_rx_skb(q_vector, skb);
1230dee1ad47SJeff Kirsher
1231bad17234SEmil Tantilov /* reset skb pointer */
1232bad17234SEmil Tantilov skb = NULL;
1233bad17234SEmil Tantilov
12340579eefcSEmil Tantilov /* update budget accounting */
12356622402aSEmil Tantilov total_rx_packets++;
12366622402aSEmil Tantilov }
1237dee1ad47SJeff Kirsher
1238bad17234SEmil Tantilov /* place incomplete frames back on ring for completion */
1239bad17234SEmil Tantilov rx_ring->skb = skb;
1240bad17234SEmil Tantilov
1241efecfd5fSTony Nguyen if (xdp_xmit) {
1242efecfd5fSTony Nguyen struct ixgbevf_ring *xdp_ring =
1243efecfd5fSTony Nguyen adapter->xdp_ring[rx_ring->queue_index];
1244efecfd5fSTony Nguyen
1245efecfd5fSTony Nguyen /* Force memory writes to complete before letting h/w
1246efecfd5fSTony Nguyen * know there are new descriptors to fetch.
1247efecfd5fSTony Nguyen */
1248efecfd5fSTony Nguyen wmb();
1249efecfd5fSTony Nguyen ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1250efecfd5fSTony Nguyen }
1251efecfd5fSTony Nguyen
12524197aa7bSEric Dumazet u64_stats_update_begin(&rx_ring->syncp);
1253095e2617SEmil Tantilov rx_ring->stats.packets += total_rx_packets;
1254095e2617SEmil Tantilov rx_ring->stats.bytes += total_rx_bytes;
12554197aa7bSEric Dumazet u64_stats_update_end(&rx_ring->syncp);
1256ac6ed8f0SGreg Rose q_vector->rx.total_packets += total_rx_packets;
1257ac6ed8f0SGreg Rose q_vector->rx.total_bytes += total_rx_bytes;
1258dee1ad47SJeff Kirsher
125908e50a20SJacob Keller return total_rx_packets;
1260dee1ad47SJeff Kirsher }
1261dee1ad47SJeff Kirsher
1262dee1ad47SJeff Kirsher /**
1263fa71ae27SAlexander Duyck * ixgbevf_poll - NAPI polling callback
1264dee1ad47SJeff Kirsher * @napi: napi struct with our devices info in it
1265dee1ad47SJeff Kirsher * @budget: amount of work driver is allowed to do this pass, in packets
1266dee1ad47SJeff Kirsher *
1267fa71ae27SAlexander Duyck * This function will clean one or more rings associated with a
1268dee1ad47SJeff Kirsher * q_vector.
1269dee1ad47SJeff Kirsher **/
1270fa71ae27SAlexander Duyck static int ixgbevf_poll(struct napi_struct *napi, int budget)
1271dee1ad47SJeff Kirsher {
1272dee1ad47SJeff Kirsher struct ixgbevf_q_vector *q_vector =
1273dee1ad47SJeff Kirsher container_of(napi, struct ixgbevf_q_vector, napi);
1274dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = q_vector->adapter;
1275fa71ae27SAlexander Duyck struct ixgbevf_ring *ring;
127632b3e08fSJesse Brandeburg int per_ring_budget, work_done = 0;
1277fa71ae27SAlexander Duyck bool clean_complete = true;
1278fa71ae27SAlexander Duyck
12798220bbc1SAlexander Duyck ixgbevf_for_each_ring(ring, q_vector->tx) {
12808220bbc1SAlexander Duyck if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
12818220bbc1SAlexander Duyck clean_complete = false;
12828220bbc1SAlexander Duyck }
1283dee1ad47SJeff Kirsher
1284d0f71affSWilliam Dauchy if (budget <= 0)
1285d0f71affSWilliam Dauchy return budget;
1286c777cdfaSJacob Keller
1287dee1ad47SJeff Kirsher /* attempt to distribute budget to each queue fairly, but don't allow
1288dec0d8e4SJeff Kirsher * the budget to go below 1 because we'll exit polling
1289dec0d8e4SJeff Kirsher */
1290fa71ae27SAlexander Duyck if (q_vector->rx.count > 1)
1291fa71ae27SAlexander Duyck per_ring_budget = max(budget/q_vector->rx.count, 1);
1292fa71ae27SAlexander Duyck else
1293fa71ae27SAlexander Duyck per_ring_budget = budget;
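/* e.g. a 64-packet NAPI budget shared by three Rx rings gives each
 * ring max(64 / 3, 1) = 21 packets of budget
 */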
1294dee1ad47SJeff Kirsher
129532b3e08fSJesse Brandeburg ixgbevf_for_each_ring(ring, q_vector->rx) {
129632b3e08fSJesse Brandeburg int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
129732b3e08fSJesse Brandeburg per_ring_budget);
129832b3e08fSJesse Brandeburg work_done += cleaned;
12998220bbc1SAlexander Duyck if (cleaned >= per_ring_budget)
13008220bbc1SAlexander Duyck clean_complete = false;
130132b3e08fSJesse Brandeburg }
1302dee1ad47SJeff Kirsher
1303fa71ae27SAlexander Duyck /* If all work not completed, return budget and keep polling */
1304fa71ae27SAlexander Duyck if (!clean_complete)
1305fa71ae27SAlexander Duyck return budget;
13060bcd952fSJesse Brandeburg
13070bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might
13080bcd952fSJesse Brandeburg * poll us due to busy-polling
13090bcd952fSJesse Brandeburg */
13100bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) {
13119ad3d6f7SEmil Tantilov if (adapter->rx_itr_setting == 1)
1312fa71ae27SAlexander Duyck ixgbevf_set_itr(q_vector);
13132e7cfbddSMark Rustad if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
13142e7cfbddSMark Rustad !test_bit(__IXGBEVF_REMOVING, &adapter->state))
13156b43c446SAlexander Duyck ixgbevf_irq_enable_queues(adapter,
13168d055cc0SJacob Keller BIT(q_vector->v_idx));
13170bcd952fSJesse Brandeburg }
1318dee1ad47SJeff Kirsher
13190bcd952fSJesse Brandeburg return min(work_done, budget - 1);
1320dee1ad47SJeff Kirsher }
1321dee1ad47SJeff Kirsher
1322ce422606SGreg Rose /**
1323ce422606SGreg Rose * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1324ce422606SGreg Rose * @q_vector: structure containing interrupt and ring information
1325dec0d8e4SJeff Kirsher **/
13263849623eSJacob Keller void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1327ce422606SGreg Rose {
1328ce422606SGreg Rose struct ixgbevf_adapter *adapter = q_vector->adapter;
1329ce422606SGreg Rose struct ixgbe_hw *hw = &adapter->hw;
1330ce422606SGreg Rose int v_idx = q_vector->v_idx;
1331ce422606SGreg Rose u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1332ce422606SGreg Rose
1333dec0d8e4SJeff Kirsher /* set the WDIS bit to not clear the timer bits and cause an
1334ce422606SGreg Rose * immediate assertion of the interrupt
1335ce422606SGreg Rose */
1336ce422606SGreg Rose itr_reg |= IXGBE_EITR_CNT_WDIS;
1337ce422606SGreg Rose
1338ce422606SGreg Rose IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1339ce422606SGreg Rose }
1340dee1ad47SJeff Kirsher
1341dee1ad47SJeff Kirsher /**
1342dee1ad47SJeff Kirsher * ixgbevf_configure_msix - Configure MSI-X hardware
1343dee1ad47SJeff Kirsher * @adapter: board private structure
1344dee1ad47SJeff Kirsher *
1345dee1ad47SJeff Kirsher * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1346dee1ad47SJeff Kirsher * interrupts.
1347dee1ad47SJeff Kirsher **/
1348dee1ad47SJeff Kirsher static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1349dee1ad47SJeff Kirsher {
1350dee1ad47SJeff Kirsher struct ixgbevf_q_vector *q_vector;
13516b43c446SAlexander Duyck int q_vectors, v_idx;
1352dee1ad47SJeff Kirsher
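/* all vectors but the last are assigned to queue interrupts below;
 * the last one handles the mailbox/link ("other") interrupt
 */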
1353dee1ad47SJeff Kirsher q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
13545f3600ebSAlexander Duyck adapter->eims_enable_mask = 0;
1355dee1ad47SJeff Kirsher
1356dec0d8e4SJeff Kirsher /* Populate the IVAR table and set the ITR values to the
1357dee1ad47SJeff Kirsher * corresponding register.
1358dee1ad47SJeff Kirsher */
1359dee1ad47SJeff Kirsher for (v_idx = 0; v_idx < q_vectors; v_idx++) {
13606b43c446SAlexander Duyck struct ixgbevf_ring *ring;
1361dec0d8e4SJeff Kirsher
1362dee1ad47SJeff Kirsher q_vector = adapter->q_vector[v_idx];
1363dee1ad47SJeff Kirsher
13646b43c446SAlexander Duyck ixgbevf_for_each_ring(ring, q_vector->rx)
13656b43c446SAlexander Duyck ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1366dee1ad47SJeff Kirsher
13676b43c446SAlexander Duyck ixgbevf_for_each_ring(ring, q_vector->tx)
13686b43c446SAlexander Duyck ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1369dee1ad47SJeff Kirsher
13705f3600ebSAlexander Duyck if (q_vector->tx.ring && !q_vector->rx.ring) {
1371dec0d8e4SJeff Kirsher /* Tx only vector */
13725f3600ebSAlexander Duyck if (adapter->tx_itr_setting == 1)
13738a9ca110SAlexander Duyck q_vector->itr = IXGBE_12K_ITR;
13745f3600ebSAlexander Duyck else
13755f3600ebSAlexander Duyck q_vector->itr = adapter->tx_itr_setting;
13765f3600ebSAlexander Duyck } else {
1377dec0d8e4SJeff Kirsher /* Rx or Rx/Tx vector */
13785f3600ebSAlexander Duyck if (adapter->rx_itr_setting == 1)
13795f3600ebSAlexander Duyck q_vector->itr = IXGBE_20K_ITR;
13805f3600ebSAlexander Duyck else
13815f3600ebSAlexander Duyck q_vector->itr = adapter->rx_itr_setting;
13825f3600ebSAlexander Duyck }
1383dee1ad47SJeff Kirsher
13845f3600ebSAlexander Duyck /* add q_vector eims value to global eims_enable_mask */
13858d055cc0SJacob Keller adapter->eims_enable_mask |= BIT(v_idx);
13865f3600ebSAlexander Duyck
13875f3600ebSAlexander Duyck ixgbevf_write_eitr(q_vector);
1388dee1ad47SJeff Kirsher }
1389dee1ad47SJeff Kirsher
1390dee1ad47SJeff Kirsher ixgbevf_set_ivar(adapter, -1, 1, v_idx);
13915f3600ebSAlexander Duyck /* setup eims_other and add value to global eims_enable_mask */
13928d055cc0SJacob Keller adapter->eims_other = BIT(v_idx);
13935f3600ebSAlexander Duyck adapter->eims_enable_mask |= adapter->eims_other;
1394dee1ad47SJeff Kirsher }
1395dee1ad47SJeff Kirsher
1396dee1ad47SJeff Kirsher enum latency_range {
1397dee1ad47SJeff Kirsher lowest_latency = 0,
1398dee1ad47SJeff Kirsher low_latency = 1,
1399dee1ad47SJeff Kirsher bulk_latency = 2,
1400dee1ad47SJeff Kirsher latency_invalid = 255
1401dee1ad47SJeff Kirsher };
1402dee1ad47SJeff Kirsher
1403dee1ad47SJeff Kirsher /**
1404dee1ad47SJeff Kirsher * ixgbevf_update_itr - update the dynamic ITR value based on statistics
14055f3600ebSAlexander Duyck * @q_vector: structure containing interrupt and ring information
14065f3600ebSAlexander Duyck * @ring_container: structure containing ring performance data
1407dee1ad47SJeff Kirsher *
1408dee1ad47SJeff Kirsher * Stores a new ITR value based on packets and byte
1409dee1ad47SJeff Kirsher * counts during the last interrupt. The advantage of per interrupt
1410dee1ad47SJeff Kirsher * computation is faster updates and more accurate ITR for the current
1411dee1ad47SJeff Kirsher * traffic pattern. Constants in this function were computed
1412dee1ad47SJeff Kirsher * based on theoretical maximum wire speed and thresholds were set based
1413dee1ad47SJeff Kirsher * on testing data as well as attempting to minimize response time
1414dee1ad47SJeff Kirsher * while increasing bulk throughput.
1415dee1ad47SJeff Kirsher **/
14165f3600ebSAlexander Duyck static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
14175f3600ebSAlexander Duyck struct ixgbevf_ring_container *ring_container)
1418dee1ad47SJeff Kirsher {
14195f3600ebSAlexander Duyck int bytes = ring_container->total_bytes;
14205f3600ebSAlexander Duyck int packets = ring_container->total_packets;
1421dee1ad47SJeff Kirsher u32 timepassed_us;
1422dee1ad47SJeff Kirsher u64 bytes_perint;
14235f3600ebSAlexander Duyck u8 itr_setting = ring_container->itr;
1424dee1ad47SJeff Kirsher
1425dee1ad47SJeff Kirsher if (packets == 0)
14265f3600ebSAlexander Duyck return;
1427dee1ad47SJeff Kirsher
1428dee1ad47SJeff Kirsher /* simple throttle rate management
1429dee1ad47SJeff Kirsher * 0-20MB/s lowest (100000 ints/s)
1430dee1ad47SJeff Kirsher * 20-100MB/s low (20000 ints/s)
14318a9ca110SAlexander Duyck * 100-1249MB/s bulk (12000 ints/s)
1432dee1ad47SJeff Kirsher */
1433dee1ad47SJeff Kirsher /* what was the last interrupt timeslice? */
14345f3600ebSAlexander Duyck timepassed_us = q_vector->itr >> 2;
1435e0f0be7dSYoung Xiao if (timepassed_us == 0)
1436e0f0be7dSYoung Xiao return;
1437e0f0be7dSYoung Xiao
1438dee1ad47SJeff Kirsher bytes_perint = bytes / timepassed_us; /* bytes/usec */
1439dee1ad47SJeff Kirsher
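/* e.g. 50000 bytes over a 2000 usec timeslice gives bytes_perint = 25,
 * which moves a low_latency ring up to bulk_latency
 */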
1440dee1ad47SJeff Kirsher switch (itr_setting) {
1441dee1ad47SJeff Kirsher case lowest_latency:
1442e2c28ce7SAlexander Duyck if (bytes_perint > 10)
14435f3600ebSAlexander Duyck itr_setting = low_latency;
1444dee1ad47SJeff Kirsher break;
1445dee1ad47SJeff Kirsher case low_latency:
1446e2c28ce7SAlexander Duyck if (bytes_perint > 20)
14475f3600ebSAlexander Duyck itr_setting = bulk_latency;
1448e2c28ce7SAlexander Duyck else if (bytes_perint <= 10)
14495f3600ebSAlexander Duyck itr_setting = lowest_latency;
1450dee1ad47SJeff Kirsher break;
1451dee1ad47SJeff Kirsher case bulk_latency:
1452e2c28ce7SAlexander Duyck if (bytes_perint <= 20)
14535f3600ebSAlexander Duyck itr_setting = low_latency;
1454dee1ad47SJeff Kirsher break;
1455dee1ad47SJeff Kirsher }
1456dee1ad47SJeff Kirsher
14575f3600ebSAlexander Duyck /* clear work counters since we have the values we need */
14585f3600ebSAlexander Duyck ring_container->total_bytes = 0;
14595f3600ebSAlexander Duyck ring_container->total_packets = 0;
14605f3600ebSAlexander Duyck
14615f3600ebSAlexander Duyck /* write updated itr to ring container */
14625f3600ebSAlexander Duyck ring_container->itr = itr_setting;
1463dee1ad47SJeff Kirsher }
1464dee1ad47SJeff Kirsher
1465fa71ae27SAlexander Duyck static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1466dee1ad47SJeff Kirsher {
14675f3600ebSAlexander Duyck u32 new_itr = q_vector->itr;
14685f3600ebSAlexander Duyck u8 current_itr;
1469dee1ad47SJeff Kirsher
14705f3600ebSAlexander Duyck ixgbevf_update_itr(q_vector, &q_vector->tx);
14715f3600ebSAlexander Duyck ixgbevf_update_itr(q_vector, &q_vector->rx);
1472dee1ad47SJeff Kirsher
14736b43c446SAlexander Duyck current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1474dee1ad47SJeff Kirsher
1475dee1ad47SJeff Kirsher switch (current_itr) {
1476dee1ad47SJeff Kirsher /* counts and packets in update_itr are dependent on these numbers */
1477dee1ad47SJeff Kirsher case lowest_latency:
14785f3600ebSAlexander Duyck new_itr = IXGBE_100K_ITR;
1479dee1ad47SJeff Kirsher break;
1480dee1ad47SJeff Kirsher case low_latency:
14815f3600ebSAlexander Duyck new_itr = IXGBE_20K_ITR;
1482dee1ad47SJeff Kirsher break;
1483dee1ad47SJeff Kirsher case bulk_latency:
14848a9ca110SAlexander Duyck new_itr = IXGBE_12K_ITR;
1485dee1ad47SJeff Kirsher break;
14869ad3d6f7SEmil Tantilov default:
14879ad3d6f7SEmil Tantilov break;
1488dee1ad47SJeff Kirsher }
1489dee1ad47SJeff Kirsher
14905f3600ebSAlexander Duyck if (new_itr != q_vector->itr) {
1491dee1ad47SJeff Kirsher /* do an exponential smoothing */
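/* weighted blend of the old and new values; e.g. stepping from an ITR
 * of 200 down to 100 yields (10 * 100 * 200) / ((9 * 100) + 200) = ~181,
 * so the interrupt rate changes gradually rather than all at once
 */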
14925f3600ebSAlexander Duyck new_itr = (10 * new_itr * q_vector->itr) /
14935f3600ebSAlexander Duyck ((9 * new_itr) + q_vector->itr);
14945f3600ebSAlexander Duyck
14955f3600ebSAlexander Duyck /* save the algorithm value here */
14965f3600ebSAlexander Duyck q_vector->itr = new_itr;
14975f3600ebSAlexander Duyck
14985f3600ebSAlexander Duyck ixgbevf_write_eitr(q_vector);
1499dee1ad47SJeff Kirsher }
1500dee1ad47SJeff Kirsher }
1501dee1ad47SJeff Kirsher
15024b2cd27fSAlexander Duyck static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1503dee1ad47SJeff Kirsher {
1504fa71ae27SAlexander Duyck struct ixgbevf_adapter *adapter = data;
1505dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
1506dee1ad47SJeff Kirsher
15074b2cd27fSAlexander Duyck hw->mac.get_link_status = 1;
1508dee1ad47SJeff Kirsher
15099ac5c5ccSEmil Tantilov ixgbevf_service_event_schedule(adapter);
15103a2c4033SGreg Rose
15115f3600ebSAlexander Duyck IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
15125f3600ebSAlexander Duyck
1513dee1ad47SJeff Kirsher return IRQ_HANDLED;
1514dee1ad47SJeff Kirsher }
1515dee1ad47SJeff Kirsher
1516dee1ad47SJeff Kirsher /**
1517fa71ae27SAlexander Duyck * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1518dee1ad47SJeff Kirsher * @irq: unused
1519dee1ad47SJeff Kirsher * @data: pointer to our q_vector struct for this interrupt vector
1520dee1ad47SJeff Kirsher **/
1521fa71ae27SAlexander Duyck static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1522dee1ad47SJeff Kirsher {
1523dee1ad47SJeff Kirsher struct ixgbevf_q_vector *q_vector = data;
1524dee1ad47SJeff Kirsher
15255f3600ebSAlexander Duyck /* EIAM disabled interrupts (on this vector) for us */
1526fa71ae27SAlexander Duyck if (q_vector->rx.ring || q_vector->tx.ring)
1527ef2662b2SAlexander Duyck napi_schedule_irqoff(&q_vector->napi);
1528dee1ad47SJeff Kirsher
1529dee1ad47SJeff Kirsher return IRQ_HANDLED;
1530dee1ad47SJeff Kirsher }
1531dee1ad47SJeff Kirsher
1532dee1ad47SJeff Kirsher /**
1533dee1ad47SJeff Kirsher * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1534dee1ad47SJeff Kirsher * @adapter: board private structure
1535dee1ad47SJeff Kirsher *
1536dee1ad47SJeff Kirsher * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1537dee1ad47SJeff Kirsher * interrupts from the kernel.
1538dee1ad47SJeff Kirsher **/
1539dee1ad47SJeff Kirsher static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1540dee1ad47SJeff Kirsher {
1541dee1ad47SJeff Kirsher struct net_device *netdev = adapter->netdev;
1542fa71ae27SAlexander Duyck int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
154331f5d9b1STony Nguyen unsigned int ri = 0, ti = 0;
1544fa71ae27SAlexander Duyck int vector, err;
1545dee1ad47SJeff Kirsher
1546dee1ad47SJeff Kirsher for (vector = 0; vector < q_vectors; vector++) {
1547fa71ae27SAlexander Duyck struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1548fa71ae27SAlexander Duyck struct msix_entry *entry = &adapter->msix_entries[vector];
1549dee1ad47SJeff Kirsher
1550fa71ae27SAlexander Duyck if (q_vector->tx.ring && q_vector->rx.ring) {
155131f5d9b1STony Nguyen snprintf(q_vector->name, sizeof(q_vector->name),
155231f5d9b1STony Nguyen "%s-TxRx-%u", netdev->name, ri++);
1553fa71ae27SAlexander Duyck ti++;
1554fa71ae27SAlexander Duyck } else if (q_vector->rx.ring) {
155531f5d9b1STony Nguyen snprintf(q_vector->name, sizeof(q_vector->name),
155631f5d9b1STony Nguyen "%s-rx-%u", netdev->name, ri++);
1557fa71ae27SAlexander Duyck } else if (q_vector->tx.ring) {
155831f5d9b1STony Nguyen snprintf(q_vector->name, sizeof(q_vector->name),
155931f5d9b1STony Nguyen "%s-tx-%u", netdev->name, ti++);
1560dee1ad47SJeff Kirsher } else {
1561dee1ad47SJeff Kirsher /* skip this unused q_vector */
1562dee1ad47SJeff Kirsher continue;
1563dee1ad47SJeff Kirsher }
1564fa71ae27SAlexander Duyck err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1565fa71ae27SAlexander Duyck q_vector->name, q_vector);
1566dee1ad47SJeff Kirsher if (err) {
1567dee1ad47SJeff Kirsher hw_dbg(&adapter->hw,
1568dec0d8e4SJeff Kirsher "request_irq failed for MSIX interrupt Error: %d\n",
1569dec0d8e4SJeff Kirsher err);
1570dee1ad47SJeff Kirsher goto free_queue_irqs;
1571dee1ad47SJeff Kirsher }
1572dee1ad47SJeff Kirsher }
1573dee1ad47SJeff Kirsher
1574dee1ad47SJeff Kirsher err = request_irq(adapter->msix_entries[vector].vector,
15754b2cd27fSAlexander Duyck &ixgbevf_msix_other, 0, netdev->name, adapter);
1576dee1ad47SJeff Kirsher if (err) {
1577dec0d8e4SJeff Kirsher hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1578dec0d8e4SJeff Kirsher err);
1579dee1ad47SJeff Kirsher goto free_queue_irqs;
1580dee1ad47SJeff Kirsher }
1581dee1ad47SJeff Kirsher
1582dee1ad47SJeff Kirsher return 0;
1583dee1ad47SJeff Kirsher
1584dee1ad47SJeff Kirsher free_queue_irqs:
1585fa71ae27SAlexander Duyck while (vector) {
1586fa71ae27SAlexander Duyck vector--;
1587fa71ae27SAlexander Duyck free_irq(adapter->msix_entries[vector].vector,
1588fa71ae27SAlexander Duyck adapter->q_vector[vector]);
1589fa71ae27SAlexander Duyck }
1590a1f6c6b1Sxunleer /* This failure is non-recoverable - it indicates the system is
1591a1f6c6b1Sxunleer * out of MSIX vector resources and the VF driver cannot run
1592a1f6c6b1Sxunleer * without them. Set the number of msix vectors to zero
1593a1f6c6b1Sxunleer * indicating that not enough can be allocated. The error
1594a1f6c6b1Sxunleer * will be returned to the user indicating device open failed.
1595a1f6c6b1Sxunleer * Any further attempts to force the driver to open will also
1596a1f6c6b1Sxunleer * fail. The only way to recover is to unload the driver and
1597a1f6c6b1Sxunleer * reload it again. If the system has recovered some MSIX
1598a1f6c6b1Sxunleer * vectors then it may succeed.
1599a1f6c6b1Sxunleer */
1600a1f6c6b1Sxunleer adapter->num_msix_vectors = 0;
1601dee1ad47SJeff Kirsher return err;
1602dee1ad47SJeff Kirsher }
1603dee1ad47SJeff Kirsher
1604dee1ad47SJeff Kirsher /**
1605dee1ad47SJeff Kirsher * ixgbevf_request_irq - initialize interrupts
1606dee1ad47SJeff Kirsher * @adapter: board private structure
1607dee1ad47SJeff Kirsher *
1608dee1ad47SJeff Kirsher * Attempts to configure interrupts using the best available
1609dee1ad47SJeff Kirsher * capabilities of the hardware and kernel.
1610dee1ad47SJeff Kirsher **/
1611dee1ad47SJeff Kirsher static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1612dee1ad47SJeff Kirsher {
161350985b5fSMark Rustad int err = ixgbevf_request_msix_irqs(adapter);
1614dee1ad47SJeff Kirsher
1615dee1ad47SJeff Kirsher if (err)
1616dec0d8e4SJeff Kirsher hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1617dee1ad47SJeff Kirsher
1618dee1ad47SJeff Kirsher return err;
1619dee1ad47SJeff Kirsher }
1620dee1ad47SJeff Kirsher
1621dee1ad47SJeff Kirsher static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1622dee1ad47SJeff Kirsher {
1623dee1ad47SJeff Kirsher int i, q_vectors;
1624dee1ad47SJeff Kirsher
1625eeffceeeSMark Rustad if (!adapter->msix_entries)
1626eeffceeeSMark Rustad return;
1627eeffceeeSMark Rustad
1628dee1ad47SJeff Kirsher q_vectors = adapter->num_msix_vectors;
1629dee1ad47SJeff Kirsher i = q_vectors - 1;
1630dee1ad47SJeff Kirsher
1631fa71ae27SAlexander Duyck free_irq(adapter->msix_entries[i].vector, adapter);
1632dee1ad47SJeff Kirsher i--;
1633dee1ad47SJeff Kirsher
1634dee1ad47SJeff Kirsher for (; i >= 0; i--) {
1635fa71ae27SAlexander Duyck /* free only the irqs that were actually requested */
1636fa71ae27SAlexander Duyck if (!adapter->q_vector[i]->rx.ring &&
1637fa71ae27SAlexander Duyck !adapter->q_vector[i]->tx.ring)
1638fa71ae27SAlexander Duyck continue;
1639fa71ae27SAlexander Duyck
1640dee1ad47SJeff Kirsher free_irq(adapter->msix_entries[i].vector,
1641dee1ad47SJeff Kirsher adapter->q_vector[i]);
1642dee1ad47SJeff Kirsher }
1643dee1ad47SJeff Kirsher }
1644dee1ad47SJeff Kirsher
1645dee1ad47SJeff Kirsher /**
1646dee1ad47SJeff Kirsher * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1647dee1ad47SJeff Kirsher * @adapter: board private structure
1648dee1ad47SJeff Kirsher **/
1649dee1ad47SJeff Kirsher static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1650dee1ad47SJeff Kirsher {
1651dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
16525f3600ebSAlexander Duyck int i;
1653dee1ad47SJeff Kirsher
16545f3600ebSAlexander Duyck IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1655dee1ad47SJeff Kirsher IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
16565f3600ebSAlexander Duyck IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1657dee1ad47SJeff Kirsher
1658dee1ad47SJeff Kirsher IXGBE_WRITE_FLUSH(hw);
1659dee1ad47SJeff Kirsher
1660dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_msix_vectors; i++)
1661dee1ad47SJeff Kirsher synchronize_irq(adapter->msix_entries[i].vector);
1662dee1ad47SJeff Kirsher }
1663dee1ad47SJeff Kirsher
1664dee1ad47SJeff Kirsher /**
1665dee1ad47SJeff Kirsher * ixgbevf_irq_enable - Enable default interrupt generation settings
1666dee1ad47SJeff Kirsher * @adapter: board private structure
1667dee1ad47SJeff Kirsher **/
16685f3600ebSAlexander Duyck static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1669dee1ad47SJeff Kirsher {
1670dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
1671dee1ad47SJeff Kirsher
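/* enable auto-masking and auto-clearing for the configured vectors,
 * then unmask them so interrupts can fire
 */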
16725f3600ebSAlexander Duyck IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
16735f3600ebSAlexander Duyck IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
16745f3600ebSAlexander Duyck IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1675dee1ad47SJeff Kirsher }
1676dee1ad47SJeff Kirsher
1677dee1ad47SJeff Kirsher /**
1678de02decbSDon Skidmore * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1679de02decbSDon Skidmore * @adapter: board private structure
1680de02decbSDon Skidmore * @ring: structure containing ring specific data
1681de02decbSDon Skidmore *
1682de02decbSDon Skidmore * Configure the Tx descriptor ring after a reset.
1683de02decbSDon Skidmore **/
1684de02decbSDon Skidmore static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1685de02decbSDon Skidmore struct ixgbevf_ring *ring)
1686de02decbSDon Skidmore {
1687de02decbSDon Skidmore struct ixgbe_hw *hw = &adapter->hw;
1688de02decbSDon Skidmore u64 tdba = ring->dma;
1689de02decbSDon Skidmore int wait_loop = 10;
1690de02decbSDon Skidmore u32 txdctl = IXGBE_TXDCTL_ENABLE;
1691de02decbSDon Skidmore u8 reg_idx = ring->reg_idx;
1692de02decbSDon Skidmore
1693de02decbSDon Skidmore /* disable queue to avoid issues while updating state */
1694de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1695de02decbSDon Skidmore IXGBE_WRITE_FLUSH(hw);
1696de02decbSDon Skidmore
1697de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1698de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1699de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1700de02decbSDon Skidmore ring->count * sizeof(union ixgbe_adv_tx_desc));
1701de02decbSDon Skidmore
1702de02decbSDon Skidmore /* disable head writeback */
1703de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1704de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1705de02decbSDon Skidmore
1706de02decbSDon Skidmore /* enable relaxed ordering */
1707de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1708de02decbSDon Skidmore (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1709de02decbSDon Skidmore IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1710de02decbSDon Skidmore
1711de02decbSDon Skidmore /* reset head and tail pointers */
1712de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1713de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1714dbf8b0d8SMark Rustad ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1715de02decbSDon Skidmore
1716de02decbSDon Skidmore /* reset ntu and ntc to place SW in sync with hardware */
1717de02decbSDon Skidmore ring->next_to_clean = 0;
1718de02decbSDon Skidmore ring->next_to_use = 0;
1719de02decbSDon Skidmore
1720de02decbSDon Skidmore /* In order to avoid issues, WTHRESH + PTHRESH should always be equal
1721de02decbSDon Skidmore * to or less than the number of on-chip descriptors, which is
1722de02decbSDon Skidmore * currently 40.
1723de02decbSDon Skidmore */
1724de02decbSDon Skidmore txdctl |= (8 << 16); /* WTHRESH = 8 */
1725de02decbSDon Skidmore
1726de02decbSDon Skidmore /* Setting PTHRESH to 32 improves performance */
17278d055cc0SJacob Keller txdctl |= (1u << 8) | /* HTHRESH = 1 */
1728de02decbSDon Skidmore 32; /* PTHRESH = 32 */
1729de02decbSDon Skidmore
1730865a4d98SEmil Tantilov /* reinitialize tx_buffer_info */
1731865a4d98SEmil Tantilov memset(ring->tx_buffer_info, 0,
1732865a4d98SEmil Tantilov sizeof(struct ixgbevf_tx_buffer) * ring->count);
1733865a4d98SEmil Tantilov
1734e08400b7SEmil Tantilov clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
17354be87727SAlexander Duyck clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1736e08400b7SEmil Tantilov
1737de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1738de02decbSDon Skidmore
1739de02decbSDon Skidmore /* poll to verify queue is enabled */
1740de02decbSDon Skidmore do {
1741de02decbSDon Skidmore usleep_range(1000, 2000);
1742de02decbSDon Skidmore txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1743de02decbSDon Skidmore } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1744de02decbSDon Skidmore if (!wait_loop)
1745ee95053fSEmil Tantilov hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1746de02decbSDon Skidmore }
1747de02decbSDon Skidmore
1748de02decbSDon Skidmore /**
1749dee1ad47SJeff Kirsher * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1750dee1ad47SJeff Kirsher * @adapter: board private structure
1751dee1ad47SJeff Kirsher *
1752dee1ad47SJeff Kirsher * Configure the Tx unit of the MAC after a reset.
1753dee1ad47SJeff Kirsher **/
1754dee1ad47SJeff Kirsher static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1755dee1ad47SJeff Kirsher {
1756de02decbSDon Skidmore u32 i;
1757dee1ad47SJeff Kirsher
1758dee1ad47SJeff Kirsher /* Setup the HW Tx Head and Tail descriptor pointers */
1759de02decbSDon Skidmore for (i = 0; i < adapter->num_tx_queues; i++)
1760de02decbSDon Skidmore ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
176121092e9cSTony Nguyen for (i = 0; i < adapter->num_xdp_queues; i++)
176221092e9cSTony Nguyen ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1763dee1ad47SJeff Kirsher }
1764dee1ad47SJeff Kirsher
1765dee1ad47SJeff Kirsher #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1766dee1ad47SJeff Kirsher
1767f15c5ba5SEmil Tantilov static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1768f15c5ba5SEmil Tantilov struct ixgbevf_ring *ring, int index)
1769dee1ad47SJeff Kirsher {
1770dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
1771dee1ad47SJeff Kirsher u32 srrctl;
1772dee1ad47SJeff Kirsher
1773dee1ad47SJeff Kirsher srrctl = IXGBE_SRRCTL_DROP_EN;
1774dee1ad47SJeff Kirsher
1775bad17234SEmil Tantilov srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1776f15c5ba5SEmil Tantilov if (ring_uses_large_buffer(ring))
1777f15c5ba5SEmil Tantilov srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1778f15c5ba5SEmil Tantilov else
1779f15c5ba5SEmil Tantilov srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1780dee1ad47SJeff Kirsher srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1781dee1ad47SJeff Kirsher
1782dee1ad47SJeff Kirsher IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1783dee1ad47SJeff Kirsher }
1784dee1ad47SJeff Kirsher
17851bb9c639SDon Skidmore static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
17861bb9c639SDon Skidmore {
17871bb9c639SDon Skidmore struct ixgbe_hw *hw = &adapter->hw;
17881bb9c639SDon Skidmore
17891bb9c639SDon Skidmore /* PSRTYPE must be initialized in 82599 */
17901bb9c639SDon Skidmore u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
17911bb9c639SDon Skidmore IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
17921bb9c639SDon Skidmore IXGBE_PSRTYPE_L2HDR;
17931bb9c639SDon Skidmore
17941bb9c639SDon Skidmore if (adapter->num_rx_queues > 1)
17958d055cc0SJacob Keller psrtype |= BIT(29);
17961bb9c639SDon Skidmore
17971bb9c639SDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
17981bb9c639SDon Skidmore }
17991bb9c639SDon Skidmore
1800de02decbSDon Skidmore #define IXGBEVF_MAX_RX_DESC_POLL 10
1801de02decbSDon Skidmore static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1802de02decbSDon Skidmore struct ixgbevf_ring *ring)
1803de02decbSDon Skidmore {
1804de02decbSDon Skidmore struct ixgbe_hw *hw = &adapter->hw;
1805de02decbSDon Skidmore int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1806de02decbSDon Skidmore u32 rxdctl;
1807de02decbSDon Skidmore u8 reg_idx = ring->reg_idx;
1808de02decbSDon Skidmore
180926597802SMark Rustad if (IXGBE_REMOVED(hw->hw_addr))
181026597802SMark Rustad return;
1811de02decbSDon Skidmore rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1812de02decbSDon Skidmore rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1813de02decbSDon Skidmore
1814de02decbSDon Skidmore /* write value back with RXDCTL.ENABLE bit cleared */
1815de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1816de02decbSDon Skidmore
1817dec0d8e4SJeff Kirsher /* the hardware may take up to 100us to really disable the Rx queue */
1818de02decbSDon Skidmore do {
1819de02decbSDon Skidmore udelay(10);
1820de02decbSDon Skidmore rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1821de02decbSDon Skidmore } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1822de02decbSDon Skidmore
1823de02decbSDon Skidmore if (!wait_loop)
1824de02decbSDon Skidmore pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1825de02decbSDon Skidmore reg_idx);
1826de02decbSDon Skidmore }
1827de02decbSDon Skidmore
1828de02decbSDon Skidmore static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1829de02decbSDon Skidmore struct ixgbevf_ring *ring)
1830de02decbSDon Skidmore {
1831de02decbSDon Skidmore struct ixgbe_hw *hw = &adapter->hw;
1832de02decbSDon Skidmore int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1833de02decbSDon Skidmore u32 rxdctl;
1834de02decbSDon Skidmore u8 reg_idx = ring->reg_idx;
1835de02decbSDon Skidmore
183626597802SMark Rustad if (IXGBE_REMOVED(hw->hw_addr))
183726597802SMark Rustad return;
1838de02decbSDon Skidmore do {
1839de02decbSDon Skidmore usleep_range(1000, 2000);
1840de02decbSDon Skidmore rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1841de02decbSDon Skidmore } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1842de02decbSDon Skidmore
1843de02decbSDon Skidmore if (!wait_loop)
1844de02decbSDon Skidmore pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1845de02decbSDon Skidmore reg_idx);
1846de02decbSDon Skidmore }
1847de02decbSDon Skidmore
1848e60ae003STony Nguyen /**
1849e60ae003STony Nguyen * ixgbevf_init_rss_key - Initialize adapter RSS key
1850e60ae003STony Nguyen * @adapter: device handle
1851e60ae003STony Nguyen *
1852e60ae003STony Nguyen * Allocates and initializes the RSS key if it is not allocated.
1853e60ae003STony Nguyen **/
1854e60ae003STony Nguyen static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1855e60ae003STony Nguyen {
1856e60ae003STony Nguyen u32 *rss_key;
1857e60ae003STony Nguyen
1858e60ae003STony Nguyen if (!adapter->rss_key) {
1859e60ae003STony Nguyen rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1860e60ae003STony Nguyen if (unlikely(!rss_key))
1861e60ae003STony Nguyen return -ENOMEM;
1862e60ae003STony Nguyen
1863e60ae003STony Nguyen netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1864e60ae003STony Nguyen adapter->rss_key = rss_key;
1865e60ae003STony Nguyen }
1866e60ae003STony Nguyen
1867e60ae003STony Nguyen return 0;
1868e60ae003STony Nguyen }
1869e60ae003STony Nguyen
18709295edb4SEmil Tantilov static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
18719295edb4SEmil Tantilov {
18729295edb4SEmil Tantilov struct ixgbe_hw *hw = &adapter->hw;
18739295edb4SEmil Tantilov u32 vfmrqc = 0, vfreta = 0;
18749295edb4SEmil Tantilov u16 rss_i = adapter->num_rx_queues;
18759cba434fSEmil Tantilov u8 i, j;
18769295edb4SEmil Tantilov
18779295edb4SEmil Tantilov /* Fill out hash function seeds */
18789cba434fSEmil Tantilov for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1879e60ae003STony Nguyen IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
18809295edb4SEmil Tantilov
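/* each 32-bit VFRETA register packs four 8-bit redirection entries,
 * so the register is written out once every fourth entry
 */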
18819cba434fSEmil Tantilov for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
18829295edb4SEmil Tantilov if (j == rss_i)
18839295edb4SEmil Tantilov j = 0;
18849cba434fSEmil Tantilov
18859cba434fSEmil Tantilov adapter->rss_indir_tbl[i] = j;
18869cba434fSEmil Tantilov
18879cba434fSEmil Tantilov vfreta |= j << (i & 0x3) * 8;
18889cba434fSEmil Tantilov if ((i & 3) == 3) {
18899295edb4SEmil Tantilov IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
18909cba434fSEmil Tantilov vfreta = 0;
18919cba434fSEmil Tantilov }
18929295edb4SEmil Tantilov }
18939295edb4SEmil Tantilov
18949295edb4SEmil Tantilov /* Perform hash on these packet types */
18959295edb4SEmil Tantilov vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
18969295edb4SEmil Tantilov IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
18979295edb4SEmil Tantilov IXGBE_VFMRQC_RSS_FIELD_IPV6 |
18989295edb4SEmil Tantilov IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
18999295edb4SEmil Tantilov
19009295edb4SEmil Tantilov vfmrqc |= IXGBE_VFMRQC_RSSEN;
19019295edb4SEmil Tantilov
19029295edb4SEmil Tantilov IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
19039295edb4SEmil Tantilov }
19049295edb4SEmil Tantilov
1905de02decbSDon Skidmore static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1906de02decbSDon Skidmore struct ixgbevf_ring *ring)
1907de02decbSDon Skidmore {
1908de02decbSDon Skidmore struct ixgbe_hw *hw = &adapter->hw;
190924bff091SEmil Tantilov union ixgbe_adv_rx_desc *rx_desc;
1910de02decbSDon Skidmore u64 rdba = ring->dma;
1911de02decbSDon Skidmore u32 rxdctl;
1912de02decbSDon Skidmore u8 reg_idx = ring->reg_idx;
1913de02decbSDon Skidmore
1914de02decbSDon Skidmore /* disable queue to avoid issues while updating state */
1915de02decbSDon Skidmore rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1916de02decbSDon Skidmore ixgbevf_disable_rx_queue(adapter, ring);
1917de02decbSDon Skidmore
1918de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1919de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1920de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1921de02decbSDon Skidmore ring->count * sizeof(union ixgbe_adv_rx_desc));
1922de02decbSDon Skidmore
192333b0eb15SBabu Moger #ifndef CONFIG_SPARC
1924de02decbSDon Skidmore /* enable relaxed ordering */
1925de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1926de02decbSDon Skidmore IXGBE_DCA_RXCTRL_DESC_RRO_EN);
192733b0eb15SBabu Moger #else
192833b0eb15SBabu Moger IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
192933b0eb15SBabu Moger IXGBE_DCA_RXCTRL_DESC_RRO_EN |
193033b0eb15SBabu Moger IXGBE_DCA_RXCTRL_DATA_WRO_EN);
193133b0eb15SBabu Moger #endif
1932de02decbSDon Skidmore
1933de02decbSDon Skidmore /* reset head and tail pointers */
1934de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1935de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1936dbf8b0d8SMark Rustad ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1937de02decbSDon Skidmore
193840b8178bSEmil Tantilov /* initialize rx_buffer_info */
193940b8178bSEmil Tantilov memset(ring->rx_buffer_info, 0,
194040b8178bSEmil Tantilov sizeof(struct ixgbevf_rx_buffer) * ring->count);
194140b8178bSEmil Tantilov
194224bff091SEmil Tantilov /* initialize Rx descriptor 0 */
194324bff091SEmil Tantilov rx_desc = IXGBEVF_RX_DESC(ring, 0);
194424bff091SEmil Tantilov rx_desc->wb.upper.length = 0;
194524bff091SEmil Tantilov
1946de02decbSDon Skidmore /* reset ntu and ntc to place SW in sync with hardware */
1947de02decbSDon Skidmore ring->next_to_clean = 0;
1948de02decbSDon Skidmore ring->next_to_use = 0;
1949bad17234SEmil Tantilov ring->next_to_alloc = 0;
1950de02decbSDon Skidmore
1951f15c5ba5SEmil Tantilov ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1952de02decbSDon Skidmore
19531ab37e12SEmil Tantilov /* RXDCTL.RLPML does not work on 82599 */
19541ab37e12SEmil Tantilov if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
19551ab37e12SEmil Tantilov rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
19561ab37e12SEmil Tantilov IXGBE_RXDCTL_RLPML_EN);
19571ab37e12SEmil Tantilov
19581ab37e12SEmil Tantilov #if (PAGE_SIZE < 8192)
19591ab37e12SEmil Tantilov /* Limit the maximum frame size so we don't overrun the skb */
19601ab37e12SEmil Tantilov if (ring_uses_build_skb(ring) &&
19611ab37e12SEmil Tantilov !ring_uses_large_buffer(ring))
19621ab37e12SEmil Tantilov rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
19631ab37e12SEmil Tantilov IXGBE_RXDCTL_RLPML_EN;
19641ab37e12SEmil Tantilov #endif
19651ab37e12SEmil Tantilov }
1966bad17234SEmil Tantilov
1967de02decbSDon Skidmore rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1968de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1969de02decbSDon Skidmore
1970de02decbSDon Skidmore ixgbevf_rx_desc_queue_enable(adapter, ring);
1971095e2617SEmil Tantilov ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1972de02decbSDon Skidmore }
1973de02decbSDon Skidmore
1974f15c5ba5SEmil Tantilov static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1975f15c5ba5SEmil Tantilov struct ixgbevf_ring *rx_ring)
1976f15c5ba5SEmil Tantilov {
1977f15c5ba5SEmil Tantilov struct net_device *netdev = adapter->netdev;
1978f15c5ba5SEmil Tantilov unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1979f15c5ba5SEmil Tantilov
1980f15c5ba5SEmil Tantilov /* set build_skb and buffer size flags */
19811ab37e12SEmil Tantilov clear_ring_build_skb_enabled(rx_ring);
1982f15c5ba5SEmil Tantilov clear_ring_uses_large_buffer(rx_ring);
1983f15c5ba5SEmil Tantilov
1984f15c5ba5SEmil Tantilov if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1985f15c5ba5SEmil Tantilov return;
1986f15c5ba5SEmil Tantilov
1987fe68195dSSamuel Mendoza-Jonas if (PAGE_SIZE < 8192)
1988fe68195dSSamuel Mendoza-Jonas if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
1989fe68195dSSamuel Mendoza-Jonas set_ring_uses_large_buffer(rx_ring);
19901ab37e12SEmil Tantilov
1991fe68195dSSamuel Mendoza-Jonas /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
1992fe68195dSSamuel Mendoza-Jonas if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
1993f15c5ba5SEmil Tantilov return;
1994f15c5ba5SEmil Tantilov
1995fe68195dSSamuel Mendoza-Jonas set_ring_build_skb_enabled(rx_ring);
1996f15c5ba5SEmil Tantilov }
1997f15c5ba5SEmil Tantilov
1998dee1ad47SJeff Kirsher /**
1999dee1ad47SJeff Kirsher * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
2000dee1ad47SJeff Kirsher * @adapter: board private structure
2001dee1ad47SJeff Kirsher *
2002dee1ad47SJeff Kirsher * Configure the Rx unit of the MAC after a reset.
2003dee1ad47SJeff Kirsher **/
2004dee1ad47SJeff Kirsher static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
2005dee1ad47SJeff Kirsher {
2006bad17234SEmil Tantilov struct ixgbe_hw *hw = &adapter->hw;
2007bad17234SEmil Tantilov struct net_device *netdev = adapter->netdev;
20086a11e52bSTony Nguyen int i, ret;
2009dee1ad47SJeff Kirsher
20101bb9c639SDon Skidmore ixgbevf_setup_psrtype(adapter);
20119295edb4SEmil Tantilov if (hw->mac.type >= ixgbe_mac_X550_vf)
20129295edb4SEmil Tantilov ixgbevf_setup_vfmrqc(adapter);
2013dd1fe113SAlexander Duyck
201414b22cd9SEmil Tantilov spin_lock_bh(&adapter->mbx_lock);
2015bad17234SEmil Tantilov /* notify the PF of our intent to use this size of frame */
20166a11e52bSTony Nguyen ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
201714b22cd9SEmil Tantilov spin_unlock_bh(&adapter->mbx_lock);
20186a11e52bSTony Nguyen if (ret)
20196a11e52bSTony Nguyen dev_err(&adapter->pdev->dev,
20206a11e52bSTony Nguyen "Failed to set MTU at %d\n", netdev->mtu);
2021dee1ad47SJeff Kirsher
2022dee1ad47SJeff Kirsher /* Setup the HW Rx Head and Tail Descriptor Pointers and
2023dec0d8e4SJeff Kirsher * the Base and Length of the Rx Descriptor Ring
2024dec0d8e4SJeff Kirsher */
2025f15c5ba5SEmil Tantilov for (i = 0; i < adapter->num_rx_queues; i++) {
2026f15c5ba5SEmil Tantilov struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2027f15c5ba5SEmil Tantilov
2028f15c5ba5SEmil Tantilov ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2029f15c5ba5SEmil Tantilov ixgbevf_configure_rx_ring(adapter, rx_ring);
2030f15c5ba5SEmil Tantilov }
2031dee1ad47SJeff Kirsher }
2032dee1ad47SJeff Kirsher
203380d5c368SPatrick McHardy static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
203480d5c368SPatrick McHardy __be16 proto, u16 vid)
2035dee1ad47SJeff Kirsher {
2036dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2037dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
20382ddc7fe1SAlexander Duyck int err;
20392ddc7fe1SAlexander Duyck
204055fdd45bSJohn Fastabend spin_lock_bh(&adapter->mbx_lock);
20411c55ed76SAlexander Duyck
2042dee1ad47SJeff Kirsher /* add VID to filter table */
20432ddc7fe1SAlexander Duyck err = hw->mac.ops.set_vfta(hw, vid, 0, true);
20441c55ed76SAlexander Duyck
204555fdd45bSJohn Fastabend spin_unlock_bh(&adapter->mbx_lock);
20461c55ed76SAlexander Duyck
2047eac0b680SJan Sokolowski if (err) {
2048eac0b680SJan Sokolowski netdev_err(netdev, "VF could not set VLAN %d\n", vid);
2049eac0b680SJan Sokolowski
20502ddc7fe1SAlexander Duyck /* translate error return types so error makes sense */
20512ddc7fe1SAlexander Duyck if (err == IXGBE_ERR_MBX)
20522ddc7fe1SAlexander Duyck return -EIO;
20532ddc7fe1SAlexander Duyck
20542ddc7fe1SAlexander Duyck if (err == IXGBE_ERR_INVALID_ARGUMENT)
20552ddc7fe1SAlexander Duyck return -EACCES;
2056eac0b680SJan Sokolowski }
20572ddc7fe1SAlexander Duyck
2058dee1ad47SJeff Kirsher set_bit(vid, adapter->active_vlans);
20598e586137SJiri Pirko
20602ddc7fe1SAlexander Duyck return err;
2061dee1ad47SJeff Kirsher }
2062dee1ad47SJeff Kirsher
206380d5c368SPatrick McHardy static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
206480d5c368SPatrick McHardy __be16 proto, u16 vid)
2065dee1ad47SJeff Kirsher {
2066dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2067dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
206850985b5fSMark Rustad int err;
2069dee1ad47SJeff Kirsher
207055fdd45bSJohn Fastabend spin_lock_bh(&adapter->mbx_lock);
20711c55ed76SAlexander Duyck
2072dee1ad47SJeff Kirsher /* remove VID from filter table */
20732ddc7fe1SAlexander Duyck err = hw->mac.ops.set_vfta(hw, vid, 0, false);
20741c55ed76SAlexander Duyck
207555fdd45bSJohn Fastabend spin_unlock_bh(&adapter->mbx_lock);
20761c55ed76SAlexander Duyck
2077eac0b680SJan Sokolowski if (err)
2078eac0b680SJan Sokolowski netdev_err(netdev, "Could not remove VLAN %d\n", vid);
2079eac0b680SJan Sokolowski
2080dee1ad47SJeff Kirsher clear_bit(vid, adapter->active_vlans);
20818e586137SJiri Pirko
20822ddc7fe1SAlexander Duyck return err;
2083dee1ad47SJeff Kirsher }
2084dee1ad47SJeff Kirsher
2085dee1ad47SJeff Kirsher static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2086dee1ad47SJeff Kirsher {
2087dee1ad47SJeff Kirsher u16 vid;
2088dee1ad47SJeff Kirsher
2089dee1ad47SJeff Kirsher for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
209080d5c368SPatrick McHardy ixgbevf_vlan_rx_add_vid(adapter->netdev,
209180d5c368SPatrick McHardy htons(ETH_P_8021Q), vid);
2092dee1ad47SJeff Kirsher }
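/* Illustrative example: active_vlans is a VLAN_N_VID-bit bitmap kept in
 * software, so if VIDs 10 and 20 were added before a reset, bits 10 and 20
 * remain set and this loop simply replays two add_vid mailbox requests to
 * repopulate the PF's VLAN filter table for this VF.
 */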
2093dee1ad47SJeff Kirsher
2094dee1ad47SJeff Kirsher static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2095dee1ad47SJeff Kirsher {
2096dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2097dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
2098dee1ad47SJeff Kirsher int count = 0;
2099dee1ad47SJeff Kirsher
2100dee1ad47SJeff Kirsher if (!netdev_uc_empty(netdev)) {
2101dee1ad47SJeff Kirsher struct netdev_hw_addr *ha;
2102dec0d8e4SJeff Kirsher
2103dee1ad47SJeff Kirsher netdev_for_each_uc_addr(ha, netdev) {
2104dee1ad47SJeff Kirsher hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2105dee1ad47SJeff Kirsher udelay(200);
2106dee1ad47SJeff Kirsher }
2107dee1ad47SJeff Kirsher } else {
2108dec0d8e4SJeff Kirsher /* If the list is empty then send message to PF driver to
2109dec0d8e4SJeff Kirsher * clear all MAC VLANs on this VF.
2110dee1ad47SJeff Kirsher */
2111dee1ad47SJeff Kirsher hw->mac.ops.set_uc_addr(hw, 0, NULL);
2112dee1ad47SJeff Kirsher }
2113dee1ad47SJeff Kirsher
2114dee1ad47SJeff Kirsher return count;
2115dee1ad47SJeff Kirsher }
2116dee1ad47SJeff Kirsher
2117dee1ad47SJeff Kirsher /**
2118dee847f5SGreg Rose * ixgbevf_set_rx_mode - Multicast and unicast set
2119dee1ad47SJeff Kirsher * @netdev: network interface device structure
2120dee1ad47SJeff Kirsher *
2121dee1ad47SJeff Kirsher * The set_rx_mode entry point is called whenever the multicast address
2122dee847f5SGreg Rose * list, unicast address list or the network interface flags are updated.
2123dee847f5SGreg Rose * This routine is responsible for configuring the hardware for proper
2124dee847f5SGreg Rose * multicast mode and configuring requested unicast filters.
2125dee1ad47SJeff Kirsher **/
2126dee1ad47SJeff Kirsher static void ixgbevf_set_rx_mode(struct net_device *netdev)
2127dee1ad47SJeff Kirsher {
2128dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2129dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
21308443c1a4SHiroshi Shimamoto unsigned int flags = netdev->flags;
21318443c1a4SHiroshi Shimamoto int xcast_mode;
21328443c1a4SHiroshi Shimamoto
213341e544cdSDon Skidmore /* request the most inclusive mode we need */
213441e544cdSDon Skidmore if (flags & IFF_PROMISC)
213541e544cdSDon Skidmore xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
213641e544cdSDon Skidmore else if (flags & IFF_ALLMULTI)
213741e544cdSDon Skidmore xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
213841e544cdSDon Skidmore else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
213941e544cdSDon Skidmore xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
214041e544cdSDon Skidmore else
214141e544cdSDon Skidmore xcast_mode = IXGBEVF_XCAST_MODE_NONE;
214241e544cdSDon Skidmore
214355fdd45bSJohn Fastabend spin_lock_bh(&adapter->mbx_lock);
21441c55ed76SAlexander Duyck
21458b44a8a0STony Nguyen hw->mac.ops.update_xcast_mode(hw, xcast_mode);
21468443c1a4SHiroshi Shimamoto
2147dee1ad47SJeff Kirsher /* reprogram multicast list */
2148dee1ad47SJeff Kirsher hw->mac.ops.update_mc_addr_list(hw, netdev);
2149dee1ad47SJeff Kirsher
2150dee1ad47SJeff Kirsher ixgbevf_write_uc_addr_list(netdev);
21511c55ed76SAlexander Duyck
215255fdd45bSJohn Fastabend spin_unlock_bh(&adapter->mbx_lock);
2153dee1ad47SJeff Kirsher }
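/* Illustrative example: "ip link set <vf-netdev> promisc on" sets
 * IFF_PROMISC, so the next set_rx_mode call requests
 * IXGBEVF_XCAST_MODE_PROMISC over the mailbox.  Whether the PF actually
 * grants promiscuous or all-multicast mode typically depends on the VF
 * being marked trusted on the PF side (e.g. "ip link set <pf> vf 0 trust
 * on"); otherwise the PF may hold the VF to a more restrictive mode.
 */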
2154dee1ad47SJeff Kirsher
2155dee1ad47SJeff Kirsher static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2156dee1ad47SJeff Kirsher {
2157dee1ad47SJeff Kirsher int q_idx;
2158dee1ad47SJeff Kirsher struct ixgbevf_q_vector *q_vector;
2159dee1ad47SJeff Kirsher int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2160dee1ad47SJeff Kirsher
2161dee1ad47SJeff Kirsher for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2162dee1ad47SJeff Kirsher q_vector = adapter->q_vector[q_idx];
2163fa71ae27SAlexander Duyck napi_enable(&q_vector->napi);
2164dee1ad47SJeff Kirsher }
2165dee1ad47SJeff Kirsher }
2166dee1ad47SJeff Kirsher
2167dee1ad47SJeff Kirsher static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2168dee1ad47SJeff Kirsher {
2169dee1ad47SJeff Kirsher int q_idx;
2170dee1ad47SJeff Kirsher struct ixgbevf_q_vector *q_vector;
2171dee1ad47SJeff Kirsher int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2172dee1ad47SJeff Kirsher
2173dee1ad47SJeff Kirsher for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2174dee1ad47SJeff Kirsher q_vector = adapter->q_vector[q_idx];
2175dee1ad47SJeff Kirsher napi_disable(&q_vector->napi);
2176dee1ad47SJeff Kirsher }
2177dee1ad47SJeff Kirsher }
2178dee1ad47SJeff Kirsher
2179220fe050SDon Skidmore static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2180220fe050SDon Skidmore {
2181220fe050SDon Skidmore struct ixgbe_hw *hw = &adapter->hw;
2182220fe050SDon Skidmore unsigned int def_q = 0;
2183220fe050SDon Skidmore unsigned int num_tcs = 0;
21842dc571aaSEmil Tantilov unsigned int num_rx_queues = adapter->num_rx_queues;
21852dc571aaSEmil Tantilov unsigned int num_tx_queues = adapter->num_tx_queues;
2186220fe050SDon Skidmore int err;
2187220fe050SDon Skidmore
2188220fe050SDon Skidmore spin_lock_bh(&adapter->mbx_lock);
2189220fe050SDon Skidmore
2190220fe050SDon Skidmore /* fetch queue configuration from the PF */
2191220fe050SDon Skidmore err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2192220fe050SDon Skidmore
2193220fe050SDon Skidmore spin_unlock_bh(&adapter->mbx_lock);
2194220fe050SDon Skidmore
2195220fe050SDon Skidmore if (err)
2196220fe050SDon Skidmore return err;
2197220fe050SDon Skidmore
2198220fe050SDon Skidmore if (num_tcs > 1) {
21992dc571aaSEmil Tantilov /* we need only one Tx queue */
22002dc571aaSEmil Tantilov num_tx_queues = 1;
22012dc571aaSEmil Tantilov
2202220fe050SDon Skidmore /* update default Tx ring register index */
220387e70ab9SDon Skidmore adapter->tx_ring[0]->reg_idx = def_q;
2204220fe050SDon Skidmore
2205220fe050SDon Skidmore /* we need as many queues as traffic classes */
2206220fe050SDon Skidmore num_rx_queues = num_tcs;
2207220fe050SDon Skidmore }
2208220fe050SDon Skidmore
2209220fe050SDon Skidmore /* if we have a bad config abort request queue reset */
22102dc571aaSEmil Tantilov if ((adapter->num_rx_queues != num_rx_queues) ||
22112dc571aaSEmil Tantilov (adapter->num_tx_queues != num_tx_queues)) {
2212220fe050SDon Skidmore /* force mailbox timeout to prevent further messages */
2213220fe050SDon Skidmore hw->mbx.timeout = 0;
2214220fe050SDon Skidmore
2215220fe050SDon Skidmore /* wait for watchdog to come around and bail us out */
2216d5dd7c3fSEmil Tantilov set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2217220fe050SDon Skidmore }
2218220fe050SDon Skidmore
2219220fe050SDon Skidmore return 0;
2220220fe050SDon Skidmore }
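/* Illustrative example: if the PF reports num_tcs = 4 while this VF is
 * currently running with a plain 2 Rx / 2 Tx RSS layout, the checks above
 * detect the mismatch, zero the mailbox timeout so no further requests are
 * attempted, and set __IXGBEVF_QUEUE_RESET_REQUESTED so the service task
 * tears the queues down and rebuilds them with one Tx queue and one Rx
 * queue per traffic class.
 */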
2221220fe050SDon Skidmore
2222dee1ad47SJeff Kirsher static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2223dee1ad47SJeff Kirsher {
2224220fe050SDon Skidmore ixgbevf_configure_dcb(adapter);
2225220fe050SDon Skidmore
2226de02decbSDon Skidmore ixgbevf_set_rx_mode(adapter->netdev);
2227dee1ad47SJeff Kirsher
2228dee1ad47SJeff Kirsher ixgbevf_restore_vlan(adapter);
22297f68d430SShannon Nelson ixgbevf_ipsec_restore(adapter);
2230dee1ad47SJeff Kirsher
2231dee1ad47SJeff Kirsher ixgbevf_configure_tx(adapter);
2232dee1ad47SJeff Kirsher ixgbevf_configure_rx(adapter);
2233dee1ad47SJeff Kirsher }
2234dee1ad47SJeff Kirsher
2235dee1ad47SJeff Kirsher static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2236dee1ad47SJeff Kirsher {
2237dee1ad47SJeff Kirsher /* Only save pre-reset stats if there are some */
2238dee1ad47SJeff Kirsher if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2239dee1ad47SJeff Kirsher adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2240dee1ad47SJeff Kirsher adapter->stats.base_vfgprc;
2241dee1ad47SJeff Kirsher adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2242dee1ad47SJeff Kirsher adapter->stats.base_vfgptc;
2243dee1ad47SJeff Kirsher adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2244dee1ad47SJeff Kirsher adapter->stats.base_vfgorc;
2245dee1ad47SJeff Kirsher adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2246dee1ad47SJeff Kirsher adapter->stats.base_vfgotc;
2247dee1ad47SJeff Kirsher adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2248dee1ad47SJeff Kirsher adapter->stats.base_vfmprc;
2249dee1ad47SJeff Kirsher }
2250dee1ad47SJeff Kirsher }
2251dee1ad47SJeff Kirsher
2252dee1ad47SJeff Kirsher static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2253dee1ad47SJeff Kirsher {
2254dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
2255dee1ad47SJeff Kirsher
2256dee1ad47SJeff Kirsher adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2257dee1ad47SJeff Kirsher adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2258dee1ad47SJeff Kirsher adapter->stats.last_vfgorc |=
2259dee1ad47SJeff Kirsher (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2260dee1ad47SJeff Kirsher adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2261dee1ad47SJeff Kirsher adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2262dee1ad47SJeff Kirsher adapter->stats.last_vfgotc |=
2263dee1ad47SJeff Kirsher (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2264dee1ad47SJeff Kirsher adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2265dee1ad47SJeff Kirsher
2266dee1ad47SJeff Kirsher adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2267dee1ad47SJeff Kirsher adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2268dee1ad47SJeff Kirsher adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2269dee1ad47SJeff Kirsher adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2270dee1ad47SJeff Kirsher adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2271dee1ad47SJeff Kirsher }
2272dee1ad47SJeff Kirsher
227331186785SAlexander Duyck static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
227431186785SAlexander Duyck {
227531186785SAlexander Duyck struct ixgbe_hw *hw = &adapter->hw;
227664d8db7dSColin Ian King static const int api[] = {
2277339f2896SRadoslaw Tyl ixgbe_mbox_api_15,
227864d8db7dSColin Ian King ixgbe_mbox_api_14,
22797f68d430SShannon Nelson ixgbe_mbox_api_13,
228041e544cdSDon Skidmore ixgbe_mbox_api_12,
228194cf66f8SVlad Zolotarov ixgbe_mbox_api_11,
228256e94095SAlexander Duyck ixgbe_mbox_api_10,
228364d8db7dSColin Ian King ixgbe_mbox_api_unknown
228464d8db7dSColin Ian King };
228550985b5fSMark Rustad int err, idx = 0;
228631186785SAlexander Duyck
228755fdd45bSJohn Fastabend spin_lock_bh(&adapter->mbx_lock);
228831186785SAlexander Duyck
228931186785SAlexander Duyck while (api[idx] != ixgbe_mbox_api_unknown) {
22907921f4dcSAlexander Duyck err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
229131186785SAlexander Duyck if (!err)
229231186785SAlexander Duyck break;
229331186785SAlexander Duyck idx++;
229431186785SAlexander Duyck }
229531186785SAlexander Duyck
2296339f2896SRadoslaw Tyl if (hw->api_version >= ixgbe_mbox_api_15) {
2297339f2896SRadoslaw Tyl hw->mbx.ops.init_params(hw);
2298339f2896SRadoslaw Tyl memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
2299339f2896SRadoslaw Tyl sizeof(struct ixgbe_mbx_operations));
2300339f2896SRadoslaw Tyl }
2301339f2896SRadoslaw Tyl
230255fdd45bSJohn Fastabend spin_unlock_bh(&adapter->mbx_lock);
230331186785SAlexander Duyck }
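/* Illustrative walk-through: the api[] table above is ordered newest to
 * oldest, so the VF first offers mailbox API 1.5 and keeps stepping down
 * (1.4, 1.3, 1.2, 1.1, 1.0) until the PF acks one.  Only when the
 * negotiated version is 1.5 or newer are the mailbox ops re-initialized
 * and switched to ixgbevf_mbx_ops, since older PF drivers expect the
 * legacy mailbox handshake.
 */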
230431186785SAlexander Duyck
2305795180d8SGreg Rose static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2306dee1ad47SJeff Kirsher {
2307dee1ad47SJeff Kirsher struct net_device *netdev = adapter->netdev;
2308443ebdd6SSlawomir Mrozowicz struct pci_dev *pdev = adapter->pdev;
2309dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
2310443ebdd6SSlawomir Mrozowicz bool state;
2311dee1ad47SJeff Kirsher
2312dee1ad47SJeff Kirsher ixgbevf_configure_msix(adapter);
2313dee1ad47SJeff Kirsher
231455fdd45bSJohn Fastabend spin_lock_bh(&adapter->mbx_lock);
23151c55ed76SAlexander Duyck
2316dee1ad47SJeff Kirsher if (is_valid_ether_addr(hw->mac.addr))
2317dee1ad47SJeff Kirsher hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2318dee1ad47SJeff Kirsher else
2319dee1ad47SJeff Kirsher hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2320dee1ad47SJeff Kirsher
232155fdd45bSJohn Fastabend spin_unlock_bh(&adapter->mbx_lock);
23221c55ed76SAlexander Duyck
2323443ebdd6SSlawomir Mrozowicz state = adapter->link_state;
2324443ebdd6SSlawomir Mrozowicz hw->mac.ops.get_link_state(hw, &adapter->link_state);
2325443ebdd6SSlawomir Mrozowicz if (state && state != adapter->link_state)
2326443ebdd6SSlawomir Mrozowicz dev_info(&pdev->dev, "VF is administratively disabled\n");
2327443ebdd6SSlawomir Mrozowicz
23284e857c58SPeter Zijlstra smp_mb__before_atomic();
2329dee1ad47SJeff Kirsher clear_bit(__IXGBEVF_DOWN, &adapter->state);
2330dee1ad47SJeff Kirsher ixgbevf_napi_enable_all(adapter);
2331dee1ad47SJeff Kirsher
2332d9bdb57fSEmil Tantilov /* clear any pending interrupts, may auto mask */
2333d9bdb57fSEmil Tantilov IXGBE_READ_REG(hw, IXGBE_VTEICR);
2334d9bdb57fSEmil Tantilov ixgbevf_irq_enable(adapter);
2335d9bdb57fSEmil Tantilov
2336dee1ad47SJeff Kirsher /* enable transmits */
2337dee1ad47SJeff Kirsher netif_tx_start_all_queues(netdev);
2338dee1ad47SJeff Kirsher
2339dee1ad47SJeff Kirsher ixgbevf_save_reset_stats(adapter);
2340dee1ad47SJeff Kirsher ixgbevf_init_last_counter_stats(adapter);
2341dee1ad47SJeff Kirsher
23424b2cd27fSAlexander Duyck hw->mac.get_link_status = 1;
23439ac5c5ccSEmil Tantilov mod_timer(&adapter->service_timer, jiffies);
2344dee1ad47SJeff Kirsher }
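/* Illustrative note: the get_link_state mailbox query above reflects the
 * administrative state configured on the PF, e.g. a hypothetical
 * "ip link set <pf> vf 0 state disable" makes the PF report the VF link as
 * administratively down; in that case the message above is logged and the
 * link is treated as down until the state is switched back to auto/enable.
 */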
2345dee1ad47SJeff Kirsher
2346795180d8SGreg Rose void ixgbevf_up(struct ixgbevf_adapter *adapter)
2347dee1ad47SJeff Kirsher {
2348dee1ad47SJeff Kirsher ixgbevf_configure(adapter);
2349dee1ad47SJeff Kirsher
2350795180d8SGreg Rose ixgbevf_up_complete(adapter);
2351dee1ad47SJeff Kirsher }
2352dee1ad47SJeff Kirsher
2353dee1ad47SJeff Kirsher /**
2354dee1ad47SJeff Kirsher * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2355dee1ad47SJeff Kirsher * @rx_ring: ring to free buffers from
2356dee1ad47SJeff Kirsher **/
235705d063aaSEmil Tantilov static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2358dee1ad47SJeff Kirsher {
235940b8178bSEmil Tantilov u16 i = rx_ring->next_to_clean;
2360dee1ad47SJeff Kirsher
2361bad17234SEmil Tantilov /* Free Rx ring sk_buff */
2362bad17234SEmil Tantilov if (rx_ring->skb) {
2363bad17234SEmil Tantilov dev_kfree_skb(rx_ring->skb);
2364bad17234SEmil Tantilov rx_ring->skb = NULL;
2365bad17234SEmil Tantilov }
2366bad17234SEmil Tantilov
2367bad17234SEmil Tantilov /* Free all the Rx ring pages */
236840b8178bSEmil Tantilov while (i != rx_ring->next_to_alloc) {
2369bad17234SEmil Tantilov struct ixgbevf_rx_buffer *rx_buffer;
2370dee1ad47SJeff Kirsher
2371bad17234SEmil Tantilov rx_buffer = &rx_ring->rx_buffer_info[i];
237216b35949SEmil Tantilov
237316b35949SEmil Tantilov /* Invalidate cache lines that may have been written to by
237416b35949SEmil Tantilov * device so that we avoid corrupting memory.
237516b35949SEmil Tantilov */
237616b35949SEmil Tantilov dma_sync_single_range_for_cpu(rx_ring->dev,
237716b35949SEmil Tantilov rx_buffer->dma,
237816b35949SEmil Tantilov rx_buffer->page_offset,
2379f15c5ba5SEmil Tantilov ixgbevf_rx_bufsz(rx_ring),
238016b35949SEmil Tantilov DMA_FROM_DEVICE);
238116b35949SEmil Tantilov
238216b35949SEmil Tantilov /* free resources associated with mapping */
238316b35949SEmil Tantilov dma_unmap_page_attrs(rx_ring->dev,
238416b35949SEmil Tantilov rx_buffer->dma,
2385f15c5ba5SEmil Tantilov ixgbevf_rx_pg_size(rx_ring),
238616b35949SEmil Tantilov DMA_FROM_DEVICE,
238716b35949SEmil Tantilov IXGBEVF_RX_DMA_ATTR);
238816b35949SEmil Tantilov
238935074d69SEmil Tantilov __page_frag_cache_drain(rx_buffer->page,
239035074d69SEmil Tantilov rx_buffer->pagecnt_bias);
239135074d69SEmil Tantilov
239240b8178bSEmil Tantilov i++;
239340b8178bSEmil Tantilov if (i == rx_ring->count)
239440b8178bSEmil Tantilov i = 0;
2395dee1ad47SJeff Kirsher }
2396dee1ad47SJeff Kirsher
239740b8178bSEmil Tantilov rx_ring->next_to_alloc = 0;
239840b8178bSEmil Tantilov rx_ring->next_to_clean = 0;
239940b8178bSEmil Tantilov rx_ring->next_to_use = 0;
2400dee1ad47SJeff Kirsher }
2401dee1ad47SJeff Kirsher
2402dee1ad47SJeff Kirsher /**
2403dee1ad47SJeff Kirsher * ixgbevf_clean_tx_ring - Free Tx Buffers
2404dee1ad47SJeff Kirsher * @tx_ring: ring to be cleaned
2405dee1ad47SJeff Kirsher **/
240605d063aaSEmil Tantilov static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2407dee1ad47SJeff Kirsher {
2408865a4d98SEmil Tantilov u16 i = tx_ring->next_to_clean;
2409865a4d98SEmil Tantilov struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2410dee1ad47SJeff Kirsher
2411865a4d98SEmil Tantilov while (i != tx_ring->next_to_use) {
2412865a4d98SEmil Tantilov union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2413dee1ad47SJeff Kirsher
2414dee1ad47SJeff Kirsher /* Free all the Tx ring sk_buffs */
241521092e9cSTony Nguyen if (ring_is_xdp(tx_ring))
241621092e9cSTony Nguyen page_frag_free(tx_buffer->data);
241721092e9cSTony Nguyen else
2418865a4d98SEmil Tantilov dev_kfree_skb_any(tx_buffer->skb);
2419865a4d98SEmil Tantilov
2420865a4d98SEmil Tantilov /* unmap skb header data */
2421865a4d98SEmil Tantilov dma_unmap_single(tx_ring->dev,
2422865a4d98SEmil Tantilov dma_unmap_addr(tx_buffer, dma),
2423865a4d98SEmil Tantilov dma_unmap_len(tx_buffer, len),
2424865a4d98SEmil Tantilov DMA_TO_DEVICE);
2425865a4d98SEmil Tantilov
2426865a4d98SEmil Tantilov /* check for eop_desc to determine the end of the packet */
2427865a4d98SEmil Tantilov eop_desc = tx_buffer->next_to_watch;
2428865a4d98SEmil Tantilov tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2429865a4d98SEmil Tantilov
2430865a4d98SEmil Tantilov /* unmap remaining buffers */
2431865a4d98SEmil Tantilov while (tx_desc != eop_desc) {
2432865a4d98SEmil Tantilov tx_buffer++;
2433865a4d98SEmil Tantilov tx_desc++;
2434865a4d98SEmil Tantilov i++;
2435865a4d98SEmil Tantilov if (unlikely(i == tx_ring->count)) {
2436865a4d98SEmil Tantilov i = 0;
2437865a4d98SEmil Tantilov tx_buffer = tx_ring->tx_buffer_info;
2438865a4d98SEmil Tantilov tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2439dee1ad47SJeff Kirsher }
2440dee1ad47SJeff Kirsher
2441865a4d98SEmil Tantilov /* unmap any remaining paged data */
2442865a4d98SEmil Tantilov if (dma_unmap_len(tx_buffer, len))
2443865a4d98SEmil Tantilov dma_unmap_page(tx_ring->dev,
2444865a4d98SEmil Tantilov dma_unmap_addr(tx_buffer, dma),
2445865a4d98SEmil Tantilov dma_unmap_len(tx_buffer, len),
2446865a4d98SEmil Tantilov DMA_TO_DEVICE);
2447865a4d98SEmil Tantilov }
2448dee1ad47SJeff Kirsher
2449865a4d98SEmil Tantilov /* move us one more past the eop_desc for start of next pkt */
2450865a4d98SEmil Tantilov tx_buffer++;
2451865a4d98SEmil Tantilov i++;
2452865a4d98SEmil Tantilov if (unlikely(i == tx_ring->count)) {
2453865a4d98SEmil Tantilov i = 0;
2454865a4d98SEmil Tantilov tx_buffer = tx_ring->tx_buffer_info;
2455865a4d98SEmil Tantilov }
2456865a4d98SEmil Tantilov }
2457865a4d98SEmil Tantilov
2458865a4d98SEmil Tantilov /* reset next_to_use and next_to_clean */
2459865a4d98SEmil Tantilov tx_ring->next_to_use = 0;
2460865a4d98SEmil Tantilov tx_ring->next_to_clean = 0;
2461865a4d98SEmil Tantilov
2462dee1ad47SJeff Kirsher }
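/* Illustrative example of the walk above: with a ring of 8 descriptors,
 * next_to_clean = 6 and next_to_use = 2, the in-flight buffers sit at
 * indexes 6, 7, 0 and 1; the outer loop frees the skb (or XDP frame) and
 * head mapping of each packet, the inner loop unmaps any extra fragments
 * up to eop_desc, and both indexes are then reset to 0.
 */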
2463dee1ad47SJeff Kirsher
2464dee1ad47SJeff Kirsher /**
2465dee1ad47SJeff Kirsher * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2466dee1ad47SJeff Kirsher * @adapter: board private structure
2467dee1ad47SJeff Kirsher **/
2468dee1ad47SJeff Kirsher static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2469dee1ad47SJeff Kirsher {
2470dee1ad47SJeff Kirsher int i;
2471dee1ad47SJeff Kirsher
2472dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_rx_queues; i++)
247305d063aaSEmil Tantilov ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2474dee1ad47SJeff Kirsher }
2475dee1ad47SJeff Kirsher
2476dee1ad47SJeff Kirsher /**
2477dee1ad47SJeff Kirsher * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2478dee1ad47SJeff Kirsher * @adapter: board private structure
2479dee1ad47SJeff Kirsher **/
2480dee1ad47SJeff Kirsher static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2481dee1ad47SJeff Kirsher {
2482dee1ad47SJeff Kirsher int i;
2483dee1ad47SJeff Kirsher
2484dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_tx_queues; i++)
248505d063aaSEmil Tantilov ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
248621092e9cSTony Nguyen for (i = 0; i < adapter->num_xdp_queues; i++)
248721092e9cSTony Nguyen ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2488dee1ad47SJeff Kirsher }
2489dee1ad47SJeff Kirsher
2490dee1ad47SJeff Kirsher void ixgbevf_down(struct ixgbevf_adapter *adapter)
2491dee1ad47SJeff Kirsher {
2492dee1ad47SJeff Kirsher struct net_device *netdev = adapter->netdev;
2493dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
2494de02decbSDon Skidmore int i;
2495dee1ad47SJeff Kirsher
2496dee1ad47SJeff Kirsher /* signal that we are down to the interrupt handler */
24975b346dc9SMark Rustad if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
24985b346dc9SMark Rustad return; /* do nothing if already down */
2499858c3ddaSDon Skidmore
2500dec0d8e4SJeff Kirsher /* disable all enabled Rx queues */
2501858c3ddaSDon Skidmore for (i = 0; i < adapter->num_rx_queues; i++)
250287e70ab9SDon Skidmore ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2503dee1ad47SJeff Kirsher
2504d9bdb57fSEmil Tantilov usleep_range(10000, 20000);
2505dee1ad47SJeff Kirsher
2506dee1ad47SJeff Kirsher netif_tx_stop_all_queues(netdev);
2507dee1ad47SJeff Kirsher
2508d9bdb57fSEmil Tantilov /* call carrier off first to avoid false dev_watchdog timeouts */
2509d9bdb57fSEmil Tantilov netif_carrier_off(netdev);
2510d9bdb57fSEmil Tantilov netif_tx_disable(netdev);
2511d9bdb57fSEmil Tantilov
2512dee1ad47SJeff Kirsher ixgbevf_irq_disable(adapter);
2513dee1ad47SJeff Kirsher
2514dee1ad47SJeff Kirsher ixgbevf_napi_disable_all(adapter);
2515dee1ad47SJeff Kirsher
25169ac5c5ccSEmil Tantilov del_timer_sync(&adapter->service_timer);
2517dee1ad47SJeff Kirsher
2518dee1ad47SJeff Kirsher /* disable transmits in the hardware now that interrupts are off */
2519dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_tx_queues; i++) {
2520de02decbSDon Skidmore u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2521de02decbSDon Skidmore
2522de02decbSDon Skidmore IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2523de02decbSDon Skidmore IXGBE_TXDCTL_SWFLSH);
2524dee1ad47SJeff Kirsher }
2525dee1ad47SJeff Kirsher
252621092e9cSTony Nguyen for (i = 0; i < adapter->num_xdp_queues; i++) {
252721092e9cSTony Nguyen u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
252821092e9cSTony Nguyen
252921092e9cSTony Nguyen IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
253021092e9cSTony Nguyen IXGBE_TXDCTL_SWFLSH);
253121092e9cSTony Nguyen }
253221092e9cSTony Nguyen
2533dee1ad47SJeff Kirsher if (!pci_channel_offline(adapter->pdev))
2534dee1ad47SJeff Kirsher ixgbevf_reset(adapter);
2535dee1ad47SJeff Kirsher
2536dee1ad47SJeff Kirsher ixgbevf_clean_all_tx_rings(adapter);
2537dee1ad47SJeff Kirsher ixgbevf_clean_all_rx_rings(adapter);
2538dee1ad47SJeff Kirsher }
2539dee1ad47SJeff Kirsher
2540dee1ad47SJeff Kirsher void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2541dee1ad47SJeff Kirsher {
2542dee1ad47SJeff Kirsher while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2543dee1ad47SJeff Kirsher msleep(1);
2544dee1ad47SJeff Kirsher
2545dee1ad47SJeff Kirsher ixgbevf_down(adapter);
2546d7cb9da1SRadoslaw Tyl pci_set_master(adapter->pdev);
2547dee1ad47SJeff Kirsher ixgbevf_up(adapter);
2548dee1ad47SJeff Kirsher
2549dee1ad47SJeff Kirsher clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2550dee1ad47SJeff Kirsher }
2551dee1ad47SJeff Kirsher
2552dee1ad47SJeff Kirsher void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2553dee1ad47SJeff Kirsher {
2554dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
2555dee1ad47SJeff Kirsher struct net_device *netdev = adapter->netdev;
2556dee1ad47SJeff Kirsher
2557798e381aSDon Skidmore if (hw->mac.ops.reset_hw(hw)) {
2558dee1ad47SJeff Kirsher hw_dbg(hw, "PF still resetting\n");
2559798e381aSDon Skidmore } else {
2560dee1ad47SJeff Kirsher hw->mac.ops.init_hw(hw);
2561798e381aSDon Skidmore ixgbevf_negotiate_api(adapter);
2562798e381aSDon Skidmore }
2563dee1ad47SJeff Kirsher
2564dee1ad47SJeff Kirsher if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2565f3956ebbSJakub Kicinski eth_hw_addr_set(netdev, adapter->hw.mac.addr);
256691a76baaSEmil Tantilov ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2567dee1ad47SJeff Kirsher }
2568e66c92adSEmil Tantilov
2569e66c92adSEmil Tantilov adapter->last_reset = jiffies;
2570dee1ad47SJeff Kirsher }
2571dee1ad47SJeff Kirsher
2572e45dd5feSJakub Kicinski static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2573dee1ad47SJeff Kirsher int vectors)
2574dee1ad47SJeff Kirsher {
2575a5f9337bSEmil Tantilov int vector_threshold;
2576dee1ad47SJeff Kirsher
2577fa71ae27SAlexander Duyck /* We'll want at least 2 (vector_threshold):
2578fa71ae27SAlexander Duyck * 1) TxQ[0] + RxQ[0] handler
2579fa71ae27SAlexander Duyck * 2) Other (Link Status Change, etc.)
2580dee1ad47SJeff Kirsher */
2581dee1ad47SJeff Kirsher vector_threshold = MIN_MSIX_COUNT;
2582dee1ad47SJeff Kirsher
2583dee1ad47SJeff Kirsher /* The more we get, the more we will assign to Tx/Rx Cleanup
2584dee1ad47SJeff Kirsher * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2585dee1ad47SJeff Kirsher * Right now, we simply care about how many we'll get; we'll
2586dee1ad47SJeff Kirsher * set them up later while requesting irq's.
2587dee1ad47SJeff Kirsher */
25885c1e3588SAlexander Gordeev vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
25895c1e3588SAlexander Gordeev vector_threshold, vectors);
2590dee1ad47SJeff Kirsher
25915c1e3588SAlexander Gordeev if (vectors < 0) {
2592e45dd5feSJakub Kicinski dev_err(&adapter->pdev->dev,
2593dee1ad47SJeff Kirsher "Unable to allocate MSI-X interrupts\n");
2594dee1ad47SJeff Kirsher kfree(adapter->msix_entries);
2595dee1ad47SJeff Kirsher adapter->msix_entries = NULL;
25965c1e3588SAlexander Gordeev return vectors;
25975c1e3588SAlexander Gordeev }
25985c1e3588SAlexander Gordeev
25995c1e3588SAlexander Gordeev /* Adjust for only the vectors we'll use, which is minimum
2600dee1ad47SJeff Kirsher * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2601dee1ad47SJeff Kirsher * vectors we were allocated.
2602dee1ad47SJeff Kirsher */
2603dee1ad47SJeff Kirsher adapter->num_msix_vectors = vectors;
2604dee847f5SGreg Rose
26055c1e3588SAlexander Gordeev return 0;
2606dee1ad47SJeff Kirsher }
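/* Illustrative note: pci_enable_msix_range() may grant any count between
 * vector_threshold (2: one queue vector plus the mailbox/"other" vector)
 * and the requested maximum.  So a VF that asks for 3 vectors but is only
 * granted 2 still comes up, just with a single combined Tx/Rx queue
 * vector; only a negative return, meaning even the 2-vector minimum could
 * not be met, aborts and frees msix_entries.
 */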
2607dee1ad47SJeff Kirsher
260849ce9c2cSBen Hutchings /**
260949ce9c2cSBen Hutchings * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2610dee1ad47SJeff Kirsher * @adapter: board private structure to initialize
2611dee1ad47SJeff Kirsher *
2612dee1ad47SJeff Kirsher * This is the top level queue allocation routine. The order here is very
2613dee1ad47SJeff Kirsher * important, starting with the largest set of features turned on at once,
2614dee1ad47SJeff Kirsher * and ending with the smallest set of features. This way large combinations
2615dee1ad47SJeff Kirsher * can be allocated if they're turned on, and smaller combinations are the
2616dee1ad47SJeff Kirsher * fall through conditions.
2617dee1ad47SJeff Kirsher *
2618dee1ad47SJeff Kirsher **/
2619dee1ad47SJeff Kirsher static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2620dee1ad47SJeff Kirsher {
2621220fe050SDon Skidmore struct ixgbe_hw *hw = &adapter->hw;
2622220fe050SDon Skidmore unsigned int def_q = 0;
2623220fe050SDon Skidmore unsigned int num_tcs = 0;
2624220fe050SDon Skidmore int err;
2625220fe050SDon Skidmore
2626dee1ad47SJeff Kirsher /* Start with base case */
2627dee1ad47SJeff Kirsher adapter->num_rx_queues = 1;
2628dee1ad47SJeff Kirsher adapter->num_tx_queues = 1;
262921092e9cSTony Nguyen adapter->num_xdp_queues = 0;
2630220fe050SDon Skidmore
2631220fe050SDon Skidmore spin_lock_bh(&adapter->mbx_lock);
2632220fe050SDon Skidmore
2633220fe050SDon Skidmore /* fetch queue configuration from the PF */
2634220fe050SDon Skidmore err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2635220fe050SDon Skidmore
2636220fe050SDon Skidmore spin_unlock_bh(&adapter->mbx_lock);
2637220fe050SDon Skidmore
2638220fe050SDon Skidmore if (err)
2639220fe050SDon Skidmore return;
2640220fe050SDon Skidmore
2641220fe050SDon Skidmore /* we need as many queues as traffic classes */
26422dc571aaSEmil Tantilov if (num_tcs > 1) {
2643220fe050SDon Skidmore adapter->num_rx_queues = num_tcs;
26442dc571aaSEmil Tantilov } else {
26452dc571aaSEmil Tantilov u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
26462dc571aaSEmil Tantilov
26472dc571aaSEmil Tantilov switch (hw->api_version) {
26482dc571aaSEmil Tantilov case ixgbe_mbox_api_11:
264994cf66f8SVlad Zolotarov case ixgbe_mbox_api_12:
265041e544cdSDon Skidmore case ixgbe_mbox_api_13:
26517f68d430SShannon Nelson case ixgbe_mbox_api_14:
2652339f2896SRadoslaw Tyl case ixgbe_mbox_api_15:
265321092e9cSTony Nguyen if (adapter->xdp_prog &&
265421092e9cSTony Nguyen hw->mac.max_tx_queues == rss)
265521092e9cSTony Nguyen rss = rss > 3 ? 2 : 1;
265621092e9cSTony Nguyen
26572dc571aaSEmil Tantilov adapter->num_rx_queues = rss;
26582dc571aaSEmil Tantilov adapter->num_tx_queues = rss;
265921092e9cSTony Nguyen adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
2660d8f0c306SGustavo A. R. Silva break;
26612dc571aaSEmil Tantilov default:
26622dc571aaSEmil Tantilov break;
26632dc571aaSEmil Tantilov }
26642dc571aaSEmil Tantilov }
2665dee1ad47SJeff Kirsher }
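/* Illustrative example: on a 4-CPU host where the PF exposes 4 Tx queues,
 * plain RSS gives 4 Rx / 4 Tx queues.  Attaching an XDP program triggers
 * the clamp above: since max_tx_queues == rss, rss is reduced to 2,
 * yielding 2 Rx queues, 2 regular Tx queues and 2 XDP Tx queues, so the
 * regular and XDP Tx rings together still fit in the 4 hardware Tx queues.
 */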
2666dee1ad47SJeff Kirsher
2667dee1ad47SJeff Kirsher /**
2668dee1ad47SJeff Kirsher * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2669dee1ad47SJeff Kirsher * @adapter: board private structure to initialize
2670dee1ad47SJeff Kirsher *
2671dee1ad47SJeff Kirsher * Attempt to configure the interrupts using the best available
2672dee1ad47SJeff Kirsher * capabilities of the hardware and the kernel.
2673dee1ad47SJeff Kirsher **/
2674dee1ad47SJeff Kirsher static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2675dee1ad47SJeff Kirsher {
2676dee1ad47SJeff Kirsher int vector, v_budget;
2677dee1ad47SJeff Kirsher
2678dec0d8e4SJeff Kirsher /* It's easy to be greedy for MSI-X vectors, but it really
2679dee1ad47SJeff Kirsher * doesn't do us much good if we have a lot more vectors
2680dee1ad47SJeff Kirsher * than CPUs. So let's be conservative and only ask for
2681fa71ae27SAlexander Duyck * (roughly) the same number of vectors as there are CPUs.
2682fa71ae27SAlexander Duyck * The default is to use pairs of vectors.
2683dee1ad47SJeff Kirsher */
2684fa71ae27SAlexander Duyck v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2685fa71ae27SAlexander Duyck v_budget = min_t(int, v_budget, num_online_cpus());
2686fa71ae27SAlexander Duyck v_budget += NON_Q_VECTORS;
2687dee1ad47SJeff Kirsher
2688dee1ad47SJeff Kirsher adapter->msix_entries = kcalloc(v_budget,
2689dee1ad47SJeff Kirsher sizeof(struct msix_entry), GFP_KERNEL);
269050985b5fSMark Rustad if (!adapter->msix_entries)
269150985b5fSMark Rustad return -ENOMEM;
2692dee1ad47SJeff Kirsher
2693dee1ad47SJeff Kirsher for (vector = 0; vector < v_budget; vector++)
2694dee1ad47SJeff Kirsher adapter->msix_entries[vector].entry = vector;
2695dee1ad47SJeff Kirsher
269621c046e4SEmil Tantilov /* A failure in MSI-X entry allocation isn't fatal, but the VF driver
269721c046e4SEmil Tantilov * does not support any other modes, so we will simply fail here. Note
269821c046e4SEmil Tantilov * that we clean up the msix_entries pointer else-where.
269921c046e4SEmil Tantilov */
270021c046e4SEmil Tantilov return ixgbevf_acquire_msix_vectors(adapter, v_budget);
270121c046e4SEmil Tantilov }
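/* Illustrative arithmetic for the budget above: with 2 Rx and 2 Tx queues
 * on a host with 4 or more online CPUs, v_budget = max(2, 2) = 2, clamped
 * to min(2, num_online_cpus()) = 2, plus NON_Q_VECTORS (one slot for the
 * mailbox/"other" interrupt), so 3 MSI-X entries are requested from the
 * PCI core.
 */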
2702dee1ad47SJeff Kirsher
270321c046e4SEmil Tantilov static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
270421c046e4SEmil Tantilov struct ixgbevf_ring_container *head)
270521c046e4SEmil Tantilov {
270621c046e4SEmil Tantilov ring->next = head->ring;
270721c046e4SEmil Tantilov head->ring = ring;
270821c046e4SEmil Tantilov head->count++;
270921c046e4SEmil Tantilov }
271050985b5fSMark Rustad
271121c046e4SEmil Tantilov /**
271221c046e4SEmil Tantilov * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
271321c046e4SEmil Tantilov * @adapter: board private structure to initialize
271421c046e4SEmil Tantilov * @v_idx: index of vector in adapter struct
271521c046e4SEmil Tantilov * @txr_count: number of Tx rings for q vector
271621c046e4SEmil Tantilov * @txr_idx: index of first Tx ring to assign
271721092e9cSTony Nguyen * @xdp_count: total number of XDP rings to allocate
271821092e9cSTony Nguyen * @xdp_idx: index of first XDP ring to allocate
271921c046e4SEmil Tantilov * @rxr_count: number of Rx rings for q vector
272021c046e4SEmil Tantilov * @rxr_idx: index of first Rx ring to assign
272121c046e4SEmil Tantilov *
272221c046e4SEmil Tantilov * We allocate one q_vector. If allocation fails we return -ENOMEM.
272321c046e4SEmil Tantilov **/
272421c046e4SEmil Tantilov static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
272521c046e4SEmil Tantilov int txr_count, int txr_idx,
272621092e9cSTony Nguyen int xdp_count, int xdp_idx,
272721c046e4SEmil Tantilov int rxr_count, int rxr_idx)
272821c046e4SEmil Tantilov {
272921c046e4SEmil Tantilov struct ixgbevf_q_vector *q_vector;
273021092e9cSTony Nguyen int reg_idx = txr_idx + xdp_idx;
273121c046e4SEmil Tantilov struct ixgbevf_ring *ring;
273221c046e4SEmil Tantilov int ring_count, size;
273321c046e4SEmil Tantilov
273421092e9cSTony Nguyen ring_count = txr_count + xdp_count + rxr_count;
273521c046e4SEmil Tantilov size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
273621c046e4SEmil Tantilov
273721c046e4SEmil Tantilov /* allocate q_vector and rings */
273821c046e4SEmil Tantilov q_vector = kzalloc(size, GFP_KERNEL);
273921c046e4SEmil Tantilov if (!q_vector)
274021c046e4SEmil Tantilov return -ENOMEM;
274121c046e4SEmil Tantilov
274221c046e4SEmil Tantilov /* initialize NAPI */
2743b48b89f9SJakub Kicinski netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
274421c046e4SEmil Tantilov
274521c046e4SEmil Tantilov /* tie q_vector and adapter together */
274621c046e4SEmil Tantilov adapter->q_vector[v_idx] = q_vector;
274721c046e4SEmil Tantilov q_vector->adapter = adapter;
274821c046e4SEmil Tantilov q_vector->v_idx = v_idx;
274921c046e4SEmil Tantilov
275021c046e4SEmil Tantilov /* initialize pointer to rings */
275121c046e4SEmil Tantilov ring = q_vector->ring;
275221c046e4SEmil Tantilov
275321c046e4SEmil Tantilov while (txr_count) {
275421c046e4SEmil Tantilov /* assign generic ring traits */
275521c046e4SEmil Tantilov ring->dev = &adapter->pdev->dev;
275621c046e4SEmil Tantilov ring->netdev = adapter->netdev;
275721c046e4SEmil Tantilov
275821c046e4SEmil Tantilov /* configure backlink on ring */
275921c046e4SEmil Tantilov ring->q_vector = q_vector;
276021c046e4SEmil Tantilov
276121c046e4SEmil Tantilov /* update q_vector Tx values */
276221c046e4SEmil Tantilov ixgbevf_add_ring(ring, &q_vector->tx);
276321c046e4SEmil Tantilov
276421c046e4SEmil Tantilov /* apply Tx specific ring traits */
276521c046e4SEmil Tantilov ring->count = adapter->tx_ring_count;
276621c046e4SEmil Tantilov ring->queue_index = txr_idx;
276721092e9cSTony Nguyen ring->reg_idx = reg_idx;
276821c046e4SEmil Tantilov
276921c046e4SEmil Tantilov /* assign ring to adapter */
277021c046e4SEmil Tantilov adapter->tx_ring[txr_idx] = ring;
277121c046e4SEmil Tantilov
277221c046e4SEmil Tantilov /* update count and index */
277321c046e4SEmil Tantilov txr_count--;
277421c046e4SEmil Tantilov txr_idx++;
277521092e9cSTony Nguyen reg_idx++;
277621092e9cSTony Nguyen
277721092e9cSTony Nguyen /* push pointer to next ring */
277821092e9cSTony Nguyen ring++;
277921092e9cSTony Nguyen }
278021092e9cSTony Nguyen
278121092e9cSTony Nguyen while (xdp_count) {
278221092e9cSTony Nguyen /* assign generic ring traits */
278321092e9cSTony Nguyen ring->dev = &adapter->pdev->dev;
278421092e9cSTony Nguyen ring->netdev = adapter->netdev;
278521092e9cSTony Nguyen
278621092e9cSTony Nguyen /* configure backlink on ring */
278721092e9cSTony Nguyen ring->q_vector = q_vector;
278821092e9cSTony Nguyen
278921092e9cSTony Nguyen /* update q_vector Tx values */
279021092e9cSTony Nguyen ixgbevf_add_ring(ring, &q_vector->tx);
279121092e9cSTony Nguyen
279221092e9cSTony Nguyen /* apply Tx specific ring traits */
279321092e9cSTony Nguyen ring->count = adapter->tx_ring_count;
279421092e9cSTony Nguyen ring->queue_index = xdp_idx;
279521092e9cSTony Nguyen ring->reg_idx = reg_idx;
279621092e9cSTony Nguyen set_ring_xdp(ring);
279721092e9cSTony Nguyen
279821092e9cSTony Nguyen /* assign ring to adapter */
279921092e9cSTony Nguyen adapter->xdp_ring[xdp_idx] = ring;
280021092e9cSTony Nguyen
280121092e9cSTony Nguyen /* update count and index */
280221092e9cSTony Nguyen xdp_count--;
280321092e9cSTony Nguyen xdp_idx++;
280421092e9cSTony Nguyen reg_idx++;
280521c046e4SEmil Tantilov
280621c046e4SEmil Tantilov /* push pointer to next ring */
280721c046e4SEmil Tantilov ring++;
280821c046e4SEmil Tantilov }
280921c046e4SEmil Tantilov
281021c046e4SEmil Tantilov while (rxr_count) {
281121c046e4SEmil Tantilov /* assign generic ring traits */
281221c046e4SEmil Tantilov ring->dev = &adapter->pdev->dev;
281321c046e4SEmil Tantilov ring->netdev = adapter->netdev;
281421c046e4SEmil Tantilov
281521c046e4SEmil Tantilov /* configure backlink on ring */
281621c046e4SEmil Tantilov ring->q_vector = q_vector;
281721c046e4SEmil Tantilov
281821c046e4SEmil Tantilov /* update q_vector Rx values */
281921c046e4SEmil Tantilov ixgbevf_add_ring(ring, &q_vector->rx);
282021c046e4SEmil Tantilov
282121c046e4SEmil Tantilov /* apply Rx specific ring traits */
282221c046e4SEmil Tantilov ring->count = adapter->rx_ring_count;
282321c046e4SEmil Tantilov ring->queue_index = rxr_idx;
282421c046e4SEmil Tantilov ring->reg_idx = rxr_idx;
282521c046e4SEmil Tantilov
282621c046e4SEmil Tantilov /* assign ring to adapter */
282721c046e4SEmil Tantilov adapter->rx_ring[rxr_idx] = ring;
282821c046e4SEmil Tantilov
282921c046e4SEmil Tantilov /* update count and index */
283021c046e4SEmil Tantilov rxr_count--;
283121c046e4SEmil Tantilov rxr_idx++;
283221c046e4SEmil Tantilov
283321c046e4SEmil Tantilov /* push pointer to next ring */
283421c046e4SEmil Tantilov ring++;
283521c046e4SEmil Tantilov }
283621c046e4SEmil Tantilov
283721c046e4SEmil Tantilov return 0;
283821c046e4SEmil Tantilov }
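/* Illustrative layout note: the q_vector and its rings come from one
 * allocation, with the ring structs laid out right after the vector in the
 * order Tx, XDP, Rx.  For txr_count = 1, xdp_count = 1, rxr_count = 1,
 * ring[0] is the regular Tx ring (reg_idx 0), ring[1] is the XDP Tx ring
 * (reg_idx 1, sharing the hardware Tx register space), and ring[2] is the
 * Rx ring (reg_idx 0).
 */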
283921c046e4SEmil Tantilov
284021c046e4SEmil Tantilov /**
284121c046e4SEmil Tantilov * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
284221c046e4SEmil Tantilov * @adapter: board private structure to initialize
284321c046e4SEmil Tantilov * @v_idx: index of vector in adapter struct
284421c046e4SEmil Tantilov *
284521c046e4SEmil Tantilov * This function frees the memory allocated to the q_vector. In addition if
284621c046e4SEmil Tantilov * NAPI is enabled it will delete any references to the NAPI struct prior
284721c046e4SEmil Tantilov * to freeing the q_vector.
284821c046e4SEmil Tantilov **/
284921c046e4SEmil Tantilov static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
285021c046e4SEmil Tantilov {
285121c046e4SEmil Tantilov struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
285221c046e4SEmil Tantilov struct ixgbevf_ring *ring;
285321c046e4SEmil Tantilov
285421092e9cSTony Nguyen ixgbevf_for_each_ring(ring, q_vector->tx) {
285521092e9cSTony Nguyen if (ring_is_xdp(ring))
285621092e9cSTony Nguyen adapter->xdp_ring[ring->queue_index] = NULL;
285721092e9cSTony Nguyen else
285821c046e4SEmil Tantilov adapter->tx_ring[ring->queue_index] = NULL;
285921092e9cSTony Nguyen }
286021c046e4SEmil Tantilov
286121c046e4SEmil Tantilov ixgbevf_for_each_ring(ring, q_vector->rx)
286221c046e4SEmil Tantilov adapter->rx_ring[ring->queue_index] = NULL;
286321c046e4SEmil Tantilov
286421c046e4SEmil Tantilov adapter->q_vector[v_idx] = NULL;
286521c046e4SEmil Tantilov netif_napi_del(&q_vector->napi);
286621c046e4SEmil Tantilov
286721c046e4SEmil Tantilov /* ixgbevf_get_stats() might access the rings on this vector,
286821c046e4SEmil Tantilov * so we must wait a grace period before freeing it.
286921c046e4SEmil Tantilov */
287021c046e4SEmil Tantilov kfree_rcu(q_vector, rcu);
2871dee1ad47SJeff Kirsher }
2872dee1ad47SJeff Kirsher
2873dee1ad47SJeff Kirsher /**
2874dee1ad47SJeff Kirsher * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2875dee1ad47SJeff Kirsher * @adapter: board private structure to initialize
2876dee1ad47SJeff Kirsher *
2877dee1ad47SJeff Kirsher * We allocate one q_vector per queue interrupt. If allocation fails we
2878dee1ad47SJeff Kirsher * return -ENOMEM.
2879dee1ad47SJeff Kirsher **/
2880dee1ad47SJeff Kirsher static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2881dee1ad47SJeff Kirsher {
288221c046e4SEmil Tantilov int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
288321c046e4SEmil Tantilov int rxr_remaining = adapter->num_rx_queues;
288421c046e4SEmil Tantilov int txr_remaining = adapter->num_tx_queues;
288521092e9cSTony Nguyen int xdp_remaining = adapter->num_xdp_queues;
288621092e9cSTony Nguyen int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
288721c046e4SEmil Tantilov int err;
2888dee1ad47SJeff Kirsher
288921092e9cSTony Nguyen if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
289021c046e4SEmil Tantilov for (; rxr_remaining; v_idx++, q_vectors--) {
289121c046e4SEmil Tantilov int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2892dee1ad47SJeff Kirsher
289321c046e4SEmil Tantilov err = ixgbevf_alloc_q_vector(adapter, v_idx,
289421092e9cSTony Nguyen 0, 0, 0, 0, rqpv, rxr_idx);
289521c046e4SEmil Tantilov if (err)
2896dee1ad47SJeff Kirsher goto err_out;
289721c046e4SEmil Tantilov
289821c046e4SEmil Tantilov /* update counts and index */
289921c046e4SEmil Tantilov rxr_remaining -= rqpv;
290021c046e4SEmil Tantilov rxr_idx += rqpv;
290121c046e4SEmil Tantilov }
290221c046e4SEmil Tantilov }
290321c046e4SEmil Tantilov
290421c046e4SEmil Tantilov for (; q_vectors; v_idx++, q_vectors--) {
290521c046e4SEmil Tantilov int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
290621c046e4SEmil Tantilov int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
290721092e9cSTony Nguyen int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
290821c046e4SEmil Tantilov
290921c046e4SEmil Tantilov err = ixgbevf_alloc_q_vector(adapter, v_idx,
291021c046e4SEmil Tantilov tqpv, txr_idx,
291121092e9cSTony Nguyen xqpv, xdp_idx,
291221c046e4SEmil Tantilov rqpv, rxr_idx);
291321c046e4SEmil Tantilov
291421c046e4SEmil Tantilov if (err)
291521c046e4SEmil Tantilov goto err_out;
291621c046e4SEmil Tantilov
291721c046e4SEmil Tantilov /* update counts and index */
291821c046e4SEmil Tantilov rxr_remaining -= rqpv;
291921c046e4SEmil Tantilov rxr_idx += rqpv;
292021c046e4SEmil Tantilov txr_remaining -= tqpv;
292121c046e4SEmil Tantilov txr_idx += tqpv;
292221092e9cSTony Nguyen xdp_remaining -= xqpv;
292321092e9cSTony Nguyen xdp_idx += xqpv;
2924dee1ad47SJeff Kirsher }
2925dee1ad47SJeff Kirsher
2926dee1ad47SJeff Kirsher return 0;
2927dee1ad47SJeff Kirsher
2928dee1ad47SJeff Kirsher err_out:
292921c046e4SEmil Tantilov while (v_idx) {
293021c046e4SEmil Tantilov v_idx--;
293121c046e4SEmil Tantilov ixgbevf_free_q_vector(adapter, v_idx);
2932dee1ad47SJeff Kirsher }
293321c046e4SEmil Tantilov
2934dee1ad47SJeff Kirsher return -ENOMEM;
2935dee1ad47SJeff Kirsher }
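/* Illustrative distribution example: with 4 Rx rings, 4 Tx rings, no XDP
 * rings and only 2 queue vectors, the Rx-only fast path is skipped
 * (2 < 8), and each pass of the second loop takes
 * rqpv = DIV_ROUND_UP(4, 2) = 2 and tqpv = 2, so both vectors end up
 * servicing two Rx and two Tx rings apiece.
 */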
2936dee1ad47SJeff Kirsher
2937dee1ad47SJeff Kirsher /**
2938dee1ad47SJeff Kirsher * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2939dee1ad47SJeff Kirsher * @adapter: board private structure to initialize
2940dee1ad47SJeff Kirsher *
2941dee1ad47SJeff Kirsher * This function frees the memory allocated to the q_vectors. In addition if
2942dee1ad47SJeff Kirsher * NAPI is enabled it will delete any references to the NAPI struct prior
2943dee1ad47SJeff Kirsher * to freeing the q_vector.
2944dee1ad47SJeff Kirsher **/
2945dee1ad47SJeff Kirsher static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2946dee1ad47SJeff Kirsher {
294721c046e4SEmil Tantilov int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2948dee1ad47SJeff Kirsher
294921c046e4SEmil Tantilov while (q_vectors) {
295021c046e4SEmil Tantilov q_vectors--;
295121c046e4SEmil Tantilov ixgbevf_free_q_vector(adapter, q_vectors);
2952dee1ad47SJeff Kirsher }
2953dee1ad47SJeff Kirsher }
2954dee1ad47SJeff Kirsher
2955dee1ad47SJeff Kirsher /**
2956dee1ad47SJeff Kirsher * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2957dee1ad47SJeff Kirsher * @adapter: board private structure
2958dee1ad47SJeff Kirsher *
2959dee1ad47SJeff Kirsher **/
2960dee1ad47SJeff Kirsher static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2961dee1ad47SJeff Kirsher {
2962eeffceeeSMark Rustad if (!adapter->msix_entries)
2963eeffceeeSMark Rustad return;
2964eeffceeeSMark Rustad
2965dee1ad47SJeff Kirsher pci_disable_msix(adapter->pdev);
2966dee1ad47SJeff Kirsher kfree(adapter->msix_entries);
2967dee1ad47SJeff Kirsher adapter->msix_entries = NULL;
2968dee1ad47SJeff Kirsher }
2969dee1ad47SJeff Kirsher
2970dee1ad47SJeff Kirsher /**
2971dee1ad47SJeff Kirsher * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2972dee1ad47SJeff Kirsher * @adapter: board private structure to initialize
2973dee1ad47SJeff Kirsher *
2974dee1ad47SJeff Kirsher **/
2975dee1ad47SJeff Kirsher static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2976dee1ad47SJeff Kirsher {
2977dee1ad47SJeff Kirsher int err;
2978dee1ad47SJeff Kirsher
2979dee1ad47SJeff Kirsher /* Number of supported queues */
2980dee1ad47SJeff Kirsher ixgbevf_set_num_queues(adapter);
2981dee1ad47SJeff Kirsher
2982dee1ad47SJeff Kirsher err = ixgbevf_set_interrupt_capability(adapter);
2983dee1ad47SJeff Kirsher if (err) {
2984dee1ad47SJeff Kirsher hw_dbg(&adapter->hw,
2985dee1ad47SJeff Kirsher "Unable to setup interrupt capabilities\n");
2986dee1ad47SJeff Kirsher goto err_set_interrupt;
2987dee1ad47SJeff Kirsher }
2988dee1ad47SJeff Kirsher
2989dee1ad47SJeff Kirsher err = ixgbevf_alloc_q_vectors(adapter);
2990dee1ad47SJeff Kirsher if (err) {
2991dec0d8e4SJeff Kirsher hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2992dee1ad47SJeff Kirsher goto err_alloc_q_vectors;
2993dee1ad47SJeff Kirsher }
2994dee1ad47SJeff Kirsher
299521092e9cSTony Nguyen hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
299621092e9cSTony Nguyen (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
299721092e9cSTony Nguyen adapter->num_rx_queues, adapter->num_tx_queues,
299821092e9cSTony Nguyen adapter->num_xdp_queues);
2999dee1ad47SJeff Kirsher
3000dee1ad47SJeff Kirsher set_bit(__IXGBEVF_DOWN, &adapter->state);
3001dee1ad47SJeff Kirsher
3002dee1ad47SJeff Kirsher return 0;
3003dee1ad47SJeff Kirsher err_alloc_q_vectors:
3004dee1ad47SJeff Kirsher ixgbevf_reset_interrupt_capability(adapter);
3005dee1ad47SJeff Kirsher err_set_interrupt:
3006dee1ad47SJeff Kirsher return err;
3007dee1ad47SJeff Kirsher }
3008dee1ad47SJeff Kirsher
3009dee1ad47SJeff Kirsher /**
30100ac1e8ceSAlexander Duyck * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
30110ac1e8ceSAlexander Duyck * @adapter: board private structure to clear interrupt scheme on
30120ac1e8ceSAlexander Duyck *
30130ac1e8ceSAlexander Duyck * We go through and clear interrupt specific resources and reset the structure
30140ac1e8ceSAlexander Duyck * to pre-load conditions
30150ac1e8ceSAlexander Duyck **/
30160ac1e8ceSAlexander Duyck static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
30170ac1e8ceSAlexander Duyck {
30180ac1e8ceSAlexander Duyck adapter->num_tx_queues = 0;
301921092e9cSTony Nguyen adapter->num_xdp_queues = 0;
30200ac1e8ceSAlexander Duyck adapter->num_rx_queues = 0;
30210ac1e8ceSAlexander Duyck
30220ac1e8ceSAlexander Duyck ixgbevf_free_q_vectors(adapter);
30230ac1e8ceSAlexander Duyck ixgbevf_reset_interrupt_capability(adapter);
30240ac1e8ceSAlexander Duyck }
30250ac1e8ceSAlexander Duyck
30260ac1e8ceSAlexander Duyck /**
3027dee1ad47SJeff Kirsher * ixgbevf_sw_init - Initialize general software structures
3028dee1ad47SJeff Kirsher * @adapter: board private structure to initialize
3029dee1ad47SJeff Kirsher *
3030dee1ad47SJeff Kirsher * ixgbevf_sw_init initializes the Adapter private data structure.
3031dee1ad47SJeff Kirsher * Fields are initialized based on PCI device information and
3032dee1ad47SJeff Kirsher * OS network device settings (MTU size).
3033dee1ad47SJeff Kirsher **/
30349f9a12f8SBill Pemberton static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
3035dee1ad47SJeff Kirsher {
3036dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
3037dee1ad47SJeff Kirsher struct pci_dev *pdev = adapter->pdev;
3038e1941a74SGreg Rose struct net_device *netdev = adapter->netdev;
3039dee1ad47SJeff Kirsher int err;
3040dee1ad47SJeff Kirsher
3041dee1ad47SJeff Kirsher /* PCI config space info */
3042dee1ad47SJeff Kirsher hw->vendor_id = pdev->vendor;
3043dee1ad47SJeff Kirsher hw->device_id = pdev->device;
3044dee1ad47SJeff Kirsher hw->revision_id = pdev->revision;
3045dee1ad47SJeff Kirsher hw->subsystem_vendor_id = pdev->subsystem_vendor;
3046dee1ad47SJeff Kirsher hw->subsystem_device_id = pdev->subsystem_device;
3047dee1ad47SJeff Kirsher
3048dee1ad47SJeff Kirsher hw->mbx.ops.init_params(hw);
304956e94095SAlexander Duyck
3050e60ae003STony Nguyen if (hw->mac.type >= ixgbe_mac_X550_vf) {
3051e60ae003STony Nguyen err = ixgbevf_init_rss_key(adapter);
3052e60ae003STony Nguyen if (err)
3053e60ae003STony Nguyen goto out;
3054e60ae003STony Nguyen }
3055e60ae003STony Nguyen
305656e94095SAlexander Duyck /* assume legacy case in which PF would only give VF 2 queues */
305756e94095SAlexander Duyck hw->mac.max_tx_queues = 2;
305856e94095SAlexander Duyck hw->mac.max_rx_queues = 2;
305956e94095SAlexander Duyck
3060798e381aSDon Skidmore /* lock to protect mailbox accesses */
3061798e381aSDon Skidmore spin_lock_init(&adapter->mbx_lock);
3062798e381aSDon Skidmore
3063dee1ad47SJeff Kirsher err = hw->mac.ops.reset_hw(hw);
3064dee1ad47SJeff Kirsher if (err) {
3065dee1ad47SJeff Kirsher dev_info(&pdev->dev,
3066e1941a74SGreg Rose "PF still in reset state. Is the PF interface up?\n");
3067dee1ad47SJeff Kirsher } else {
3068dee1ad47SJeff Kirsher err = hw->mac.ops.init_hw(hw);
3069dee1ad47SJeff Kirsher if (err) {
3070dbd9636eSJeff Kirsher pr_err("init_hw failed: %d\n", err);
3071dee1ad47SJeff Kirsher goto out;
3072dee1ad47SJeff Kirsher }
3073798e381aSDon Skidmore ixgbevf_negotiate_api(adapter);
3074e1941a74SGreg Rose err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3075e1941a74SGreg Rose if (err)
3076e1941a74SGreg Rose dev_info(&pdev->dev, "Error reading MAC address\n");
3077e1941a74SGreg Rose else if (is_zero_ether_addr(adapter->hw.mac.addr))
3078e1941a74SGreg Rose dev_info(&pdev->dev,
3079e1941a74SGreg Rose "MAC address not assigned by administrator.\n");
3080f3956ebbSJakub Kicinski eth_hw_addr_set(netdev, hw->mac.addr);
3081e1941a74SGreg Rose }
3082e1941a74SGreg Rose
3083e1941a74SGreg Rose if (!is_valid_ether_addr(netdev->dev_addr)) {
3084e1941a74SGreg Rose dev_info(&pdev->dev, "Assigning random MAC address\n");
3085e1941a74SGreg Rose eth_hw_addr_random(netdev);
308691a76baaSEmil Tantilov ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3087465fc643SEmil Tantilov ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3088dee1ad47SJeff Kirsher }
3089dee1ad47SJeff Kirsher
3090dee1ad47SJeff Kirsher /* Enable dynamic interrupt throttling rates */
30915f3600ebSAlexander Duyck adapter->rx_itr_setting = 1;
30925f3600ebSAlexander Duyck adapter->tx_itr_setting = 1;
3093dee1ad47SJeff Kirsher
3094dee1ad47SJeff Kirsher /* set default ring sizes */
3095dee1ad47SJeff Kirsher adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3096dee1ad47SJeff Kirsher adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3097dee1ad47SJeff Kirsher
3098443ebdd6SSlawomir Mrozowicz adapter->link_state = true;
3099443ebdd6SSlawomir Mrozowicz
3100dee1ad47SJeff Kirsher set_bit(__IXGBEVF_DOWN, &adapter->state);
31011a0d6ae5SDanny Kukawka return 0;
3102dee1ad47SJeff Kirsher
3103dee1ad47SJeff Kirsher out:
3104dee1ad47SJeff Kirsher return err;
3105dee1ad47SJeff Kirsher }
3106dee1ad47SJeff Kirsher
3107dee1ad47SJeff Kirsher #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3108dee1ad47SJeff Kirsher { \
3109dee1ad47SJeff Kirsher u32 current_counter = IXGBE_READ_REG(hw, reg); \
3110dee1ad47SJeff Kirsher if (current_counter < last_counter) \
3111dee1ad47SJeff Kirsher counter += 0x100000000LL; \
3112dee1ad47SJeff Kirsher last_counter = current_counter; \
3113dee1ad47SJeff Kirsher counter &= 0xFFFFFFFF00000000LL; \
3114dee1ad47SJeff Kirsher counter |= current_counter; \
3115dee1ad47SJeff Kirsher }
3116dee1ad47SJeff Kirsher
3117dee1ad47SJeff Kirsher #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3118dee1ad47SJeff Kirsher { \
3119dee1ad47SJeff Kirsher u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3120dee1ad47SJeff Kirsher u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3121dee1ad47SJeff Kirsher u64 current_counter = (current_counter_msb << 32) | \
3122dee1ad47SJeff Kirsher current_counter_lsb; \
3123dee1ad47SJeff Kirsher if (current_counter < last_counter) \
3124dee1ad47SJeff Kirsher counter += 0x1000000000LL; \
3125dee1ad47SJeff Kirsher last_counter = current_counter; \
3126dee1ad47SJeff Kirsher counter &= 0xFFFFFFF000000000LL; \
3127dee1ad47SJeff Kirsher counter |= current_counter; \
3128dee1ad47SJeff Kirsher }
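/* Worked example for the 32-bit case above (illustrative): if last_counter
 * held 0xFFFFFFF0 and the register now reads 0x00000010, the hardware
 * counter has wrapped, so 0x100000000 is added to the running 64-bit
 * total, the low 32 bits are replaced with the new reading, and
 * last_counter is updated; the 36-bit variant does the same with a
 * 0x1000000000 rollover for the split LSB/MSB registers.
 */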
3129dee1ad47SJeff Kirsher /**
3130dee1ad47SJeff Kirsher * ixgbevf_update_stats - Update the board statistics counters.
3131dee1ad47SJeff Kirsher * @adapter: board private structure
3132dee1ad47SJeff Kirsher **/
3133dee1ad47SJeff Kirsher void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3134dee1ad47SJeff Kirsher {
3135dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
31362a35efe5SEmil Tantilov u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
31372a35efe5SEmil Tantilov u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
313855fb277cSGreg Rose int i;
3139dee1ad47SJeff Kirsher
3140e66c92adSEmil Tantilov if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3141e66c92adSEmil Tantilov test_bit(__IXGBEVF_RESETTING, &adapter->state))
3142088245a3SGreg Rose return;
3143088245a3SGreg Rose
3144dee1ad47SJeff Kirsher UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3145dee1ad47SJeff Kirsher adapter->stats.vfgprc);
3146dee1ad47SJeff Kirsher UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3147dee1ad47SJeff Kirsher adapter->stats.vfgptc);
3148dee1ad47SJeff Kirsher UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3149dee1ad47SJeff Kirsher adapter->stats.last_vfgorc,
3150dee1ad47SJeff Kirsher adapter->stats.vfgorc);
3151dee1ad47SJeff Kirsher UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3152dee1ad47SJeff Kirsher adapter->stats.last_vfgotc,
3153dee1ad47SJeff Kirsher adapter->stats.vfgotc);
3154dee1ad47SJeff Kirsher UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3155dee1ad47SJeff Kirsher adapter->stats.vfmprc);
315655fb277cSGreg Rose
315755fb277cSGreg Rose for (i = 0; i < adapter->num_rx_queues; i++) {
31582a35efe5SEmil Tantilov struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
31592a35efe5SEmil Tantilov
31602a35efe5SEmil Tantilov hw_csum_rx_error += rx_ring->rx_stats.csum_err;
31612a35efe5SEmil Tantilov alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
31622a35efe5SEmil Tantilov alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
31632a35efe5SEmil Tantilov alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
316455fb277cSGreg Rose }
31652a35efe5SEmil Tantilov
31662a35efe5SEmil Tantilov adapter->hw_csum_rx_error = hw_csum_rx_error;
31672a35efe5SEmil Tantilov adapter->alloc_rx_page_failed = alloc_rx_page_failed;
31682a35efe5SEmil Tantilov adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
31692a35efe5SEmil Tantilov adapter->alloc_rx_page = alloc_rx_page;
3170dee1ad47SJeff Kirsher }
3171dee1ad47SJeff Kirsher
3172dee1ad47SJeff Kirsher /**
31739ac5c5ccSEmil Tantilov * ixgbevf_service_timer - Timer Call-back
3174e23cf38fSTony Nguyen * @t: pointer to timer_list struct
3175dee1ad47SJeff Kirsher **/
317626566eaeSKees Cook static void ixgbevf_service_timer(struct timer_list *t)
3177dee1ad47SJeff Kirsher {
317826566eaeSKees Cook struct ixgbevf_adapter *adapter = from_timer(adapter, t,
317926566eaeSKees Cook service_timer);
3180e66c92adSEmil Tantilov
31819ac5c5ccSEmil Tantilov /* Reset the timer */
31829ac5c5ccSEmil Tantilov mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
31839ac5c5ccSEmil Tantilov
31849ac5c5ccSEmil Tantilov ixgbevf_service_event_schedule(adapter);
3185e66c92adSEmil Tantilov }
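/* Usage note (illustrative): the timer only reschedules itself two seconds
 * out and kicks the service work item; the actual reset, watchdog and
 * hang-check work runs later in process context from ixgbevf_service_task().
 */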
3186e66c92adSEmil Tantilov
31879ac5c5ccSEmil Tantilov static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3188e66c92adSEmil Tantilov {
3189d5dd7c3fSEmil Tantilov if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
31909ac5c5ccSEmil Tantilov return;
3191e66c92adSEmil Tantilov
31927d6446dbSEmil Tantilov rtnl_lock();
3193e66c92adSEmil Tantilov /* If we're already down or resetting, just bail */
3194e66c92adSEmil Tantilov if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
31956e469ed0SDon Skidmore test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
31967d6446dbSEmil Tantilov test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
31977d6446dbSEmil Tantilov rtnl_unlock();
3198e66c92adSEmil Tantilov return;
31997d6446dbSEmil Tantilov }
3200e66c92adSEmil Tantilov
3201e66c92adSEmil Tantilov adapter->tx_timeout_count++;
3202e66c92adSEmil Tantilov
3203e66c92adSEmil Tantilov ixgbevf_reinit_locked(adapter);
32048e8247abSDon Skidmore rtnl_unlock();
3205e66c92adSEmil Tantilov }
3206e66c92adSEmil Tantilov
3207dec0d8e4SJeff Kirsher /**
3208dec0d8e4SJeff Kirsher * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3209dec0d8e4SJeff Kirsher * @adapter: pointer to the device adapter structure
3210e66c92adSEmil Tantilov *
3211e66c92adSEmil Tantilov * This function serves two purposes. First it strobes the interrupt lines
3212e66c92adSEmil Tantilov * in order to make certain interrupts are occurring. Secondly it sets the
3213e66c92adSEmil Tantilov * bits needed to check for TX hangs. As a result we should immediately
3214e66c92adSEmil Tantilov * determine if a hang has occurred.
3215dec0d8e4SJeff Kirsher **/
3216e66c92adSEmil Tantilov static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3217e66c92adSEmil Tantilov {
3218dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
32195f3600ebSAlexander Duyck u32 eics = 0;
3220dee1ad47SJeff Kirsher int i;
3221dee1ad47SJeff Kirsher
3222e66c92adSEmil Tantilov /* If we're down or resetting, just bail */
3223e66c92adSEmil Tantilov if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3224e66c92adSEmil Tantilov test_bit(__IXGBEVF_RESETTING, &adapter->state))
3225e66c92adSEmil Tantilov return;
3226dee1ad47SJeff Kirsher
3227e08400b7SEmil Tantilov /* Force detection of hung controller */
3228e08400b7SEmil Tantilov if (netif_carrier_ok(adapter->netdev)) {
3229e08400b7SEmil Tantilov for (i = 0; i < adapter->num_tx_queues; i++)
3230e08400b7SEmil Tantilov set_check_for_tx_hang(adapter->tx_ring[i]);
323121092e9cSTony Nguyen for (i = 0; i < adapter->num_xdp_queues; i++)
323221092e9cSTony Nguyen set_check_for_tx_hang(adapter->xdp_ring[i]);
3233e08400b7SEmil Tantilov }
3234e08400b7SEmil Tantilov
3235dec0d8e4SJeff Kirsher /* get one bit for every active Tx/Rx interrupt vector */
3236dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3237dee1ad47SJeff Kirsher struct ixgbevf_q_vector *qv = adapter->q_vector[i];
32389ac5c5ccSEmil Tantilov
32396b43c446SAlexander Duyck if (qv->rx.ring || qv->tx.ring)
32408d055cc0SJacob Keller eics |= BIT(i);
3241dee1ad47SJeff Kirsher }
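/* Example (illustrative): with three active q_vectors, each owning at
 * least one Tx or Rx ring, the loop above builds
 * eics = BIT(0) | BIT(1) | BIT(2) = 0x7, so the single VTEICS write below
 * raises a software interrupt on all three vectors and their rings get
 * serviced even if a hardware interrupt was missed.
 */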
3242dee1ad47SJeff Kirsher
3243e66c92adSEmil Tantilov /* Cause software interrupt to ensure rings are cleaned */
32445f3600ebSAlexander Duyck IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3245dee1ad47SJeff Kirsher }
3246dee1ad47SJeff Kirsher
3247e66c92adSEmil Tantilov /**
3248e66c92adSEmil Tantilov * ixgbevf_watchdog_update_link - update the link status
3249dec0d8e4SJeff Kirsher * @adapter: pointer to the device adapter structure
3250e66c92adSEmil Tantilov **/
3251e66c92adSEmil Tantilov static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3252dee1ad47SJeff Kirsher {
3253e66c92adSEmil Tantilov struct ixgbe_hw *hw = &adapter->hw;
3254e66c92adSEmil Tantilov u32 link_speed = adapter->link_speed;
3255e66c92adSEmil Tantilov bool link_up = adapter->link_up;
3256e66c92adSEmil Tantilov s32 err;
3257dee1ad47SJeff Kirsher
3258e66c92adSEmil Tantilov spin_lock_bh(&adapter->mbx_lock);
3259e66c92adSEmil Tantilov
3260e66c92adSEmil Tantilov err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3261e66c92adSEmil Tantilov
3262e66c92adSEmil Tantilov spin_unlock_bh(&adapter->mbx_lock);
3263e66c92adSEmil Tantilov
3264e66c92adSEmil Tantilov /* if check for link returns error we will need to reset */
3265e66c92adSEmil Tantilov if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3266d5dd7c3fSEmil Tantilov set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3267e66c92adSEmil Tantilov link_up = false;
3268e66c92adSEmil Tantilov }
3269e66c92adSEmil Tantilov
3270e66c92adSEmil Tantilov adapter->link_up = link_up;
3271e66c92adSEmil Tantilov adapter->link_speed = link_speed;
3272e66c92adSEmil Tantilov }
3273e66c92adSEmil Tantilov
3274e66c92adSEmil Tantilov /**
3275e66c92adSEmil Tantilov * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3276e66c92adSEmil Tantilov * print link up message
3277dec0d8e4SJeff Kirsher * @adapter: pointer to the device adapter structure
3278e66c92adSEmil Tantilov **/
3279e66c92adSEmil Tantilov static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3280e66c92adSEmil Tantilov {
3281e66c92adSEmil Tantilov struct net_device *netdev = adapter->netdev;
3282e66c92adSEmil Tantilov
3283e66c92adSEmil Tantilov /* only continue if link was previously down */
3284e66c92adSEmil Tantilov if (netif_carrier_ok(netdev))
3285dee1ad47SJeff Kirsher return;
3286dee1ad47SJeff Kirsher
3287e66c92adSEmil Tantilov dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3288e66c92adSEmil Tantilov (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3289e66c92adSEmil Tantilov "10 Gbps" :
3290e66c92adSEmil Tantilov (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3291e66c92adSEmil Tantilov "1 Gbps" :
3292e66c92adSEmil Tantilov (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3293e66c92adSEmil Tantilov "100 Mbps" :
3294e66c92adSEmil Tantilov "unknown speed");
3295dee1ad47SJeff Kirsher
3296e66c92adSEmil Tantilov netif_carrier_on(netdev);
3297e66c92adSEmil Tantilov }
3298e66c92adSEmil Tantilov
3299e66c92adSEmil Tantilov /**
3300e66c92adSEmil Tantilov * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3301e66c92adSEmil Tantilov * print link down message
3302dec0d8e4SJeff Kirsher * @adapter: pointer to the adapter structure
3303e66c92adSEmil Tantilov **/
3304e66c92adSEmil Tantilov static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3305e66c92adSEmil Tantilov {
3306e66c92adSEmil Tantilov struct net_device *netdev = adapter->netdev;
3307e66c92adSEmil Tantilov
3308e66c92adSEmil Tantilov adapter->link_speed = 0;
3309e66c92adSEmil Tantilov
3310e66c92adSEmil Tantilov /* only continue if link was up previously */
3311e66c92adSEmil Tantilov if (!netif_carrier_ok(netdev))
3312e66c92adSEmil Tantilov return;
3313e66c92adSEmil Tantilov
3314e66c92adSEmil Tantilov dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3315e66c92adSEmil Tantilov
3316e66c92adSEmil Tantilov netif_carrier_off(netdev);
3317dee1ad47SJeff Kirsher }
3318dee1ad47SJeff Kirsher
3319dee1ad47SJeff Kirsher /**
33209ac5c5ccSEmil Tantilov * ixgbevf_watchdog_subtask - worker thread to bring link up
3321e23cf38fSTony Nguyen * @adapter: board private structure
3322dee1ad47SJeff Kirsher **/
33239ac5c5ccSEmil Tantilov static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
33249ac5c5ccSEmil Tantilov {
33259ac5c5ccSEmil Tantilov /* if interface is down do nothing */
33269ac5c5ccSEmil Tantilov if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
33279ac5c5ccSEmil Tantilov test_bit(__IXGBEVF_RESETTING, &adapter->state))
33289ac5c5ccSEmil Tantilov return;
33299ac5c5ccSEmil Tantilov
33309ac5c5ccSEmil Tantilov ixgbevf_watchdog_update_link(adapter);
33319ac5c5ccSEmil Tantilov
3332443ebdd6SSlawomir Mrozowicz if (adapter->link_up && adapter->link_state)
33339ac5c5ccSEmil Tantilov ixgbevf_watchdog_link_is_up(adapter);
33349ac5c5ccSEmil Tantilov else
33359ac5c5ccSEmil Tantilov ixgbevf_watchdog_link_is_down(adapter);
33369ac5c5ccSEmil Tantilov
33379ac5c5ccSEmil Tantilov ixgbevf_update_stats(adapter);
33389ac5c5ccSEmil Tantilov }
33399ac5c5ccSEmil Tantilov
33409ac5c5ccSEmil Tantilov /**
33419ac5c5ccSEmil Tantilov * ixgbevf_service_task - manages and runs subtasks
33429ac5c5ccSEmil Tantilov * @work: pointer to work_struct containing our data
33439ac5c5ccSEmil Tantilov **/
33449ac5c5ccSEmil Tantilov static void ixgbevf_service_task(struct work_struct *work)
3345dee1ad47SJeff Kirsher {
3346dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = container_of(work,
3347dee1ad47SJeff Kirsher struct ixgbevf_adapter,
33489ac5c5ccSEmil Tantilov service_task);
3349dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
3350dee1ad47SJeff Kirsher
335126597802SMark Rustad if (IXGBE_REMOVED(hw->hw_addr)) {
335226597802SMark Rustad if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
335326597802SMark Rustad rtnl_lock();
335426597802SMark Rustad ixgbevf_down(adapter);
335526597802SMark Rustad rtnl_unlock();
335626597802SMark Rustad }
335726597802SMark Rustad return;
335826597802SMark Rustad }
3359e66c92adSEmil Tantilov
3360220fe050SDon Skidmore ixgbevf_queue_reset_subtask(adapter);
33619ac5c5ccSEmil Tantilov ixgbevf_reset_subtask(adapter);
33629ac5c5ccSEmil Tantilov ixgbevf_watchdog_subtask(adapter);
3363e66c92adSEmil Tantilov ixgbevf_check_hang_subtask(adapter);
3364e66c92adSEmil Tantilov
33659ac5c5ccSEmil Tantilov ixgbevf_service_event_complete(adapter);
3366dee1ad47SJeff Kirsher }
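/* Ordering note (illustrative): the subtasks above run in a fixed order
 * (queue reset, then reset, then the watchdog that updates link state and
 * statistics, then the hang check), and ixgbevf_service_event_complete()
 * marks the work item done so the service timer can schedule it again.
 */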
3367dee1ad47SJeff Kirsher
3368dee1ad47SJeff Kirsher /**
3369dee1ad47SJeff Kirsher * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3370dee1ad47SJeff Kirsher * @tx_ring: Tx descriptor ring for a specific queue
3371dee1ad47SJeff Kirsher *
3372dee1ad47SJeff Kirsher * Free all transmit software resources
3373dee1ad47SJeff Kirsher **/
337405d063aaSEmil Tantilov void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3375dee1ad47SJeff Kirsher {
337605d063aaSEmil Tantilov ixgbevf_clean_tx_ring(tx_ring);
3377dee1ad47SJeff Kirsher
3378dee1ad47SJeff Kirsher vfree(tx_ring->tx_buffer_info);
3379dee1ad47SJeff Kirsher tx_ring->tx_buffer_info = NULL;
3380dee1ad47SJeff Kirsher
3381de02decbSDon Skidmore /* if not set, then don't free */
3382de02decbSDon Skidmore if (!tx_ring->desc)
3383de02decbSDon Skidmore return;
3384de02decbSDon Skidmore
338505d063aaSEmil Tantilov dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3386dee1ad47SJeff Kirsher tx_ring->dma);
3387dee1ad47SJeff Kirsher
3388dee1ad47SJeff Kirsher tx_ring->desc = NULL;
3389dee1ad47SJeff Kirsher }
3390dee1ad47SJeff Kirsher
3391dee1ad47SJeff Kirsher /**
3392dee1ad47SJeff Kirsher * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3393dee1ad47SJeff Kirsher * @adapter: board private structure
3394dee1ad47SJeff Kirsher *
3395dee1ad47SJeff Kirsher * Free all transmit software resources
3396dee1ad47SJeff Kirsher **/
3397dee1ad47SJeff Kirsher static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3398dee1ad47SJeff Kirsher {
3399dee1ad47SJeff Kirsher int i;
3400dee1ad47SJeff Kirsher
3401dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_tx_queues; i++)
340287e70ab9SDon Skidmore if (adapter->tx_ring[i]->desc)
340305d063aaSEmil Tantilov ixgbevf_free_tx_resources(adapter->tx_ring[i]);
340421092e9cSTony Nguyen for (i = 0; i < adapter->num_xdp_queues; i++)
340521092e9cSTony Nguyen if (adapter->xdp_ring[i]->desc)
340621092e9cSTony Nguyen ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3407dee1ad47SJeff Kirsher }
3408dee1ad47SJeff Kirsher
3409dee1ad47SJeff Kirsher /**
3410dee1ad47SJeff Kirsher * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3411dec0d8e4SJeff Kirsher * @tx_ring: Tx descriptor ring (for a specific queue) to setup
3412dee1ad47SJeff Kirsher *
3413dee1ad47SJeff Kirsher * Return 0 on success, negative on failure
3414dee1ad47SJeff Kirsher **/
341505d063aaSEmil Tantilov int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3416dee1ad47SJeff Kirsher {
34174ad6af02SEmil Tantilov struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3418dee1ad47SJeff Kirsher int size;
3419dee1ad47SJeff Kirsher
3420dee1ad47SJeff Kirsher size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3421865a4d98SEmil Tantilov tx_ring->tx_buffer_info = vmalloc(size);
3422dee1ad47SJeff Kirsher if (!tx_ring->tx_buffer_info)
3423dee1ad47SJeff Kirsher goto err;
3424dee1ad47SJeff Kirsher
34257c3a4626SFlorian Fainelli u64_stats_init(&tx_ring->syncp);
34267c3a4626SFlorian Fainelli
3427dee1ad47SJeff Kirsher /* round up to nearest 4K */
3428dee1ad47SJeff Kirsher tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3429dee1ad47SJeff Kirsher tx_ring->size = ALIGN(tx_ring->size, 4096);
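/* Sizing sketch (illustrative, assuming the 16-byte union ixgbe_adv_tx_desc
 * and a default ring of 1024 descriptors): 1024 * 16 = 16384 bytes is
 * already a multiple of 4096, so ALIGN() is a no-op; an odd count such as
 * 520 would grow from 8320 to 12288 bytes so the DMA allocation below is
 * always requested in whole 4K units.
 */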
3430dee1ad47SJeff Kirsher
343105d063aaSEmil Tantilov tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3432dee1ad47SJeff Kirsher &tx_ring->dma, GFP_KERNEL);
3433dee1ad47SJeff Kirsher if (!tx_ring->desc)
3434dee1ad47SJeff Kirsher goto err;
3435dee1ad47SJeff Kirsher
3436dee1ad47SJeff Kirsher return 0;
3437dee1ad47SJeff Kirsher
3438dee1ad47SJeff Kirsher err:
3439dee1ad47SJeff Kirsher vfree(tx_ring->tx_buffer_info);
3440dee1ad47SJeff Kirsher tx_ring->tx_buffer_info = NULL;
3441dec0d8e4SJeff Kirsher hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3442dee1ad47SJeff Kirsher return -ENOMEM;
3443dee1ad47SJeff Kirsher }
3444dee1ad47SJeff Kirsher
3445dee1ad47SJeff Kirsher /**
3446dee1ad47SJeff Kirsher * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3447dee1ad47SJeff Kirsher * @adapter: board private structure
3448dee1ad47SJeff Kirsher *
3449dee1ad47SJeff Kirsher * If this function returns with an error, then it's possible one or
3450dee1ad47SJeff Kirsher * more of the rings is populated (while the rest are not). It is the
3451dee1ad47SJeff Kirsher * caller's duty to clean those orphaned rings.
3452dee1ad47SJeff Kirsher *
3453dee1ad47SJeff Kirsher * Return 0 on success, negative on failure
3454dee1ad47SJeff Kirsher **/
3455dee1ad47SJeff Kirsher static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3456dee1ad47SJeff Kirsher {
345721092e9cSTony Nguyen int i, j = 0, err = 0;
3458dee1ad47SJeff Kirsher
3459dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_tx_queues; i++) {
346005d063aaSEmil Tantilov err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3461dee1ad47SJeff Kirsher if (!err)
3462dee1ad47SJeff Kirsher continue;
3463dec0d8e4SJeff Kirsher hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3464f2d00ecaSEmil Tantilov goto err_setup_tx;
3465dee1ad47SJeff Kirsher }
3466dee1ad47SJeff Kirsher
346721092e9cSTony Nguyen for (j = 0; j < adapter->num_xdp_queues; j++) {
346821092e9cSTony Nguyen err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
346921092e9cSTony Nguyen if (!err)
347021092e9cSTony Nguyen continue;
347121092e9cSTony Nguyen hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
347239035bfdSColin Ian King goto err_setup_tx;
347321092e9cSTony Nguyen }
347421092e9cSTony Nguyen
3475f2d00ecaSEmil Tantilov return 0;
3476f2d00ecaSEmil Tantilov err_setup_tx:
3477f2d00ecaSEmil Tantilov /* rewind the index freeing the rings as we go */
347821092e9cSTony Nguyen while (j--)
347921092e9cSTony Nguyen ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3480f2d00ecaSEmil Tantilov while (i--)
3481f2d00ecaSEmil Tantilov ixgbevf_free_tx_resources(adapter->tx_ring[i]);
348221092e9cSTony Nguyen
3483dee1ad47SJeff Kirsher return err;
3484dee1ad47SJeff Kirsher }
3485dee1ad47SJeff Kirsher
3486dee1ad47SJeff Kirsher /**
3487dee1ad47SJeff Kirsher * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3488c7aec596STony Nguyen * @adapter: board private structure
3489dec0d8e4SJeff Kirsher * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3490dee1ad47SJeff Kirsher *
3491dee1ad47SJeff Kirsher * Returns 0 on success, negative on failure
3492dee1ad47SJeff Kirsher **/
3493c7aec596STony Nguyen int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3494c7aec596STony Nguyen struct ixgbevf_ring *rx_ring)
3495dee1ad47SJeff Kirsher {
3496dee1ad47SJeff Kirsher int size;
3497dee1ad47SJeff Kirsher
3498dee1ad47SJeff Kirsher size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
349940b8178bSEmil Tantilov rx_ring->rx_buffer_info = vmalloc(size);
3500e404decbSJoe Perches if (!rx_ring->rx_buffer_info)
350105d063aaSEmil Tantilov goto err;
3502dee1ad47SJeff Kirsher
35037c3a4626SFlorian Fainelli u64_stats_init(&rx_ring->syncp);
35047c3a4626SFlorian Fainelli
3505dee1ad47SJeff Kirsher /* Round up to nearest 4K */
3506dee1ad47SJeff Kirsher rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3507dee1ad47SJeff Kirsher rx_ring->size = ALIGN(rx_ring->size, 4096);
3508dee1ad47SJeff Kirsher
350905d063aaSEmil Tantilov rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3510dee1ad47SJeff Kirsher &rx_ring->dma, GFP_KERNEL);
3511dee1ad47SJeff Kirsher
351205d063aaSEmil Tantilov if (!rx_ring->desc)
351305d063aaSEmil Tantilov goto err;
3514dee1ad47SJeff Kirsher
3515c7aec596STony Nguyen /* XDP RX-queue info */
3516c7aec596STony Nguyen if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3517b02e5a0eSBjörn Töpel rx_ring->queue_index, 0) < 0)
3518c7aec596STony Nguyen goto err;
3519c7aec596STony Nguyen
3520c7aec596STony Nguyen rx_ring->xdp_prog = adapter->xdp_prog;
3521c7aec596STony Nguyen
3522dee1ad47SJeff Kirsher return 0;
352305d063aaSEmil Tantilov err:
352405d063aaSEmil Tantilov vfree(rx_ring->rx_buffer_info);
352505d063aaSEmil Tantilov rx_ring->rx_buffer_info = NULL;
352605d063aaSEmil Tantilov dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3527dee1ad47SJeff Kirsher return -ENOMEM;
3528dee1ad47SJeff Kirsher }
3529dee1ad47SJeff Kirsher
3530dee1ad47SJeff Kirsher /**
3531dee1ad47SJeff Kirsher * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3532dee1ad47SJeff Kirsher * @adapter: board private structure
3533dee1ad47SJeff Kirsher *
3534dee1ad47SJeff Kirsher * If this function returns with an error, then it's possible one or
3535dee1ad47SJeff Kirsher * more of the rings is populated (while the rest are not). It is the
3536dee1ad47SJeff Kirsher * caller's duty to clean those orphaned rings.
3537dee1ad47SJeff Kirsher *
3538dee1ad47SJeff Kirsher * Return 0 on success, negative on failure
3539dee1ad47SJeff Kirsher **/
3540dee1ad47SJeff Kirsher static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3541dee1ad47SJeff Kirsher {
3542dee1ad47SJeff Kirsher int i, err = 0;
3543dee1ad47SJeff Kirsher
3544dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_rx_queues; i++) {
3545c7aec596STony Nguyen err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3546dee1ad47SJeff Kirsher if (!err)
3547dee1ad47SJeff Kirsher continue;
3548dec0d8e4SJeff Kirsher hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3549f2d00ecaSEmil Tantilov goto err_setup_rx;
3550dee1ad47SJeff Kirsher }
3551f2d00ecaSEmil Tantilov
3552f2d00ecaSEmil Tantilov return 0;
3553f2d00ecaSEmil Tantilov err_setup_rx:
3554f2d00ecaSEmil Tantilov /* rewind the index freeing the rings as we go */
3555f2d00ecaSEmil Tantilov while (i--)
3556f2d00ecaSEmil Tantilov ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3557dee1ad47SJeff Kirsher return err;
3558dee1ad47SJeff Kirsher }
3559dee1ad47SJeff Kirsher
3560dee1ad47SJeff Kirsher /**
3561dee1ad47SJeff Kirsher * ixgbevf_free_rx_resources - Free Rx Resources
3562dee1ad47SJeff Kirsher * @rx_ring: ring to clean the resources from
3563dee1ad47SJeff Kirsher *
3564dee1ad47SJeff Kirsher * Free all receive software resources
3565dee1ad47SJeff Kirsher **/
356605d063aaSEmil Tantilov void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3567dee1ad47SJeff Kirsher {
356805d063aaSEmil Tantilov ixgbevf_clean_rx_ring(rx_ring);
3569dee1ad47SJeff Kirsher
3570c7aec596STony Nguyen rx_ring->xdp_prog = NULL;
3571c7aec596STony Nguyen xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3572dee1ad47SJeff Kirsher vfree(rx_ring->rx_buffer_info);
3573dee1ad47SJeff Kirsher rx_ring->rx_buffer_info = NULL;
3574dee1ad47SJeff Kirsher
357505d063aaSEmil Tantilov dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3576dee1ad47SJeff Kirsher rx_ring->dma);
3577dee1ad47SJeff Kirsher
3578dee1ad47SJeff Kirsher rx_ring->desc = NULL;
3579dee1ad47SJeff Kirsher }
3580dee1ad47SJeff Kirsher
3581dee1ad47SJeff Kirsher /**
3582dee1ad47SJeff Kirsher * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3583dee1ad47SJeff Kirsher * @adapter: board private structure
3584dee1ad47SJeff Kirsher *
3585dee1ad47SJeff Kirsher * Free all receive software resources
3586dee1ad47SJeff Kirsher **/
3587dee1ad47SJeff Kirsher static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3588dee1ad47SJeff Kirsher {
3589dee1ad47SJeff Kirsher int i;
3590dee1ad47SJeff Kirsher
3591dee1ad47SJeff Kirsher for (i = 0; i < adapter->num_rx_queues; i++)
359287e70ab9SDon Skidmore if (adapter->rx_ring[i]->desc)
359305d063aaSEmil Tantilov ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3594dee1ad47SJeff Kirsher }
3595dee1ad47SJeff Kirsher
3596dee1ad47SJeff Kirsher /**
3597dee1ad47SJeff Kirsher * ixgbevf_open - Called when a network interface is made active
3598dee1ad47SJeff Kirsher * @netdev: network interface device structure
3599dee1ad47SJeff Kirsher *
3600dee1ad47SJeff Kirsher * Returns 0 on success, negative value on failure
3601dee1ad47SJeff Kirsher *
3602dee1ad47SJeff Kirsher * The open entry point is called when a network interface is made
3603dee1ad47SJeff Kirsher * active by the system (IFF_UP). At this point all resources needed
3604dee1ad47SJeff Kirsher * for transmit and receive operations are allocated, the interrupt
3605dee1ad47SJeff Kirsher * handler is registered with the OS, the watchdog timer is started,
3606dee1ad47SJeff Kirsher * and the stack is notified that the interface is ready.
3607dee1ad47SJeff Kirsher **/
3608324d0867SStefan Assmann int ixgbevf_open(struct net_device *netdev)
3609dee1ad47SJeff Kirsher {
3610dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3611dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
3612dee1ad47SJeff Kirsher int err;
3613dee1ad47SJeff Kirsher
3614a1f6c6b1Sxunleer /* A previous failure to open the device because of a lack of
3615a1f6c6b1Sxunleer * available MSIX vector resources may have reset the number
3616a1f6c6b1Sxunleer * of msix vectors variable to zero. The only way to recover
3617a1f6c6b1Sxunleer * is to unload/reload the driver and hope that the system has
3618a1f6c6b1Sxunleer * been able to recover some MSIX vector resources.
3619a1f6c6b1Sxunleer */
3620a1f6c6b1Sxunleer if (!adapter->num_msix_vectors)
3621a1f6c6b1Sxunleer return -ENOMEM;
3622a1f6c6b1Sxunleer
3623dee1ad47SJeff Kirsher if (hw->adapter_stopped) {
3624dee1ad47SJeff Kirsher ixgbevf_reset(adapter);
3625dee1ad47SJeff Kirsher /* if adapter is still stopped then PF isn't up and
3626dec0d8e4SJeff Kirsher * the VF can't start.
3627dec0d8e4SJeff Kirsher */
3628dee1ad47SJeff Kirsher if (hw->adapter_stopped) {
3629dee1ad47SJeff Kirsher err = IXGBE_ERR_MBX;
3630dec0d8e4SJeff Kirsher pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3631dee1ad47SJeff Kirsher goto err_setup_reset;
3632dee1ad47SJeff Kirsher }
3633dee1ad47SJeff Kirsher }
3634dee1ad47SJeff Kirsher
3635d9bdb57fSEmil Tantilov /* disallow open during test */
3636d9bdb57fSEmil Tantilov if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3637d9bdb57fSEmil Tantilov return -EBUSY;
3638d9bdb57fSEmil Tantilov
3639d9bdb57fSEmil Tantilov netif_carrier_off(netdev);
3640d9bdb57fSEmil Tantilov
3641dee1ad47SJeff Kirsher /* allocate transmit descriptors */
3642dee1ad47SJeff Kirsher err = ixgbevf_setup_all_tx_resources(adapter);
3643dee1ad47SJeff Kirsher if (err)
3644dee1ad47SJeff Kirsher goto err_setup_tx;
3645dee1ad47SJeff Kirsher
3646dee1ad47SJeff Kirsher /* allocate receive descriptors */
3647dee1ad47SJeff Kirsher err = ixgbevf_setup_all_rx_resources(adapter);
3648dee1ad47SJeff Kirsher if (err)
3649dee1ad47SJeff Kirsher goto err_setup_rx;
3650dee1ad47SJeff Kirsher
3651dee1ad47SJeff Kirsher ixgbevf_configure(adapter);
3652dee1ad47SJeff Kirsher
3653dee1ad47SJeff Kirsher err = ixgbevf_request_irq(adapter);
3654dee1ad47SJeff Kirsher if (err)
3655dee1ad47SJeff Kirsher goto err_req_irq;
3656dee1ad47SJeff Kirsher
3657f2d00ecaSEmil Tantilov /* Notify the stack of the actual queue counts. */
3658f2d00ecaSEmil Tantilov err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3659f2d00ecaSEmil Tantilov if (err)
3660f2d00ecaSEmil Tantilov goto err_set_queues;
3661f2d00ecaSEmil Tantilov
3662f2d00ecaSEmil Tantilov err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3663f2d00ecaSEmil Tantilov if (err)
3664f2d00ecaSEmil Tantilov goto err_set_queues;
3665f2d00ecaSEmil Tantilov
3666d9bdb57fSEmil Tantilov ixgbevf_up_complete(adapter);
3667dee1ad47SJeff Kirsher
3668dee1ad47SJeff Kirsher return 0;
3669dee1ad47SJeff Kirsher
3670f2d00ecaSEmil Tantilov err_set_queues:
3671f2d00ecaSEmil Tantilov ixgbevf_free_irq(adapter);
3672dee1ad47SJeff Kirsher err_req_irq:
3673dee1ad47SJeff Kirsher ixgbevf_free_all_rx_resources(adapter);
3674f2d00ecaSEmil Tantilov err_setup_rx:
3675dee1ad47SJeff Kirsher ixgbevf_free_all_tx_resources(adapter);
3676f2d00ecaSEmil Tantilov err_setup_tx:
3677dee1ad47SJeff Kirsher ixgbevf_reset(adapter);
3678dee1ad47SJeff Kirsher err_setup_reset:
3679dee1ad47SJeff Kirsher
3680dee1ad47SJeff Kirsher return err;
3681dee1ad47SJeff Kirsher }
3682dee1ad47SJeff Kirsher
3683dee1ad47SJeff Kirsher /**
3684b19cf6eeSEmil Tantilov * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3685b19cf6eeSEmil Tantilov * @adapter: the private adapter struct
3686b19cf6eeSEmil Tantilov *
3687b19cf6eeSEmil Tantilov * This function should contain the necessary work common to both suspending
3688b19cf6eeSEmil Tantilov * and closing of the device.
3689b19cf6eeSEmil Tantilov */
3690b19cf6eeSEmil Tantilov static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3691b19cf6eeSEmil Tantilov {
3692b19cf6eeSEmil Tantilov ixgbevf_down(adapter);
3693b19cf6eeSEmil Tantilov ixgbevf_free_irq(adapter);
3694b19cf6eeSEmil Tantilov ixgbevf_free_all_tx_resources(adapter);
3695b19cf6eeSEmil Tantilov ixgbevf_free_all_rx_resources(adapter);
3696b19cf6eeSEmil Tantilov }
3697b19cf6eeSEmil Tantilov
3698b19cf6eeSEmil Tantilov /**
3699dee1ad47SJeff Kirsher * ixgbevf_close - Disables a network interface
3700dee1ad47SJeff Kirsher * @netdev: network interface device structure
3701dee1ad47SJeff Kirsher *
3702dee1ad47SJeff Kirsher * Returns 0, this is not allowed to fail
3703dee1ad47SJeff Kirsher *
3704dee1ad47SJeff Kirsher * The close entry point is called when an interface is de-activated
3705dee1ad47SJeff Kirsher * by the OS. The hardware is still under the drivers control, but
3706dee1ad47SJeff Kirsher * needs to be disabled. A global MAC reset is issued to stop the
3707dee1ad47SJeff Kirsher * hardware, and all transmit and receive resources are freed.
3708dee1ad47SJeff Kirsher **/
3709324d0867SStefan Assmann int ixgbevf_close(struct net_device *netdev)
3710dee1ad47SJeff Kirsher {
3711dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3712dee1ad47SJeff Kirsher
3713b19cf6eeSEmil Tantilov if (netif_device_present(netdev))
3714b19cf6eeSEmil Tantilov ixgbevf_close_suspend(adapter);
3715dee1ad47SJeff Kirsher
3716dee1ad47SJeff Kirsher return 0;
3717dee1ad47SJeff Kirsher }
3718dee1ad47SJeff Kirsher
3719220fe050SDon Skidmore static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3720220fe050SDon Skidmore {
3721220fe050SDon Skidmore struct net_device *dev = adapter->netdev;
3722220fe050SDon Skidmore
3723d5dd7c3fSEmil Tantilov if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3724d5dd7c3fSEmil Tantilov &adapter->state))
3725220fe050SDon Skidmore return;
3726220fe050SDon Skidmore
3727220fe050SDon Skidmore /* if interface is down do nothing */
3728220fe050SDon Skidmore if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3729220fe050SDon Skidmore test_bit(__IXGBEVF_RESETTING, &adapter->state))
3730220fe050SDon Skidmore return;
3731220fe050SDon Skidmore
3732220fe050SDon Skidmore /* Hardware has to reinitialize queues and interrupts to
3733220fe050SDon Skidmore * match packet buffer alignment. Unfortunately, the
3734220fe050SDon Skidmore * hardware is not flexible enough to do this dynamically.
3735220fe050SDon Skidmore */
37362dad7b27SEmil Tantilov rtnl_lock();
37372dad7b27SEmil Tantilov
3738220fe050SDon Skidmore if (netif_running(dev))
3739220fe050SDon Skidmore ixgbevf_close(dev);
3740220fe050SDon Skidmore
3741220fe050SDon Skidmore ixgbevf_clear_interrupt_scheme(adapter);
3742220fe050SDon Skidmore ixgbevf_init_interrupt_scheme(adapter);
3743220fe050SDon Skidmore
3744220fe050SDon Skidmore if (netif_running(dev))
3745220fe050SDon Skidmore ixgbevf_open(dev);
37462dad7b27SEmil Tantilov
37472dad7b27SEmil Tantilov rtnl_unlock();
3748220fe050SDon Skidmore }
3749220fe050SDon Skidmore
375070a10e25SAlexander Duyck static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
37517f68d430SShannon Nelson u32 vlan_macip_lens, u32 fceof_saidx,
37527f68d430SShannon Nelson u32 type_tucmd, u32 mss_l4len_idx)
3753dee1ad47SJeff Kirsher {
3754dee1ad47SJeff Kirsher struct ixgbe_adv_tx_context_desc *context_desc;
375570a10e25SAlexander Duyck u16 i = tx_ring->next_to_use;
375670a10e25SAlexander Duyck
375770a10e25SAlexander Duyck context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
375870a10e25SAlexander Duyck
375970a10e25SAlexander Duyck i++;
376070a10e25SAlexander Duyck tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
376170a10e25SAlexander Duyck
376270a10e25SAlexander Duyck /* set bits to identify this as an advanced context descriptor */
376370a10e25SAlexander Duyck type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
376470a10e25SAlexander Duyck
376570a10e25SAlexander Duyck context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
37667f68d430SShannon Nelson context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
376770a10e25SAlexander Duyck context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
376870a10e25SAlexander Duyck context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
376970a10e25SAlexander Duyck }
377070a10e25SAlexander Duyck
377170a10e25SAlexander Duyck static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
37727ad1a093SEmil Tantilov struct ixgbevf_tx_buffer *first,
37737f68d430SShannon Nelson u8 *hdr_len,
37747f68d430SShannon Nelson struct ixgbevf_ipsec_tx_data *itd)
377570a10e25SAlexander Duyck {
3776b83e3010SAlexander Duyck u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
37777ad1a093SEmil Tantilov struct sk_buff *skb = first->skb;
3778b83e3010SAlexander Duyck union {
3779b83e3010SAlexander Duyck struct iphdr *v4;
3780b83e3010SAlexander Duyck struct ipv6hdr *v6;
3781b83e3010SAlexander Duyck unsigned char *hdr;
3782b83e3010SAlexander Duyck } ip;
3783b83e3010SAlexander Duyck union {
3784b83e3010SAlexander Duyck struct tcphdr *tcp;
3785b83e3010SAlexander Duyck unsigned char *hdr;
3786b83e3010SAlexander Duyck } l4;
3787b83e3010SAlexander Duyck u32 paylen, l4_offset;
37887f68d430SShannon Nelson u32 fceof_saidx = 0;
37898f12c034SFrancois Romieu int err;
3790dee1ad47SJeff Kirsher
379101a545cfSEmil Tantilov if (skb->ip_summed != CHECKSUM_PARTIAL)
379201a545cfSEmil Tantilov return 0;
379301a545cfSEmil Tantilov
379470a10e25SAlexander Duyck if (!skb_is_gso(skb))
379570a10e25SAlexander Duyck return 0;
379670a10e25SAlexander Duyck
37978f12c034SFrancois Romieu err = skb_cow_head(skb, 0);
37988f12c034SFrancois Romieu if (err < 0)
3799dee1ad47SJeff Kirsher return err;
380070a10e25SAlexander Duyck
38012a20525bSScott Peterson if (eth_p_mpls(first->protocol))
38022a20525bSScott Peterson ip.hdr = skb_inner_network_header(skb);
38032a20525bSScott Peterson else
3804b83e3010SAlexander Duyck ip.hdr = skb_network_header(skb);
3805b83e3010SAlexander Duyck l4.hdr = skb_checksum_start(skb);
3806b83e3010SAlexander Duyck
380770a10e25SAlexander Duyck /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
380870a10e25SAlexander Duyck type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3809dee1ad47SJeff Kirsher
3810b83e3010SAlexander Duyck /* initialize outer IP header fields */
3811b83e3010SAlexander Duyck if (ip.v4->version == 4) {
3812c54cdc31SAlexander Duyck unsigned char *csum_start = skb_checksum_start(skb);
3813c54cdc31SAlexander Duyck unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
38147f68d430SShannon Nelson int len = csum_start - trans_start;
3815c54cdc31SAlexander Duyck
3816b83e3010SAlexander Duyck /* IP header will have to cancel out any data that
38177f68d430SShannon Nelson * is not a part of the outer IP header, so set to
38187f68d430SShannon Nelson * a reverse csum if needed, else init check to 0.
3819b83e3010SAlexander Duyck */
38207f68d430SShannon Nelson ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
38217f68d430SShannon Nelson csum_fold(csum_partial(trans_start,
38227f68d430SShannon Nelson len, 0)) : 0;
382370a10e25SAlexander Duyck type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3824b83e3010SAlexander Duyck
3825b83e3010SAlexander Duyck ip.v4->tot_len = 0;
38267ad1a093SEmil Tantilov first->tx_flags |= IXGBE_TX_FLAGS_TSO |
38277ad1a093SEmil Tantilov IXGBE_TX_FLAGS_CSUM |
38287ad1a093SEmil Tantilov IXGBE_TX_FLAGS_IPV4;
3829b83e3010SAlexander Duyck } else {
3830b83e3010SAlexander Duyck ip.v6->payload_len = 0;
38317ad1a093SEmil Tantilov first->tx_flags |= IXGBE_TX_FLAGS_TSO |
38327ad1a093SEmil Tantilov IXGBE_TX_FLAGS_CSUM;
3833dee1ad47SJeff Kirsher }
3834dee1ad47SJeff Kirsher
3835b83e3010SAlexander Duyck /* determine offset of inner transport header */
3836b83e3010SAlexander Duyck l4_offset = l4.hdr - skb->data;
3837dee1ad47SJeff Kirsher
3838b83e3010SAlexander Duyck /* compute length of segmentation header */
3839b83e3010SAlexander Duyck *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3840b83e3010SAlexander Duyck
3841b83e3010SAlexander Duyck /* remove payload length from inner checksum */
3842b83e3010SAlexander Duyck paylen = skb->len - l4_offset;
3843de844713SJesse Brandeburg csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3844b83e3010SAlexander Duyck
3845b83e3010SAlexander Duyck /* update gso size and bytecount with header size */
38467ad1a093SEmil Tantilov first->gso_segs = skb_shinfo(skb)->gso_segs;
38477ad1a093SEmil Tantilov first->bytecount += (first->gso_segs - 1) * *hdr_len;
38487ad1a093SEmil Tantilov
384970a10e25SAlexander Duyck /* mss_l4len_idx: use 1 as index for TSO */
3850b83e3010SAlexander Duyck mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
385170a10e25SAlexander Duyck mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
38528d055cc0SJacob Keller mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
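/* Worked example (illustrative, assuming the customary shift values of 8
 * for L4LEN, 16 for MSS and 4 for IDX): a TSO skb with a 14-byte Ethernet
 * header, a 20-byte IPv4 header and a 20-byte TCP header has l4_offset = 34
 * and *hdr_len = 54, so with gso_size = 1448 the context descriptor gets
 * mss_l4len_idx = (20 << 8) | (1448 << 16) | (1 << 4).
 */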
3853dee1ad47SJeff Kirsher
38547f68d430SShannon Nelson fceof_saidx |= itd->pfsa;
38557f68d430SShannon Nelson type_tucmd |= itd->flags | itd->trailer_len;
38567f68d430SShannon Nelson
385770a10e25SAlexander Duyck /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3858b83e3010SAlexander Duyck vlan_macip_lens = l4.hdr - ip.hdr;
3859b83e3010SAlexander Duyck vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
38607ad1a093SEmil Tantilov vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3861dee1ad47SJeff Kirsher
38627f68d430SShannon Nelson ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
38637f68d430SShannon Nelson mss_l4len_idx);
3864dee1ad47SJeff Kirsher
386570a10e25SAlexander Duyck return 1;
3866dee1ad47SJeff Kirsher }
3867dee1ad47SJeff Kirsher
38687ad1a093SEmil Tantilov static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
38697f68d430SShannon Nelson struct ixgbevf_tx_buffer *first,
38707f68d430SShannon Nelson struct ixgbevf_ipsec_tx_data *itd)
3871dee1ad47SJeff Kirsher {
38727ad1a093SEmil Tantilov struct sk_buff *skb = first->skb;
387370a10e25SAlexander Duyck u32 vlan_macip_lens = 0;
38747f68d430SShannon Nelson u32 fceof_saidx = 0;
387570a10e25SAlexander Duyck u32 type_tucmd = 0;
3876dee1ad47SJeff Kirsher
3877cb2b3edbSAlexander Duyck if (skb->ip_summed != CHECKSUM_PARTIAL)
3878cb2b3edbSAlexander Duyck goto no_csum;
3879dec0d8e4SJeff Kirsher
3880cb2b3edbSAlexander Duyck switch (skb->csum_offset) {
3881cb2b3edbSAlexander Duyck case offsetof(struct tcphdr, check):
3882cb2b3edbSAlexander Duyck type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
38835463fce6SJeff Kirsher fallthrough;
3884cb2b3edbSAlexander Duyck case offsetof(struct udphdr, check):
3885dee1ad47SJeff Kirsher break;
3886cb2b3edbSAlexander Duyck case offsetof(struct sctphdr, checksum):
3887cb2b3edbSAlexander Duyck /* validate that this is actually an SCTP request */
3888fc186d0aSXin Long if (skb_csum_is_sctp(skb)) {
3889cb2b3edbSAlexander Duyck type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
389070a10e25SAlexander Duyck break;
389170a10e25SAlexander Duyck }
38925463fce6SJeff Kirsher fallthrough;
389370a10e25SAlexander Duyck default:
3894d34a614aSMark Rustad skb_checksum_help(skb);
3895d34a614aSMark Rustad goto no_csum;
3896dee1ad47SJeff Kirsher }
38977fb94bd5SSebastian Basierski
38987fb94bd5SSebastian Basierski if (first->protocol == htons(ETH_P_IP))
38997fb94bd5SSebastian Basierski type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
39007fb94bd5SSebastian Basierski
39017ad1a093SEmil Tantilov /* update TX checksum flag */
39027ad1a093SEmil Tantilov first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3903cb2b3edbSAlexander Duyck vlan_macip_lens = skb_checksum_start_offset(skb) -
3904cb2b3edbSAlexander Duyck skb_network_offset(skb);
3905d34a614aSMark Rustad no_csum:
390670a10e25SAlexander Duyck /* vlan_macip_lens: MACLEN, VLAN tag */
390770a10e25SAlexander Duyck vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
39087ad1a093SEmil Tantilov vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3909dee1ad47SJeff Kirsher
39107f68d430SShannon Nelson fceof_saidx |= itd->pfsa;
39117f68d430SShannon Nelson type_tucmd |= itd->flags | itd->trailer_len;
39127f68d430SShannon Nelson
39137f68d430SShannon Nelson ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
39147f68d430SShannon Nelson fceof_saidx, type_tucmd, 0);
3915dee1ad47SJeff Kirsher }
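/* Example for ixgbevf_tx_csum() above (illustrative): for a TCP/IPv4 frame
 * with a 20-byte IP header and no VLAN tag, skb->csum_offset matches
 * offsetof(struct tcphdr, check), so type_tucmd becomes L4T_TCP | TUCMD_IPV4
 * and vlan_macip_lens carries the 20-byte IP header length plus the 14-byte
 * MAC header length in the MACLEN field of the context descriptor.
 */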
3916dee1ad47SJeff Kirsher
391729d37fa1SEmil Tantilov static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
391829d37fa1SEmil Tantilov {
391929d37fa1SEmil Tantilov /* set type for advanced descriptor with frame checksum insertion */
392029d37fa1SEmil Tantilov __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
392129d37fa1SEmil Tantilov IXGBE_ADVTXD_DCMD_IFCS |
392229d37fa1SEmil Tantilov IXGBE_ADVTXD_DCMD_DEXT);
392329d37fa1SEmil Tantilov
3924dec0d8e4SJeff Kirsher /* set HW VLAN bit if VLAN is present */
392529d37fa1SEmil Tantilov if (tx_flags & IXGBE_TX_FLAGS_VLAN)
392629d37fa1SEmil Tantilov cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
392729d37fa1SEmil Tantilov
392829d37fa1SEmil Tantilov /* set segmentation enable bits for TSO/FSO */
392929d37fa1SEmil Tantilov if (tx_flags & IXGBE_TX_FLAGS_TSO)
393029d37fa1SEmil Tantilov cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
393129d37fa1SEmil Tantilov
393229d37fa1SEmil Tantilov return cmd_type;
393329d37fa1SEmil Tantilov }
393429d37fa1SEmil Tantilov
393529d37fa1SEmil Tantilov static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
393629d37fa1SEmil Tantilov u32 tx_flags, unsigned int paylen)
393729d37fa1SEmil Tantilov {
393829d37fa1SEmil Tantilov __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
393929d37fa1SEmil Tantilov
394029d37fa1SEmil Tantilov /* enable L4 checksum for TSO and TX checksum offload */
394129d37fa1SEmil Tantilov if (tx_flags & IXGBE_TX_FLAGS_CSUM)
394229d37fa1SEmil Tantilov olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
394329d37fa1SEmil Tantilov
394429d37fa1SEmil Tantilov /* enable IPv4 checksum for TSO */
394529d37fa1SEmil Tantilov if (tx_flags & IXGBE_TX_FLAGS_IPV4)
394629d37fa1SEmil Tantilov olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
394729d37fa1SEmil Tantilov
39487f68d430SShannon Nelson /* enable IPsec */
39497f68d430SShannon Nelson if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
39507f68d430SShannon Nelson olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
39517f68d430SShannon Nelson
39527f68d430SShannon Nelson /* use index 1 context for TSO/FSO/FCOE/IPSEC */
39537f68d430SShannon Nelson if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
39548d055cc0SJacob Keller olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
395529d37fa1SEmil Tantilov
395629d37fa1SEmil Tantilov /* Check Context must be set if Tx switch is enabled, which it
395729d37fa1SEmil Tantilov * always is for the case where virtual functions are running
395829d37fa1SEmil Tantilov */
395929d37fa1SEmil Tantilov olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
396029d37fa1SEmil Tantilov
396129d37fa1SEmil Tantilov tx_desc->read.olinfo_status = olinfo_status;
396229d37fa1SEmil Tantilov }
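/* Example (illustrative, assuming the usual PAYLEN shift of 14): a
 * 1514-byte frame using checksum offload but not TSO reaches
 * ixgbevf_tx_map() with hdr_len = 0, so olinfo_status above ends up as
 * (1514 << 14) | POPTS_TXSM | ADVTXD_CC before being written into the
 * first data descriptor.
 */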
396329d37fa1SEmil Tantilov
396429d37fa1SEmil Tantilov static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
396529d37fa1SEmil Tantilov struct ixgbevf_tx_buffer *first,
396629d37fa1SEmil Tantilov const u8 hdr_len)
3967dee1ad47SJeff Kirsher {
39687ad1a093SEmil Tantilov struct sk_buff *skb = first->skb;
396929d37fa1SEmil Tantilov struct ixgbevf_tx_buffer *tx_buffer;
397029d37fa1SEmil Tantilov union ixgbe_adv_tx_desc *tx_desc;
3971d7840976SMatthew Wilcox (Oracle) skb_frag_t *frag;
39726f355454SEmil Tantilov dma_addr_t dma;
39736f355454SEmil Tantilov unsigned int data_len, size;
397429d37fa1SEmil Tantilov u32 tx_flags = first->tx_flags;
39756f355454SEmil Tantilov __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
397629d37fa1SEmil Tantilov u16 i = tx_ring->next_to_use;
3977dee1ad47SJeff Kirsher
397829d37fa1SEmil Tantilov tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3979dee1ad47SJeff Kirsher
39806f355454SEmil Tantilov ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
39816f355454SEmil Tantilov
39826f355454SEmil Tantilov size = skb_headlen(skb);
39836f355454SEmil Tantilov data_len = skb->data_len;
3984dee1ad47SJeff Kirsher
398529d37fa1SEmil Tantilov dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
39866f355454SEmil Tantilov
39876f355454SEmil Tantilov tx_buffer = first;
39886f355454SEmil Tantilov
39896f355454SEmil Tantilov for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
39909bdfefd2SEmil Tantilov if (dma_mapping_error(tx_ring->dev, dma))
3991dee1ad47SJeff Kirsher goto dma_error;
3992dee1ad47SJeff Kirsher
39939bdfefd2SEmil Tantilov /* record length, and DMA address */
39946f355454SEmil Tantilov dma_unmap_len_set(tx_buffer, len, size);
39956f355454SEmil Tantilov dma_unmap_addr_set(tx_buffer, dma, dma);
39969bdfefd2SEmil Tantilov
399729d37fa1SEmil Tantilov tx_desc->read.buffer_addr = cpu_to_le64(dma);
399829d37fa1SEmil Tantilov
399929d37fa1SEmil Tantilov while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
400029d37fa1SEmil Tantilov tx_desc->read.cmd_type_len =
400129d37fa1SEmil Tantilov cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
400229d37fa1SEmil Tantilov
4003dee1ad47SJeff Kirsher i++;
400429d37fa1SEmil Tantilov tx_desc++;
400529d37fa1SEmil Tantilov if (i == tx_ring->count) {
400629d37fa1SEmil Tantilov tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4007dee1ad47SJeff Kirsher i = 0;
4008dee1ad47SJeff Kirsher }
40096f355454SEmil Tantilov tx_desc->read.olinfo_status = 0;
4010dee1ad47SJeff Kirsher
401129d37fa1SEmil Tantilov dma += IXGBE_MAX_DATA_PER_TXD;
401229d37fa1SEmil Tantilov size -= IXGBE_MAX_DATA_PER_TXD;
4013dee1ad47SJeff Kirsher
401429d37fa1SEmil Tantilov tx_desc->read.buffer_addr = cpu_to_le64(dma);
4015dee1ad47SJeff Kirsher }
401629d37fa1SEmil Tantilov
401729d37fa1SEmil Tantilov if (likely(!data_len))
4018dee1ad47SJeff Kirsher break;
401929d37fa1SEmil Tantilov
402029d37fa1SEmil Tantilov tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
402129d37fa1SEmil Tantilov
402229d37fa1SEmil Tantilov i++;
402329d37fa1SEmil Tantilov tx_desc++;
402429d37fa1SEmil Tantilov if (i == tx_ring->count) {
402529d37fa1SEmil Tantilov tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
402629d37fa1SEmil Tantilov i = 0;
4027dee1ad47SJeff Kirsher }
40286f355454SEmil Tantilov tx_desc->read.olinfo_status = 0;
4029dee1ad47SJeff Kirsher
403029d37fa1SEmil Tantilov size = skb_frag_size(frag);
403129d37fa1SEmil Tantilov data_len -= size;
40327ad1a093SEmil Tantilov
403329d37fa1SEmil Tantilov dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
403429d37fa1SEmil Tantilov DMA_TO_DEVICE);
403529d37fa1SEmil Tantilov
403629d37fa1SEmil Tantilov tx_buffer = &tx_ring->tx_buffer_info[i];
403729d37fa1SEmil Tantilov }
403829d37fa1SEmil Tantilov
403929d37fa1SEmil Tantilov /* write last descriptor with RS and EOP bits */
404029d37fa1SEmil Tantilov cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
404129d37fa1SEmil Tantilov tx_desc->read.cmd_type_len = cmd_type;
404229d37fa1SEmil Tantilov
404329d37fa1SEmil Tantilov /* set the timestamp */
40447ad1a093SEmil Tantilov first->time_stamp = jiffies;
4045dee1ad47SJeff Kirsher
40469fc145fcSJacob Keller skb_tx_timestamp(skb);
40479fc145fcSJacob Keller
404829d37fa1SEmil Tantilov /* Force memory writes to complete before letting h/w know there
404929d37fa1SEmil Tantilov * are new descriptors to fetch. (Only applicable for weak-ordered
405029d37fa1SEmil Tantilov * memory model archs, such as IA-64).
405129d37fa1SEmil Tantilov *
405229d37fa1SEmil Tantilov * We also need this memory barrier (wmb) to make certain all of the
405329d37fa1SEmil Tantilov * status bits have been updated before next_to_watch is written.
405429d37fa1SEmil Tantilov */
405529d37fa1SEmil Tantilov wmb();
4056dee1ad47SJeff Kirsher
405729d37fa1SEmil Tantilov /* set next_to_watch value indicating a packet is present */
405829d37fa1SEmil Tantilov first->next_to_watch = tx_desc;
405929d37fa1SEmil Tantilov
406029d37fa1SEmil Tantilov i++;
406129d37fa1SEmil Tantilov if (i == tx_ring->count)
406229d37fa1SEmil Tantilov i = 0;
406329d37fa1SEmil Tantilov
406429d37fa1SEmil Tantilov tx_ring->next_to_use = i;
406529d37fa1SEmil Tantilov
406629d37fa1SEmil Tantilov /* notify HW of packet */
406706380db6SMark Rustad ixgbevf_write_tail(tx_ring, i);
406829d37fa1SEmil Tantilov
406929d37fa1SEmil Tantilov return;
4070dee1ad47SJeff Kirsher dma_error:
407170a10e25SAlexander Duyck dev_err(tx_ring->dev, "TX DMA map failed\n");
4072865a4d98SEmil Tantilov tx_buffer = &tx_ring->tx_buffer_info[i];
4073dee1ad47SJeff Kirsher
407429d37fa1SEmil Tantilov /* clear dma mappings for failed tx_buffer_info map */
4075865a4d98SEmil Tantilov while (tx_buffer != first) {
4076865a4d98SEmil Tantilov if (dma_unmap_len(tx_buffer, len))
4077865a4d98SEmil Tantilov dma_unmap_page(tx_ring->dev,
4078865a4d98SEmil Tantilov dma_unmap_addr(tx_buffer, dma),
4079865a4d98SEmil Tantilov dma_unmap_len(tx_buffer, len),
4080865a4d98SEmil Tantilov DMA_TO_DEVICE);
4081865a4d98SEmil Tantilov dma_unmap_len_set(tx_buffer, len, 0);
4082865a4d98SEmil Tantilov
4083865a4d98SEmil Tantilov if (i-- == 0)
4084865a4d98SEmil Tantilov i += tx_ring->count;
408529d37fa1SEmil Tantilov tx_buffer = &tx_ring->tx_buffer_info[i];
4086dee1ad47SJeff Kirsher }
4087dee1ad47SJeff Kirsher
4088865a4d98SEmil Tantilov if (dma_unmap_len(tx_buffer, len))
4089865a4d98SEmil Tantilov dma_unmap_single(tx_ring->dev,
4090865a4d98SEmil Tantilov dma_unmap_addr(tx_buffer, dma),
4091865a4d98SEmil Tantilov dma_unmap_len(tx_buffer, len),
4092865a4d98SEmil Tantilov DMA_TO_DEVICE);
4093865a4d98SEmil Tantilov dma_unmap_len_set(tx_buffer, len, 0);
4094865a4d98SEmil Tantilov
4095865a4d98SEmil Tantilov dev_kfree_skb_any(tx_buffer->skb);
4096865a4d98SEmil Tantilov tx_buffer->skb = NULL;
4097865a4d98SEmil Tantilov
4098dee1ad47SJeff Kirsher tx_ring->next_to_use = i;
4099dee1ad47SJeff Kirsher }
4100dee1ad47SJeff Kirsher
4101fb40195cSAlexander Duyck static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4102dee1ad47SJeff Kirsher {
4103fb40195cSAlexander Duyck netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4104dee1ad47SJeff Kirsher /* Herbert's original patch had:
4105dee1ad47SJeff Kirsher * smp_mb__after_netif_stop_queue();
4106dec0d8e4SJeff Kirsher * but since that doesn't exist yet, just open code it.
4107dec0d8e4SJeff Kirsher */
4108dee1ad47SJeff Kirsher smp_mb();
4109dee1ad47SJeff Kirsher
4110dee1ad47SJeff Kirsher /* We need to check again in case another CPU has just
4111dec0d8e4SJeff Kirsher * made room available.
4112dec0d8e4SJeff Kirsher */
4113f880d07bSDon Skidmore if (likely(ixgbevf_desc_unused(tx_ring) < size))
4114dee1ad47SJeff Kirsher return -EBUSY;
4115dee1ad47SJeff Kirsher
4116dee1ad47SJeff Kirsher /* A reprieve! - use start_queue because it doesn't call schedule */
4117fb40195cSAlexander Duyck netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4118095e2617SEmil Tantilov ++tx_ring->tx_stats.restart_queue;
4119095e2617SEmil Tantilov
4120dee1ad47SJeff Kirsher return 0;
4121dee1ad47SJeff Kirsher }
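/* Usage sketch (illustrative): the transmit path calls
 * ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED) after queueing a frame; the
 * common case is the cheap ixgbevf_desc_unused() comparison below, and only
 * when the ring is nearly full does the slow path above stop the subqueue,
 * re-check under the memory barrier and, if space freed up meanwhile,
 * restart the queue and bump restart_queue.
 */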
4122dee1ad47SJeff Kirsher
4123fb40195cSAlexander Duyck static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4124dee1ad47SJeff Kirsher {
4125f880d07bSDon Skidmore if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4126dee1ad47SJeff Kirsher return 0;
4127fb40195cSAlexander Duyck return __ixgbevf_maybe_stop_tx(tx_ring, size);
4128dee1ad47SJeff Kirsher }
4129dee1ad47SJeff Kirsher
41305cc0f1c0SEmil Tantilov static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
41315cc0f1c0SEmil Tantilov struct ixgbevf_ring *tx_ring)
4132dee1ad47SJeff Kirsher {
41337ad1a093SEmil Tantilov struct ixgbevf_tx_buffer *first;
41347ad1a093SEmil Tantilov int tso;
41357ad1a093SEmil Tantilov u32 tx_flags = 0;
41363595990aSAlexander Duyck u16 count = TXD_USE_COUNT(skb_headlen(skb));
41377f68d430SShannon Nelson struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
41383595990aSAlexander Duyck #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
41393595990aSAlexander Duyck unsigned short f;
41403595990aSAlexander Duyck #endif
41417ad1a093SEmil Tantilov u8 hdr_len = 0;
4142f9d08f16SGreg Rose u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
41437ad1a093SEmil Tantilov
414446acc460SBen Hutchings if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4145e7fcd543SAlexander Duyck dev_kfree_skb_any(skb);
4146f9d08f16SGreg Rose return NETDEV_TX_OK;
4147f9d08f16SGreg Rose }
4148dee1ad47SJeff Kirsher
4149dec0d8e4SJeff Kirsher /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
41503595990aSAlexander Duyck * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
41513595990aSAlexander Duyck * + 2 desc gap to keep tail from touching head,
41523595990aSAlexander Duyck * + 1 desc for context descriptor,
41533595990aSAlexander Duyck * otherwise try next time
41543595990aSAlexander Duyck */
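/* Worked example (illustrative, assuming 4K pages and the usual 16K
 * IXGBE_MAX_DATA_PER_TXD, i.e. the #else branch below): a frame with a
 * small linear head and three page fragments starts with count = 1, adds 3
 * for the fragments, and the ring must then have at least count + 3 = 7
 * free descriptors or the queue is stopped and NETDEV_TX_BUSY is returned.
 */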
41553595990aSAlexander Duyck #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4156d601be97SQian Cai for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
4157d601be97SQian Cai skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
4158d601be97SQian Cai
4159d601be97SQian Cai count += TXD_USE_COUNT(skb_frag_size(frag));
4160d601be97SQian Cai }
41613595990aSAlexander Duyck #else
41623595990aSAlexander Duyck count += skb_shinfo(skb)->nr_frags;
41633595990aSAlexander Duyck #endif
4164fb40195cSAlexander Duyck if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4165095e2617SEmil Tantilov tx_ring->tx_stats.tx_busy++;
41663595990aSAlexander Duyck return NETDEV_TX_BUSY;
41673595990aSAlexander Duyck }
41683595990aSAlexander Duyck
41697ad1a093SEmil Tantilov /* record the location of the first descriptor for this packet */
41707ad1a093SEmil Tantilov first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
41717ad1a093SEmil Tantilov first->skb = skb;
41727ad1a093SEmil Tantilov first->bytecount = skb->len;
41737ad1a093SEmil Tantilov first->gso_segs = 1;
41747ad1a093SEmil Tantilov
4175df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) {
4176df8a39deSJiri Pirko tx_flags |= skb_vlan_tag_get(skb);
4177dee1ad47SJeff Kirsher tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4178dee1ad47SJeff Kirsher tx_flags |= IXGBE_TX_FLAGS_VLAN;
4179dee1ad47SJeff Kirsher }
4180dee1ad47SJeff Kirsher
41817ad1a093SEmil Tantilov /* record initial flags and protocol */
41827ad1a093SEmil Tantilov first->tx_flags = tx_flags;
41837ad1a093SEmil Tantilov first->protocol = vlan_get_protocol(skb);
4184dee1ad47SJeff Kirsher
418548e01e00SJeff Kirsher #ifdef CONFIG_IXGBEVF_IPSEC
41868f6617baSJeff Kirsher if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
41877f68d430SShannon Nelson goto out_drop;
41887f68d430SShannon Nelson #endif
41897f68d430SShannon Nelson tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
41907ad1a093SEmil Tantilov if (tso < 0)
41917ad1a093SEmil Tantilov goto out_drop;
4192b5d217f3SEmil Tantilov else if (!tso)
41937f68d430SShannon Nelson ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4194dee1ad47SJeff Kirsher
419529d37fa1SEmil Tantilov ixgbevf_tx_map(tx_ring, first, hdr_len);
4196dee1ad47SJeff Kirsher
4197fb40195cSAlexander Duyck ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4198dee1ad47SJeff Kirsher
4199dee1ad47SJeff Kirsher return NETDEV_TX_OK;
42007ad1a093SEmil Tantilov
42017ad1a093SEmil Tantilov out_drop:
42027ad1a093SEmil Tantilov dev_kfree_skb_any(first->skb);
42037ad1a093SEmil Tantilov first->skb = NULL;
42047ad1a093SEmil Tantilov
42057ad1a093SEmil Tantilov return NETDEV_TX_OK;
4206dee1ad47SJeff Kirsher }
4207dee1ad47SJeff Kirsher
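/**
 * ixgbevf_xmit_frame - ndo_start_xmit callback
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Drops zero-length frames, pads frames shorter than 17 bytes and hands
 * the skb to ixgbevf_xmit_frame_ring() for the mapped Tx queue.
 **/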
4208cf12aab6SLuc Van Oostenryck static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
42095cc0f1c0SEmil Tantilov {
42105cc0f1c0SEmil Tantilov struct ixgbevf_adapter *adapter = netdev_priv(netdev);
42115cc0f1c0SEmil Tantilov struct ixgbevf_ring *tx_ring;
42125cc0f1c0SEmil Tantilov
42135cc0f1c0SEmil Tantilov if (skb->len <= 0) {
42145cc0f1c0SEmil Tantilov dev_kfree_skb_any(skb);
42155cc0f1c0SEmil Tantilov return NETDEV_TX_OK;
42165cc0f1c0SEmil Tantilov }
42175cc0f1c0SEmil Tantilov
42185cc0f1c0SEmil Tantilov /* The minimum packet size for olinfo paylen is 17 so pad the skb
42195cc0f1c0SEmil Tantilov * in order to meet this minimum size requirement.
42205cc0f1c0SEmil Tantilov */
42215cc0f1c0SEmil Tantilov if (skb->len < 17) {
42225cc0f1c0SEmil Tantilov if (skb_padto(skb, 17))
42235cc0f1c0SEmil Tantilov return NETDEV_TX_OK;
42245cc0f1c0SEmil Tantilov skb->len = 17;
42255cc0f1c0SEmil Tantilov }
42265cc0f1c0SEmil Tantilov
42275cc0f1c0SEmil Tantilov tx_ring = adapter->tx_ring[skb->queue_mapping];
42285cc0f1c0SEmil Tantilov return ixgbevf_xmit_frame_ring(skb, tx_ring);
42295cc0f1c0SEmil Tantilov }
42305cc0f1c0SEmil Tantilov
4231dee1ad47SJeff Kirsher /**
4232dee1ad47SJeff Kirsher * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4233dee1ad47SJeff Kirsher * @netdev: network interface device structure
4234dee1ad47SJeff Kirsher * @p: pointer to an address structure
4235dee1ad47SJeff Kirsher *
4236dee1ad47SJeff Kirsher * Returns 0 on success, negative on failure
4237dee1ad47SJeff Kirsher **/
4238dee1ad47SJeff Kirsher static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4239dee1ad47SJeff Kirsher {
4240dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4241dee1ad47SJeff Kirsher struct ixgbe_hw *hw = &adapter->hw;
4242dee1ad47SJeff Kirsher struct sockaddr *addr = p;
424332ca6868SEmil Tantilov int err;
4244dee1ad47SJeff Kirsher
4245dee1ad47SJeff Kirsher if (!is_valid_ether_addr(addr->sa_data))
4246dee1ad47SJeff Kirsher return -EADDRNOTAVAIL;
4247dee1ad47SJeff Kirsher
424855fdd45bSJohn Fastabend spin_lock_bh(&adapter->mbx_lock);
42491c55ed76SAlexander Duyck
425032ca6868SEmil Tantilov err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4251dee1ad47SJeff Kirsher
425255fdd45bSJohn Fastabend spin_unlock_bh(&adapter->mbx_lock);
42531c55ed76SAlexander Duyck
425432ca6868SEmil Tantilov if (err)
425532ca6868SEmil Tantilov return -EPERM;
425632ca6868SEmil Tantilov
425732ca6868SEmil Tantilov ether_addr_copy(hw->mac.addr, addr->sa_data);
42586e7d0ba1SEmil Tantilov ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4259f3956ebbSJakub Kicinski eth_hw_addr_set(netdev, addr->sa_data);
426032ca6868SEmil Tantilov
4261dee1ad47SJeff Kirsher return 0;
4262dee1ad47SJeff Kirsher }
4263dee1ad47SJeff Kirsher
4264dee1ad47SJeff Kirsher /**
4265dee1ad47SJeff Kirsher * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4266dee1ad47SJeff Kirsher * @netdev: network interface device structure
4267dee1ad47SJeff Kirsher * @new_mtu: new value for maximum frame size
4268dee1ad47SJeff Kirsher *
4269dee1ad47SJeff Kirsher * Returns 0 on success, negative on failure
4270dee1ad47SJeff Kirsher **/
4271dee1ad47SJeff Kirsher static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4272dee1ad47SJeff Kirsher {
4273dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4274bad17234SEmil Tantilov struct ixgbe_hw *hw = &adapter->hw;
4275dee1ad47SJeff Kirsher int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
42766a11e52bSTony Nguyen int ret;
4277dee1ad47SJeff Kirsher
4278c7aec596STony Nguyen /* prevent MTU being changed to a size unsupported by XDP */
4279c7aec596STony Nguyen if (adapter->xdp_prog) {
4280c7aec596STony Nguyen dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4281c7aec596STony Nguyen return -EPERM;
4282c7aec596STony Nguyen }
4283c7aec596STony Nguyen
428414b22cd9SEmil Tantilov spin_lock_bh(&adapter->mbx_lock);
42856a11e52bSTony Nguyen /* notify the PF of our intent to use this size of frame */
42866a11e52bSTony Nguyen ret = hw->mac.ops.set_rlpml(hw, max_frame);
428714b22cd9SEmil Tantilov spin_unlock_bh(&adapter->mbx_lock);
42886a11e52bSTony Nguyen if (ret)
42896a11e52bSTony Nguyen return -EINVAL;
42906a11e52bSTony Nguyen
4291bad17234SEmil Tantilov hw_dbg(hw, "changing MTU from %d to %d\n",
4292dee1ad47SJeff Kirsher netdev->mtu, new_mtu);
42936a11e52bSTony Nguyen
4294dee1ad47SJeff Kirsher /* must set new MTU before calling down or up */
4295dee1ad47SJeff Kirsher netdev->mtu = new_mtu;
4296dee1ad47SJeff Kirsher
42971ab37e12SEmil Tantilov if (netif_running(netdev))
42981ab37e12SEmil Tantilov ixgbevf_reinit_locked(adapter);
42991ab37e12SEmil Tantilov
4300dee1ad47SJeff Kirsher return 0;
4301dee1ad47SJeff Kirsher }
4302dee1ad47SJeff Kirsher
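/**
 * ixgbevf_suspend - device suspend callback
 * @dev_d: device being suspended
 *
 * Detaches the netdev, closes the interface if it is running and
 * releases the interrupt scheme before the device is powered down.
 **/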
4303bac66317SVaibhav Gupta static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
4304dee1ad47SJeff Kirsher {
4305bac66317SVaibhav Gupta struct net_device *netdev = dev_get_drvdata(dev_d);
4306dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4307dee1ad47SJeff Kirsher
43082dad7b27SEmil Tantilov rtnl_lock();
4309dee1ad47SJeff Kirsher netif_device_detach(netdev);
4310dee1ad47SJeff Kirsher
4311b19cf6eeSEmil Tantilov if (netif_running(netdev))
4312b19cf6eeSEmil Tantilov ixgbevf_close_suspend(adapter);
4313b19cf6eeSEmil Tantilov
4314eeffceeeSMark Rustad ixgbevf_clear_interrupt_scheme(adapter);
43152dad7b27SEmil Tantilov rtnl_unlock();
4316dee1ad47SJeff Kirsher
43170ac1e8ceSAlexander Duyck return 0;
43180ac1e8ceSAlexander Duyck }
43190ac1e8ceSAlexander Duyck
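/**
 * ixgbevf_resume - device resume callback
 * @dev_d: device being resumed
 *
 * Restores bus mastering, resets the VF, reinitializes the interrupt
 * scheme and reopens the interface if it was running before suspend.
 **/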
4320bac66317SVaibhav Gupta static int __maybe_unused ixgbevf_resume(struct device *dev_d)
43210ac1e8ceSAlexander Duyck {
4322bac66317SVaibhav Gupta struct pci_dev *pdev = to_pci_dev(dev_d);
432327ae2967SWei Yongjun struct net_device *netdev = pci_get_drvdata(pdev);
432427ae2967SWei Yongjun struct ixgbevf_adapter *adapter = netdev_priv(netdev);
43250ac1e8ceSAlexander Duyck u32 err;
43260ac1e8ceSAlexander Duyck
432726403b7fSEmil Tantilov adapter->hw.hw_addr = adapter->io_addr;
43284e857c58SPeter Zijlstra smp_mb__before_atomic();
4329bc0c7151SMark Rustad clear_bit(__IXGBEVF_DISABLED, &adapter->state);
43300ac1e8ceSAlexander Duyck pci_set_master(pdev);
43310ac1e8ceSAlexander Duyck
4332798e381aSDon Skidmore ixgbevf_reset(adapter);
4333798e381aSDon Skidmore
43340ac1e8ceSAlexander Duyck rtnl_lock();
43350ac1e8ceSAlexander Duyck err = ixgbevf_init_interrupt_scheme(adapter);
4336f2d00ecaSEmil Tantilov if (!err && netif_running(netdev))
43370ac1e8ceSAlexander Duyck err = ixgbevf_open(netdev);
4338f2d00ecaSEmil Tantilov rtnl_unlock();
43390ac1e8ceSAlexander Duyck if (err)
43400ac1e8ceSAlexander Duyck return err;
43410ac1e8ceSAlexander Duyck
43420ac1e8ceSAlexander Duyck netif_device_attach(netdev);
43430ac1e8ceSAlexander Duyck
43440ac1e8ceSAlexander Duyck return err;
43450ac1e8ceSAlexander Duyck }
43460ac1e8ceSAlexander Duyck
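/**
 * ixgbevf_shutdown - PCI shutdown callback
 * @pdev: PCI device information struct
 *
 * Quiesces the device by reusing the suspend path.
 **/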
43470ac1e8ceSAlexander Duyck static void ixgbevf_shutdown(struct pci_dev *pdev)
43480ac1e8ceSAlexander Duyck {
4349bac66317SVaibhav Gupta ixgbevf_suspend(&pdev->dev);
4350dee1ad47SJeff Kirsher }
4351dee1ad47SJeff Kirsher
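/**
 * ixgbevf_get_tx_ring_stats - accumulate one ring's Tx byte/packet counts
 * @stats: rtnl_link_stats64 structure being filled in
 * @ring: Tx or XDP ring to read (may be NULL)
 *
 * Reads the ring counters under the u64 stats sequence counter and adds
 * them to the tx_bytes/tx_packets totals.
 **/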
435221092e9cSTony Nguyen static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
435321092e9cSTony Nguyen const struct ixgbevf_ring *ring)
435421092e9cSTony Nguyen {
435521092e9cSTony Nguyen u64 bytes, packets;
435621092e9cSTony Nguyen unsigned int start;
435721092e9cSTony Nguyen
435821092e9cSTony Nguyen if (ring) {
435921092e9cSTony Nguyen do {
4360068c38adSThomas Gleixner start = u64_stats_fetch_begin(&ring->syncp);
436121092e9cSTony Nguyen bytes = ring->stats.bytes;
436221092e9cSTony Nguyen packets = ring->stats.packets;
4363068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&ring->syncp, start));
436421092e9cSTony Nguyen stats->tx_bytes += bytes;
436521092e9cSTony Nguyen stats->tx_packets += packets;
436621092e9cSTony Nguyen }
436721092e9cSTony Nguyen }
436821092e9cSTony Nguyen
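/**
 * ixgbevf_get_stats - report 64-bit interface statistics
 * @netdev: network interface device structure
 * @stats: storage for the accumulated statistics
 *
 * Refreshes the adapter counters and sums the per-ring Rx, Tx and XDP
 * byte/packet statistics under RCU protection.
 **/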
4369bc1f4470Sstephen hemminger static void ixgbevf_get_stats(struct net_device *netdev,
43704197aa7bSEric Dumazet struct rtnl_link_stats64 *stats)
43714197aa7bSEric Dumazet {
43724197aa7bSEric Dumazet struct ixgbevf_adapter *adapter = netdev_priv(netdev);
43734197aa7bSEric Dumazet unsigned int start;
43744197aa7bSEric Dumazet u64 bytes, packets;
43754197aa7bSEric Dumazet const struct ixgbevf_ring *ring;
43764197aa7bSEric Dumazet int i;
43774197aa7bSEric Dumazet
43784197aa7bSEric Dumazet ixgbevf_update_stats(adapter);
43794197aa7bSEric Dumazet
43804197aa7bSEric Dumazet stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
43814197aa7bSEric Dumazet
438221c046e4SEmil Tantilov rcu_read_lock();
43834197aa7bSEric Dumazet for (i = 0; i < adapter->num_rx_queues; i++) {
438487e70ab9SDon Skidmore ring = adapter->rx_ring[i];
43854197aa7bSEric Dumazet do {
4386068c38adSThomas Gleixner start = u64_stats_fetch_begin(&ring->syncp);
4387095e2617SEmil Tantilov bytes = ring->stats.bytes;
4388095e2617SEmil Tantilov packets = ring->stats.packets;
4389068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&ring->syncp, start));
43904197aa7bSEric Dumazet stats->rx_bytes += bytes;
43914197aa7bSEric Dumazet stats->rx_packets += packets;
43924197aa7bSEric Dumazet }
43934197aa7bSEric Dumazet
43944197aa7bSEric Dumazet for (i = 0; i < adapter->num_tx_queues; i++) {
439587e70ab9SDon Skidmore ring = adapter->tx_ring[i];
439621092e9cSTony Nguyen ixgbevf_get_tx_ring_stats(stats, ring);
439721092e9cSTony Nguyen }
439821092e9cSTony Nguyen
439921092e9cSTony Nguyen for (i = 0; i < adapter->num_xdp_queues; i++) {
440021092e9cSTony Nguyen ring = adapter->xdp_ring[i];
440121092e9cSTony Nguyen ixgbevf_get_tx_ring_stats(stats, ring);
44024197aa7bSEric Dumazet }
440321c046e4SEmil Tantilov rcu_read_unlock();
44044197aa7bSEric Dumazet }
44054197aa7bSEric Dumazet
4406b83e3010SAlexander Duyck #define IXGBEVF_MAX_MAC_HDR_LEN 127
4407b83e3010SAlexander Duyck #define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4408b83e3010SAlexander Duyck
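/**
 * ixgbevf_features_check - trim offload features for an individual skb
 * @skb: socket buffer being transmitted
 * @dev: network interface device structure
 * @features: currently enabled feature set
 *
 * Drops checksum and segmentation offloads when the MAC or network
 * headers are too long to fit in a context descriptor, and drops TSO
 * for encapsulated frames when the inner IP ID cannot be mangled.
 **/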
4409b83e3010SAlexander Duyck static netdev_features_t
4410b83e3010SAlexander Duyck ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4411b83e3010SAlexander Duyck netdev_features_t features)
4412b83e3010SAlexander Duyck {
4413b83e3010SAlexander Duyck unsigned int network_hdr_len, mac_hdr_len;
4414b83e3010SAlexander Duyck
4415b83e3010SAlexander Duyck /* Make certain the headers can be described by a context descriptor */
4416b83e3010SAlexander Duyck mac_hdr_len = skb_network_header(skb) - skb->data;
4417b83e3010SAlexander Duyck if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4418b83e3010SAlexander Duyck return features & ~(NETIF_F_HW_CSUM |
4419b83e3010SAlexander Duyck NETIF_F_SCTP_CRC |
4420b83e3010SAlexander Duyck NETIF_F_HW_VLAN_CTAG_TX |
4421b83e3010SAlexander Duyck NETIF_F_TSO |
4422b83e3010SAlexander Duyck NETIF_F_TSO6);
4423b83e3010SAlexander Duyck
4424b83e3010SAlexander Duyck network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4425b83e3010SAlexander Duyck if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4426b83e3010SAlexander Duyck return features & ~(NETIF_F_HW_CSUM |
4427b83e3010SAlexander Duyck NETIF_F_SCTP_CRC |
4428b83e3010SAlexander Duyck NETIF_F_TSO |
4429b83e3010SAlexander Duyck NETIF_F_TSO6);
4430b83e3010SAlexander Duyck
4431b83e3010SAlexander Duyck /* We can only support IPV4 TSO in tunnels if we can mangle the
4432b83e3010SAlexander Duyck * inner IP ID field, so strip TSO if MANGLEID is not supported.
4433b83e3010SAlexander Duyck */
4434b83e3010SAlexander Duyck if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4435b83e3010SAlexander Duyck features &= ~NETIF_F_TSO;
4436b83e3010SAlexander Duyck
4437b83e3010SAlexander Duyck return features;
4438b83e3010SAlexander Duyck }
4439b83e3010SAlexander Duyck
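/**
 * ixgbevf_xdp_setup - attach or detach an XDP program
 * @dev: network interface device structure
 * @prog: BPF program to install, or NULL to remove the current one
 *
 * Verifies that every Rx ring buffer is large enough for the current
 * frame size, swaps in the new program and, when switching between XDP
 * and non-XDP mode, reinitializes the queues and interrupt scheme.
 **/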
4440c7aec596STony Nguyen static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4441c7aec596STony Nguyen {
4442c7aec596STony Nguyen int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4443c7aec596STony Nguyen struct ixgbevf_adapter *adapter = netdev_priv(dev);
4444c7aec596STony Nguyen struct bpf_prog *old_prog;
4445c7aec596STony Nguyen
4446c7aec596STony Nguyen /* verify ixgbevf ring attributes are sufficient for XDP */
4447c7aec596STony Nguyen for (i = 0; i < adapter->num_rx_queues; i++) {
4448c7aec596STony Nguyen struct ixgbevf_ring *ring = adapter->rx_ring[i];
4449c7aec596STony Nguyen
4450c7aec596STony Nguyen if (frame_size > ixgbevf_rx_bufsz(ring))
4451c7aec596STony Nguyen return -EINVAL;
4452c7aec596STony Nguyen }
4453c7aec596STony Nguyen
4454c7aec596STony Nguyen old_prog = xchg(&adapter->xdp_prog, prog);
445521092e9cSTony Nguyen
445621092e9cSTony Nguyen /* If transitioning XDP modes reconfigure rings */
445721092e9cSTony Nguyen if (!!prog != !!old_prog) {
445821092e9cSTony Nguyen /* Hardware has to reinitialize queues and interrupts to
445921092e9cSTony Nguyen * match packet buffer alignment. Unfortunately, the
446021092e9cSTony Nguyen * hardware is not flexible enough to do this dynamically.
446121092e9cSTony Nguyen */
446221092e9cSTony Nguyen if (netif_running(dev))
446321092e9cSTony Nguyen ixgbevf_close(dev);
446421092e9cSTony Nguyen
446521092e9cSTony Nguyen ixgbevf_clear_interrupt_scheme(adapter);
446621092e9cSTony Nguyen ixgbevf_init_interrupt_scheme(adapter);
446721092e9cSTony Nguyen
446821092e9cSTony Nguyen if (netif_running(dev))
446921092e9cSTony Nguyen ixgbevf_open(dev);
447021092e9cSTony Nguyen } else {
4471c7aec596STony Nguyen for (i = 0; i < adapter->num_rx_queues; i++)
4472c7aec596STony Nguyen xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
447321092e9cSTony Nguyen }
4474c7aec596STony Nguyen
4475c7aec596STony Nguyen if (old_prog)
4476c7aec596STony Nguyen bpf_prog_put(old_prog);
4477c7aec596STony Nguyen
4478c7aec596STony Nguyen return 0;
4479c7aec596STony Nguyen }
4480c7aec596STony Nguyen
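/**
 * ixgbevf_xdp - ndo_bpf callback
 * @dev: network interface device structure
 * @xdp: XDP command descriptor
 *
 * Only XDP_SETUP_PROG is supported; all other commands return -EINVAL.
 **/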
4481c7aec596STony Nguyen static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4482c7aec596STony Nguyen {
4483c7aec596STony Nguyen switch (xdp->command) {
4484c7aec596STony Nguyen case XDP_SETUP_PROG:
4485c7aec596STony Nguyen return ixgbevf_xdp_setup(dev, xdp->prog);
4486c7aec596STony Nguyen default:
4487c7aec596STony Nguyen return -EINVAL;
4488c7aec596STony Nguyen }
4489c7aec596STony Nguyen }
4490c7aec596STony Nguyen
44910ac1e8ceSAlexander Duyck static const struct net_device_ops ixgbevf_netdev_ops = {
4492dee1ad47SJeff Kirsher .ndo_open = ixgbevf_open,
4493dee1ad47SJeff Kirsher .ndo_stop = ixgbevf_close,
4494dee1ad47SJeff Kirsher .ndo_start_xmit = ixgbevf_xmit_frame,
4495dee1ad47SJeff Kirsher .ndo_set_rx_mode = ixgbevf_set_rx_mode,
44964197aa7bSEric Dumazet .ndo_get_stats64 = ixgbevf_get_stats,
4497dee1ad47SJeff Kirsher .ndo_validate_addr = eth_validate_addr,
4498dee1ad47SJeff Kirsher .ndo_set_mac_address = ixgbevf_set_mac,
4499dee1ad47SJeff Kirsher .ndo_change_mtu = ixgbevf_change_mtu,
4500dee1ad47SJeff Kirsher .ndo_tx_timeout = ixgbevf_tx_timeout,
4501dee1ad47SJeff Kirsher .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4502dee1ad47SJeff Kirsher .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4503b83e3010SAlexander Duyck .ndo_features_check = ixgbevf_features_check,
4504c7aec596STony Nguyen .ndo_bpf = ixgbevf_xdp,
4505dee1ad47SJeff Kirsher };
4506dee1ad47SJeff Kirsher
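/**
 * ixgbevf_assign_netdev_ops - wire up net_device callbacks
 * @dev: network interface device structure
 *
 * Installs the ixgbevf net_device_ops and ethtool operations and sets
 * the Tx watchdog timeout.
 **/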
4507dee1ad47SJeff Kirsher static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4508dee1ad47SJeff Kirsher {
45090ac1e8ceSAlexander Duyck dev->netdev_ops = &ixgbevf_netdev_ops;
4510dee1ad47SJeff Kirsher ixgbevf_set_ethtool_ops(dev);
4511dee1ad47SJeff Kirsher dev->watchdog_timeo = 5 * HZ;
4512dee1ad47SJeff Kirsher }
4513dee1ad47SJeff Kirsher
4514dee1ad47SJeff Kirsher /**
4515dee1ad47SJeff Kirsher * ixgbevf_probe - Device Initialization Routine
4516dee1ad47SJeff Kirsher * @pdev: PCI device information struct
4517dee1ad47SJeff Kirsher * @ent: entry in ixgbevf_pci_tbl
4518dee1ad47SJeff Kirsher *
4519dee1ad47SJeff Kirsher * Returns 0 on success, negative on failure
4520dee1ad47SJeff Kirsher *
4521dee1ad47SJeff Kirsher * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
4522dee1ad47SJeff Kirsher * The OS initialization, configuring of the adapter private structure,
4523dee1ad47SJeff Kirsher * and a hardware reset occur.
4524dee1ad47SJeff Kirsher **/
45251dd06ae8SGreg Kroah-Hartman static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4526dee1ad47SJeff Kirsher {
4527dee1ad47SJeff Kirsher struct net_device *netdev;
4528dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = NULL;
4529dee1ad47SJeff Kirsher struct ixgbe_hw *hw = NULL;
4530dee1ad47SJeff Kirsher const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
45310333464fSEmil Tantilov bool disable_dev = false;
453290b83d01SChristophe JAILLET int err;
4533dee1ad47SJeff Kirsher
4534dee1ad47SJeff Kirsher err = pci_enable_device(pdev);
4535dee1ad47SJeff Kirsher if (err)
4536dee1ad47SJeff Kirsher return err;
4537dee1ad47SJeff Kirsher
453890b83d01SChristophe JAILLET err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4539dee1ad47SJeff Kirsher if (err) {
4540dec0d8e4SJeff Kirsher dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4541dee1ad47SJeff Kirsher goto err_dma;
4542dee1ad47SJeff Kirsher }
4543dee1ad47SJeff Kirsher
4544dee1ad47SJeff Kirsher err = pci_request_regions(pdev, ixgbevf_driver_name);
4545dee1ad47SJeff Kirsher if (err) {
4546dee1ad47SJeff Kirsher dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4547dee1ad47SJeff Kirsher goto err_pci_reg;
4548dee1ad47SJeff Kirsher }
4549dee1ad47SJeff Kirsher
4550dee1ad47SJeff Kirsher pci_set_master(pdev);
4551dee1ad47SJeff Kirsher
4552dee1ad47SJeff Kirsher netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4553dee1ad47SJeff Kirsher MAX_TX_QUEUES);
4554dee1ad47SJeff Kirsher if (!netdev) {
4555dee1ad47SJeff Kirsher err = -ENOMEM;
4556dee1ad47SJeff Kirsher goto err_alloc_etherdev;
4557dee1ad47SJeff Kirsher }
4558dee1ad47SJeff Kirsher
4559dee1ad47SJeff Kirsher SET_NETDEV_DEV(netdev, &pdev->dev);
4560dee1ad47SJeff Kirsher
4561dee1ad47SJeff Kirsher adapter = netdev_priv(netdev);
4562dee1ad47SJeff Kirsher
4563dee1ad47SJeff Kirsher adapter->netdev = netdev;
4564dee1ad47SJeff Kirsher adapter->pdev = pdev;
4565dee1ad47SJeff Kirsher hw = &adapter->hw;
4566dee1ad47SJeff Kirsher hw->back = adapter;
4567b3f4d599Sstephen hemminger adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4568dee1ad47SJeff Kirsher
4569dec0d8e4SJeff Kirsher /* call save state here in standalone driver because it relies on
4570dee1ad47SJeff Kirsher * adapter struct to exist, and needs to call netdev_priv
4571dee1ad47SJeff Kirsher */
4572dee1ad47SJeff Kirsher pci_save_state(pdev);
4573dee1ad47SJeff Kirsher
4574dee1ad47SJeff Kirsher hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4575dee1ad47SJeff Kirsher pci_resource_len(pdev, 0));
4576dbf8b0d8SMark Rustad adapter->io_addr = hw->hw_addr;
4577dee1ad47SJeff Kirsher if (!hw->hw_addr) {
4578dee1ad47SJeff Kirsher err = -EIO;
4579dee1ad47SJeff Kirsher goto err_ioremap;
4580dee1ad47SJeff Kirsher }
4581dee1ad47SJeff Kirsher
4582dee1ad47SJeff Kirsher ixgbevf_assign_netdev_ops(netdev);
4583dee1ad47SJeff Kirsher
4584dec0d8e4SJeff Kirsher /* Setup HW API */
4585dee1ad47SJeff Kirsher memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4586dee1ad47SJeff Kirsher hw->mac.type = ii->mac;
4587dee1ad47SJeff Kirsher
45889c9463c2SRadoslaw Tyl memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
4589dee1ad47SJeff Kirsher sizeof(struct ixgbe_mbx_operations));
4590dee1ad47SJeff Kirsher
4591dee1ad47SJeff Kirsher /* setup the private structure */
4592dee1ad47SJeff Kirsher err = ixgbevf_sw_init(adapter);
45931a0d6ae5SDanny Kukawka if (err)
45941a0d6ae5SDanny Kukawka goto err_sw_init;
45951a0d6ae5SDanny Kukawka
45961a0d6ae5SDanny Kukawka /* The HW MAC address was set and/or determined in sw_init */
45971a0d6ae5SDanny Kukawka if (!is_valid_ether_addr(netdev->dev_addr)) {
45981a0d6ae5SDanny Kukawka pr_err("invalid MAC address\n");
45991a0d6ae5SDanny Kukawka err = -EIO;
46001a0d6ae5SDanny Kukawka goto err_sw_init;
46011a0d6ae5SDanny Kukawka }
4602dee1ad47SJeff Kirsher
4603471a76deSMichał Mirosław netdev->hw_features = NETIF_F_SG |
4604471a76deSMichał Mirosław NETIF_F_TSO |
4605471a76deSMichał Mirosław NETIF_F_TSO6 |
4606cb2b3edbSAlexander Duyck NETIF_F_RXCSUM |
4607cb2b3edbSAlexander Duyck NETIF_F_HW_CSUM |
4608cb2b3edbSAlexander Duyck NETIF_F_SCTP_CRC;
4609471a76deSMichał Mirosław
4610b83e3010SAlexander Duyck #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4611b83e3010SAlexander Duyck NETIF_F_GSO_GRE_CSUM | \
46127e13318dSTom Herbert NETIF_F_GSO_IPXIP4 | \
4613bf2d1df3SAlexander Duyck NETIF_F_GSO_IPXIP6 | \
4614b83e3010SAlexander Duyck NETIF_F_GSO_UDP_TUNNEL | \
4615b83e3010SAlexander Duyck NETIF_F_GSO_UDP_TUNNEL_CSUM)
4616dee1ad47SJeff Kirsher
4617b83e3010SAlexander Duyck netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4618b83e3010SAlexander Duyck netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4619b83e3010SAlexander Duyck IXGBEVF_GSO_PARTIAL_FEATURES;
4620cb2b3edbSAlexander Duyck
462190b83d01SChristophe JAILLET netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
4622dee1ad47SJeff Kirsher
4623b83e3010SAlexander Duyck netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
46242a20525bSScott Peterson netdev->mpls_features |= NETIF_F_SG |
46252a20525bSScott Peterson NETIF_F_TSO |
46262a20525bSScott Peterson NETIF_F_TSO6 |
46272a20525bSScott Peterson NETIF_F_HW_CSUM;
46282a20525bSScott Peterson netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4629b83e3010SAlexander Duyck netdev->hw_enc_features |= netdev->vlan_features;
4630b83e3010SAlexander Duyck
4631b83e3010SAlexander Duyck /* set this bit last since it cannot be part of vlan_features */
4632b83e3010SAlexander Duyck netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4633b83e3010SAlexander Duyck NETIF_F_HW_VLAN_CTAG_RX |
4634b83e3010SAlexander Duyck NETIF_F_HW_VLAN_CTAG_TX;
4635b83e3010SAlexander Duyck
463601789349SJiri Pirko netdev->priv_flags |= IFF_UNICAST_FLT;
4637*66c0e13aSMarek Majtyka netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
463801789349SJiri Pirko
463991c527a5SJarod Wilson /* MTU range: 68 - 1504 or 9710 */
464091c527a5SJarod Wilson netdev->min_mtu = ETH_MIN_MTU;
464191c527a5SJarod Wilson switch (adapter->hw.api_version) {
464291c527a5SJarod Wilson case ixgbe_mbox_api_11:
464391c527a5SJarod Wilson case ixgbe_mbox_api_12:
464441e544cdSDon Skidmore case ixgbe_mbox_api_13:
46457f68d430SShannon Nelson case ixgbe_mbox_api_14:
4646339f2896SRadoslaw Tyl case ixgbe_mbox_api_15:
464791c527a5SJarod Wilson netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
464891c527a5SJarod Wilson (ETH_HLEN + ETH_FCS_LEN);
464991c527a5SJarod Wilson break;
465091c527a5SJarod Wilson default:
465191c527a5SJarod Wilson if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
465291c527a5SJarod Wilson netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
465391c527a5SJarod Wilson (ETH_HLEN + ETH_FCS_LEN);
465491c527a5SJarod Wilson else
465591c527a5SJarod Wilson netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
465691c527a5SJarod Wilson break;
465791c527a5SJarod Wilson }
465891c527a5SJarod Wilson
4659ea699569SMark Rustad if (IXGBE_REMOVED(hw->hw_addr)) {
4660ea699569SMark Rustad err = -EIO;
4661ea699569SMark Rustad goto err_sw_init;
4662ea699569SMark Rustad }
46639ac5c5ccSEmil Tantilov
466426566eaeSKees Cook timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
46659ac5c5ccSEmil Tantilov
46669ac5c5ccSEmil Tantilov INIT_WORK(&adapter->service_task, ixgbevf_service_task);
46679ac5c5ccSEmil Tantilov set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
46689ac5c5ccSEmil Tantilov clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4669dee1ad47SJeff Kirsher
4670dee1ad47SJeff Kirsher err = ixgbevf_init_interrupt_scheme(adapter);
4671dee1ad47SJeff Kirsher if (err)
4672dee1ad47SJeff Kirsher goto err_sw_init;
4673dee1ad47SJeff Kirsher
4674dee1ad47SJeff Kirsher strcpy(netdev->name, "eth%d");
4675dee1ad47SJeff Kirsher
4676dee1ad47SJeff Kirsher err = register_netdev(netdev);
4677dee1ad47SJeff Kirsher if (err)
4678dee1ad47SJeff Kirsher goto err_register;
4679dee1ad47SJeff Kirsher
46800333464fSEmil Tantilov pci_set_drvdata(pdev, netdev);
4681dee1ad47SJeff Kirsher netif_carrier_off(netdev);
46827f68d430SShannon Nelson ixgbevf_init_ipsec_offload(adapter);
4683dee1ad47SJeff Kirsher
4684dee1ad47SJeff Kirsher ixgbevf_init_last_counter_stats(adapter);
4685dee1ad47SJeff Kirsher
468647068b0dSEmil Tantilov /* print the VF info */
468747068b0dSEmil Tantilov dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
468847068b0dSEmil Tantilov dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4689dee1ad47SJeff Kirsher
469047068b0dSEmil Tantilov switch (hw->mac.type) {
469147068b0dSEmil Tantilov case ixgbe_mac_X550_vf:
469247068b0dSEmil Tantilov dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
469347068b0dSEmil Tantilov break;
469447068b0dSEmil Tantilov case ixgbe_mac_X540_vf:
469547068b0dSEmil Tantilov dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
469647068b0dSEmil Tantilov break;
469747068b0dSEmil Tantilov case ixgbe_mac_82599_vf:
469847068b0dSEmil Tantilov default:
469947068b0dSEmil Tantilov dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
470047068b0dSEmil Tantilov break;
470147068b0dSEmil Tantilov }
4702dee1ad47SJeff Kirsher
4703dee1ad47SJeff Kirsher return 0;
4704dee1ad47SJeff Kirsher
4705dee1ad47SJeff Kirsher err_register:
47060ac1e8ceSAlexander Duyck ixgbevf_clear_interrupt_scheme(adapter);
4707dee1ad47SJeff Kirsher err_sw_init:
4708dee1ad47SJeff Kirsher ixgbevf_reset_interrupt_capability(adapter);
4709dbf8b0d8SMark Rustad iounmap(adapter->io_addr);
4710e60ae003STony Nguyen kfree(adapter->rss_key);
4711dee1ad47SJeff Kirsher err_ioremap:
47120333464fSEmil Tantilov disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4713dee1ad47SJeff Kirsher free_netdev(netdev);
4714dee1ad47SJeff Kirsher err_alloc_etherdev:
4715dee1ad47SJeff Kirsher pci_release_regions(pdev);
4716dee1ad47SJeff Kirsher err_pci_reg:
4717dee1ad47SJeff Kirsher err_dma:
47180333464fSEmil Tantilov if (!adapter || disable_dev)
4719dee1ad47SJeff Kirsher pci_disable_device(pdev);
4720dee1ad47SJeff Kirsher return err;
4721dee1ad47SJeff Kirsher }
4722dee1ad47SJeff Kirsher
4723dee1ad47SJeff Kirsher /**
4724dee1ad47SJeff Kirsher * ixgbevf_remove - Device Removal Routine
4725dee1ad47SJeff Kirsher * @pdev: PCI device information struct
4726dee1ad47SJeff Kirsher *
4727dee1ad47SJeff Kirsher * ixgbevf_remove is called by the PCI subsystem to alert the driver
4728dee1ad47SJeff Kirsher * that it should release a PCI device. This could be caused by a
4729dee1ad47SJeff Kirsher * Hot-Plug event, or because the driver is going to be removed from
4730dee1ad47SJeff Kirsher * memory.
4731dee1ad47SJeff Kirsher **/
47329f9a12f8SBill Pemberton static void ixgbevf_remove(struct pci_dev *pdev)
4733dee1ad47SJeff Kirsher {
4734dee1ad47SJeff Kirsher struct net_device *netdev = pci_get_drvdata(pdev);
47350333464fSEmil Tantilov struct ixgbevf_adapter *adapter;
47360333464fSEmil Tantilov bool disable_dev;
47370333464fSEmil Tantilov
47380333464fSEmil Tantilov if (!netdev)
47390333464fSEmil Tantilov return;
47400333464fSEmil Tantilov
47410333464fSEmil Tantilov adapter = netdev_priv(netdev);
4742dee1ad47SJeff Kirsher
47432e7cfbddSMark Rustad set_bit(__IXGBEVF_REMOVING, &adapter->state);
47449ac5c5ccSEmil Tantilov cancel_work_sync(&adapter->service_task);
4745dee1ad47SJeff Kirsher
4746fd13a9abSAlexander Duyck if (netdev->reg_state == NETREG_REGISTERED)
4747dee1ad47SJeff Kirsher unregister_netdev(netdev);
4748dee1ad47SJeff Kirsher
47497f68d430SShannon Nelson ixgbevf_stop_ipsec_offload(adapter);
47500ac1e8ceSAlexander Duyck ixgbevf_clear_interrupt_scheme(adapter);
4751dee1ad47SJeff Kirsher ixgbevf_reset_interrupt_capability(adapter);
4752dee1ad47SJeff Kirsher
4753dbf8b0d8SMark Rustad iounmap(adapter->io_addr);
4754dee1ad47SJeff Kirsher pci_release_regions(pdev);
4755dee1ad47SJeff Kirsher
4756dee1ad47SJeff Kirsher hw_dbg(&adapter->hw, "Remove complete\n");
4757dee1ad47SJeff Kirsher
4758e60ae003STony Nguyen kfree(adapter->rss_key);
47590333464fSEmil Tantilov disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4760dee1ad47SJeff Kirsher free_netdev(netdev);
4761dee1ad47SJeff Kirsher
47620333464fSEmil Tantilov if (disable_dev)
4763dee1ad47SJeff Kirsher pci_disable_device(pdev);
4764dee1ad47SJeff Kirsher }
4765dee1ad47SJeff Kirsher
47669f19f31dSAlexander Duyck /**
47679f19f31dSAlexander Duyck * ixgbevf_io_error_detected - called when PCI error is detected
47689f19f31dSAlexander Duyck * @pdev: Pointer to PCI device
47699f19f31dSAlexander Duyck * @state: The current pci connection state
47709f19f31dSAlexander Duyck *
47719f19f31dSAlexander Duyck * This function is called after a PCI bus error affecting
47729f19f31dSAlexander Duyck * this device has been detected.
4773dec0d8e4SJeff Kirsher **/
47749f19f31dSAlexander Duyck static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
47759f19f31dSAlexander Duyck pci_channel_state_t state)
47769f19f31dSAlexander Duyck {
47779f19f31dSAlexander Duyck struct net_device *netdev = pci_get_drvdata(pdev);
47789f19f31dSAlexander Duyck struct ixgbevf_adapter *adapter = netdev_priv(netdev);
47799f19f31dSAlexander Duyck
47809ac5c5ccSEmil Tantilov if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4781ea699569SMark Rustad return PCI_ERS_RESULT_DISCONNECT;
4782ea699569SMark Rustad
4783bc0c7151SMark Rustad rtnl_lock();
47849f19f31dSAlexander Duyck netif_device_detach(netdev);
47859f19f31dSAlexander Duyck
4786b212d815SMauro S M Rodrigues if (netif_running(netdev))
4787b212d815SMauro S M Rodrigues ixgbevf_close_suspend(adapter);
4788b212d815SMauro S M Rodrigues
4789bc0c7151SMark Rustad if (state == pci_channel_io_perm_failure) {
4790bc0c7151SMark Rustad rtnl_unlock();
47919f19f31dSAlexander Duyck return PCI_ERS_RESULT_DISCONNECT;
4792bc0c7151SMark Rustad }
47939f19f31dSAlexander Duyck
4794bc0c7151SMark Rustad if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
47959f19f31dSAlexander Duyck pci_disable_device(pdev);
4796bc0c7151SMark Rustad rtnl_unlock();
47979f19f31dSAlexander Duyck
47988bfb7869SJilin Yuan /* Request a slot reset. */
47999f19f31dSAlexander Duyck return PCI_ERS_RESULT_NEED_RESET;
48009f19f31dSAlexander Duyck }
48019f19f31dSAlexander Duyck
48029f19f31dSAlexander Duyck /**
48039f19f31dSAlexander Duyck * ixgbevf_io_slot_reset - called after the pci bus has been reset.
48049f19f31dSAlexander Duyck * @pdev: Pointer to PCI device
48059f19f31dSAlexander Duyck *
48069f19f31dSAlexander Duyck * Restart the card from scratch, as if from a cold boot. Implementation
48079f19f31dSAlexander Duyck * resembles the first half of the ixgbevf_resume routine.
4808dec0d8e4SJeff Kirsher **/
48099f19f31dSAlexander Duyck static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
48109f19f31dSAlexander Duyck {
48119f19f31dSAlexander Duyck struct net_device *netdev = pci_get_drvdata(pdev);
48129f19f31dSAlexander Duyck struct ixgbevf_adapter *adapter = netdev_priv(netdev);
48139f19f31dSAlexander Duyck
48149f19f31dSAlexander Duyck if (pci_enable_device_mem(pdev)) {
48159f19f31dSAlexander Duyck dev_err(&pdev->dev,
48169f19f31dSAlexander Duyck "Cannot re-enable PCI device after reset.\n");
48179f19f31dSAlexander Duyck return PCI_ERS_RESULT_DISCONNECT;
48189f19f31dSAlexander Duyck }
48199f19f31dSAlexander Duyck
482026403b7fSEmil Tantilov adapter->hw.hw_addr = adapter->io_addr;
48214e857c58SPeter Zijlstra smp_mb__before_atomic();
4822bc0c7151SMark Rustad clear_bit(__IXGBEVF_DISABLED, &adapter->state);
48239f19f31dSAlexander Duyck pci_set_master(pdev);
48249f19f31dSAlexander Duyck
48259f19f31dSAlexander Duyck ixgbevf_reset(adapter);
48269f19f31dSAlexander Duyck
48279f19f31dSAlexander Duyck return PCI_ERS_RESULT_RECOVERED;
48289f19f31dSAlexander Duyck }
48299f19f31dSAlexander Duyck
48309f19f31dSAlexander Duyck /**
48319f19f31dSAlexander Duyck * ixgbevf_io_resume - called when traffic can start flowing again.
48329f19f31dSAlexander Duyck * @pdev: Pointer to PCI device
48339f19f31dSAlexander Duyck *
48349f19f31dSAlexander Duyck * This callback is called when the error recovery driver tells us that
48359f19f31dSAlexander Duyck * it's OK to resume normal operation. Implementation resembles the
48369f19f31dSAlexander Duyck * second half of the ixgbevf_resume routine.
4837dec0d8e4SJeff Kirsher **/
48389f19f31dSAlexander Duyck static void ixgbevf_io_resume(struct pci_dev *pdev)
48399f19f31dSAlexander Duyck {
48409f19f31dSAlexander Duyck struct net_device *netdev = pci_get_drvdata(pdev);
48419f19f31dSAlexander Duyck
4842b19cf6eeSEmil Tantilov rtnl_lock();
48439f19f31dSAlexander Duyck if (netif_running(netdev))
4844b19cf6eeSEmil Tantilov ixgbevf_open(netdev);
48459f19f31dSAlexander Duyck
48469f19f31dSAlexander Duyck netif_device_attach(netdev);
4847b19cf6eeSEmil Tantilov rtnl_unlock();
48489f19f31dSAlexander Duyck }
48499f19f31dSAlexander Duyck
48509f19f31dSAlexander Duyck /* PCI Error Recovery (ERS) */
48513646f0e5SStephen Hemminger static const struct pci_error_handlers ixgbevf_err_handler = {
48529f19f31dSAlexander Duyck .error_detected = ixgbevf_io_error_detected,
48539f19f31dSAlexander Duyck .slot_reset = ixgbevf_io_slot_reset,
48549f19f31dSAlexander Duyck .resume = ixgbevf_io_resume,
48559f19f31dSAlexander Duyck };
48569f19f31dSAlexander Duyck
4857bac66317SVaibhav Gupta static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
4858bac66317SVaibhav Gupta
4859dee1ad47SJeff Kirsher static struct pci_driver ixgbevf_driver = {
4860dee1ad47SJeff Kirsher .name = ixgbevf_driver_name,
4861dee1ad47SJeff Kirsher .id_table = ixgbevf_pci_tbl,
4862dee1ad47SJeff Kirsher .probe = ixgbevf_probe,
48639f9a12f8SBill Pemberton .remove = ixgbevf_remove,
4864bac66317SVaibhav Gupta
48650ac1e8ceSAlexander Duyck /* Power Management Hooks */
4866bac66317SVaibhav Gupta .driver.pm = &ixgbevf_pm_ops,
4867bac66317SVaibhav Gupta
4868dee1ad47SJeff Kirsher .shutdown = ixgbevf_shutdown,
48699f19f31dSAlexander Duyck .err_handler = &ixgbevf_err_handler
4870dee1ad47SJeff Kirsher };
4871dee1ad47SJeff Kirsher
4872dee1ad47SJeff Kirsher /**
4873dee1ad47SJeff Kirsher * ixgbevf_init_module - Driver Registration Routine
4874dee1ad47SJeff Kirsher *
4875dee1ad47SJeff Kirsher * ixgbevf_init_module is the first routine called when the driver is
4876dee1ad47SJeff Kirsher * loaded. All it does is register with the PCI subsystem.
4877dee1ad47SJeff Kirsher **/
4878dee1ad47SJeff Kirsher static int __init ixgbevf_init_module(void)
4879dee1ad47SJeff Kirsher {
48808cfa238aSShang XiaoJing int err;
48818cfa238aSShang XiaoJing
488234a2a3b8SJeff Kirsher pr_info("%s\n", ixgbevf_driver_string);
4883dbd9636eSJeff Kirsher pr_info("%s\n", ixgbevf_copyright);
488440a13e24SMark Rustad ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
488540a13e24SMark Rustad if (!ixgbevf_wq) {
488640a13e24SMark Rustad pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
488740a13e24SMark Rustad return -ENOMEM;
488840a13e24SMark Rustad }
4889dee1ad47SJeff Kirsher
48908cfa238aSShang XiaoJing err = pci_register_driver(&ixgbevf_driver);
48918cfa238aSShang XiaoJing if (err) {
48928cfa238aSShang XiaoJing destroy_workqueue(ixgbevf_wq);
48938cfa238aSShang XiaoJing return err;
48948cfa238aSShang XiaoJing }
48958cfa238aSShang XiaoJing
48968cfa238aSShang XiaoJing return 0;
4897dee1ad47SJeff Kirsher }
4898dee1ad47SJeff Kirsher
4899dee1ad47SJeff Kirsher module_init(ixgbevf_init_module);
4900dee1ad47SJeff Kirsher
4901dee1ad47SJeff Kirsher /**
4902dee1ad47SJeff Kirsher * ixgbevf_exit_module - Driver Exit Cleanup Routine
4903dee1ad47SJeff Kirsher *
4904dee1ad47SJeff Kirsher * ixgbevf_exit_module is called just before the driver is removed
4905dee1ad47SJeff Kirsher * from memory.
4906dee1ad47SJeff Kirsher **/
4907dee1ad47SJeff Kirsher static void __exit ixgbevf_exit_module(void)
4908dee1ad47SJeff Kirsher {
4909dee1ad47SJeff Kirsher pci_unregister_driver(&ixgbevf_driver);
491040a13e24SMark Rustad if (ixgbevf_wq) {
491140a13e24SMark Rustad destroy_workqueue(ixgbevf_wq);
491240a13e24SMark Rustad ixgbevf_wq = NULL;
491340a13e24SMark Rustad }
4914dee1ad47SJeff Kirsher }
4915dee1ad47SJeff Kirsher
4916dee1ad47SJeff Kirsher #ifdef DEBUG
4917dee1ad47SJeff Kirsher /**
4918dee1ad47SJeff Kirsher * ixgbevf_get_hw_dev_name - return device name string
4919dee1ad47SJeff Kirsher * used by the hardware layer to print debugging information
4920e23cf38fSTony Nguyen * @hw: pointer to private hardware struct
4921dee1ad47SJeff Kirsher **/
4922dee1ad47SJeff Kirsher char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4923dee1ad47SJeff Kirsher {
4924dee1ad47SJeff Kirsher struct ixgbevf_adapter *adapter = hw->back;
4925dec0d8e4SJeff Kirsher
4926dee1ad47SJeff Kirsher return adapter->netdev->name;
4927dee1ad47SJeff Kirsher }
4928dee1ad47SJeff Kirsher
4929dee1ad47SJeff Kirsher #endif
4930dee1ad47SJeff Kirsher module_exit(ixgbevf_exit_module);
4931dee1ad47SJeff Kirsher
4932dee1ad47SJeff Kirsher /* ixgbevf_main.c */
4933