1837f08fdSAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0
2837f08fdSAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */
3837f08fdSAnirudh Venkataramanan 
4837f08fdSAnirudh Venkataramanan /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5837f08fdSAnirudh Venkataramanan 
6837f08fdSAnirudh Venkataramanan #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7837f08fdSAnirudh Venkataramanan 
834a2a3b8SJeff Kirsher #include <generated/utsrelease.h>
9837f08fdSAnirudh Venkataramanan #include "ice.h"
10eff380aaSAnirudh Venkataramanan #include "ice_base.h"
1145d3d428SAnirudh Venkataramanan #include "ice_lib.h"
121b8f15b6SMichal Swiatkowski #include "ice_fltr.h"
1337b6f646SAnirudh Venkataramanan #include "ice_dcb_lib.h"
14b94b013eSDave Ertman #include "ice_dcb_nl.h"
151adf7eadSJacob Keller #include "ice_devlink.h"
163089cf6dSJesse Brandeburg /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
173089cf6dSJesse Brandeburg  * ice tracepoint functions. This must be done exactly once across the
183089cf6dSJesse Brandeburg  * ice driver.
193089cf6dSJesse Brandeburg  */
203089cf6dSJesse Brandeburg #define CREATE_TRACE_POINTS
213089cf6dSJesse Brandeburg #include "ice_trace.h"
22b3be918dSGrzegorz Nitka #include "ice_eswitch.h"
230d08a441SKiran Patil #include "ice_tc_lib.h"
24c31af68aSBrett Creeley #include "ice_vsi_vlan_ops.h"
2566c0e13aSMarek Majtyka #include <net/xdp_sock_drv.h>
26837f08fdSAnirudh Venkataramanan 
27837f08fdSAnirudh Venkataramanan #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
28837f08fdSAnirudh Venkataramanan static const char ice_driver_string[] = DRV_SUMMARY;
29837f08fdSAnirudh Venkataramanan static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
30837f08fdSAnirudh Venkataramanan 
31462acf6aSTony Nguyen /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
32462acf6aSTony Nguyen #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
33462acf6aSTony Nguyen #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
34462acf6aSTony Nguyen 
35837f08fdSAnirudh Venkataramanan MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
36837f08fdSAnirudh Venkataramanan MODULE_DESCRIPTION(DRV_SUMMARY);
3798674ebeSJesse Brandeburg MODULE_LICENSE("GPL v2");
38462acf6aSTony Nguyen MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
39837f08fdSAnirudh Venkataramanan 
40837f08fdSAnirudh Venkataramanan static int debug = -1;
41837f08fdSAnirudh Venkataramanan module_param(debug, int, 0644);
427ec59eeaSAnirudh Venkataramanan #ifndef CONFIG_DYNAMIC_DEBUG
437ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
447ec59eeaSAnirudh Venkataramanan #else
457ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
467ec59eeaSAnirudh Venkataramanan #endif /* !CONFIG_DYNAMIC_DEBUG */
47837f08fdSAnirudh Venkataramanan 
4822bf877eSMaciej Fijalkowski DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
4922bf877eSMaciej Fijalkowski EXPORT_SYMBOL(ice_xdp_locking_key);
50d25a0fc4SDave Ertman 
51649c87c6SJacob Keller /**
52649c87c6SJacob Keller  * ice_hw_to_dev - Get device pointer from the hardware structure
53649c87c6SJacob Keller  * @hw: pointer to the device HW structure
54649c87c6SJacob Keller  *
55649c87c6SJacob Keller  * Used to access the device pointer from compilation units which can't easily
56649c87c6SJacob Keller  * include the definition of struct ice_pf without leading to circular header
57649c87c6SJacob Keller  * dependencies.
58649c87c6SJacob Keller  */
59649c87c6SJacob Keller struct device *ice_hw_to_dev(struct ice_hw *hw)
60649c87c6SJacob Keller {
61649c87c6SJacob Keller 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
62649c87c6SJacob Keller 
63649c87c6SJacob Keller 	return &pf->pdev->dev;
64649c87c6SJacob Keller }
65649c87c6SJacob Keller 
/* driver-private workqueue used by the service task */
static struct workqueue_struct *ice_wq;
/* netdev ops tables; safe-mode variant is used when the DDP package failed */
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

/* forward declarations for functions defined later in this file */
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));
82195bb48fSMichal Swiatkowski 
83df006dd4SDave Ertman bool netif_is_ice(struct net_device *dev)
84df006dd4SDave Ertman {
85df006dd4SDave Ertman 	return dev && (dev->netdev_ops == &ice_netdev_ops);
86df006dd4SDave Ertman }
87df006dd4SDave Ertman 
883a858ba3SAnirudh Venkataramanan /**
89b3969fd7SSudheer Mogilappagari  * ice_get_tx_pending - returns number of Tx descriptors not processed
90b3969fd7SSudheer Mogilappagari  * @ring: the ring of descriptors
91b3969fd7SSudheer Mogilappagari  */
92e72bba21SMaciej Fijalkowski static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
93b3969fd7SSudheer Mogilappagari {
94c1ddf1f5SBrett Creeley 	u16 head, tail;
95b3969fd7SSudheer Mogilappagari 
96b3969fd7SSudheer Mogilappagari 	head = ring->next_to_clean;
97c1ddf1f5SBrett Creeley 	tail = ring->next_to_use;
98b3969fd7SSudheer Mogilappagari 
99b3969fd7SSudheer Mogilappagari 	if (head != tail)
100b3969fd7SSudheer Mogilappagari 		return (head < tail) ?
101b3969fd7SSudheer Mogilappagari 			tail - head : (tail + ring->count - head);
102b3969fd7SSudheer Mogilappagari 	return 0;
103b3969fd7SSudheer Mogilappagari }
104b3969fd7SSudheer Mogilappagari 
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 *
 * Walks the Tx rings of the PF VSI. A ring whose completed-packet count has
 * not advanced since the previous pass, while work is still pending, is
 * assumed stalled and is revived by triggering a software interrupt on its
 * queue vector.
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	/* only the PF VSI's queues are monitored here */
	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	/* nothing to do while the link/carrier is down */
	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		/* rings attached to a channel are skipped here */
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			/* remember the count only if work is still pending;
			 * -1 marks "no pending work" for the next pass
			 */
			ring_stats->tx_stats.prev_pkt =
			    ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
168b3969fd7SSudheer Mogilappagari 
169b3969fd7SSudheer Mogilappagari /**
170561f4379STony Nguyen  * ice_init_mac_fltr - Set initial MAC filters
171561f4379STony Nguyen  * @pf: board private structure
172561f4379STony Nguyen  *
1732f2da36eSAnirudh Venkataramanan  * Set initial set of MAC filters for PF VSI; configure filters for permanent
174561f4379STony Nguyen  * address and broadcast address. If an error is encountered, netdevice will be
175561f4379STony Nguyen  * unregistered.
176561f4379STony Nguyen  */
177561f4379STony Nguyen static int ice_init_mac_fltr(struct ice_pf *pf)
178561f4379STony Nguyen {
179561f4379STony Nguyen 	struct ice_vsi *vsi;
1801b8f15b6SMichal Swiatkowski 	u8 *perm_addr;
181561f4379STony Nguyen 
182208ff751SAnirudh Venkataramanan 	vsi = ice_get_main_vsi(pf);
183561f4379STony Nguyen 	if (!vsi)
184561f4379STony Nguyen 		return -EINVAL;
185561f4379STony Nguyen 
1861b8f15b6SMichal Swiatkowski 	perm_addr = vsi->port_info->mac.perm_addr;
187c1484691STony Nguyen 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
188561f4379STony Nguyen }
189561f4379STony Nguyen 
190561f4379STony Nguyen /**
191f9867df6SAnirudh Venkataramanan  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
192e94d4478SAnirudh Venkataramanan  * @netdev: the net device on which the sync is happening
193f9867df6SAnirudh Venkataramanan  * @addr: MAC address to sync
194e94d4478SAnirudh Venkataramanan  *
195e94d4478SAnirudh Venkataramanan  * This is a callback function which is called by the in kernel device sync
196e94d4478SAnirudh Venkataramanan  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
197e94d4478SAnirudh Venkataramanan  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
198f9867df6SAnirudh Venkataramanan  * MAC filters from the hardware.
199e94d4478SAnirudh Venkataramanan  */
200e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
201e94d4478SAnirudh Venkataramanan {
202e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
203e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
204e94d4478SAnirudh Venkataramanan 
2051b8f15b6SMichal Swiatkowski 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
2061b8f15b6SMichal Swiatkowski 				     ICE_FWD_TO_VSI))
207e94d4478SAnirudh Venkataramanan 		return -EINVAL;
208e94d4478SAnirudh Venkataramanan 
209e94d4478SAnirudh Venkataramanan 	return 0;
210e94d4478SAnirudh Venkataramanan }
211e94d4478SAnirudh Venkataramanan 
212e94d4478SAnirudh Venkataramanan /**
213f9867df6SAnirudh Venkataramanan  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
214e94d4478SAnirudh Venkataramanan  * @netdev: the net device on which the unsync is happening
215f9867df6SAnirudh Venkataramanan  * @addr: MAC address to unsync
216e94d4478SAnirudh Venkataramanan  *
217e94d4478SAnirudh Venkataramanan  * This is a callback function which is called by the in kernel device unsync
218e94d4478SAnirudh Venkataramanan  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
219e94d4478SAnirudh Venkataramanan  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
220f9867df6SAnirudh Venkataramanan  * delete the MAC filters from the hardware.
221e94d4478SAnirudh Venkataramanan  */
222e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
223e94d4478SAnirudh Venkataramanan {
224e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
225e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
226e94d4478SAnirudh Venkataramanan 
2273ba7f53fSBrett Creeley 	/* Under some circumstances, we might receive a request to delete our
2283ba7f53fSBrett Creeley 	 * own device address from our uc list. Because we store the device
2293ba7f53fSBrett Creeley 	 * address in the VSI's MAC filter list, we need to ignore such
2303ba7f53fSBrett Creeley 	 * requests and not delete our device address from this list.
2313ba7f53fSBrett Creeley 	 */
2323ba7f53fSBrett Creeley 	if (ether_addr_equal(addr, netdev->dev_addr))
2333ba7f53fSBrett Creeley 		return 0;
2343ba7f53fSBrett Creeley 
2351b8f15b6SMichal Swiatkowski 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
2361b8f15b6SMichal Swiatkowski 				     ICE_FWD_TO_VSI))
237e94d4478SAnirudh Venkataramanan 		return -EINVAL;
238e94d4478SAnirudh Venkataramanan 
239e94d4478SAnirudh Venkataramanan 	return 0;
240e94d4478SAnirudh Venkataramanan }
241e94d4478SAnirudh Venkataramanan 
242e94d4478SAnirudh Venkataramanan /**
243e94d4478SAnirudh Venkataramanan  * ice_vsi_fltr_changed - check if filter state changed
244e94d4478SAnirudh Venkataramanan  * @vsi: VSI to be checked
245e94d4478SAnirudh Venkataramanan  *
246e94d4478SAnirudh Venkataramanan  * returns true if filter state has changed, false otherwise.
247e94d4478SAnirudh Venkataramanan  */
248e94d4478SAnirudh Venkataramanan static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
249e94d4478SAnirudh Venkataramanan {
250e97fb1aeSAnirudh Venkataramanan 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
2511273f895SIvan Vecera 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
252e94d4478SAnirudh Venkataramanan }
253e94d4478SAnirudh Venkataramanan 
254e94d4478SAnirudh Venkataramanan /**
255fabf480bSBrett Creeley  * ice_set_promisc - Enable promiscuous mode for a given PF
2565eda8afdSAkeem G Abodunrin  * @vsi: the VSI being configured
2575eda8afdSAkeem G Abodunrin  * @promisc_m: mask of promiscuous config bits
2585eda8afdSAkeem G Abodunrin  *
2595eda8afdSAkeem G Abodunrin  */
260fabf480bSBrett Creeley static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
2615eda8afdSAkeem G Abodunrin {
2625e24d598STony Nguyen 	int status;
2635eda8afdSAkeem G Abodunrin 
2645eda8afdSAkeem G Abodunrin 	if (vsi->type != ICE_VSI_PF)
2655eda8afdSAkeem G Abodunrin 		return 0;
2665eda8afdSAkeem G Abodunrin 
2671273f895SIvan Vecera 	if (ice_vsi_has_non_zero_vlans(vsi)) {
2681273f895SIvan Vecera 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
2691273f895SIvan Vecera 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
2701273f895SIvan Vecera 						       promisc_m);
2711273f895SIvan Vecera 	} else {
2721273f895SIvan Vecera 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
2731273f895SIvan Vecera 						  promisc_m, 0);
2741273f895SIvan Vecera 	}
275abddafd4SGrzegorz Siwik 	if (status && status != -EEXIST)
276c1484691STony Nguyen 		return status;
277abddafd4SGrzegorz Siwik 
27843fbca02SJesse Brandeburg 	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
27943fbca02SJesse Brandeburg 		   vsi->vsi_num, promisc_m);
280abddafd4SGrzegorz Siwik 	return 0;
2815eda8afdSAkeem G Abodunrin }
2825eda8afdSAkeem G Abodunrin 
283fabf480bSBrett Creeley /**
284fabf480bSBrett Creeley  * ice_clear_promisc - Disable promiscuous mode for a given PF
285fabf480bSBrett Creeley  * @vsi: the VSI being configured
286fabf480bSBrett Creeley  * @promisc_m: mask of promiscuous config bits
287fabf480bSBrett Creeley  *
288fabf480bSBrett Creeley  */
289fabf480bSBrett Creeley static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
290fabf480bSBrett Creeley {
2915e24d598STony Nguyen 	int status;
292fabf480bSBrett Creeley 
293fabf480bSBrett Creeley 	if (vsi->type != ICE_VSI_PF)
294fabf480bSBrett Creeley 		return 0;
295fabf480bSBrett Creeley 
2961273f895SIvan Vecera 	if (ice_vsi_has_non_zero_vlans(vsi)) {
2971273f895SIvan Vecera 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
2981273f895SIvan Vecera 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
2991273f895SIvan Vecera 							 promisc_m);
3001273f895SIvan Vecera 	} else {
3011273f895SIvan Vecera 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3021273f895SIvan Vecera 						    promisc_m, 0);
3031273f895SIvan Vecera 	}
3041273f895SIvan Vecera 
30543fbca02SJesse Brandeburg 	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
30643fbca02SJesse Brandeburg 		   vsi->vsi_num, promisc_m);
307c1484691STony Nguyen 	return status;
3085eda8afdSAkeem G Abodunrin }
3095eda8afdSAkeem G Abodunrin 
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ. Synchronizes
 * the netdev's unicast/multicast address lists into HW MAC filters, and
 * reconciles allmulticast/promiscuous mode with the netdev flags. If the MAC
 * filter table overflows (ICE_AQ_RC_ENOSPC), the VSI is forced into
 * promiscuous mode as a fallback.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	/* serialize with other configuration flows on this VSI */
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* snapshot which netdev flags changed since the last sync */
	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				/* roll the flag back so a later sync retries */
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				/* roll the flag back so a later sync retries */
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	/* retry the promiscuous reconfiguration on the next sync pass */
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
474e94d4478SAnirudh Venkataramanan 
475e94d4478SAnirudh Venkataramanan /**
476e94d4478SAnirudh Venkataramanan  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
477e94d4478SAnirudh Venkataramanan  * @pf: board private structure
478e94d4478SAnirudh Venkataramanan  */
479e94d4478SAnirudh Venkataramanan static void ice_sync_fltr_subtask(struct ice_pf *pf)
480e94d4478SAnirudh Venkataramanan {
481e94d4478SAnirudh Venkataramanan 	int v;
482e94d4478SAnirudh Venkataramanan 
483e94d4478SAnirudh Venkataramanan 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
484e94d4478SAnirudh Venkataramanan 		return;
485e94d4478SAnirudh Venkataramanan 
486e94d4478SAnirudh Venkataramanan 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
487e94d4478SAnirudh Venkataramanan 
48880ed404aSBrett Creeley 	ice_for_each_vsi(pf, v)
489e94d4478SAnirudh Venkataramanan 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
490e94d4478SAnirudh Venkataramanan 		    ice_vsi_sync_fltr(pf->vsi[v])) {
491e94d4478SAnirudh Venkataramanan 			/* come back and try again later */
492e94d4478SAnirudh Venkataramanan 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
493e94d4478SAnirudh Venkataramanan 			break;
494e94d4478SAnirudh Venkataramanan 		}
495e94d4478SAnirudh Venkataramanan }
496e94d4478SAnirudh Venkataramanan 
497e94d4478SAnirudh Venkataramanan /**
4987b9ffc76SAnirudh Venkataramanan  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
4997b9ffc76SAnirudh Venkataramanan  * @pf: the PF
5007b9ffc76SAnirudh Venkataramanan  * @locked: is the rtnl_lock already held
5017b9ffc76SAnirudh Venkataramanan  */
5027b9ffc76SAnirudh Venkataramanan static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
5037b9ffc76SAnirudh Venkataramanan {
504b126bd6bSKiran Patil 	int node;
5057b9ffc76SAnirudh Venkataramanan 	int v;
5067b9ffc76SAnirudh Venkataramanan 
5077b9ffc76SAnirudh Venkataramanan 	ice_for_each_vsi(pf, v)
5087b9ffc76SAnirudh Venkataramanan 		if (pf->vsi[v])
5097b9ffc76SAnirudh Venkataramanan 			ice_dis_vsi(pf->vsi[v], locked);
510b126bd6bSKiran Patil 
511b126bd6bSKiran Patil 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
512b126bd6bSKiran Patil 		pf->pf_agg_node[node].num_vsis = 0;
513b126bd6bSKiran Patil 
514b126bd6bSKiran Patil 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
515b126bd6bSKiran Patil 		pf->vf_agg_node[node].num_vsis = 0;
5167b9ffc76SAnirudh Venkataramanan }
5177b9ffc76SAnirudh Venkataramanan 
5187b9ffc76SAnirudh Venkataramanan /**
519c1e5da5dSWojciech Drewek  * ice_clear_sw_switch_recipes - clear switch recipes
520c1e5da5dSWojciech Drewek  * @pf: board private structure
521c1e5da5dSWojciech Drewek  *
522c1e5da5dSWojciech Drewek  * Mark switch recipes as not created in sw structures. There are cases where
523c1e5da5dSWojciech Drewek  * rules (especially advanced rules) need to be restored, either re-read from
524c1e5da5dSWojciech Drewek  * hardware or added again. For example after the reset. 'recp_created' flag
525c1e5da5dSWojciech Drewek  * prevents from doing that and need to be cleared upfront.
526c1e5da5dSWojciech Drewek  */
527c1e5da5dSWojciech Drewek static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
528c1e5da5dSWojciech Drewek {
529c1e5da5dSWojciech Drewek 	struct ice_sw_recipe *recp;
530c1e5da5dSWojciech Drewek 	u8 i;
531c1e5da5dSWojciech Drewek 
532c1e5da5dSWojciech Drewek 	recp = pf->hw.switch_info->recp_list;
533c1e5da5dSWojciech Drewek 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
534c1e5da5dSWojciech Drewek 		recp[i].recp_created = false;
535c1e5da5dSWojciech Drewek }
536c1e5da5dSWojciech Drewek 
/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset. Runs at most
 * once per reset cycle: a second call is a no-op until the reset completes
 * and ICE_PREPARED_FOR_RESET is cleared again.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt; /* hash-bucket cursor used by ice_for_each_vf() */

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* unplug the auxiliary device ahead of the reset */
	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset (only if the mailbox SQ is alive) */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	/* in switchdev mode, non-PF resets require switch recipes to be
	 * restored later, so clear the SW 'recp_created' bookkeeping now
	 * (see ice_clear_sw_switch_recipes())
	 */
	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			/* PFR: remember TC config so channels can be rebuilt */
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	/* clear SW scheduler state for the port */
	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
6250b28b702SAnirudh Venkataramanan 
6260b28b702SAnirudh Venkataramanan /**
6270b28b702SAnirudh Venkataramanan  * ice_do_reset - Initiate one of many types of resets
6280b28b702SAnirudh Venkataramanan  * @pf: board private structure
629fbc7b27aSKiran Patil  * @reset_type: reset type requested before this function was called.
6300b28b702SAnirudh Venkataramanan  */
6310b28b702SAnirudh Venkataramanan static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
6320b28b702SAnirudh Venkataramanan {
6334015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
6340b28b702SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
6350b28b702SAnirudh Venkataramanan 
6360b28b702SAnirudh Venkataramanan 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
6370b28b702SAnirudh Venkataramanan 
638fbc7b27aSKiran Patil 	ice_prepare_for_reset(pf, reset_type);
6390b28b702SAnirudh Venkataramanan 
6400b28b702SAnirudh Venkataramanan 	/* trigger the reset */
6410b28b702SAnirudh Venkataramanan 	if (ice_reset(hw, reset_type)) {
6420b28b702SAnirudh Venkataramanan 		dev_err(dev, "reset %d failed\n", reset_type);
6437e408e07SAnirudh Venkataramanan 		set_bit(ICE_RESET_FAILED, pf->state);
6447e408e07SAnirudh Venkataramanan 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
6457e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
6467e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PFR_REQ, pf->state);
6477e408e07SAnirudh Venkataramanan 		clear_bit(ICE_CORER_REQ, pf->state);
6487e408e07SAnirudh Venkataramanan 		clear_bit(ICE_GLOBR_REQ, pf->state);
6491c08052eSJacob Keller 		wake_up(&pf->reset_wait_queue);
6500b28b702SAnirudh Venkataramanan 		return;
6510b28b702SAnirudh Venkataramanan 	}
6520b28b702SAnirudh Venkataramanan 
6530f9d5027SAnirudh Venkataramanan 	/* PFR is a bit of a special case because it doesn't result in an OICR
6540f9d5027SAnirudh Venkataramanan 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
6550f9d5027SAnirudh Venkataramanan 	 * associated state bits.
6560f9d5027SAnirudh Venkataramanan 	 */
6570b28b702SAnirudh Venkataramanan 	if (reset_type == ICE_RESET_PFR) {
6580b28b702SAnirudh Venkataramanan 		pf->pfr_count++;
659462acf6aSTony Nguyen 		ice_rebuild(pf, reset_type);
6607e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
6617e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PFR_REQ, pf->state);
6621c08052eSJacob Keller 		wake_up(&pf->reset_wait_queue);
663dac57288SJacob Keller 		ice_reset_all_vfs(pf);
6640b28b702SAnirudh Venkataramanan 	}
6650b28b702SAnirudh Venkataramanan }
6660b28b702SAnirudh Venkataramanan 
/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Handles two flows: finishing a HW-announced (OICR) reset, and triggering
 * a new SW-requested reset recorded via the *_REQ state bits.
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested: later assignments
		 * override earlier ones, EMPR > GLOBR > CORER
		 */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		/* no-op when ICE_PREPARED_FOR_RESET is already set */
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			/* release anyone blocked on pf->reset_wait_queue */
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}
7370b28b702SAnirudh Venkataramanan 
7380b28b702SAnirudh Venkataramanan /**
7392e0ab37cSJesse Brandeburg  * ice_print_topo_conflict - print topology conflict message
7402e0ab37cSJesse Brandeburg  * @vsi: the VSI whose topology status is being checked
7412e0ab37cSJesse Brandeburg  */
7422e0ab37cSJesse Brandeburg static void ice_print_topo_conflict(struct ice_vsi *vsi)
7432e0ab37cSJesse Brandeburg {
7442e0ab37cSJesse Brandeburg 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
7452e0ab37cSJesse Brandeburg 	case ICE_AQ_LINK_TOPO_CONFLICT:
7462e0ab37cSJesse Brandeburg 	case ICE_AQ_LINK_MEDIA_CONFLICT:
7475878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
7485878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
7495878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
7505c57145aSPaul Greenwalt 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
7512e0ab37cSJesse Brandeburg 		break;
7525878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
7534fc5fbeeSAnirudh Venkataramanan 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
7544fc5fbeeSAnirudh Venkataramanan 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
7554fc5fbeeSAnirudh Venkataramanan 		else
7564fc5fbeeSAnirudh Venkataramanan 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
7575878589dSPaul Greenwalt 		break;
7582e0ab37cSJesse Brandeburg 	default:
7592e0ab37cSJesse Brandeburg 		break;
7602e0ab37cSJesse Brandeburg 	}
7612e0ab37cSJesse Brandeburg }
7622e0ab37cSJesse Brandeburg 
7632e0ab37cSJesse Brandeburg /**
764cdedef59SAnirudh Venkataramanan  * ice_print_link_msg - print link up or down message
765cdedef59SAnirudh Venkataramanan  * @vsi: the VSI whose link status is being queried
766cdedef59SAnirudh Venkataramanan  * @isup: boolean for if the link is now up or down
767cdedef59SAnirudh Venkataramanan  */
768fcea6f3dSAnirudh Venkataramanan void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
769cdedef59SAnirudh Venkataramanan {
770f776b3acSPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *caps;
7715ee30564SPaul Greenwalt 	const char *an_advertised;
772f776b3acSPaul Greenwalt 	const char *fec_req;
773cdedef59SAnirudh Venkataramanan 	const char *speed;
774f776b3acSPaul Greenwalt 	const char *fec;
775cdedef59SAnirudh Venkataramanan 	const char *fc;
77643260988SJesse Brandeburg 	const char *an;
7775518ac2aSTony Nguyen 	int status;
778cdedef59SAnirudh Venkataramanan 
779c2a23e00SBrett Creeley 	if (!vsi)
780c2a23e00SBrett Creeley 		return;
781c2a23e00SBrett Creeley 
782cdedef59SAnirudh Venkataramanan 	if (vsi->current_isup == isup)
783cdedef59SAnirudh Venkataramanan 		return;
784cdedef59SAnirudh Venkataramanan 
785cdedef59SAnirudh Venkataramanan 	vsi->current_isup = isup;
786cdedef59SAnirudh Venkataramanan 
787cdedef59SAnirudh Venkataramanan 	if (!isup) {
788cdedef59SAnirudh Venkataramanan 		netdev_info(vsi->netdev, "NIC Link is Down\n");
789cdedef59SAnirudh Venkataramanan 		return;
790cdedef59SAnirudh Venkataramanan 	}
791cdedef59SAnirudh Venkataramanan 
792cdedef59SAnirudh Venkataramanan 	switch (vsi->port_info->phy.link_info.link_speed) {
793072efdf8SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_100GB:
794072efdf8SAnirudh Venkataramanan 		speed = "100 G";
795072efdf8SAnirudh Venkataramanan 		break;
796072efdf8SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_50GB:
797072efdf8SAnirudh Venkataramanan 		speed = "50 G";
798072efdf8SAnirudh Venkataramanan 		break;
799cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_40GB:
800cdedef59SAnirudh Venkataramanan 		speed = "40 G";
801cdedef59SAnirudh Venkataramanan 		break;
802cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_25GB:
803cdedef59SAnirudh Venkataramanan 		speed = "25 G";
804cdedef59SAnirudh Venkataramanan 		break;
805cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_20GB:
806cdedef59SAnirudh Venkataramanan 		speed = "20 G";
807cdedef59SAnirudh Venkataramanan 		break;
808cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_10GB:
809cdedef59SAnirudh Venkataramanan 		speed = "10 G";
810cdedef59SAnirudh Venkataramanan 		break;
811cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_5GB:
812cdedef59SAnirudh Venkataramanan 		speed = "5 G";
813cdedef59SAnirudh Venkataramanan 		break;
814cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_2500MB:
815cdedef59SAnirudh Venkataramanan 		speed = "2.5 G";
816cdedef59SAnirudh Venkataramanan 		break;
817cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_1000MB:
818cdedef59SAnirudh Venkataramanan 		speed = "1 G";
819cdedef59SAnirudh Venkataramanan 		break;
820cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_100MB:
821cdedef59SAnirudh Venkataramanan 		speed = "100 M";
822cdedef59SAnirudh Venkataramanan 		break;
823cdedef59SAnirudh Venkataramanan 	default:
824cdedef59SAnirudh Venkataramanan 		speed = "Unknown ";
825cdedef59SAnirudh Venkataramanan 		break;
826cdedef59SAnirudh Venkataramanan 	}
827cdedef59SAnirudh Venkataramanan 
828cdedef59SAnirudh Venkataramanan 	switch (vsi->port_info->fc.current_mode) {
829cdedef59SAnirudh Venkataramanan 	case ICE_FC_FULL:
8302f2da36eSAnirudh Venkataramanan 		fc = "Rx/Tx";
831cdedef59SAnirudh Venkataramanan 		break;
832cdedef59SAnirudh Venkataramanan 	case ICE_FC_TX_PAUSE:
8332f2da36eSAnirudh Venkataramanan 		fc = "Tx";
834cdedef59SAnirudh Venkataramanan 		break;
835cdedef59SAnirudh Venkataramanan 	case ICE_FC_RX_PAUSE:
8362f2da36eSAnirudh Venkataramanan 		fc = "Rx";
837cdedef59SAnirudh Venkataramanan 		break;
838203a068aSBrett Creeley 	case ICE_FC_NONE:
839203a068aSBrett Creeley 		fc = "None";
840203a068aSBrett Creeley 		break;
841cdedef59SAnirudh Venkataramanan 	default:
842cdedef59SAnirudh Venkataramanan 		fc = "Unknown";
843cdedef59SAnirudh Venkataramanan 		break;
844cdedef59SAnirudh Venkataramanan 	}
845cdedef59SAnirudh Venkataramanan 
846f776b3acSPaul Greenwalt 	/* Get FEC mode based on negotiated link info */
847f776b3acSPaul Greenwalt 	switch (vsi->port_info->phy.link_info.fec_info) {
848f776b3acSPaul Greenwalt 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
849f776b3acSPaul Greenwalt 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
850f776b3acSPaul Greenwalt 		fec = "RS-FEC";
851f776b3acSPaul Greenwalt 		break;
852f776b3acSPaul Greenwalt 	case ICE_AQ_LINK_25G_KR_FEC_EN:
853f776b3acSPaul Greenwalt 		fec = "FC-FEC/BASE-R";
854f776b3acSPaul Greenwalt 		break;
855f776b3acSPaul Greenwalt 	default:
856f776b3acSPaul Greenwalt 		fec = "NONE";
857f776b3acSPaul Greenwalt 		break;
858f776b3acSPaul Greenwalt 	}
859f776b3acSPaul Greenwalt 
86043260988SJesse Brandeburg 	/* check if autoneg completed, might be false due to not supported */
86143260988SJesse Brandeburg 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
86243260988SJesse Brandeburg 		an = "True";
86343260988SJesse Brandeburg 	else
86443260988SJesse Brandeburg 		an = "False";
86543260988SJesse Brandeburg 
866f776b3acSPaul Greenwalt 	/* Get FEC mode requested based on PHY caps last SW configuration */
8679efe35d0STony Nguyen 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
868f776b3acSPaul Greenwalt 	if (!caps) {
869f776b3acSPaul Greenwalt 		fec_req = "Unknown";
8705ee30564SPaul Greenwalt 		an_advertised = "Unknown";
871f776b3acSPaul Greenwalt 		goto done;
872f776b3acSPaul Greenwalt 	}
873f776b3acSPaul Greenwalt 
874f776b3acSPaul Greenwalt 	status = ice_aq_get_phy_caps(vsi->port_info, false,
875d6730a87SAnirudh Venkataramanan 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
876f776b3acSPaul Greenwalt 	if (status)
877f776b3acSPaul Greenwalt 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
878f776b3acSPaul Greenwalt 
8795ee30564SPaul Greenwalt 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
8805ee30564SPaul Greenwalt 
881f776b3acSPaul Greenwalt 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
882f776b3acSPaul Greenwalt 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
883f776b3acSPaul Greenwalt 		fec_req = "RS-FEC";
884f776b3acSPaul Greenwalt 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
885f776b3acSPaul Greenwalt 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
886f776b3acSPaul Greenwalt 		fec_req = "FC-FEC/BASE-R";
887f776b3acSPaul Greenwalt 	else
888f776b3acSPaul Greenwalt 		fec_req = "NONE";
889f776b3acSPaul Greenwalt 
8909efe35d0STony Nguyen 	kfree(caps);
891f776b3acSPaul Greenwalt 
892f776b3acSPaul Greenwalt done:
8935ee30564SPaul Greenwalt 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
8945ee30564SPaul Greenwalt 		    speed, fec_req, fec, an_advertised, an, fc);
8952e0ab37cSJesse Brandeburg 	ice_print_topo_conflict(vsi);
896cdedef59SAnirudh Venkataramanan }
897cdedef59SAnirudh Venkataramanan 
898cdedef59SAnirudh Venkataramanan /**
899f9867df6SAnirudh Venkataramanan  * ice_vsi_link_event - update the VSI's netdev
900f9867df6SAnirudh Venkataramanan  * @vsi: the VSI on which the link event occurred
901f9867df6SAnirudh Venkataramanan  * @link_up: whether or not the VSI needs to be set up or down
9020b28b702SAnirudh Venkataramanan  */
9030b28b702SAnirudh Venkataramanan static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
9040b28b702SAnirudh Venkataramanan {
905c2a23e00SBrett Creeley 	if (!vsi)
906c2a23e00SBrett Creeley 		return;
907c2a23e00SBrett Creeley 
908e97fb1aeSAnirudh Venkataramanan 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
9090b28b702SAnirudh Venkataramanan 		return;
9100b28b702SAnirudh Venkataramanan 
9110b28b702SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_PF) {
912c2a23e00SBrett Creeley 		if (link_up == netif_carrier_ok(vsi->netdev))
9130b28b702SAnirudh Venkataramanan 			return;
914c2a23e00SBrett Creeley 
9150b28b702SAnirudh Venkataramanan 		if (link_up) {
9160b28b702SAnirudh Venkataramanan 			netif_carrier_on(vsi->netdev);
9170b28b702SAnirudh Venkataramanan 			netif_tx_wake_all_queues(vsi->netdev);
9180b28b702SAnirudh Venkataramanan 		} else {
9190b28b702SAnirudh Venkataramanan 			netif_carrier_off(vsi->netdev);
9200b28b702SAnirudh Venkataramanan 			netif_tx_stop_all_queues(vsi->netdev);
9210b28b702SAnirudh Venkataramanan 		}
9220b28b702SAnirudh Venkataramanan 	}
9230b28b702SAnirudh Venkataramanan }
9240b28b702SAnirudh Venkataramanan 
9250b28b702SAnirudh Venkataramanan /**
9267d9c9b79SDave Ertman  * ice_set_dflt_mib - send a default config MIB to the FW
9277d9c9b79SDave Ertman  * @pf: private PF struct
9287d9c9b79SDave Ertman  *
9297d9c9b79SDave Ertman  * This function sends a default configuration MIB to the FW.
9307d9c9b79SDave Ertman  *
9317d9c9b79SDave Ertman  * If this function errors out at any point, the driver is still able to
9327d9c9b79SDave Ertman  * function.  The main impact is that LFC may not operate as expected.
9337d9c9b79SDave Ertman  * Therefore an error state in this function should be treated with a DBG
9347d9c9b79SDave Ertman  * message and continue on with driver rebuild/reenable.
9357d9c9b79SDave Ertman  */
9367d9c9b79SDave Ertman static void ice_set_dflt_mib(struct ice_pf *pf)
9377d9c9b79SDave Ertman {
9387d9c9b79SDave Ertman 	struct device *dev = ice_pf_to_dev(pf);
9397d9c9b79SDave Ertman 	u8 mib_type, *buf, *lldpmib = NULL;
9407d9c9b79SDave Ertman 	u16 len, typelen, offset = 0;
9417d9c9b79SDave Ertman 	struct ice_lldp_org_tlv *tlv;
94212aae8f1SBruce Allan 	struct ice_hw *hw = &pf->hw;
9437d9c9b79SDave Ertman 	u32 ouisubtype;
9447d9c9b79SDave Ertman 
9457d9c9b79SDave Ertman 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
9467d9c9b79SDave Ertman 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
9477d9c9b79SDave Ertman 	if (!lldpmib) {
9487d9c9b79SDave Ertman 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
9497d9c9b79SDave Ertman 			__func__);
9507d9c9b79SDave Ertman 		return;
9517d9c9b79SDave Ertman 	}
9527d9c9b79SDave Ertman 
9537d9c9b79SDave Ertman 	/* Add ETS CFG TLV */
9547d9c9b79SDave Ertman 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
9557d9c9b79SDave Ertman 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
9567d9c9b79SDave Ertman 		   ICE_IEEE_ETS_TLV_LEN);
9577d9c9b79SDave Ertman 	tlv->typelen = htons(typelen);
9587d9c9b79SDave Ertman 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
9597d9c9b79SDave Ertman 		      ICE_IEEE_SUBTYPE_ETS_CFG);
9607d9c9b79SDave Ertman 	tlv->ouisubtype = htonl(ouisubtype);
9617d9c9b79SDave Ertman 
9627d9c9b79SDave Ertman 	buf = tlv->tlvinfo;
9637d9c9b79SDave Ertman 	buf[0] = 0;
9647d9c9b79SDave Ertman 
9657d9c9b79SDave Ertman 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
9667d9c9b79SDave Ertman 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
9677d9c9b79SDave Ertman 	 * Octets 13 - 20 are TSA values - leave as zeros
9687d9c9b79SDave Ertman 	 */
9697d9c9b79SDave Ertman 	buf[5] = 0x64;
9707d9c9b79SDave Ertman 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
9717d9c9b79SDave Ertman 	offset += len + 2;
9727d9c9b79SDave Ertman 	tlv = (struct ice_lldp_org_tlv *)
9737d9c9b79SDave Ertman 		((char *)tlv + sizeof(tlv->typelen) + len);
9747d9c9b79SDave Ertman 
9757d9c9b79SDave Ertman 	/* Add ETS REC TLV */
9767d9c9b79SDave Ertman 	buf = tlv->tlvinfo;
9777d9c9b79SDave Ertman 	tlv->typelen = htons(typelen);
9787d9c9b79SDave Ertman 
9797d9c9b79SDave Ertman 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
9807d9c9b79SDave Ertman 		      ICE_IEEE_SUBTYPE_ETS_REC);
9817d9c9b79SDave Ertman 	tlv->ouisubtype = htonl(ouisubtype);
9827d9c9b79SDave Ertman 
9837d9c9b79SDave Ertman 	/* First octet of buf is reserved
9847d9c9b79SDave Ertman 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
9857d9c9b79SDave Ertman 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
9867d9c9b79SDave Ertman 	 * Octets 13 - 20 are TSA value - leave as zeros
9877d9c9b79SDave Ertman 	 */
9887d9c9b79SDave Ertman 	buf[5] = 0x64;
9897d9c9b79SDave Ertman 	offset += len + 2;
9907d9c9b79SDave Ertman 	tlv = (struct ice_lldp_org_tlv *)
9917d9c9b79SDave Ertman 		((char *)tlv + sizeof(tlv->typelen) + len);
9927d9c9b79SDave Ertman 
9937d9c9b79SDave Ertman 	/* Add PFC CFG TLV */
9947d9c9b79SDave Ertman 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
9957d9c9b79SDave Ertman 		   ICE_IEEE_PFC_TLV_LEN);
9967d9c9b79SDave Ertman 	tlv->typelen = htons(typelen);
9977d9c9b79SDave Ertman 
9987d9c9b79SDave Ertman 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
9997d9c9b79SDave Ertman 		      ICE_IEEE_SUBTYPE_PFC_CFG);
10007d9c9b79SDave Ertman 	tlv->ouisubtype = htonl(ouisubtype);
10017d9c9b79SDave Ertman 
10027d9c9b79SDave Ertman 	/* Octet 1 left as all zeros - PFC disabled */
10037d9c9b79SDave Ertman 	buf[0] = 0x08;
10047d9c9b79SDave Ertman 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
10057d9c9b79SDave Ertman 	offset += len + 2;
10067d9c9b79SDave Ertman 
10077d9c9b79SDave Ertman 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
10087d9c9b79SDave Ertman 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
10097d9c9b79SDave Ertman 
10107d9c9b79SDave Ertman 	kfree(lldpmib);
10117d9c9b79SDave Ertman }
10127d9c9b79SDave Ertman 
10137d9c9b79SDave Ertman /**
101499d40752SBrett Creeley  * ice_check_phy_fw_load - check if PHY FW load failed
101599d40752SBrett Creeley  * @pf: pointer to PF struct
101699d40752SBrett Creeley  * @link_cfg_err: bitmap from the link info structure
101799d40752SBrett Creeley  *
101899d40752SBrett Creeley  * check if external PHY FW load failed and print an error message if it did
101999d40752SBrett Creeley  */
102099d40752SBrett Creeley static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
102199d40752SBrett Creeley {
102299d40752SBrett Creeley 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
102399d40752SBrett Creeley 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
102499d40752SBrett Creeley 		return;
102599d40752SBrett Creeley 	}
102699d40752SBrett Creeley 
102799d40752SBrett Creeley 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
102899d40752SBrett Creeley 		return;
102999d40752SBrett Creeley 
103099d40752SBrett Creeley 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
103199d40752SBrett Creeley 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
103299d40752SBrett Creeley 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
103399d40752SBrett Creeley 	}
103499d40752SBrett Creeley }
103599d40752SBrett Creeley 
103699d40752SBrett Creeley /**
1037c77849f5SAnirudh Venkataramanan  * ice_check_module_power
1038c77849f5SAnirudh Venkataramanan  * @pf: pointer to PF struct
1039c77849f5SAnirudh Venkataramanan  * @link_cfg_err: bitmap from the link info structure
1040c77849f5SAnirudh Venkataramanan  *
1041c77849f5SAnirudh Venkataramanan  * check module power level returned by a previous call to aq_get_link_info
1042c77849f5SAnirudh Venkataramanan  * and print error messages if module power level is not supported
1043c77849f5SAnirudh Venkataramanan  */
1044c77849f5SAnirudh Venkataramanan static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1045c77849f5SAnirudh Venkataramanan {
1046c77849f5SAnirudh Venkataramanan 	/* if module power level is supported, clear the flag */
1047c77849f5SAnirudh Venkataramanan 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1048c77849f5SAnirudh Venkataramanan 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1049c77849f5SAnirudh Venkataramanan 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1050c77849f5SAnirudh Venkataramanan 		return;
1051c77849f5SAnirudh Venkataramanan 	}
1052c77849f5SAnirudh Venkataramanan 
1053c77849f5SAnirudh Venkataramanan 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1054c77849f5SAnirudh Venkataramanan 	 * above block didn't clear this bit, there's nothing to do
1055c77849f5SAnirudh Venkataramanan 	 */
1056c77849f5SAnirudh Venkataramanan 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1057c77849f5SAnirudh Venkataramanan 		return;
1058c77849f5SAnirudh Venkataramanan 
1059c77849f5SAnirudh Venkataramanan 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1060c77849f5SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1061c77849f5SAnirudh Venkataramanan 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1062c77849f5SAnirudh Venkataramanan 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1063c77849f5SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1064c77849f5SAnirudh Venkataramanan 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1065c77849f5SAnirudh Venkataramanan 	}
1066c77849f5SAnirudh Venkataramanan }
1067c77849f5SAnirudh Venkataramanan 
1068c77849f5SAnirudh Venkataramanan /**
106999d40752SBrett Creeley  * ice_check_link_cfg_err - check if link configuration failed
107099d40752SBrett Creeley  * @pf: pointer to the PF struct
107199d40752SBrett Creeley  * @link_cfg_err: bitmap from the link info structure
107299d40752SBrett Creeley  *
107399d40752SBrett Creeley  * print if any link configuration failure happens due to the value in the
107499d40752SBrett Creeley  * link_cfg_err parameter in the link info structure
107599d40752SBrett Creeley  */
107699d40752SBrett Creeley static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
107799d40752SBrett Creeley {
107899d40752SBrett Creeley 	ice_check_module_power(pf, link_cfg_err);
107999d40752SBrett Creeley 	ice_check_phy_fw_load(pf, link_cfg_err);
108099d40752SBrett Creeley }
108199d40752SBrett Creeley 
108299d40752SBrett Creeley /**
10830b28b702SAnirudh Venkataramanan  * ice_link_event - process the link event
10842f2da36eSAnirudh Venkataramanan  * @pf: PF that the link event is associated with
10850b28b702SAnirudh Venkataramanan  * @pi: port_info for the port that the link event is associated with
1086c2a23e00SBrett Creeley  * @link_up: true if the physical link is up and false if it is down
1087c2a23e00SBrett Creeley  * @link_speed: current link speed received from the link event
10880b28b702SAnirudh Venkataramanan  *
1089c2a23e00SBrett Creeley  * Returns 0 on success and negative on failure
10900b28b702SAnirudh Venkataramanan  */
10910b28b702SAnirudh Venkataramanan static int
1092c2a23e00SBrett Creeley ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1093c2a23e00SBrett Creeley 	       u16 link_speed)
10940b28b702SAnirudh Venkataramanan {
10954015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
10960b28b702SAnirudh Venkataramanan 	struct ice_phy_info *phy_info;
1097c2a23e00SBrett Creeley 	struct ice_vsi *vsi;
1098c2a23e00SBrett Creeley 	u16 old_link_speed;
1099c2a23e00SBrett Creeley 	bool old_link;
11005518ac2aSTony Nguyen 	int status;
11010b28b702SAnirudh Venkataramanan 
11020b28b702SAnirudh Venkataramanan 	phy_info = &pi->phy;
11030b28b702SAnirudh Venkataramanan 	phy_info->link_info_old = phy_info->link_info;
11040b28b702SAnirudh Venkataramanan 
1105c2a23e00SBrett Creeley 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
11060b28b702SAnirudh Venkataramanan 	old_link_speed = phy_info->link_info_old.link_speed;
11070b28b702SAnirudh Venkataramanan 
1108c2a23e00SBrett Creeley 	/* update the link info structures and re-enable link events,
1109c2a23e00SBrett Creeley 	 * don't bail on failure due to other book keeping needed
1110c2a23e00SBrett Creeley 	 */
1111d348d517SAnirudh Venkataramanan 	status = ice_update_link_info(pi);
1112d348d517SAnirudh Venkataramanan 	if (status)
11135f87ec48STony Nguyen 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
11145f87ec48STony Nguyen 			pi->lport, status,
1115d348d517SAnirudh Venkataramanan 			ice_aq_str(pi->hw->adminq.sq_last_status));
11160b28b702SAnirudh Venkataramanan 
111799d40752SBrett Creeley 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1118c77849f5SAnirudh Venkataramanan 
11190ce6c34aSDave Ertman 	/* Check if the link state is up after updating link info, and treat
11200ce6c34aSDave Ertman 	 * this event as an UP event since the link is actually UP now.
11210ce6c34aSDave Ertman 	 */
11220ce6c34aSDave Ertman 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
11230ce6c34aSDave Ertman 		link_up = true;
11240ce6c34aSDave Ertman 
1125208ff751SAnirudh Venkataramanan 	vsi = ice_get_main_vsi(pf);
11260b28b702SAnirudh Venkataramanan 	if (!vsi || !vsi->port_info)
1127c2a23e00SBrett Creeley 		return -EINVAL;
11280b28b702SAnirudh Venkataramanan 
11296d599946STony Nguyen 	/* turn off PHY if media was removed */
11306d599946STony Nguyen 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
11316d599946STony Nguyen 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
11326d599946STony Nguyen 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1133d348d517SAnirudh Venkataramanan 		ice_set_link(vsi, false);
11346d599946STony Nguyen 	}
11356d599946STony Nguyen 
11361a3571b5SPaul Greenwalt 	/* if the old link up/down and speed is the same as the new */
11371a3571b5SPaul Greenwalt 	if (link_up == old_link && link_speed == old_link_speed)
1138d348d517SAnirudh Venkataramanan 		return 0;
11391a3571b5SPaul Greenwalt 
11403a749623SJacob Keller 	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
11413a749623SJacob Keller 
11427d9c9b79SDave Ertman 	if (ice_is_dcb_active(pf)) {
11437d9c9b79SDave Ertman 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1144242b5e06SDave Ertman 			ice_dcb_rebuild(pf);
11457d9c9b79SDave Ertman 	} else {
11467d9c9b79SDave Ertman 		if (link_up)
11477d9c9b79SDave Ertman 			ice_set_dflt_mib(pf);
11487d9c9b79SDave Ertman 	}
1149c2a23e00SBrett Creeley 	ice_vsi_link_event(vsi, link_up);
1150c2a23e00SBrett Creeley 	ice_print_link_msg(vsi, link_up);
11510b28b702SAnirudh Venkataramanan 
115253b8decbSAnirudh Venkataramanan 	ice_vc_notify_link_state(pf);
115353b8decbSAnirudh Venkataramanan 
1154d348d517SAnirudh Venkataramanan 	return 0;
11550b28b702SAnirudh Venkataramanan }
11560b28b702SAnirudh Venkataramanan 
11570b28b702SAnirudh Venkataramanan /**
11584f4be03bSAnirudh Venkataramanan  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
11594f4be03bSAnirudh Venkataramanan  * @pf: board private structure
11600b28b702SAnirudh Venkataramanan  */
11614f4be03bSAnirudh Venkataramanan static void ice_watchdog_subtask(struct ice_pf *pf)
11620b28b702SAnirudh Venkataramanan {
11634f4be03bSAnirudh Venkataramanan 	int i;
11640b28b702SAnirudh Venkataramanan 
11654f4be03bSAnirudh Venkataramanan 	/* if interface is down do nothing */
11667e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_DOWN, pf->state) ||
11677e408e07SAnirudh Venkataramanan 	    test_bit(ICE_CFG_BUSY, pf->state))
11684f4be03bSAnirudh Venkataramanan 		return;
11690b28b702SAnirudh Venkataramanan 
11704f4be03bSAnirudh Venkataramanan 	/* make sure we don't do these things too often */
11714f4be03bSAnirudh Venkataramanan 	if (time_before(jiffies,
11724f4be03bSAnirudh Venkataramanan 			pf->serv_tmr_prev + pf->serv_tmr_period))
11734f4be03bSAnirudh Venkataramanan 		return;
11740b28b702SAnirudh Venkataramanan 
11754f4be03bSAnirudh Venkataramanan 	pf->serv_tmr_prev = jiffies;
11764f4be03bSAnirudh Venkataramanan 
11774f4be03bSAnirudh Venkataramanan 	/* Update the stats for active netdevs so the network stack
11784f4be03bSAnirudh Venkataramanan 	 * can look at updated numbers whenever it cares to
11794f4be03bSAnirudh Venkataramanan 	 */
11804f4be03bSAnirudh Venkataramanan 	ice_update_pf_stats(pf);
118180ed404aSBrett Creeley 	ice_for_each_vsi(pf, i)
11824f4be03bSAnirudh Venkataramanan 		if (pf->vsi[i] && pf->vsi[i]->netdev)
11834f4be03bSAnirudh Venkataramanan 			ice_update_vsi_stats(pf->vsi[i]);
11840b28b702SAnirudh Venkataramanan }
11850b28b702SAnirudh Venkataramanan 
11860b28b702SAnirudh Venkataramanan /**
1187250c3b3eSBrett Creeley  * ice_init_link_events - enable/initialize link events
1188250c3b3eSBrett Creeley  * @pi: pointer to the port_info instance
1189250c3b3eSBrett Creeley  *
1190250c3b3eSBrett Creeley  * Returns -EIO on failure, 0 on success
1191250c3b3eSBrett Creeley  */
1192250c3b3eSBrett Creeley static int ice_init_link_events(struct ice_port_info *pi)
1193250c3b3eSBrett Creeley {
1194250c3b3eSBrett Creeley 	u16 mask;
1195250c3b3eSBrett Creeley 
1196250c3b3eSBrett Creeley 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
119799d40752SBrett Creeley 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
119899d40752SBrett Creeley 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1199250c3b3eSBrett Creeley 
1200250c3b3eSBrett Creeley 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
120119cce2c6SAnirudh Venkataramanan 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1202250c3b3eSBrett Creeley 			pi->lport);
1203250c3b3eSBrett Creeley 		return -EIO;
1204250c3b3eSBrett Creeley 	}
1205250c3b3eSBrett Creeley 
1206250c3b3eSBrett Creeley 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
120719cce2c6SAnirudh Venkataramanan 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1208250c3b3eSBrett Creeley 			pi->lport);
1209250c3b3eSBrett Creeley 		return -EIO;
1210250c3b3eSBrett Creeley 	}
1211250c3b3eSBrett Creeley 
1212250c3b3eSBrett Creeley 	return 0;
1213250c3b3eSBrett Creeley }
1214250c3b3eSBrett Creeley 
1215250c3b3eSBrett Creeley /**
1216250c3b3eSBrett Creeley  * ice_handle_link_event - handle link event via ARQ
12172f2da36eSAnirudh Venkataramanan  * @pf: PF that the link event is associated with
1218c2a23e00SBrett Creeley  * @event: event structure containing link status info
1219250c3b3eSBrett Creeley  */
1220c2a23e00SBrett Creeley static int
1221c2a23e00SBrett Creeley ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1222250c3b3eSBrett Creeley {
1223c2a23e00SBrett Creeley 	struct ice_aqc_get_link_status_data *link_data;
1224250c3b3eSBrett Creeley 	struct ice_port_info *port_info;
1225250c3b3eSBrett Creeley 	int status;
1226250c3b3eSBrett Creeley 
1227c2a23e00SBrett Creeley 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1228250c3b3eSBrett Creeley 	port_info = pf->hw.port_info;
1229250c3b3eSBrett Creeley 	if (!port_info)
1230250c3b3eSBrett Creeley 		return -EINVAL;
1231250c3b3eSBrett Creeley 
1232c2a23e00SBrett Creeley 	status = ice_link_event(pf, port_info,
1233c2a23e00SBrett Creeley 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1234c2a23e00SBrett Creeley 				le16_to_cpu(link_data->link_speed));
1235250c3b3eSBrett Creeley 	if (status)
123619cce2c6SAnirudh Venkataramanan 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
123719cce2c6SAnirudh Venkataramanan 			status);
1238250c3b3eSBrett Creeley 
1239250c3b3eSBrett Creeley 	return status;
1240250c3b3eSBrett Creeley }
1241250c3b3eSBrett Creeley 
/* Lifecycle of an AdminQ wait task (see ice_aq_wait_for_event()) */
enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,	/* no matching event received yet */
	ICE_AQ_TASK_COMPLETE,		/* matching event arrived and was copied */
	ICE_AQ_TASK_CANCELED,		/* wait aborted via ice_aq_cancel_waiting_tasks() */
};
1247d69ea414SJacob Keller 
/* Bookkeeping for one thread waiting on a specific AdminQ event */
struct ice_aq_task {
	struct hlist_node entry;		/* node in pf->aq_wait_list */

	u16 opcode;				/* AdminQ opcode being waited for */
	struct ice_rq_event_info *event;	/* caller-provided event storage */
	enum ice_aq_task_state state;		/* see enum ice_aq_task_state */
};
1255d69ea414SJacob Keller 
1256d69ea414SJacob Keller /**
1257ef860480STony Nguyen  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1258d69ea414SJacob Keller  * @pf: pointer to the PF private structure
1259d69ea414SJacob Keller  * @opcode: the opcode to wait for
1260d69ea414SJacob Keller  * @timeout: how long to wait, in jiffies
1261d69ea414SJacob Keller  * @event: storage for the event info
1262d69ea414SJacob Keller  *
1263d69ea414SJacob Keller  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1264d69ea414SJacob Keller  * current thread will be put to sleep until the specified event occurs or
1265d69ea414SJacob Keller  * until the given timeout is reached.
1266d69ea414SJacob Keller  *
1267d69ea414SJacob Keller  * To obtain only the descriptor contents, pass an event without an allocated
1268d69ea414SJacob Keller  * msg_buf. If the complete data buffer is desired, allocate the
1269d69ea414SJacob Keller  * event->msg_buf with enough space ahead of time.
1270d69ea414SJacob Keller  *
1271d69ea414SJacob Keller  * Returns: zero on success, or a negative error code on failure.
1272d69ea414SJacob Keller  */
1273d69ea414SJacob Keller int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1274d69ea414SJacob Keller 			  struct ice_rq_event_info *event)
1275d69ea414SJacob Keller {
12761e8249ccSJacob Keller 	struct device *dev = ice_pf_to_dev(pf);
1277d69ea414SJacob Keller 	struct ice_aq_task *task;
12781e8249ccSJacob Keller 	unsigned long start;
1279d69ea414SJacob Keller 	long ret;
1280d69ea414SJacob Keller 	int err;
1281d69ea414SJacob Keller 
1282d69ea414SJacob Keller 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1283d69ea414SJacob Keller 	if (!task)
1284d69ea414SJacob Keller 		return -ENOMEM;
1285d69ea414SJacob Keller 
1286d69ea414SJacob Keller 	INIT_HLIST_NODE(&task->entry);
1287d69ea414SJacob Keller 	task->opcode = opcode;
1288d69ea414SJacob Keller 	task->event = event;
1289d69ea414SJacob Keller 	task->state = ICE_AQ_TASK_WAITING;
1290d69ea414SJacob Keller 
1291d69ea414SJacob Keller 	spin_lock_bh(&pf->aq_wait_lock);
1292d69ea414SJacob Keller 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1293d69ea414SJacob Keller 	spin_unlock_bh(&pf->aq_wait_lock);
1294d69ea414SJacob Keller 
12951e8249ccSJacob Keller 	start = jiffies;
12961e8249ccSJacob Keller 
1297d69ea414SJacob Keller 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1298d69ea414SJacob Keller 					       timeout);
1299d69ea414SJacob Keller 	switch (task->state) {
1300d69ea414SJacob Keller 	case ICE_AQ_TASK_WAITING:
1301d69ea414SJacob Keller 		err = ret < 0 ? ret : -ETIMEDOUT;
1302d69ea414SJacob Keller 		break;
1303d69ea414SJacob Keller 	case ICE_AQ_TASK_CANCELED:
1304d69ea414SJacob Keller 		err = ret < 0 ? ret : -ECANCELED;
1305d69ea414SJacob Keller 		break;
1306d69ea414SJacob Keller 	case ICE_AQ_TASK_COMPLETE:
1307d69ea414SJacob Keller 		err = ret < 0 ? ret : 0;
1308d69ea414SJacob Keller 		break;
1309d69ea414SJacob Keller 	default:
1310d69ea414SJacob Keller 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1311d69ea414SJacob Keller 		err = -EINVAL;
1312d69ea414SJacob Keller 		break;
1313d69ea414SJacob Keller 	}
1314d69ea414SJacob Keller 
13151e8249ccSJacob Keller 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
13161e8249ccSJacob Keller 		jiffies_to_msecs(jiffies - start),
13171e8249ccSJacob Keller 		jiffies_to_msecs(timeout),
13181e8249ccSJacob Keller 		opcode);
13191e8249ccSJacob Keller 
1320d69ea414SJacob Keller 	spin_lock_bh(&pf->aq_wait_lock);
1321d69ea414SJacob Keller 	hlist_del(&task->entry);
1322d69ea414SJacob Keller 	spin_unlock_bh(&pf->aq_wait_lock);
1323d69ea414SJacob Keller 	kfree(task);
1324d69ea414SJacob Keller 
1325d69ea414SJacob Keller 	return err;
1326d69ea414SJacob Keller }
1327d69ea414SJacob Keller 
1328d69ea414SJacob Keller /**
1329d69ea414SJacob Keller  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1330d69ea414SJacob Keller  * @pf: pointer to the PF private structure
1331d69ea414SJacob Keller  * @opcode: the opcode of the event
1332d69ea414SJacob Keller  * @event: the event to check
1333d69ea414SJacob Keller  *
1334d69ea414SJacob Keller  * Loops over the current list of pending threads waiting for an AdminQ event.
1335d69ea414SJacob Keller  * For each matching task, copy the contents of the event into the task
1336d69ea414SJacob Keller  * structure and wake up the thread.
1337d69ea414SJacob Keller  *
1338d69ea414SJacob Keller  * If multiple threads wait for the same opcode, they will all be woken up.
1339d69ea414SJacob Keller  *
1340d69ea414SJacob Keller  * Note that event->msg_buf will only be duplicated if the event has a buffer
1341d69ea414SJacob Keller  * with enough space already allocated. Otherwise, only the descriptor and
1342d69ea414SJacob Keller  * message length will be copied.
1343d69ea414SJacob Keller  *
1344d69ea414SJacob Keller  * Returns: true if an event was found, false otherwise
1345d69ea414SJacob Keller  */
1346d69ea414SJacob Keller static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1347d69ea414SJacob Keller 				struct ice_rq_event_info *event)
1348d69ea414SJacob Keller {
1349d69ea414SJacob Keller 	struct ice_aq_task *task;
1350d69ea414SJacob Keller 	bool found = false;
1351d69ea414SJacob Keller 
1352d69ea414SJacob Keller 	spin_lock_bh(&pf->aq_wait_lock);
1353d69ea414SJacob Keller 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1354d69ea414SJacob Keller 		if (task->state || task->opcode != opcode)
1355d69ea414SJacob Keller 			continue;
1356d69ea414SJacob Keller 
1357d69ea414SJacob Keller 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1358d69ea414SJacob Keller 		task->event->msg_len = event->msg_len;
1359d69ea414SJacob Keller 
1360d69ea414SJacob Keller 		/* Only copy the data buffer if a destination was set */
1361d69ea414SJacob Keller 		if (task->event->msg_buf &&
1362d69ea414SJacob Keller 		    task->event->buf_len > event->buf_len) {
1363d69ea414SJacob Keller 			memcpy(task->event->msg_buf, event->msg_buf,
1364d69ea414SJacob Keller 			       event->buf_len);
1365d69ea414SJacob Keller 			task->event->buf_len = event->buf_len;
1366d69ea414SJacob Keller 		}
1367d69ea414SJacob Keller 
1368d69ea414SJacob Keller 		task->state = ICE_AQ_TASK_COMPLETE;
1369d69ea414SJacob Keller 		found = true;
1370d69ea414SJacob Keller 	}
1371d69ea414SJacob Keller 	spin_unlock_bh(&pf->aq_wait_lock);
1372d69ea414SJacob Keller 
1373d69ea414SJacob Keller 	if (found)
1374d69ea414SJacob Keller 		wake_up(&pf->aq_wait_queue);
1375d69ea414SJacob Keller }
1376d69ea414SJacob Keller 
1377d69ea414SJacob Keller /**
1378d69ea414SJacob Keller  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1379d69ea414SJacob Keller  * @pf: the PF private structure
1380d69ea414SJacob Keller  *
1381d69ea414SJacob Keller  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1382d69ea414SJacob Keller  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1383d69ea414SJacob Keller  */
1384d69ea414SJacob Keller static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1385d69ea414SJacob Keller {
1386d69ea414SJacob Keller 	struct ice_aq_task *task;
1387d69ea414SJacob Keller 
1388d69ea414SJacob Keller 	spin_lock_bh(&pf->aq_wait_lock);
1389d69ea414SJacob Keller 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1390d69ea414SJacob Keller 		task->state = ICE_AQ_TASK_CANCELED;
1391d69ea414SJacob Keller 	spin_unlock_bh(&pf->aq_wait_lock);
1392d69ea414SJacob Keller 
1393d69ea414SJacob Keller 	wake_up(&pf->aq_wait_queue);
1394d69ea414SJacob Keller }
1395d69ea414SJacob Keller 
1396afc24d65SJacob Keller #define ICE_MBX_OVERFLOW_WATERMARK 64
1397afc24d65SJacob Keller 
1398250c3b3eSBrett Creeley /**
1399940b61afSAnirudh Venkataramanan  * __ice_clean_ctrlq - helper function to clean controlq rings
1400940b61afSAnirudh Venkataramanan  * @pf: ptr to struct ice_pf
1401940b61afSAnirudh Venkataramanan  * @q_type: specific Control queue type
1402940b61afSAnirudh Venkataramanan  */
1403940b61afSAnirudh Venkataramanan static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1404940b61afSAnirudh Venkataramanan {
14054015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
1406940b61afSAnirudh Venkataramanan 	struct ice_rq_event_info event;
1407940b61afSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
1408940b61afSAnirudh Venkataramanan 	struct ice_ctl_q_info *cq;
1409940b61afSAnirudh Venkataramanan 	u16 pending, i = 0;
1410940b61afSAnirudh Venkataramanan 	const char *qtype;
1411940b61afSAnirudh Venkataramanan 	u32 oldval, val;
1412940b61afSAnirudh Venkataramanan 
14130b28b702SAnirudh Venkataramanan 	/* Do not clean control queue if/when PF reset fails */
14147e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_RESET_FAILED, pf->state))
14150b28b702SAnirudh Venkataramanan 		return 0;
14160b28b702SAnirudh Venkataramanan 
1417940b61afSAnirudh Venkataramanan 	switch (q_type) {
1418940b61afSAnirudh Venkataramanan 	case ICE_CTL_Q_ADMIN:
1419940b61afSAnirudh Venkataramanan 		cq = &hw->adminq;
1420940b61afSAnirudh Venkataramanan 		qtype = "Admin";
1421940b61afSAnirudh Venkataramanan 		break;
14228f5ee3c4SJacob Keller 	case ICE_CTL_Q_SB:
14238f5ee3c4SJacob Keller 		cq = &hw->sbq;
14248f5ee3c4SJacob Keller 		qtype = "Sideband";
14258f5ee3c4SJacob Keller 		break;
142675d2b253SAnirudh Venkataramanan 	case ICE_CTL_Q_MAILBOX:
142775d2b253SAnirudh Venkataramanan 		cq = &hw->mailboxq;
142875d2b253SAnirudh Venkataramanan 		qtype = "Mailbox";
14290891c896SVignesh Sridhar 		/* we are going to try to detect a malicious VF, so set the
14300891c896SVignesh Sridhar 		 * state to begin detection
14310891c896SVignesh Sridhar 		 */
14320891c896SVignesh Sridhar 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
143375d2b253SAnirudh Venkataramanan 		break;
1434940b61afSAnirudh Venkataramanan 	default:
14354015d11eSBrett Creeley 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1436940b61afSAnirudh Venkataramanan 		return 0;
1437940b61afSAnirudh Venkataramanan 	}
1438940b61afSAnirudh Venkataramanan 
1439940b61afSAnirudh Venkataramanan 	/* check for error indications - PF_xx_AxQLEN register layout for
1440940b61afSAnirudh Venkataramanan 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1441940b61afSAnirudh Venkataramanan 	 */
1442940b61afSAnirudh Venkataramanan 	val = rd32(hw, cq->rq.len);
1443940b61afSAnirudh Venkataramanan 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1444940b61afSAnirudh Venkataramanan 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1445940b61afSAnirudh Venkataramanan 		oldval = val;
1446940b61afSAnirudh Venkataramanan 		if (val & PF_FW_ARQLEN_ARQVFE_M)
14474015d11eSBrett Creeley 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
14484015d11eSBrett Creeley 				qtype);
1449940b61afSAnirudh Venkataramanan 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
145019cce2c6SAnirudh Venkataramanan 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1451940b61afSAnirudh Venkataramanan 				qtype);
1452940b61afSAnirudh Venkataramanan 		}
1453940b61afSAnirudh Venkataramanan 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
145419cce2c6SAnirudh Venkataramanan 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1455940b61afSAnirudh Venkataramanan 				qtype);
1456940b61afSAnirudh Venkataramanan 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1457940b61afSAnirudh Venkataramanan 			 PF_FW_ARQLEN_ARQCRIT_M);
1458940b61afSAnirudh Venkataramanan 		if (oldval != val)
1459940b61afSAnirudh Venkataramanan 			wr32(hw, cq->rq.len, val);
1460940b61afSAnirudh Venkataramanan 	}
1461940b61afSAnirudh Venkataramanan 
1462940b61afSAnirudh Venkataramanan 	val = rd32(hw, cq->sq.len);
1463940b61afSAnirudh Venkataramanan 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1464940b61afSAnirudh Venkataramanan 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1465940b61afSAnirudh Venkataramanan 		oldval = val;
1466940b61afSAnirudh Venkataramanan 		if (val & PF_FW_ATQLEN_ATQVFE_M)
146719cce2c6SAnirudh Venkataramanan 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
146819cce2c6SAnirudh Venkataramanan 				qtype);
1469940b61afSAnirudh Venkataramanan 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
14704015d11eSBrett Creeley 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1471940b61afSAnirudh Venkataramanan 				qtype);
1472940b61afSAnirudh Venkataramanan 		}
1473940b61afSAnirudh Venkataramanan 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
14744015d11eSBrett Creeley 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1475940b61afSAnirudh Venkataramanan 				qtype);
1476940b61afSAnirudh Venkataramanan 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1477940b61afSAnirudh Venkataramanan 			 PF_FW_ATQLEN_ATQCRIT_M);
1478940b61afSAnirudh Venkataramanan 		if (oldval != val)
1479940b61afSAnirudh Venkataramanan 			wr32(hw, cq->sq.len, val);
1480940b61afSAnirudh Venkataramanan 	}
1481940b61afSAnirudh Venkataramanan 
1482940b61afSAnirudh Venkataramanan 	event.buf_len = cq->rq_buf_size;
14839efe35d0STony Nguyen 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1484940b61afSAnirudh Venkataramanan 	if (!event.msg_buf)
1485940b61afSAnirudh Venkataramanan 		return 0;
1486940b61afSAnirudh Venkataramanan 
1487940b61afSAnirudh Venkataramanan 	do {
1488afc24d65SJacob Keller 		struct ice_mbx_data data = {};
14890b28b702SAnirudh Venkataramanan 		u16 opcode;
14905518ac2aSTony Nguyen 		int ret;
1491940b61afSAnirudh Venkataramanan 
1492940b61afSAnirudh Venkataramanan 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1493d54699e2STony Nguyen 		if (ret == -EALREADY)
1494940b61afSAnirudh Venkataramanan 			break;
1495940b61afSAnirudh Venkataramanan 		if (ret) {
14965f87ec48STony Nguyen 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
14975f87ec48STony Nguyen 				ret);
1498940b61afSAnirudh Venkataramanan 			break;
1499940b61afSAnirudh Venkataramanan 		}
15000b28b702SAnirudh Venkataramanan 
15010b28b702SAnirudh Venkataramanan 		opcode = le16_to_cpu(event.desc.opcode);
15020b28b702SAnirudh Venkataramanan 
1503d69ea414SJacob Keller 		/* Notify any thread that might be waiting for this event */
1504d69ea414SJacob Keller 		ice_aq_check_events(pf, opcode, &event);
1505d69ea414SJacob Keller 
15060b28b702SAnirudh Venkataramanan 		switch (opcode) {
1507250c3b3eSBrett Creeley 		case ice_aqc_opc_get_link_status:
1508c2a23e00SBrett Creeley 			if (ice_handle_link_event(pf, &event))
15094015d11eSBrett Creeley 				dev_err(dev, "Could not handle link event\n");
1510250c3b3eSBrett Creeley 			break;
15112309ae38SBrett Creeley 		case ice_aqc_opc_event_lan_overflow:
15122309ae38SBrett Creeley 			ice_vf_lan_overflow_event(pf, &event);
15132309ae38SBrett Creeley 			break;
15141071a835SAnirudh Venkataramanan 		case ice_mbx_opc_send_msg_to_pf:
1515afc24d65SJacob Keller 			data.num_msg_proc = i;
1516afc24d65SJacob Keller 			data.num_pending_arq = pending;
1517afc24d65SJacob Keller 			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
1518afc24d65SJacob Keller 			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
1519afc24d65SJacob Keller 
1520be96815cSJacob Keller 			ice_vc_process_vf_msg(pf, &event, &data);
15211071a835SAnirudh Venkataramanan 			break;
15228b97ceb1SHieu Tran 		case ice_aqc_opc_fw_logging:
15238b97ceb1SHieu Tran 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
15248b97ceb1SHieu Tran 			break;
152500cc3f1bSAnirudh Venkataramanan 		case ice_aqc_opc_lldp_set_mib_change:
152600cc3f1bSAnirudh Venkataramanan 			ice_dcb_process_lldp_set_mib_change(pf, &event);
152700cc3f1bSAnirudh Venkataramanan 			break;
15280b28b702SAnirudh Venkataramanan 		default:
152919cce2c6SAnirudh Venkataramanan 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
15300b28b702SAnirudh Venkataramanan 				qtype, opcode);
15310b28b702SAnirudh Venkataramanan 			break;
15320b28b702SAnirudh Venkataramanan 		}
1533940b61afSAnirudh Venkataramanan 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1534940b61afSAnirudh Venkataramanan 
15359efe35d0STony Nguyen 	kfree(event.msg_buf);
1536940b61afSAnirudh Venkataramanan 
1537940b61afSAnirudh Venkataramanan 	return pending && (i == ICE_DFLT_IRQ_WORK);
1538940b61afSAnirudh Venkataramanan }
1539940b61afSAnirudh Venkataramanan 
1540940b61afSAnirudh Venkataramanan /**
15413d6b640eSAnirudh Venkataramanan  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
15423d6b640eSAnirudh Venkataramanan  * @hw: pointer to hardware info
15433d6b640eSAnirudh Venkataramanan  * @cq: control queue information
15443d6b640eSAnirudh Venkataramanan  *
15453d6b640eSAnirudh Venkataramanan  * returns true if there are pending messages in a queue, false if there aren't
15463d6b640eSAnirudh Venkataramanan  */
15473d6b640eSAnirudh Venkataramanan static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
15483d6b640eSAnirudh Venkataramanan {
15493d6b640eSAnirudh Venkataramanan 	u16 ntu;
15503d6b640eSAnirudh Venkataramanan 
15513d6b640eSAnirudh Venkataramanan 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
15523d6b640eSAnirudh Venkataramanan 	return cq->rq.next_to_clean != ntu;
15533d6b640eSAnirudh Venkataramanan }
15543d6b640eSAnirudh Venkataramanan 
15553d6b640eSAnirudh Venkataramanan /**
1556940b61afSAnirudh Venkataramanan  * ice_clean_adminq_subtask - clean the AdminQ rings
1557940b61afSAnirudh Venkataramanan  * @pf: board private structure
1558940b61afSAnirudh Venkataramanan  */
1559940b61afSAnirudh Venkataramanan static void ice_clean_adminq_subtask(struct ice_pf *pf)
1560940b61afSAnirudh Venkataramanan {
1561940b61afSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
1562940b61afSAnirudh Venkataramanan 
15637e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1564940b61afSAnirudh Venkataramanan 		return;
1565940b61afSAnirudh Venkataramanan 
1566940b61afSAnirudh Venkataramanan 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1567940b61afSAnirudh Venkataramanan 		return;
1568940b61afSAnirudh Venkataramanan 
15697e408e07SAnirudh Venkataramanan 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1570940b61afSAnirudh Venkataramanan 
15713d6b640eSAnirudh Venkataramanan 	/* There might be a situation where new messages arrive to a control
15723d6b640eSAnirudh Venkataramanan 	 * queue between processing the last message and clearing the
15733d6b640eSAnirudh Venkataramanan 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
15743d6b640eSAnirudh Venkataramanan 	 * ice_ctrlq_pending) and process new messages if any.
15753d6b640eSAnirudh Venkataramanan 	 */
15763d6b640eSAnirudh Venkataramanan 	if (ice_ctrlq_pending(hw, &hw->adminq))
15773d6b640eSAnirudh Venkataramanan 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1578940b61afSAnirudh Venkataramanan 
1579940b61afSAnirudh Venkataramanan 	ice_flush(hw);
1580940b61afSAnirudh Venkataramanan }
1581940b61afSAnirudh Venkataramanan 
1582940b61afSAnirudh Venkataramanan /**
158375d2b253SAnirudh Venkataramanan  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
158475d2b253SAnirudh Venkataramanan  * @pf: board private structure
158575d2b253SAnirudh Venkataramanan  */
158675d2b253SAnirudh Venkataramanan static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
158775d2b253SAnirudh Venkataramanan {
158875d2b253SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
158975d2b253SAnirudh Venkataramanan 
15907e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
159175d2b253SAnirudh Venkataramanan 		return;
159275d2b253SAnirudh Venkataramanan 
159375d2b253SAnirudh Venkataramanan 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
159475d2b253SAnirudh Venkataramanan 		return;
159575d2b253SAnirudh Venkataramanan 
15967e408e07SAnirudh Venkataramanan 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
159775d2b253SAnirudh Venkataramanan 
159875d2b253SAnirudh Venkataramanan 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
159975d2b253SAnirudh Venkataramanan 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
160075d2b253SAnirudh Venkataramanan 
160175d2b253SAnirudh Venkataramanan 	ice_flush(hw);
160275d2b253SAnirudh Venkataramanan }
160375d2b253SAnirudh Venkataramanan 
160475d2b253SAnirudh Venkataramanan /**
16058f5ee3c4SJacob Keller  * ice_clean_sbq_subtask - clean the Sideband Queue rings
16068f5ee3c4SJacob Keller  * @pf: board private structure
16078f5ee3c4SJacob Keller  */
16088f5ee3c4SJacob Keller static void ice_clean_sbq_subtask(struct ice_pf *pf)
16098f5ee3c4SJacob Keller {
16108f5ee3c4SJacob Keller 	struct ice_hw *hw = &pf->hw;
16118f5ee3c4SJacob Keller 
16128f5ee3c4SJacob Keller 	/* Nothing to do here if sideband queue is not supported */
16138f5ee3c4SJacob Keller 	if (!ice_is_sbq_supported(hw)) {
16148f5ee3c4SJacob Keller 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
16158f5ee3c4SJacob Keller 		return;
16168f5ee3c4SJacob Keller 	}
16178f5ee3c4SJacob Keller 
16188f5ee3c4SJacob Keller 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
16198f5ee3c4SJacob Keller 		return;
16208f5ee3c4SJacob Keller 
16218f5ee3c4SJacob Keller 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
16228f5ee3c4SJacob Keller 		return;
16238f5ee3c4SJacob Keller 
16248f5ee3c4SJacob Keller 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
16258f5ee3c4SJacob Keller 
16268f5ee3c4SJacob Keller 	if (ice_ctrlq_pending(hw, &hw->sbq))
16278f5ee3c4SJacob Keller 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
16288f5ee3c4SJacob Keller 
16298f5ee3c4SJacob Keller 	ice_flush(hw);
16308f5ee3c4SJacob Keller }
16318f5ee3c4SJacob Keller 
16328f5ee3c4SJacob Keller /**
1633940b61afSAnirudh Venkataramanan  * ice_service_task_schedule - schedule the service task to wake up
1634940b61afSAnirudh Venkataramanan  * @pf: board private structure
1635940b61afSAnirudh Venkataramanan  *
1636940b61afSAnirudh Venkataramanan  * If not already scheduled, this puts the task into the work queue.
1637940b61afSAnirudh Venkataramanan  */
163828bf2672SBrett Creeley void ice_service_task_schedule(struct ice_pf *pf)
1639940b61afSAnirudh Venkataramanan {
16407e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
16417e408e07SAnirudh Venkataramanan 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
16427e408e07SAnirudh Venkataramanan 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1643940b61afSAnirudh Venkataramanan 		queue_work(ice_wq, &pf->serv_task);
1644940b61afSAnirudh Venkataramanan }
1645940b61afSAnirudh Venkataramanan 
/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 *
 * Clears ICE_SERVICE_SCHED so ice_service_task_schedule() may queue the
 * service task again. Called at the end of a service task run.
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	/* The SCHED bit must be set for the entire duration of a run */
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}
1658940b61afSAnirudh Venkataramanan 
1659940b61afSAnirudh Venkataramanan /**
16608d81fa55SAkeem G Abodunrin  * ice_service_task_stop - stop service task and cancel works
16618d81fa55SAkeem G Abodunrin  * @pf: board private structure
1662769c500dSAkeem G Abodunrin  *
16637e408e07SAnirudh Venkataramanan  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1664769c500dSAkeem G Abodunrin  * 1 otherwise.
16658d81fa55SAkeem G Abodunrin  */
1666769c500dSAkeem G Abodunrin static int ice_service_task_stop(struct ice_pf *pf)
16678d81fa55SAkeem G Abodunrin {
1668769c500dSAkeem G Abodunrin 	int ret;
1669769c500dSAkeem G Abodunrin 
16707e408e07SAnirudh Venkataramanan 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
16718d81fa55SAkeem G Abodunrin 
16728d81fa55SAkeem G Abodunrin 	if (pf->serv_tmr.function)
16738d81fa55SAkeem G Abodunrin 		del_timer_sync(&pf->serv_tmr);
16748d81fa55SAkeem G Abodunrin 	if (pf->serv_task.func)
16758d81fa55SAkeem G Abodunrin 		cancel_work_sync(&pf->serv_task);
16768d81fa55SAkeem G Abodunrin 
16777e408e07SAnirudh Venkataramanan 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1678769c500dSAkeem G Abodunrin 	return ret;
16798d81fa55SAkeem G Abodunrin }
16808d81fa55SAkeem G Abodunrin 
/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	/* Re-enable scheduling (undoes ice_service_task_stop()), then kick
	 * the service task so any pending work is handled promptly. Order
	 * matters: the DIS bit must be cleared before scheduling.
	 */
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}
16925995b6d0SBrett Creeley 
16935995b6d0SBrett Creeley /**
1694940b61afSAnirudh Venkataramanan  * ice_service_timer - timer callback to schedule service task
1695940b61afSAnirudh Venkataramanan  * @t: pointer to timer_list
1696940b61afSAnirudh Venkataramanan  */
1697940b61afSAnirudh Venkataramanan static void ice_service_timer(struct timer_list *t)
1698940b61afSAnirudh Venkataramanan {
1699940b61afSAnirudh Venkataramanan 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1700940b61afSAnirudh Venkataramanan 
1701940b61afSAnirudh Venkataramanan 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1702940b61afSAnirudh Venkataramanan 	ice_service_task_schedule(pf);
1703940b61afSAnirudh Venkataramanan }
1704940b61afSAnirudh Venkataramanan 
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		/* decode which PF/VF/queue tripped the Tx PQM detector */
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		/* write all-ones to acknowledge/clear the event */
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		/* decode which PF/VF/queue tripped the Tx TCLAN detector */
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		/* decode which PF/VF/queue tripped the Rx detector */
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
			}
		}
	}
	mutex_unlock(&pf->vfs.table_lock);

	/* flush any VF events accumulated above (rate-limited internally) */
	ice_print_vfs_mdd_events(pf);
}
1866b3969fd7SSudheer Mogilappagari 
1867b3969fd7SSudheer Mogilappagari /**
18686d599946STony Nguyen  * ice_force_phys_link_state - Force the physical link state
18696d599946STony Nguyen  * @vsi: VSI to force the physical link state to up/down
18706d599946STony Nguyen  * @link_up: true/false indicates to set the physical link to up/down
18716d599946STony Nguyen  *
18726d599946STony Nguyen  * Force the physical link state by getting the current PHY capabilities from
18736d599946STony Nguyen  * hardware and setting the PHY config based on the determined capabilities. If
18746d599946STony Nguyen  * link changes a link event will be triggered because both the Enable Automatic
18756d599946STony Nguyen  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
18766d599946STony Nguyen  *
18776d599946STony Nguyen  * Returns 0 on success, negative on failure
18786d599946STony Nguyen  */
18796d599946STony Nguyen static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
18806d599946STony Nguyen {
18816d599946STony Nguyen 	struct ice_aqc_get_phy_caps_data *pcaps;
18826d599946STony Nguyen 	struct ice_aqc_set_phy_cfg_data *cfg;
18836d599946STony Nguyen 	struct ice_port_info *pi;
18846d599946STony Nguyen 	struct device *dev;
18856d599946STony Nguyen 	int retcode;
18866d599946STony Nguyen 
18876d599946STony Nguyen 	if (!vsi || !vsi->port_info || !vsi->back)
18886d599946STony Nguyen 		return -EINVAL;
18896d599946STony Nguyen 	if (vsi->type != ICE_VSI_PF)
18906d599946STony Nguyen 		return 0;
18916d599946STony Nguyen 
18929a946843SAnirudh Venkataramanan 	dev = ice_pf_to_dev(vsi->back);
18936d599946STony Nguyen 
18946d599946STony Nguyen 	pi = vsi->port_info;
18956d599946STony Nguyen 
18969efe35d0STony Nguyen 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
18976d599946STony Nguyen 	if (!pcaps)
18986d599946STony Nguyen 		return -ENOMEM;
18996d599946STony Nguyen 
1900d6730a87SAnirudh Venkataramanan 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
19016d599946STony Nguyen 				      NULL);
19026d599946STony Nguyen 	if (retcode) {
190319cce2c6SAnirudh Venkataramanan 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
19046d599946STony Nguyen 			vsi->vsi_num, retcode);
19056d599946STony Nguyen 		retcode = -EIO;
19066d599946STony Nguyen 		goto out;
19076d599946STony Nguyen 	}
19086d599946STony Nguyen 
19096d599946STony Nguyen 	/* No change in link */
19106d599946STony Nguyen 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
19116d599946STony Nguyen 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
19126d599946STony Nguyen 		goto out;
19136d599946STony Nguyen 
19141a3571b5SPaul Greenwalt 	/* Use the current user PHY configuration. The current user PHY
19151a3571b5SPaul Greenwalt 	 * configuration is initialized during probe from PHY capabilities
19161a3571b5SPaul Greenwalt 	 * software mode, and updated on set PHY configuration.
19171a3571b5SPaul Greenwalt 	 */
19181a3571b5SPaul Greenwalt 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
19196d599946STony Nguyen 	if (!cfg) {
19206d599946STony Nguyen 		retcode = -ENOMEM;
19216d599946STony Nguyen 		goto out;
19226d599946STony Nguyen 	}
19236d599946STony Nguyen 
19241a3571b5SPaul Greenwalt 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
19256d599946STony Nguyen 	if (link_up)
19266d599946STony Nguyen 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
19276d599946STony Nguyen 	else
19286d599946STony Nguyen 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
19296d599946STony Nguyen 
19301a3571b5SPaul Greenwalt 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
19316d599946STony Nguyen 	if (retcode) {
19326d599946STony Nguyen 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
19336d599946STony Nguyen 			vsi->vsi_num, retcode);
19346d599946STony Nguyen 		retcode = -EIO;
19356d599946STony Nguyen 	}
19366d599946STony Nguyen 
19379efe35d0STony Nguyen 	kfree(cfg);
19386d599946STony Nguyen out:
19399efe35d0STony Nguyen 	kfree(pcaps);
19406d599946STony Nguyen 	return retcode;
19416d599946STony Nguyen }
19426d599946STony Nguyen 
19436d599946STony Nguyen /**
19441a3571b5SPaul Greenwalt  * ice_init_nvm_phy_type - Initialize the NVM PHY type
19451a3571b5SPaul Greenwalt  * @pi: port info structure
19461a3571b5SPaul Greenwalt  *
1947ea78ce4dSPaul Greenwalt  * Initialize nvm_phy_type_[low|high] for link lenient mode support
19481a3571b5SPaul Greenwalt  */
19491a3571b5SPaul Greenwalt static int ice_init_nvm_phy_type(struct ice_port_info *pi)
19501a3571b5SPaul Greenwalt {
19511a3571b5SPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *pcaps;
19521a3571b5SPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
19532ccc1c1cSTony Nguyen 	int err;
19541a3571b5SPaul Greenwalt 
19551a3571b5SPaul Greenwalt 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
19561a3571b5SPaul Greenwalt 	if (!pcaps)
19571a3571b5SPaul Greenwalt 		return -ENOMEM;
19581a3571b5SPaul Greenwalt 
19592ccc1c1cSTony Nguyen 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
19602ccc1c1cSTony Nguyen 				  pcaps, NULL);
19611a3571b5SPaul Greenwalt 
19622ccc1c1cSTony Nguyen 	if (err) {
19631a3571b5SPaul Greenwalt 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
19641a3571b5SPaul Greenwalt 		goto out;
19651a3571b5SPaul Greenwalt 	}
19661a3571b5SPaul Greenwalt 
19671a3571b5SPaul Greenwalt 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
19681a3571b5SPaul Greenwalt 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
19691a3571b5SPaul Greenwalt 
19701a3571b5SPaul Greenwalt out:
19711a3571b5SPaul Greenwalt 	kfree(pcaps);
19721a3571b5SPaul Greenwalt 	return err;
19731a3571b5SPaul Greenwalt }
19741a3571b5SPaul Greenwalt 
19751a3571b5SPaul Greenwalt /**
1976ea78ce4dSPaul Greenwalt  * ice_init_link_dflt_override - Initialize link default override
1977ea78ce4dSPaul Greenwalt  * @pi: port info structure
1978b4e813ddSBruce Allan  *
1979b4e813ddSBruce Allan  * Initialize link default override and PHY total port shutdown during probe
1980ea78ce4dSPaul Greenwalt  */
1981ea78ce4dSPaul Greenwalt static void ice_init_link_dflt_override(struct ice_port_info *pi)
1982ea78ce4dSPaul Greenwalt {
1983ea78ce4dSPaul Greenwalt 	struct ice_link_default_override_tlv *ldo;
1984ea78ce4dSPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
1985ea78ce4dSPaul Greenwalt 
1986ea78ce4dSPaul Greenwalt 	ldo = &pf->link_dflt_override;
1987b4e813ddSBruce Allan 	if (ice_get_link_default_override(ldo, pi))
1988b4e813ddSBruce Allan 		return;
1989b4e813ddSBruce Allan 
1990b4e813ddSBruce Allan 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1991b4e813ddSBruce Allan 		return;
1992b4e813ddSBruce Allan 
1993b4e813ddSBruce Allan 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1994b4e813ddSBruce Allan 	 * ethtool private flag) for ports with Port Disable bit set.
1995b4e813ddSBruce Allan 	 */
1996b4e813ddSBruce Allan 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1997b4e813ddSBruce Allan 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1998ea78ce4dSPaul Greenwalt }
1999ea78ce4dSPaul Greenwalt 
2000ea78ce4dSPaul Greenwalt /**
2001ea78ce4dSPaul Greenwalt  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2002ea78ce4dSPaul Greenwalt  * @pi: port info structure
2003ea78ce4dSPaul Greenwalt  *
20040a02944fSAnirudh Venkataramanan  * If default override is enabled, initialize the user PHY cfg speed and FEC
2005ea78ce4dSPaul Greenwalt  * settings using the default override mask from the NVM.
2006ea78ce4dSPaul Greenwalt  *
2007ea78ce4dSPaul Greenwalt  * The PHY should only be configured with the default override settings the
20087e408e07SAnirudh Venkataramanan  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2009ea78ce4dSPaul Greenwalt  * is used to indicate that the user PHY cfg default override is initialized
2010ea78ce4dSPaul Greenwalt  * and the PHY has not been configured with the default override settings. The
2011ea78ce4dSPaul Greenwalt  * state is set here, and cleared in ice_configure_phy the first time the PHY is
2012ea78ce4dSPaul Greenwalt  * configured.
20130a02944fSAnirudh Venkataramanan  *
20140a02944fSAnirudh Venkataramanan  * This function should be called only if the FW doesn't support default
20150a02944fSAnirudh Venkataramanan  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2016ea78ce4dSPaul Greenwalt  */
2017ea78ce4dSPaul Greenwalt static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2018ea78ce4dSPaul Greenwalt {
2019ea78ce4dSPaul Greenwalt 	struct ice_link_default_override_tlv *ldo;
2020ea78ce4dSPaul Greenwalt 	struct ice_aqc_set_phy_cfg_data *cfg;
2021ea78ce4dSPaul Greenwalt 	struct ice_phy_info *phy = &pi->phy;
2022ea78ce4dSPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
2023ea78ce4dSPaul Greenwalt 
2024ea78ce4dSPaul Greenwalt 	ldo = &pf->link_dflt_override;
2025ea78ce4dSPaul Greenwalt 
2026ea78ce4dSPaul Greenwalt 	/* If link default override is enabled, use to mask NVM PHY capabilities
2027ea78ce4dSPaul Greenwalt 	 * for speed and FEC default configuration.
2028ea78ce4dSPaul Greenwalt 	 */
2029ea78ce4dSPaul Greenwalt 	cfg = &phy->curr_user_phy_cfg;
2030ea78ce4dSPaul Greenwalt 
2031ea78ce4dSPaul Greenwalt 	if (ldo->phy_type_low || ldo->phy_type_high) {
2032ea78ce4dSPaul Greenwalt 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2033ea78ce4dSPaul Greenwalt 				    cpu_to_le64(ldo->phy_type_low);
2034ea78ce4dSPaul Greenwalt 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2035ea78ce4dSPaul Greenwalt 				     cpu_to_le64(ldo->phy_type_high);
2036ea78ce4dSPaul Greenwalt 	}
2037ea78ce4dSPaul Greenwalt 	cfg->link_fec_opt = ldo->fec_options;
2038ea78ce4dSPaul Greenwalt 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2039ea78ce4dSPaul Greenwalt 
20407e408e07SAnirudh Venkataramanan 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2041ea78ce4dSPaul Greenwalt }
2042ea78ce4dSPaul Greenwalt 
2043ea78ce4dSPaul Greenwalt /**
20441a3571b5SPaul Greenwalt  * ice_init_phy_user_cfg - Initialize the PHY user configuration
20451a3571b5SPaul Greenwalt  * @pi: port info structure
20461a3571b5SPaul Greenwalt  *
20471a3571b5SPaul Greenwalt  * Initialize the current user PHY configuration, speed, FEC, and FC requested
20481a3571b5SPaul Greenwalt  * mode to default. The PHY defaults are from get PHY capabilities topology
20491a3571b5SPaul Greenwalt  * with media so call when media is first available. An error is returned if
20501a3571b5SPaul Greenwalt  * called when media is not available. The PHY initialization completed state is
20511a3571b5SPaul Greenwalt  * set here.
20521a3571b5SPaul Greenwalt  *
20531a3571b5SPaul Greenwalt  * These configurations are used when setting PHY
20541a3571b5SPaul Greenwalt  * configuration. The user PHY configuration is updated on set PHY
20551a3571b5SPaul Greenwalt  * configuration. Returns 0 on success, negative on failure
20561a3571b5SPaul Greenwalt  */
20571a3571b5SPaul Greenwalt static int ice_init_phy_user_cfg(struct ice_port_info *pi)
20581a3571b5SPaul Greenwalt {
20591a3571b5SPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *pcaps;
20601a3571b5SPaul Greenwalt 	struct ice_phy_info *phy = &pi->phy;
20611a3571b5SPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
20622ccc1c1cSTony Nguyen 	int err;
20631a3571b5SPaul Greenwalt 
20641a3571b5SPaul Greenwalt 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
20651a3571b5SPaul Greenwalt 		return -EIO;
20661a3571b5SPaul Greenwalt 
20671a3571b5SPaul Greenwalt 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
20681a3571b5SPaul Greenwalt 	if (!pcaps)
20691a3571b5SPaul Greenwalt 		return -ENOMEM;
20701a3571b5SPaul Greenwalt 
20710a02944fSAnirudh Venkataramanan 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
20722ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
20730a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
20740a02944fSAnirudh Venkataramanan 	else
20752ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
20760a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
20772ccc1c1cSTony Nguyen 	if (err) {
20781a3571b5SPaul Greenwalt 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
20791a3571b5SPaul Greenwalt 		goto err_out;
20801a3571b5SPaul Greenwalt 	}
20811a3571b5SPaul Greenwalt 
2082ea78ce4dSPaul Greenwalt 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2083ea78ce4dSPaul Greenwalt 
2084ea78ce4dSPaul Greenwalt 	/* check if lenient mode is supported and enabled */
2085dc6aaa13SAnirudh Venkataramanan 	if (ice_fw_supports_link_override(pi->hw) &&
2086ea78ce4dSPaul Greenwalt 	    !(pcaps->module_compliance_enforcement &
2087ea78ce4dSPaul Greenwalt 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2088ea78ce4dSPaul Greenwalt 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2089ea78ce4dSPaul Greenwalt 
20900a02944fSAnirudh Venkataramanan 		/* if the FW supports default PHY configuration mode, then the driver
20910a02944fSAnirudh Venkataramanan 		 * does not have to apply link override settings. If not,
20920a02944fSAnirudh Venkataramanan 		 * initialize user PHY configuration with link override values
2093ea78ce4dSPaul Greenwalt 		 */
20940a02944fSAnirudh Venkataramanan 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
20950a02944fSAnirudh Venkataramanan 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2096ea78ce4dSPaul Greenwalt 			ice_init_phy_cfg_dflt_override(pi);
2097ea78ce4dSPaul Greenwalt 			goto out;
2098ea78ce4dSPaul Greenwalt 		}
2099ea78ce4dSPaul Greenwalt 	}
2100ea78ce4dSPaul Greenwalt 
21010a02944fSAnirudh Venkataramanan 	/* if link default override is not enabled, set user flow control and
21020a02944fSAnirudh Venkataramanan 	 * FEC settings based on what get_phy_caps returned
2103ea78ce4dSPaul Greenwalt 	 */
21041a3571b5SPaul Greenwalt 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
21051a3571b5SPaul Greenwalt 						      pcaps->link_fec_options);
21061a3571b5SPaul Greenwalt 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
21071a3571b5SPaul Greenwalt 
2108ea78ce4dSPaul Greenwalt out:
21091a3571b5SPaul Greenwalt 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
21107e408e07SAnirudh Venkataramanan 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
21111a3571b5SPaul Greenwalt err_out:
21121a3571b5SPaul Greenwalt 	kfree(pcaps);
21131a3571b5SPaul Greenwalt 	return err;
21141a3571b5SPaul Greenwalt }
21151a3571b5SPaul Greenwalt 
21161a3571b5SPaul Greenwalt /**
21171a3571b5SPaul Greenwalt  * ice_configure_phy - configure PHY
21181a3571b5SPaul Greenwalt  * @vsi: VSI of PHY
21191a3571b5SPaul Greenwalt  *
21201a3571b5SPaul Greenwalt  * Set the PHY configuration. If the current PHY configuration is the same as
21211a3571b5SPaul Greenwalt  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
21221a3571b5SPaul Greenwalt  * configure the based get PHY capabilities for topology with media.
21231a3571b5SPaul Greenwalt  */
21241a3571b5SPaul Greenwalt static int ice_configure_phy(struct ice_vsi *vsi)
21251a3571b5SPaul Greenwalt {
21261a3571b5SPaul Greenwalt 	struct device *dev = ice_pf_to_dev(vsi->back);
2127efc1eddbSAnirudh Venkataramanan 	struct ice_port_info *pi = vsi->port_info;
21281a3571b5SPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *pcaps;
21291a3571b5SPaul Greenwalt 	struct ice_aqc_set_phy_cfg_data *cfg;
2130efc1eddbSAnirudh Venkataramanan 	struct ice_phy_info *phy = &pi->phy;
2131efc1eddbSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
21322ccc1c1cSTony Nguyen 	int err;
21331a3571b5SPaul Greenwalt 
21341a3571b5SPaul Greenwalt 	/* Ensure we have media as we cannot configure a medialess port */
2135efc1eddbSAnirudh Venkataramanan 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
21361a3571b5SPaul Greenwalt 		return -EPERM;
21371a3571b5SPaul Greenwalt 
21381a3571b5SPaul Greenwalt 	ice_print_topo_conflict(vsi);
21391a3571b5SPaul Greenwalt 
21404fc5fbeeSAnirudh Venkataramanan 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
21414fc5fbeeSAnirudh Venkataramanan 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
21421a3571b5SPaul Greenwalt 		return -EPERM;
21431a3571b5SPaul Greenwalt 
2144efc1eddbSAnirudh Venkataramanan 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
21451a3571b5SPaul Greenwalt 		return ice_force_phys_link_state(vsi, true);
21461a3571b5SPaul Greenwalt 
21471a3571b5SPaul Greenwalt 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
21481a3571b5SPaul Greenwalt 	if (!pcaps)
21491a3571b5SPaul Greenwalt 		return -ENOMEM;
21501a3571b5SPaul Greenwalt 
21511a3571b5SPaul Greenwalt 	/* Get current PHY config */
21522ccc1c1cSTony Nguyen 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
21531a3571b5SPaul Greenwalt 				  NULL);
21542ccc1c1cSTony Nguyen 	if (err) {
21555f87ec48STony Nguyen 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
21562ccc1c1cSTony Nguyen 			vsi->vsi_num, err);
21571a3571b5SPaul Greenwalt 		goto done;
21581a3571b5SPaul Greenwalt 	}
21591a3571b5SPaul Greenwalt 
21601a3571b5SPaul Greenwalt 	/* If PHY enable link is configured and configuration has not changed,
21611a3571b5SPaul Greenwalt 	 * there's nothing to do
21621a3571b5SPaul Greenwalt 	 */
21631a3571b5SPaul Greenwalt 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2164efc1eddbSAnirudh Venkataramanan 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
21651a3571b5SPaul Greenwalt 		goto done;
21661a3571b5SPaul Greenwalt 
21671a3571b5SPaul Greenwalt 	/* Use PHY topology as baseline for configuration */
21681a3571b5SPaul Greenwalt 	memset(pcaps, 0, sizeof(*pcaps));
21690a02944fSAnirudh Venkataramanan 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
21702ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
21710a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
21720a02944fSAnirudh Venkataramanan 	else
21732ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
21740a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
21752ccc1c1cSTony Nguyen 	if (err) {
21765f87ec48STony Nguyen 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
21772ccc1c1cSTony Nguyen 			vsi->vsi_num, err);
21781a3571b5SPaul Greenwalt 		goto done;
21791a3571b5SPaul Greenwalt 	}
21801a3571b5SPaul Greenwalt 
21811a3571b5SPaul Greenwalt 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
21821a3571b5SPaul Greenwalt 	if (!cfg) {
21831a3571b5SPaul Greenwalt 		err = -ENOMEM;
21841a3571b5SPaul Greenwalt 		goto done;
21851a3571b5SPaul Greenwalt 	}
21861a3571b5SPaul Greenwalt 
2187ea78ce4dSPaul Greenwalt 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
21881a3571b5SPaul Greenwalt 
21891a3571b5SPaul Greenwalt 	/* Speed - If default override pending, use curr_user_phy_cfg set in
21901a3571b5SPaul Greenwalt 	 * ice_init_phy_user_cfg_ldo.
21911a3571b5SPaul Greenwalt 	 */
21927e408e07SAnirudh Venkataramanan 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2193ea78ce4dSPaul Greenwalt 			       vsi->back->state)) {
2194efc1eddbSAnirudh Venkataramanan 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2195efc1eddbSAnirudh Venkataramanan 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2196ea78ce4dSPaul Greenwalt 	} else {
2197ea78ce4dSPaul Greenwalt 		u64 phy_low = 0, phy_high = 0;
2198ea78ce4dSPaul Greenwalt 
2199ea78ce4dSPaul Greenwalt 		ice_update_phy_type(&phy_low, &phy_high,
2200ea78ce4dSPaul Greenwalt 				    pi->phy.curr_user_speed_req);
22011a3571b5SPaul Greenwalt 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2202ea78ce4dSPaul Greenwalt 		cfg->phy_type_high = pcaps->phy_type_high &
2203ea78ce4dSPaul Greenwalt 				     cpu_to_le64(phy_high);
2204ea78ce4dSPaul Greenwalt 	}
22051a3571b5SPaul Greenwalt 
22061a3571b5SPaul Greenwalt 	/* Can't provide what was requested; use PHY capabilities */
22071a3571b5SPaul Greenwalt 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
22081a3571b5SPaul Greenwalt 		cfg->phy_type_low = pcaps->phy_type_low;
22091a3571b5SPaul Greenwalt 		cfg->phy_type_high = pcaps->phy_type_high;
22101a3571b5SPaul Greenwalt 	}
22111a3571b5SPaul Greenwalt 
22121a3571b5SPaul Greenwalt 	/* FEC */
2213efc1eddbSAnirudh Venkataramanan 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
22141a3571b5SPaul Greenwalt 
22151a3571b5SPaul Greenwalt 	/* Can't provide what was requested; use PHY capabilities */
22161a3571b5SPaul Greenwalt 	if (cfg->link_fec_opt !=
22171a3571b5SPaul Greenwalt 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
22181a3571b5SPaul Greenwalt 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
22191a3571b5SPaul Greenwalt 		cfg->link_fec_opt = pcaps->link_fec_options;
22201a3571b5SPaul Greenwalt 	}
22211a3571b5SPaul Greenwalt 
22221a3571b5SPaul Greenwalt 	/* Flow Control - always supported; no need to check against
22231a3571b5SPaul Greenwalt 	 * capabilities
22241a3571b5SPaul Greenwalt 	 */
2225efc1eddbSAnirudh Venkataramanan 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
22261a3571b5SPaul Greenwalt 
22271a3571b5SPaul Greenwalt 	/* Enable link and link update */
22281a3571b5SPaul Greenwalt 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
22291a3571b5SPaul Greenwalt 
22302ccc1c1cSTony Nguyen 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2231c1484691STony Nguyen 	if (err)
22325f87ec48STony Nguyen 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
22332ccc1c1cSTony Nguyen 			vsi->vsi_num, err);
22341a3571b5SPaul Greenwalt 
22351a3571b5SPaul Greenwalt 	kfree(cfg);
22361a3571b5SPaul Greenwalt done:
22371a3571b5SPaul Greenwalt 	kfree(pcaps);
22381a3571b5SPaul Greenwalt 	return err;
22391a3571b5SPaul Greenwalt }
22401a3571b5SPaul Greenwalt 
22411a3571b5SPaul Greenwalt /**
22421a3571b5SPaul Greenwalt  * ice_check_media_subtask - Check for media
22436d599946STony Nguyen  * @pf: pointer to PF struct
22441a3571b5SPaul Greenwalt  *
22451a3571b5SPaul Greenwalt  * If media is available, then initialize PHY user configuration if it is not
22461a3571b5SPaul Greenwalt  * been, and configure the PHY if the interface is up.
22476d599946STony Nguyen  */
22486d599946STony Nguyen static void ice_check_media_subtask(struct ice_pf *pf)
22496d599946STony Nguyen {
22506d599946STony Nguyen 	struct ice_port_info *pi;
22516d599946STony Nguyen 	struct ice_vsi *vsi;
22526d599946STony Nguyen 	int err;
22536d599946STony Nguyen 
22541a3571b5SPaul Greenwalt 	/* No need to check for media if it's already present */
22551a3571b5SPaul Greenwalt 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
22566d599946STony Nguyen 		return;
22576d599946STony Nguyen 
22581a3571b5SPaul Greenwalt 	vsi = ice_get_main_vsi(pf);
22591a3571b5SPaul Greenwalt 	if (!vsi)
22606d599946STony Nguyen 		return;
22616d599946STony Nguyen 
22626d599946STony Nguyen 	/* Refresh link info and check if media is present */
22636d599946STony Nguyen 	pi = vsi->port_info;
22646d599946STony Nguyen 	err = ice_update_link_info(pi);
22656d599946STony Nguyen 	if (err)
22666d599946STony Nguyen 		return;
22676d599946STony Nguyen 
226899d40752SBrett Creeley 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2269c77849f5SAnirudh Venkataramanan 
22706d599946STony Nguyen 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
22717e408e07SAnirudh Venkataramanan 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
22721a3571b5SPaul Greenwalt 			ice_init_phy_user_cfg(pi);
22731a3571b5SPaul Greenwalt 
22741a3571b5SPaul Greenwalt 		/* PHY settings are reset on media insertion, reconfigure
22751a3571b5SPaul Greenwalt 		 * PHY to preserve settings.
22761a3571b5SPaul Greenwalt 		 */
2277e97fb1aeSAnirudh Venkataramanan 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
22781a3571b5SPaul Greenwalt 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
22796d599946STony Nguyen 			return;
22801a3571b5SPaul Greenwalt 
22811a3571b5SPaul Greenwalt 		err = ice_configure_phy(vsi);
22821a3571b5SPaul Greenwalt 		if (!err)
22836d599946STony Nguyen 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
22846d599946STony Nguyen 
22856d599946STony Nguyen 		/* A Link Status Event will be generated; the event handler
22866d599946STony Nguyen 		 * will complete bringing the interface up
22876d599946STony Nguyen 		 */
22886d599946STony Nguyen 	}
22896d599946STony Nguyen }
22906d599946STony Nguyen 
22916d599946STony Nguyen /**
2292940b61afSAnirudh Venkataramanan  * ice_service_task - manage and run subtasks
2293940b61afSAnirudh Venkataramanan  * @work: pointer to work_struct contained by the PF struct
2294940b61afSAnirudh Venkataramanan  */
2295940b61afSAnirudh Venkataramanan static void ice_service_task(struct work_struct *work)
2296940b61afSAnirudh Venkataramanan {
2297940b61afSAnirudh Venkataramanan 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2298940b61afSAnirudh Venkataramanan 	unsigned long start_time = jiffies;
2299940b61afSAnirudh Venkataramanan 
2300940b61afSAnirudh Venkataramanan 	/* subtasks */
23010b28b702SAnirudh Venkataramanan 
23020b28b702SAnirudh Venkataramanan 	/* process reset requests first */
23030b28b702SAnirudh Venkataramanan 	ice_reset_subtask(pf);
23040b28b702SAnirudh Venkataramanan 
23050f9d5027SAnirudh Venkataramanan 	/* bail if a reset/recovery cycle is pending or rebuild failed */
23065df7e45dSDave Ertman 	if (ice_is_reset_in_progress(pf->state) ||
23077e408e07SAnirudh Venkataramanan 	    test_bit(ICE_SUSPENDED, pf->state) ||
23087e408e07SAnirudh Venkataramanan 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
23090b28b702SAnirudh Venkataramanan 		ice_service_task_complete(pf);
23100b28b702SAnirudh Venkataramanan 		return;
23110b28b702SAnirudh Venkataramanan 	}
23120b28b702SAnirudh Venkataramanan 
231332d53c0aSAlexander Lobakin 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
231432d53c0aSAlexander Lobakin 		struct iidc_event *event;
231532d53c0aSAlexander Lobakin 
231632d53c0aSAlexander Lobakin 		event = kzalloc(sizeof(*event), GFP_KERNEL);
231732d53c0aSAlexander Lobakin 		if (event) {
231832d53c0aSAlexander Lobakin 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
231932d53c0aSAlexander Lobakin 			/* report the entire OICR value to AUX driver */
232032d53c0aSAlexander Lobakin 			swap(event->reg, pf->oicr_err_reg);
232132d53c0aSAlexander Lobakin 			ice_send_event_to_aux(pf, event);
232232d53c0aSAlexander Lobakin 			kfree(event);
232332d53c0aSAlexander Lobakin 		}
232432d53c0aSAlexander Lobakin 	}
232532d53c0aSAlexander Lobakin 
2326248401cbSDave Ertman 	/* unplug aux dev per request, if an unplug request came in
2327248401cbSDave Ertman 	 * while processing a plug request, this will handle it
23285cb1ebdbSIvan Vecera 	 */
2329248401cbSDave Ertman 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
23305cb1ebdbSIvan Vecera 		ice_unplug_aux_dev(pf);
2331248401cbSDave Ertman 
2332248401cbSDave Ertman 	/* Plug aux device per request */
2333248401cbSDave Ertman 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2334248401cbSDave Ertman 		ice_plug_aux_dev(pf);
23355cb1ebdbSIvan Vecera 
233697b01291SDave Ertman 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
233797b01291SDave Ertman 		struct iidc_event *event;
233897b01291SDave Ertman 
233997b01291SDave Ertman 		event = kzalloc(sizeof(*event), GFP_KERNEL);
234097b01291SDave Ertman 		if (event) {
234197b01291SDave Ertman 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
234297b01291SDave Ertman 			ice_send_event_to_aux(pf, event);
234397b01291SDave Ertman 			kfree(event);
234497b01291SDave Ertman 		}
234597b01291SDave Ertman 	}
234697b01291SDave Ertman 
2347462acf6aSTony Nguyen 	ice_clean_adminq_subtask(pf);
23486d599946STony Nguyen 	ice_check_media_subtask(pf);
2349b3969fd7SSudheer Mogilappagari 	ice_check_for_hang_subtask(pf);
2350e94d4478SAnirudh Venkataramanan 	ice_sync_fltr_subtask(pf);
2351b3969fd7SSudheer Mogilappagari 	ice_handle_mdd_event(pf);
2352fcea6f3dSAnirudh Venkataramanan 	ice_watchdog_subtask(pf);
2353462acf6aSTony Nguyen 
2354462acf6aSTony Nguyen 	if (ice_is_safe_mode(pf)) {
2355462acf6aSTony Nguyen 		ice_service_task_complete(pf);
2356462acf6aSTony Nguyen 		return;
2357462acf6aSTony Nguyen 	}
2358462acf6aSTony Nguyen 
2359462acf6aSTony Nguyen 	ice_process_vflr_event(pf);
236075d2b253SAnirudh Venkataramanan 	ice_clean_mailboxq_subtask(pf);
23618f5ee3c4SJacob Keller 	ice_clean_sbq_subtask(pf);
236228bf2672SBrett Creeley 	ice_sync_arfs_fltrs(pf);
2363d6218317SQi Zhang 	ice_flush_fdir_ctx(pf);
23647e408e07SAnirudh Venkataramanan 
23657e408e07SAnirudh Venkataramanan 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2366940b61afSAnirudh Venkataramanan 	ice_service_task_complete(pf);
2367940b61afSAnirudh Venkataramanan 
2368940b61afSAnirudh Venkataramanan 	/* If the tasks have taken longer than one service timer period
2369940b61afSAnirudh Venkataramanan 	 * or there is more work to be done, reset the service timer to
2370940b61afSAnirudh Venkataramanan 	 * schedule the service task now.
2371940b61afSAnirudh Venkataramanan 	 */
2372940b61afSAnirudh Venkataramanan 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
23737e408e07SAnirudh Venkataramanan 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
23747e408e07SAnirudh Venkataramanan 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
23757e408e07SAnirudh Venkataramanan 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
23767e408e07SAnirudh Venkataramanan 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
23778f5ee3c4SJacob Keller 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
23787e408e07SAnirudh Venkataramanan 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2379940b61afSAnirudh Venkataramanan 		mod_timer(&pf->serv_tmr, jiffies);
2380940b61afSAnirudh Venkataramanan }
2381940b61afSAnirudh Venkataramanan 
2382837f08fdSAnirudh Venkataramanan /**
2383f31e4b6fSAnirudh Venkataramanan  * ice_set_ctrlq_len - helper function to set controlq length
2384f9867df6SAnirudh Venkataramanan  * @hw: pointer to the HW instance
2385f31e4b6fSAnirudh Venkataramanan  */
2386f31e4b6fSAnirudh Venkataramanan static void ice_set_ctrlq_len(struct ice_hw *hw)
2387f31e4b6fSAnirudh Venkataramanan {
2388f31e4b6fSAnirudh Venkataramanan 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2389f31e4b6fSAnirudh Venkataramanan 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2390f31e4b6fSAnirudh Venkataramanan 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2391f31e4b6fSAnirudh Venkataramanan 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2392c8a1071dSLukasz Czapnik 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
239311836214SBrett Creeley 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
239475d2b253SAnirudh Venkataramanan 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
239575d2b253SAnirudh Venkataramanan 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
23968f5ee3c4SJacob Keller 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
23978f5ee3c4SJacob Keller 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
23988f5ee3c4SJacob Keller 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
23998f5ee3c4SJacob Keller 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2400f31e4b6fSAnirudh Venkataramanan }
2401f31e4b6fSAnirudh Venkataramanan 
2402f31e4b6fSAnirudh Venkataramanan /**
240387324e74SHenry Tieman  * ice_schedule_reset - schedule a reset
240487324e74SHenry Tieman  * @pf: board private structure
240587324e74SHenry Tieman  * @reset: reset being requested
240687324e74SHenry Tieman  */
240787324e74SHenry Tieman int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
240887324e74SHenry Tieman {
240987324e74SHenry Tieman 	struct device *dev = ice_pf_to_dev(pf);
241087324e74SHenry Tieman 
241187324e74SHenry Tieman 	/* bail out if earlier reset has failed */
24127e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
241387324e74SHenry Tieman 		dev_dbg(dev, "earlier reset has failed\n");
241487324e74SHenry Tieman 		return -EIO;
241587324e74SHenry Tieman 	}
241687324e74SHenry Tieman 	/* bail if reset/recovery already in progress */
241787324e74SHenry Tieman 	if (ice_is_reset_in_progress(pf->state)) {
241887324e74SHenry Tieman 		dev_dbg(dev, "Reset already in progress\n");
241987324e74SHenry Tieman 		return -EBUSY;
242087324e74SHenry Tieman 	}
242187324e74SHenry Tieman 
242287324e74SHenry Tieman 	switch (reset) {
242387324e74SHenry Tieman 	case ICE_RESET_PFR:
24247e408e07SAnirudh Venkataramanan 		set_bit(ICE_PFR_REQ, pf->state);
242587324e74SHenry Tieman 		break;
242687324e74SHenry Tieman 	case ICE_RESET_CORER:
24277e408e07SAnirudh Venkataramanan 		set_bit(ICE_CORER_REQ, pf->state);
242887324e74SHenry Tieman 		break;
242987324e74SHenry Tieman 	case ICE_RESET_GLOBR:
24307e408e07SAnirudh Venkataramanan 		set_bit(ICE_GLOBR_REQ, pf->state);
243187324e74SHenry Tieman 		break;
243287324e74SHenry Tieman 	default:
243387324e74SHenry Tieman 		return -EINVAL;
243487324e74SHenry Tieman 	}
243587324e74SHenry Tieman 
243687324e74SHenry Tieman 	ice_service_task_schedule(pf);
243787324e74SHenry Tieman 	return 0;
243887324e74SHenry Tieman }
243987324e74SHenry Tieman 
244087324e74SHenry Tieman /**
2441cdedef59SAnirudh Venkataramanan  * ice_irq_affinity_notify - Callback for affinity changes
2442cdedef59SAnirudh Venkataramanan  * @notify: context as to what irq was changed
2443cdedef59SAnirudh Venkataramanan  * @mask: the new affinity mask
2444cdedef59SAnirudh Venkataramanan  *
2445cdedef59SAnirudh Venkataramanan  * This is a callback function used by the irq_set_affinity_notifier function
2446cdedef59SAnirudh Venkataramanan  * so that we may register to receive changes to the irq affinity masks.
2447cdedef59SAnirudh Venkataramanan  */
2448c8b7abddSBruce Allan static void
2449c8b7abddSBruce Allan ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2450cdedef59SAnirudh Venkataramanan 			const cpumask_t *mask)
2451cdedef59SAnirudh Venkataramanan {
2452cdedef59SAnirudh Venkataramanan 	struct ice_q_vector *q_vector =
2453cdedef59SAnirudh Venkataramanan 		container_of(notify, struct ice_q_vector, affinity_notify);
2454cdedef59SAnirudh Venkataramanan 
2455cdedef59SAnirudh Venkataramanan 	cpumask_copy(&q_vector->affinity_mask, mask);
2456cdedef59SAnirudh Venkataramanan }
2457cdedef59SAnirudh Venkataramanan 
/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 *
 * Intentionally empty: the irq_affinity_notify struct is embedded in the
 * ice_q_vector (see ice_irq_affinity_notify), so no separate teardown is
 * required here.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2467cdedef59SAnirudh Venkataramanan 
2468cdedef59SAnirudh Venkataramanan /**
2469cdedef59SAnirudh Venkataramanan  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2470cdedef59SAnirudh Venkataramanan  * @vsi: the VSI being configured
2471cdedef59SAnirudh Venkataramanan  */
2472cdedef59SAnirudh Venkataramanan static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2473cdedef59SAnirudh Venkataramanan {
2474ba880734SBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
2475cdedef59SAnirudh Venkataramanan 	int i;
2476cdedef59SAnirudh Venkataramanan 
24770c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, i)
2478cdedef59SAnirudh Venkataramanan 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2479cdedef59SAnirudh Venkataramanan 
2480cdedef59SAnirudh Venkataramanan 	ice_flush(hw);
2481cdedef59SAnirudh Venkataramanan 	return 0;
2482cdedef59SAnirudh Venkataramanan }
2483cdedef59SAnirudh Venkataramanan 
2484cdedef59SAnirudh Venkataramanan /**
2485cdedef59SAnirudh Venkataramanan  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2486cdedef59SAnirudh Venkataramanan  * @vsi: the VSI being configured
2487cdedef59SAnirudh Venkataramanan  * @basename: name for the vector
2488cdedef59SAnirudh Venkataramanan  */
2489cdedef59SAnirudh Venkataramanan static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2490cdedef59SAnirudh Venkataramanan {
2491cdedef59SAnirudh Venkataramanan 	int q_vectors = vsi->num_q_vectors;
2492cdedef59SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
2493cbe66bfeSBrett Creeley 	int base = vsi->base_vector;
24944015d11eSBrett Creeley 	struct device *dev;
2495cdedef59SAnirudh Venkataramanan 	int rx_int_idx = 0;
2496cdedef59SAnirudh Venkataramanan 	int tx_int_idx = 0;
2497cdedef59SAnirudh Venkataramanan 	int vector, err;
2498cdedef59SAnirudh Venkataramanan 	int irq_num;
2499cdedef59SAnirudh Venkataramanan 
25004015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
2501cdedef59SAnirudh Venkataramanan 	for (vector = 0; vector < q_vectors; vector++) {
2502cdedef59SAnirudh Venkataramanan 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2503cdedef59SAnirudh Venkataramanan 
2504cdedef59SAnirudh Venkataramanan 		irq_num = pf->msix_entries[base + vector].vector;
2505cdedef59SAnirudh Venkataramanan 
2506e72bba21SMaciej Fijalkowski 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2507cdedef59SAnirudh Venkataramanan 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2508cdedef59SAnirudh Venkataramanan 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2509cdedef59SAnirudh Venkataramanan 			tx_int_idx++;
2510e72bba21SMaciej Fijalkowski 		} else if (q_vector->rx.rx_ring) {
2511cdedef59SAnirudh Venkataramanan 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2512cdedef59SAnirudh Venkataramanan 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2513e72bba21SMaciej Fijalkowski 		} else if (q_vector->tx.tx_ring) {
2514cdedef59SAnirudh Venkataramanan 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2515cdedef59SAnirudh Venkataramanan 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2516cdedef59SAnirudh Venkataramanan 		} else {
2517cdedef59SAnirudh Venkataramanan 			/* skip this unused q_vector */
2518cdedef59SAnirudh Venkataramanan 			continue;
2519cdedef59SAnirudh Venkataramanan 		}
2520b03d519dSJacob Keller 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2521da62c5ffSQi Zhang 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2522da62c5ffSQi Zhang 					       IRQF_SHARED, q_vector->name,
2523da62c5ffSQi Zhang 					       q_vector);
2524da62c5ffSQi Zhang 		else
2525da62c5ffSQi Zhang 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2526da62c5ffSQi Zhang 					       0, q_vector->name, q_vector);
2527cdedef59SAnirudh Venkataramanan 		if (err) {
252819cce2c6SAnirudh Venkataramanan 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
252919cce2c6SAnirudh Venkataramanan 				   err);
2530cdedef59SAnirudh Venkataramanan 			goto free_q_irqs;
2531cdedef59SAnirudh Venkataramanan 		}
2532cdedef59SAnirudh Venkataramanan 
2533cdedef59SAnirudh Venkataramanan 		/* register for affinity change notifications */
253428bf2672SBrett Creeley 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
253528bf2672SBrett Creeley 			struct irq_affinity_notify *affinity_notify;
253628bf2672SBrett Creeley 
253728bf2672SBrett Creeley 			affinity_notify = &q_vector->affinity_notify;
253828bf2672SBrett Creeley 			affinity_notify->notify = ice_irq_affinity_notify;
253928bf2672SBrett Creeley 			affinity_notify->release = ice_irq_affinity_release;
254028bf2672SBrett Creeley 			irq_set_affinity_notifier(irq_num, affinity_notify);
254128bf2672SBrett Creeley 		}
2542cdedef59SAnirudh Venkataramanan 
2543cdedef59SAnirudh Venkataramanan 		/* assign the mask for this irq */
2544cdedef59SAnirudh Venkataramanan 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2545cdedef59SAnirudh Venkataramanan 	}
2546cdedef59SAnirudh Venkataramanan 
2547d7442f51SAlexander Lobakin 	err = ice_set_cpu_rx_rmap(vsi);
2548d7442f51SAlexander Lobakin 	if (err) {
2549d7442f51SAlexander Lobakin 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2550d7442f51SAlexander Lobakin 			   vsi->vsi_num, ERR_PTR(err));
2551d7442f51SAlexander Lobakin 		goto free_q_irqs;
2552d7442f51SAlexander Lobakin 	}
2553d7442f51SAlexander Lobakin 
2554cdedef59SAnirudh Venkataramanan 	vsi->irqs_ready = true;
2555cdedef59SAnirudh Venkataramanan 	return 0;
2556cdedef59SAnirudh Venkataramanan 
2557cdedef59SAnirudh Venkataramanan free_q_irqs:
2558cdedef59SAnirudh Venkataramanan 	while (vector) {
2559cdedef59SAnirudh Venkataramanan 		vector--;
256028bf2672SBrett Creeley 		irq_num = pf->msix_entries[base + vector].vector;
256128bf2672SBrett Creeley 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2562cdedef59SAnirudh Venkataramanan 			irq_set_affinity_notifier(irq_num, NULL);
2563cdedef59SAnirudh Venkataramanan 		irq_set_affinity_hint(irq_num, NULL);
25644015d11eSBrett Creeley 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2565cdedef59SAnirudh Venkataramanan 	}
2566cdedef59SAnirudh Venkataramanan 	return err;
2567cdedef59SAnirudh Venkataramanan }
2568cdedef59SAnirudh Venkataramanan 
2569cdedef59SAnirudh Venkataramanan /**
2570efc2214bSMaciej Fijalkowski  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2571efc2214bSMaciej Fijalkowski  * @vsi: VSI to setup Tx rings used by XDP
2572efc2214bSMaciej Fijalkowski  *
2573efc2214bSMaciej Fijalkowski  * Return 0 on success and negative value on error
2574efc2214bSMaciej Fijalkowski  */
2575efc2214bSMaciej Fijalkowski static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2576efc2214bSMaciej Fijalkowski {
25779a946843SAnirudh Venkataramanan 	struct device *dev = ice_pf_to_dev(vsi->back);
25789610bd98SMaciej Fijalkowski 	struct ice_tx_desc *tx_desc;
25799610bd98SMaciej Fijalkowski 	int i, j;
2580efc2214bSMaciej Fijalkowski 
25812faf63b6SMaciej Fijalkowski 	ice_for_each_xdp_txq(vsi, i) {
2582efc2214bSMaciej Fijalkowski 		u16 xdp_q_idx = vsi->alloc_txq + i;
2583288ecf49SBenjamin Mikailenko 		struct ice_ring_stats *ring_stats;
2584e72bba21SMaciej Fijalkowski 		struct ice_tx_ring *xdp_ring;
2585efc2214bSMaciej Fijalkowski 
2586efc2214bSMaciej Fijalkowski 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2587efc2214bSMaciej Fijalkowski 		if (!xdp_ring)
2588efc2214bSMaciej Fijalkowski 			goto free_xdp_rings;
2589efc2214bSMaciej Fijalkowski 
2590288ecf49SBenjamin Mikailenko 		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2591288ecf49SBenjamin Mikailenko 		if (!ring_stats) {
2592288ecf49SBenjamin Mikailenko 			ice_free_tx_ring(xdp_ring);
2593288ecf49SBenjamin Mikailenko 			goto free_xdp_rings;
2594288ecf49SBenjamin Mikailenko 		}
2595288ecf49SBenjamin Mikailenko 
2596288ecf49SBenjamin Mikailenko 		xdp_ring->ring_stats = ring_stats;
2597efc2214bSMaciej Fijalkowski 		xdp_ring->q_index = xdp_q_idx;
2598efc2214bSMaciej Fijalkowski 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2599efc2214bSMaciej Fijalkowski 		xdp_ring->vsi = vsi;
2600efc2214bSMaciej Fijalkowski 		xdp_ring->netdev = NULL;
2601efc2214bSMaciej Fijalkowski 		xdp_ring->dev = dev;
2602efc2214bSMaciej Fijalkowski 		xdp_ring->count = vsi->num_tx_desc;
2603b1d95cc2SCiara Loftus 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2604efc2214bSMaciej Fijalkowski 		if (ice_setup_tx_ring(xdp_ring))
2605efc2214bSMaciej Fijalkowski 			goto free_xdp_rings;
2606efc2214bSMaciej Fijalkowski 		ice_set_ring_xdp(xdp_ring);
260722bf877eSMaciej Fijalkowski 		spin_lock_init(&xdp_ring->tx_lock);
26089610bd98SMaciej Fijalkowski 		for (j = 0; j < xdp_ring->count; j++) {
26099610bd98SMaciej Fijalkowski 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2610e19778e6SMaciej Fijalkowski 			tx_desc->cmd_type_offset_bsz = 0;
26119610bd98SMaciej Fijalkowski 		}
2612efc2214bSMaciej Fijalkowski 	}
2613efc2214bSMaciej Fijalkowski 
2614efc2214bSMaciej Fijalkowski 	return 0;
2615efc2214bSMaciej Fijalkowski 
2616efc2214bSMaciej Fijalkowski free_xdp_rings:
2617288ecf49SBenjamin Mikailenko 	for (; i >= 0; i--) {
2618288ecf49SBenjamin Mikailenko 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2619288ecf49SBenjamin Mikailenko 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2620288ecf49SBenjamin Mikailenko 			vsi->xdp_rings[i]->ring_stats = NULL;
2621efc2214bSMaciej Fijalkowski 			ice_free_tx_ring(vsi->xdp_rings[i]);
2622288ecf49SBenjamin Mikailenko 		}
2623288ecf49SBenjamin Mikailenko 	}
2624efc2214bSMaciej Fijalkowski 	return -ENOMEM;
2625efc2214bSMaciej Fijalkowski }
2626efc2214bSMaciej Fijalkowski 
2627efc2214bSMaciej Fijalkowski /**
2628efc2214bSMaciej Fijalkowski  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2629efc2214bSMaciej Fijalkowski  * @vsi: VSI to set the bpf prog on
2630efc2214bSMaciej Fijalkowski  * @prog: the bpf prog pointer
2631efc2214bSMaciej Fijalkowski  */
2632efc2214bSMaciej Fijalkowski static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2633efc2214bSMaciej Fijalkowski {
2634efc2214bSMaciej Fijalkowski 	struct bpf_prog *old_prog;
2635efc2214bSMaciej Fijalkowski 	int i;
2636efc2214bSMaciej Fijalkowski 
2637efc2214bSMaciej Fijalkowski 	old_prog = xchg(&vsi->xdp_prog, prog);
2638efc2214bSMaciej Fijalkowski 	if (old_prog)
2639efc2214bSMaciej Fijalkowski 		bpf_prog_put(old_prog);
2640efc2214bSMaciej Fijalkowski 
2641efc2214bSMaciej Fijalkowski 	ice_for_each_rxq(vsi, i)
2642efc2214bSMaciej Fijalkowski 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2643efc2214bSMaciej Fijalkowski }
2644efc2214bSMaciej Fijalkowski 
2645efc2214bSMaciej Fijalkowski /**
2646efc2214bSMaciej Fijalkowski  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2647efc2214bSMaciej Fijalkowski  * @vsi: VSI to bring up Tx rings used by XDP
2648efc2214bSMaciej Fijalkowski  * @prog: bpf program that will be assigned to VSI
2649efc2214bSMaciej Fijalkowski  *
2650efc2214bSMaciej Fijalkowski  * Return 0 on success and negative value on error
2651efc2214bSMaciej Fijalkowski  */
2652efc2214bSMaciej Fijalkowski int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2653efc2214bSMaciej Fijalkowski {
2654efc2214bSMaciej Fijalkowski 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2655efc2214bSMaciej Fijalkowski 	int xdp_rings_rem = vsi->num_xdp_txq;
2656efc2214bSMaciej Fijalkowski 	struct ice_pf *pf = vsi->back;
2657efc2214bSMaciej Fijalkowski 	struct ice_qs_cfg xdp_qs_cfg = {
2658efc2214bSMaciej Fijalkowski 		.qs_mutex = &pf->avail_q_mutex,
2659efc2214bSMaciej Fijalkowski 		.pf_map = pf->avail_txqs,
2660efc2214bSMaciej Fijalkowski 		.pf_map_size = pf->max_pf_txqs,
2661efc2214bSMaciej Fijalkowski 		.q_count = vsi->num_xdp_txq,
2662efc2214bSMaciej Fijalkowski 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2663efc2214bSMaciej Fijalkowski 		.vsi_map = vsi->txq_map,
2664efc2214bSMaciej Fijalkowski 		.vsi_map_offset = vsi->alloc_txq,
2665efc2214bSMaciej Fijalkowski 		.mapping_mode = ICE_VSI_MAP_CONTIG
2666efc2214bSMaciej Fijalkowski 	};
26674015d11eSBrett Creeley 	struct device *dev;
2668efc2214bSMaciej Fijalkowski 	int i, v_idx;
26695518ac2aSTony Nguyen 	int status;
2670efc2214bSMaciej Fijalkowski 
26714015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
26724015d11eSBrett Creeley 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2673efc2214bSMaciej Fijalkowski 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2674efc2214bSMaciej Fijalkowski 	if (!vsi->xdp_rings)
2675efc2214bSMaciej Fijalkowski 		return -ENOMEM;
2676efc2214bSMaciej Fijalkowski 
2677efc2214bSMaciej Fijalkowski 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2678efc2214bSMaciej Fijalkowski 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2679efc2214bSMaciej Fijalkowski 		goto err_map_xdp;
2680efc2214bSMaciej Fijalkowski 
268122bf877eSMaciej Fijalkowski 	if (static_key_enabled(&ice_xdp_locking_key))
268222bf877eSMaciej Fijalkowski 		netdev_warn(vsi->netdev,
268322bf877eSMaciej Fijalkowski 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
268422bf877eSMaciej Fijalkowski 
2685efc2214bSMaciej Fijalkowski 	if (ice_xdp_alloc_setup_rings(vsi))
2686efc2214bSMaciej Fijalkowski 		goto clear_xdp_rings;
2687efc2214bSMaciej Fijalkowski 
2688efc2214bSMaciej Fijalkowski 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2689efc2214bSMaciej Fijalkowski 	ice_for_each_q_vector(vsi, v_idx) {
2690efc2214bSMaciej Fijalkowski 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2691efc2214bSMaciej Fijalkowski 		int xdp_rings_per_v, q_id, q_base;
2692efc2214bSMaciej Fijalkowski 
2693efc2214bSMaciej Fijalkowski 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2694efc2214bSMaciej Fijalkowski 					       vsi->num_q_vectors - v_idx);
2695efc2214bSMaciej Fijalkowski 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2696efc2214bSMaciej Fijalkowski 
2697efc2214bSMaciej Fijalkowski 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2698e72bba21SMaciej Fijalkowski 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2699efc2214bSMaciej Fijalkowski 
2700efc2214bSMaciej Fijalkowski 			xdp_ring->q_vector = q_vector;
2701e72bba21SMaciej Fijalkowski 			xdp_ring->next = q_vector->tx.tx_ring;
2702e72bba21SMaciej Fijalkowski 			q_vector->tx.tx_ring = xdp_ring;
2703efc2214bSMaciej Fijalkowski 		}
2704efc2214bSMaciej Fijalkowski 		xdp_rings_rem -= xdp_rings_per_v;
2705efc2214bSMaciej Fijalkowski 	}
2706efc2214bSMaciej Fijalkowski 
27079ead7e74SMaciej Fijalkowski 	ice_for_each_rxq(vsi, i) {
27089ead7e74SMaciej Fijalkowski 		if (static_key_enabled(&ice_xdp_locking_key)) {
27099ead7e74SMaciej Fijalkowski 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
27109ead7e74SMaciej Fijalkowski 		} else {
27119ead7e74SMaciej Fijalkowski 			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
27129ead7e74SMaciej Fijalkowski 			struct ice_tx_ring *ring;
27139ead7e74SMaciej Fijalkowski 
27149ead7e74SMaciej Fijalkowski 			ice_for_each_tx_ring(ring, q_vector->tx) {
27159ead7e74SMaciej Fijalkowski 				if (ice_ring_is_xdp(ring)) {
27169ead7e74SMaciej Fijalkowski 					vsi->rx_rings[i]->xdp_ring = ring;
27179ead7e74SMaciej Fijalkowski 					break;
27189ead7e74SMaciej Fijalkowski 				}
27199ead7e74SMaciej Fijalkowski 			}
27209ead7e74SMaciej Fijalkowski 		}
27219ead7e74SMaciej Fijalkowski 		ice_tx_xsk_pool(vsi, i);
27229ead7e74SMaciej Fijalkowski 	}
27239ead7e74SMaciej Fijalkowski 
2724efc2214bSMaciej Fijalkowski 	/* omit the scheduler update if in reset path; XDP queues will be
2725efc2214bSMaciej Fijalkowski 	 * taken into account at the end of ice_vsi_rebuild, where
2726efc2214bSMaciej Fijalkowski 	 * ice_cfg_vsi_lan is being called
2727efc2214bSMaciej Fijalkowski 	 */
2728efc2214bSMaciej Fijalkowski 	if (ice_is_reset_in_progress(pf->state))
2729efc2214bSMaciej Fijalkowski 		return 0;
2730efc2214bSMaciej Fijalkowski 
2731efc2214bSMaciej Fijalkowski 	/* tell the Tx scheduler that right now we have
2732efc2214bSMaciej Fijalkowski 	 * additional queues
2733efc2214bSMaciej Fijalkowski 	 */
2734efc2214bSMaciej Fijalkowski 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2735efc2214bSMaciej Fijalkowski 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2736efc2214bSMaciej Fijalkowski 
2737efc2214bSMaciej Fijalkowski 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2738efc2214bSMaciej Fijalkowski 				 max_txqs);
2739efc2214bSMaciej Fijalkowski 	if (status) {
27405f87ec48STony Nguyen 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
27415f87ec48STony Nguyen 			status);
2742efc2214bSMaciej Fijalkowski 		goto clear_xdp_rings;
2743efc2214bSMaciej Fijalkowski 	}
2744f65ee535SMarta Plantykow 
2745f65ee535SMarta Plantykow 	/* assign the prog only when it's not already present on VSI;
2746f65ee535SMarta Plantykow 	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2747f65ee535SMarta Plantykow 	 * VSI rebuild that happens under ethtool -L can expose us to
2748f65ee535SMarta Plantykow 	 * the bpf_prog refcount issues as we would be swapping same
2749f65ee535SMarta Plantykow 	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2750f65ee535SMarta Plantykow 	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2751f65ee535SMarta Plantykow 	 * this is not harmful as dev_xdp_install bumps the refcount
2752f65ee535SMarta Plantykow 	 * before calling the op exposed by the driver;
2753f65ee535SMarta Plantykow 	 */
2754f65ee535SMarta Plantykow 	if (!ice_is_xdp_ena_vsi(vsi))
2755efc2214bSMaciej Fijalkowski 		ice_vsi_assign_bpf_prog(vsi, prog);
2756efc2214bSMaciej Fijalkowski 
2757efc2214bSMaciej Fijalkowski 	return 0;
2758efc2214bSMaciej Fijalkowski clear_xdp_rings:
27592faf63b6SMaciej Fijalkowski 	ice_for_each_xdp_txq(vsi, i)
2760efc2214bSMaciej Fijalkowski 		if (vsi->xdp_rings[i]) {
2761efc2214bSMaciej Fijalkowski 			kfree_rcu(vsi->xdp_rings[i], rcu);
2762efc2214bSMaciej Fijalkowski 			vsi->xdp_rings[i] = NULL;
2763efc2214bSMaciej Fijalkowski 		}
2764efc2214bSMaciej Fijalkowski 
2765efc2214bSMaciej Fijalkowski err_map_xdp:
2766efc2214bSMaciej Fijalkowski 	mutex_lock(&pf->avail_q_mutex);
27672faf63b6SMaciej Fijalkowski 	ice_for_each_xdp_txq(vsi, i) {
2768efc2214bSMaciej Fijalkowski 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2769efc2214bSMaciej Fijalkowski 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2770efc2214bSMaciej Fijalkowski 	}
2771efc2214bSMaciej Fijalkowski 	mutex_unlock(&pf->avail_q_mutex);
2772efc2214bSMaciej Fijalkowski 
27734015d11eSBrett Creeley 	devm_kfree(dev, vsi->xdp_rings);
2774efc2214bSMaciej Fijalkowski 	return -ENOMEM;
2775efc2214bSMaciej Fijalkowski }
2776efc2214bSMaciej Fijalkowski 
/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 *
 * Return: 0 when running in the reset path (scheduler reconfig is deferred
 * to the rebuild flow); otherwise the result of ice_cfg_vsi_lan() after
 * restoring the pre-XDP Tx queue counts.
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings; in case of rebuild being triggered not from reset bits
	 * in pf->state won't be set, so additionally check first q_vector
	 * against NULL
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_tx_ring *ring;

		/* XDP rings were pushed onto the head of the q_vector's Tx
		 * ring list at setup time; skip past them to the first
		 * non-XDP ring (ring is NULL if every ring was XDP)
		 */
		ice_for_each_tx_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.tx_ring = ring;
	}

free_qmap:
	/* return the Tx queue indices consumed by XDP to the PF-wide pool */
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			/* only a ring with allocated descriptors needs the
			 * RCU grace period before ice_free_tx_ring()
			 */
			if (vsi->xdp_rings[i]->desc) {
				synchronize_rcu();
				ice_free_tx_ring(vsi->xdp_rings[i]);
			}
			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
			vsi->xdp_rings[i]->ring_stats = NULL;
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	if (static_key_enabled(&ice_xdp_locking_key))
		static_branch_dec(&ice_xdp_locking_key);

	/* in the reset path (or without q_vectors) the Tx scheduler is
	 * reprogrammed by the rebuild flow, so stop here
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	/* change number of XDP Tx queues to 0 */
	vsi->num_xdp_txq = 0;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}
2853efc2214bSMaciej Fijalkowski 
2854efc2214bSMaciej Fijalkowski /**
2855c7a21904SMichal Swiatkowski  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2856c7a21904SMichal Swiatkowski  * @vsi: VSI to schedule napi on
2857c7a21904SMichal Swiatkowski  */
2858c7a21904SMichal Swiatkowski static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2859c7a21904SMichal Swiatkowski {
2860c7a21904SMichal Swiatkowski 	int i;
2861c7a21904SMichal Swiatkowski 
2862c7a21904SMichal Swiatkowski 	ice_for_each_rxq(vsi, i) {
2863e72bba21SMaciej Fijalkowski 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2864c7a21904SMichal Swiatkowski 
2865c7a21904SMichal Swiatkowski 		if (rx_ring->xsk_pool)
2866c7a21904SMichal Swiatkowski 			napi_schedule(&rx_ring->q_vector->napi);
2867c7a21904SMichal Swiatkowski 	}
2868c7a21904SMichal Swiatkowski }
2869c7a21904SMichal Swiatkowski 
2870c7a21904SMichal Swiatkowski /**
287122bf877eSMaciej Fijalkowski  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
287222bf877eSMaciej Fijalkowski  * @vsi: VSI to determine the count of XDP Tx qs
287322bf877eSMaciej Fijalkowski  *
287422bf877eSMaciej Fijalkowski  * returns 0 if Tx qs count is higher than at least half of CPU count,
287522bf877eSMaciej Fijalkowski  * -ENOMEM otherwise
287622bf877eSMaciej Fijalkowski  */
287722bf877eSMaciej Fijalkowski int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
287822bf877eSMaciej Fijalkowski {
287922bf877eSMaciej Fijalkowski 	u16 avail = ice_get_avail_txq_count(vsi->back);
288022bf877eSMaciej Fijalkowski 	u16 cpus = num_possible_cpus();
288122bf877eSMaciej Fijalkowski 
288222bf877eSMaciej Fijalkowski 	if (avail < cpus / 2)
288322bf877eSMaciej Fijalkowski 		return -ENOMEM;
288422bf877eSMaciej Fijalkowski 
288522bf877eSMaciej Fijalkowski 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
288622bf877eSMaciej Fijalkowski 
288722bf877eSMaciej Fijalkowski 	if (vsi->num_xdp_txq < cpus)
288822bf877eSMaciej Fijalkowski 		static_branch_inc(&ice_xdp_locking_key);
288922bf877eSMaciej Fijalkowski 
289022bf877eSMaciej Fijalkowski 	return 0;
289122bf877eSMaciej Fijalkowski }
289222bf877eSMaciej Fijalkowski 
289322bf877eSMaciej Fijalkowski /**
289460bc72b3SMaciej Fijalkowski  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
289560bc72b3SMaciej Fijalkowski  * @vsi: Pointer to VSI structure
289660bc72b3SMaciej Fijalkowski  */
289760bc72b3SMaciej Fijalkowski static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
289860bc72b3SMaciej Fijalkowski {
289960bc72b3SMaciej Fijalkowski 	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
290060bc72b3SMaciej Fijalkowski 		return ICE_RXBUF_1664;
290160bc72b3SMaciej Fijalkowski 	else
290260bc72b3SMaciej Fijalkowski 		return ICE_RXBUF_3072;
290360bc72b3SMaciej Fijalkowski }
290460bc72b3SMaciej Fijalkowski 
290560bc72b3SMaciej Fijalkowski /**
2906efc2214bSMaciej Fijalkowski  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2907efc2214bSMaciej Fijalkowski  * @vsi: VSI to setup XDP for
2908efc2214bSMaciej Fijalkowski  * @prog: XDP program
2909efc2214bSMaciej Fijalkowski  * @extack: netlink extended ack
2910efc2214bSMaciej Fijalkowski  */
2911efc2214bSMaciej Fijalkowski static int
2912efc2214bSMaciej Fijalkowski ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2913efc2214bSMaciej Fijalkowski 		   struct netlink_ext_ack *extack)
2914efc2214bSMaciej Fijalkowski {
291560bc72b3SMaciej Fijalkowski 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2916efc2214bSMaciej Fijalkowski 	bool if_running = netif_running(vsi->netdev);
2917efc2214bSMaciej Fijalkowski 	int ret = 0, xdp_ring_err = 0;
2918efc2214bSMaciej Fijalkowski 
29192fba7dc5SMaciej Fijalkowski 	if (prog && !prog->aux->xdp_has_frags) {
292060bc72b3SMaciej Fijalkowski 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
29212fba7dc5SMaciej Fijalkowski 			NL_SET_ERR_MSG_MOD(extack,
29222fba7dc5SMaciej Fijalkowski 					   "MTU is too large for linear frames and XDP prog does not support frags");
2923efc2214bSMaciej Fijalkowski 			return -EOPNOTSUPP;
2924efc2214bSMaciej Fijalkowski 		}
29252fba7dc5SMaciej Fijalkowski 	}
2926efc2214bSMaciej Fijalkowski 
2927efc2214bSMaciej Fijalkowski 	/* need to stop netdev while setting up the program for Rx rings */
2928e97fb1aeSAnirudh Venkataramanan 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2929efc2214bSMaciej Fijalkowski 		ret = ice_down(vsi);
2930efc2214bSMaciej Fijalkowski 		if (ret) {
2931af23635aSJesse Brandeburg 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2932efc2214bSMaciej Fijalkowski 			return ret;
2933efc2214bSMaciej Fijalkowski 		}
2934efc2214bSMaciej Fijalkowski 	}
2935efc2214bSMaciej Fijalkowski 
2936efc2214bSMaciej Fijalkowski 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
293722bf877eSMaciej Fijalkowski 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
293822bf877eSMaciej Fijalkowski 		if (xdp_ring_err) {
293922bf877eSMaciej Fijalkowski 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
294022bf877eSMaciej Fijalkowski 		} else {
2941efc2214bSMaciej Fijalkowski 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2942efc2214bSMaciej Fijalkowski 			if (xdp_ring_err)
2943af23635aSJesse Brandeburg 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
294422bf877eSMaciej Fijalkowski 		}
2945b6a4103cSLorenzo Bianconi 		xdp_features_set_redirect_target(vsi->netdev, true);
29467e753eb6SPrzemyslaw Patynowski 		/* reallocate Rx queues that are used for zero-copy */
29477e753eb6SPrzemyslaw Patynowski 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
29487e753eb6SPrzemyslaw Patynowski 		if (xdp_ring_err)
29497e753eb6SPrzemyslaw Patynowski 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2950efc2214bSMaciej Fijalkowski 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
295166c0e13aSMarek Majtyka 		xdp_features_clear_redirect_target(vsi->netdev);
2952efc2214bSMaciej Fijalkowski 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2953efc2214bSMaciej Fijalkowski 		if (xdp_ring_err)
2954af23635aSJesse Brandeburg 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
29557e753eb6SPrzemyslaw Patynowski 		/* reallocate Rx queues that were used for zero-copy */
29567e753eb6SPrzemyslaw Patynowski 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
29577e753eb6SPrzemyslaw Patynowski 		if (xdp_ring_err)
29587e753eb6SPrzemyslaw Patynowski 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2959efc2214bSMaciej Fijalkowski 	} else {
2960f65ee535SMarta Plantykow 		/* safe to call even when prog == vsi->xdp_prog as
2961f65ee535SMarta Plantykow 		 * dev_xdp_install in net/core/dev.c incremented prog's
2962f65ee535SMarta Plantykow 		 * refcount so corresponding bpf_prog_put won't cause
2963f65ee535SMarta Plantykow 		 * underflow
2964f65ee535SMarta Plantykow 		 */
2965efc2214bSMaciej Fijalkowski 		ice_vsi_assign_bpf_prog(vsi, prog);
2966efc2214bSMaciej Fijalkowski 	}
2967efc2214bSMaciej Fijalkowski 
2968efc2214bSMaciej Fijalkowski 	if (if_running)
2969efc2214bSMaciej Fijalkowski 		ret = ice_up(vsi);
2970efc2214bSMaciej Fijalkowski 
2971c7a21904SMichal Swiatkowski 	if (!ret && prog)
2972c7a21904SMichal Swiatkowski 		ice_vsi_rx_napi_schedule(vsi);
29732d4238f5SKrzysztof Kazimierczak 
2974efc2214bSMaciej Fijalkowski 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2975efc2214bSMaciej Fijalkowski }
2976efc2214bSMaciej Fijalkowski 
2977efc2214bSMaciej Fijalkowski /**
2978ebc5399eSMaciej Fijalkowski  * ice_xdp_safe_mode - XDP handler for safe mode
2979ebc5399eSMaciej Fijalkowski  * @dev: netdevice
2980ebc5399eSMaciej Fijalkowski  * @xdp: XDP command
2981ebc5399eSMaciej Fijalkowski  */
2982ebc5399eSMaciej Fijalkowski static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2983ebc5399eSMaciej Fijalkowski 			     struct netdev_bpf *xdp)
2984ebc5399eSMaciej Fijalkowski {
2985ebc5399eSMaciej Fijalkowski 	NL_SET_ERR_MSG_MOD(xdp->extack,
2986ebc5399eSMaciej Fijalkowski 			   "Please provide working DDP firmware package in order to use XDP\n"
2987ebc5399eSMaciej Fijalkowski 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2988ebc5399eSMaciej Fijalkowski 	return -EOPNOTSUPP;
2989ebc5399eSMaciej Fijalkowski }
2990ebc5399eSMaciej Fijalkowski 
2991ebc5399eSMaciej Fijalkowski /**
2992efc2214bSMaciej Fijalkowski  * ice_xdp - implements XDP handler
2993efc2214bSMaciej Fijalkowski  * @dev: netdevice
2994efc2214bSMaciej Fijalkowski  * @xdp: XDP command
2995efc2214bSMaciej Fijalkowski  */
2996efc2214bSMaciej Fijalkowski static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2997efc2214bSMaciej Fijalkowski {
2998efc2214bSMaciej Fijalkowski 	struct ice_netdev_priv *np = netdev_priv(dev);
2999efc2214bSMaciej Fijalkowski 	struct ice_vsi *vsi = np->vsi;
3000efc2214bSMaciej Fijalkowski 
3001efc2214bSMaciej Fijalkowski 	if (vsi->type != ICE_VSI_PF) {
3002af23635aSJesse Brandeburg 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3003efc2214bSMaciej Fijalkowski 		return -EINVAL;
3004efc2214bSMaciej Fijalkowski 	}
3005efc2214bSMaciej Fijalkowski 
3006efc2214bSMaciej Fijalkowski 	switch (xdp->command) {
3007efc2214bSMaciej Fijalkowski 	case XDP_SETUP_PROG:
3008efc2214bSMaciej Fijalkowski 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
30091742b3d5SMagnus Karlsson 	case XDP_SETUP_XSK_POOL:
30101742b3d5SMagnus Karlsson 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
30112d4238f5SKrzysztof Kazimierczak 					  xdp->xsk.queue_id);
3012efc2214bSMaciej Fijalkowski 	default:
3013efc2214bSMaciej Fijalkowski 		return -EINVAL;
3014efc2214bSMaciej Fijalkowski 	}
3015efc2214bSMaciej Fijalkowski }
3016efc2214bSMaciej Fijalkowski 
3017efc2214bSMaciej Fijalkowski /**
3018940b61afSAnirudh Venkataramanan  * ice_ena_misc_vector - enable the non-queue interrupts
3019940b61afSAnirudh Venkataramanan  * @pf: board private structure
3020940b61afSAnirudh Venkataramanan  */
3021940b61afSAnirudh Venkataramanan static void ice_ena_misc_vector(struct ice_pf *pf)
3022940b61afSAnirudh Venkataramanan {
3023940b61afSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
3024940b61afSAnirudh Venkataramanan 	u32 val;
3025940b61afSAnirudh Venkataramanan 
30269d5c5a52SPaul Greenwalt 	/* Disable anti-spoof detection interrupt to prevent spurious event
30279d5c5a52SPaul Greenwalt 	 * interrupts during a function reset. Anti-spoof functionally is
30289d5c5a52SPaul Greenwalt 	 * still supported.
30299d5c5a52SPaul Greenwalt 	 */
30309d5c5a52SPaul Greenwalt 	val = rd32(hw, GL_MDCK_TX_TDPU);
30319d5c5a52SPaul Greenwalt 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
30329d5c5a52SPaul Greenwalt 	wr32(hw, GL_MDCK_TX_TDPU, val);
30339d5c5a52SPaul Greenwalt 
3034940b61afSAnirudh Venkataramanan 	/* clear things first */
3035940b61afSAnirudh Venkataramanan 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3036940b61afSAnirudh Venkataramanan 	rd32(hw, PFINT_OICR);		/* read to clear */
3037940b61afSAnirudh Venkataramanan 
30383bcd7fa3SBruce Allan 	val = (PFINT_OICR_ECC_ERR_M |
3039940b61afSAnirudh Venkataramanan 	       PFINT_OICR_MAL_DETECT_M |
3040940b61afSAnirudh Venkataramanan 	       PFINT_OICR_GRST_M |
3041940b61afSAnirudh Venkataramanan 	       PFINT_OICR_PCI_EXCEPTION_M |
3042007676b4SAnirudh Venkataramanan 	       PFINT_OICR_VFLR_M |
30433bcd7fa3SBruce Allan 	       PFINT_OICR_HMC_ERR_M |
3044348048e7SDave Ertman 	       PFINT_OICR_PE_PUSH_M |
30453bcd7fa3SBruce Allan 	       PFINT_OICR_PE_CRITERR_M);
3046940b61afSAnirudh Venkataramanan 
3047940b61afSAnirudh Venkataramanan 	wr32(hw, PFINT_OICR_ENA, val);
3048940b61afSAnirudh Venkataramanan 
3049940b61afSAnirudh Venkataramanan 	/* SW_ITR_IDX = 0, but don't change INTENA */
3050cbe66bfeSBrett Creeley 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
3051940b61afSAnirudh Venkataramanan 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3052940b61afSAnirudh Venkataramanan }
3053940b61afSAnirudh Venkataramanan 
/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: private data pointer (points to the PF structure, not a q_vector)
 *
 * Return: IRQ_WAKE_THREAD when a Tx timestamp needs the threaded handler
 * (ice_misc_intr_thread_fn), IRQ_HANDLED otherwise.
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	/* the OICR vector also signals control queue events; flag all of
	 * them for the service task to process
	 */
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	/* latch the cause bits; ena_mask is stripped of every cause we
	 * handle below so the leftovers can be reported as unexpected
	 */
	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, PFINT_OICR_ENA);

			reg &= ~PFINT_OICR_VFLR_M;
			wr32(hw, PFINT_OICR_ENA, reg);
		} else {
			ena_mask &= ~PFINT_OICR_VFLR_M;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
		}
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(dev, "Invalid reset type %d\n", reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 */
		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(ICE_GLOBR_RECV, pf->state);
			else
				set_bit(ICE_EMPR_RECV, pf->state);

			/* There are couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_TSYN_TX_M) {
		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
		/* Tx timestamps are processed in the threaded handler; skip
		 * the wake while a reset is ongoing
		 */
		if (!hw->reset_ongoing)
			ret = IRQ_WAKE_THREAD;
	}

	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));

		/* Save EVENTs from GTSYN register */
		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
						     GLTSYN_STAT_EVENT1_M |
						     GLTSYN_STAT_EVENT2_M);
		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
	}

#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
	if (oicr & ICE_AUX_CRIT_ERR) {
		pf->oicr_err_reg |= oicr;
		set_bit(ICE_AUX_ERR_PENDING, pf->state);
		ena_mask &= ~ICE_AUX_CRIT_ERR;
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
	}
	/* ret is still IRQ_NONE (0) unless the thread was woken above */
	if (!ret)
		ret = IRQ_HANDLED;

	ice_service_task_schedule(pf);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return ret;
}
3188940b61afSAnirudh Venkataramanan 
3189940b61afSAnirudh Venkataramanan /**
31901229b339SKarol Kolacinski  * ice_misc_intr_thread_fn - misc interrupt thread function
31911229b339SKarol Kolacinski  * @irq: interrupt number
31921229b339SKarol Kolacinski  * @data: pointer to a q_vector
31931229b339SKarol Kolacinski  */
31941229b339SKarol Kolacinski static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
31951229b339SKarol Kolacinski {
31961229b339SKarol Kolacinski 	struct ice_pf *pf = data;
31971229b339SKarol Kolacinski 
319830f15874SJacob Keller 	if (ice_is_reset_in_progress(pf->state))
319930f15874SJacob Keller 		return IRQ_HANDLED;
32001229b339SKarol Kolacinski 
320130f15874SJacob Keller 	while (!ice_ptp_process_ts(pf))
320230f15874SJacob Keller 		usleep_range(50, 100);
320330f15874SJacob Keller 
320430f15874SJacob Keller 	return IRQ_HANDLED;
32051229b339SKarol Kolacinski }
32061229b339SKarol Kolacinski 
32071229b339SKarol Kolacinski /**
32080e04e8e1SBrett Creeley  * ice_dis_ctrlq_interrupts - disable control queue interrupts
32090e04e8e1SBrett Creeley  * @hw: pointer to HW structure
32100e04e8e1SBrett Creeley  */
32110e04e8e1SBrett Creeley static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
32120e04e8e1SBrett Creeley {
32130e04e8e1SBrett Creeley 	/* disable Admin queue Interrupt causes */
32140e04e8e1SBrett Creeley 	wr32(hw, PFINT_FW_CTL,
32150e04e8e1SBrett Creeley 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
32160e04e8e1SBrett Creeley 
32170e04e8e1SBrett Creeley 	/* disable Mailbox queue Interrupt causes */
32180e04e8e1SBrett Creeley 	wr32(hw, PFINT_MBX_CTL,
32190e04e8e1SBrett Creeley 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
32200e04e8e1SBrett Creeley 
32218f5ee3c4SJacob Keller 	wr32(hw, PFINT_SB_CTL,
32228f5ee3c4SJacob Keller 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
32238f5ee3c4SJacob Keller 
32240e04e8e1SBrett Creeley 	/* disable Control queue Interrupt causes */
32250e04e8e1SBrett Creeley 	wr32(hw, PFINT_OICR_CTL,
32260e04e8e1SBrett Creeley 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
32270e04e8e1SBrett Creeley 
32280e04e8e1SBrett Creeley 	ice_flush(hw);
32290e04e8e1SBrett Creeley }
32300e04e8e1SBrett Creeley 
32310e04e8e1SBrett Creeley /**
3232940b61afSAnirudh Venkataramanan  * ice_free_irq_msix_misc - Unroll misc vector setup
3233940b61afSAnirudh Venkataramanan  * @pf: board private structure
3234940b61afSAnirudh Venkataramanan  */
3235940b61afSAnirudh Venkataramanan static void ice_free_irq_msix_misc(struct ice_pf *pf)
3236940b61afSAnirudh Venkataramanan {
32370e04e8e1SBrett Creeley 	struct ice_hw *hw = &pf->hw;
32380e04e8e1SBrett Creeley 
32390e04e8e1SBrett Creeley 	ice_dis_ctrlq_interrupts(hw);
32400e04e8e1SBrett Creeley 
3241940b61afSAnirudh Venkataramanan 	/* disable OICR interrupt */
32420e04e8e1SBrett Creeley 	wr32(hw, PFINT_OICR_ENA, 0);
32430e04e8e1SBrett Creeley 	ice_flush(hw);
3244940b61afSAnirudh Venkataramanan 
3245ba880734SBrett Creeley 	if (pf->msix_entries) {
3246cbe66bfeSBrett Creeley 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
32474015d11eSBrett Creeley 		devm_free_irq(ice_pf_to_dev(pf),
3248cbe66bfeSBrett Creeley 			      pf->msix_entries[pf->oicr_idx].vector, pf);
3249940b61afSAnirudh Venkataramanan 	}
3250940b61afSAnirudh Venkataramanan 
3251eb0208ecSPreethi Banala 	pf->num_avail_sw_msix += 1;
3252cbe66bfeSBrett Creeley 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3253940b61afSAnirudh Venkataramanan }
3254940b61afSAnirudh Venkataramanan 
3255940b61afSAnirudh Venkataramanan /**
32560e04e8e1SBrett Creeley  * ice_ena_ctrlq_interrupts - enable control queue interrupts
32570e04e8e1SBrett Creeley  * @hw: pointer to HW structure
3258b07833a0SBrett Creeley  * @reg_idx: HW vector index to associate the control queue interrupts with
32590e04e8e1SBrett Creeley  */
3260b07833a0SBrett Creeley static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
32610e04e8e1SBrett Creeley {
32620e04e8e1SBrett Creeley 	u32 val;
32630e04e8e1SBrett Creeley 
3264b07833a0SBrett Creeley 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
32650e04e8e1SBrett Creeley 	       PFINT_OICR_CTL_CAUSE_ENA_M);
32660e04e8e1SBrett Creeley 	wr32(hw, PFINT_OICR_CTL, val);
32670e04e8e1SBrett Creeley 
32680e04e8e1SBrett Creeley 	/* enable Admin queue Interrupt causes */
3269b07833a0SBrett Creeley 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
32700e04e8e1SBrett Creeley 	       PFINT_FW_CTL_CAUSE_ENA_M);
32710e04e8e1SBrett Creeley 	wr32(hw, PFINT_FW_CTL, val);
32720e04e8e1SBrett Creeley 
32730e04e8e1SBrett Creeley 	/* enable Mailbox queue Interrupt causes */
3274b07833a0SBrett Creeley 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
32750e04e8e1SBrett Creeley 	       PFINT_MBX_CTL_CAUSE_ENA_M);
32760e04e8e1SBrett Creeley 	wr32(hw, PFINT_MBX_CTL, val);
32770e04e8e1SBrett Creeley 
32788f5ee3c4SJacob Keller 	/* This enables Sideband queue Interrupt causes */
32798f5ee3c4SJacob Keller 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
32808f5ee3c4SJacob Keller 	       PFINT_SB_CTL_CAUSE_ENA_M);
32818f5ee3c4SJacob Keller 	wr32(hw, PFINT_SB_CTL, val);
32828f5ee3c4SJacob Keller 
32830e04e8e1SBrett Creeley 	ice_flush(hw);
32840e04e8e1SBrett Creeley }
32850e04e8e1SBrett Creeley 
/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int oicr_idx, err = 0;

	/* build the interrupt name once; reused on subsequent calls */
	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(dev), dev_name(dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0)
		return oicr_idx;

	pf->num_avail_sw_msix -= 1;
	pf->oicr_idx = (u16)oicr_idx;

	/* threaded IRQ: hard handler plus thread fn for deferred work */
	err = devm_request_threaded_irq(dev,
					pf->msix_entries[pf->oicr_idx].vector,
					ice_misc_intr, ice_misc_intr_thread_fn,
					0, pf->int_name, pf);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
			pf->int_name, err);
		/* undo the vector reservation made above */
		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	/* associate control queue causes with the misc vector and set its
	 * ITR (interrupt throttle rate)
	 */
	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}
3343940b61afSAnirudh Venkataramanan 
3344940b61afSAnirudh Venkataramanan /**
3345df0f8479SAnirudh Venkataramanan  * ice_napi_add - register NAPI handler for the VSI
3346df0f8479SAnirudh Venkataramanan  * @vsi: VSI for which NAPI handler is to be registered
3347df0f8479SAnirudh Venkataramanan  *
3348df0f8479SAnirudh Venkataramanan  * This function is only called in the driver's load path. Registering the NAPI
3349df0f8479SAnirudh Venkataramanan  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3350df0f8479SAnirudh Venkataramanan  * reset/rebuild, etc.)
3351df0f8479SAnirudh Venkataramanan  */
3352df0f8479SAnirudh Venkataramanan static void ice_napi_add(struct ice_vsi *vsi)
3353df0f8479SAnirudh Venkataramanan {
3354df0f8479SAnirudh Venkataramanan 	int v_idx;
3355df0f8479SAnirudh Venkataramanan 
3356df0f8479SAnirudh Venkataramanan 	if (!vsi->netdev)
3357df0f8479SAnirudh Venkataramanan 		return;
3358df0f8479SAnirudh Venkataramanan 
33590c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, v_idx)
3360df0f8479SAnirudh Venkataramanan 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3361b48b89f9SJakub Kicinski 			       ice_napi_poll);
3362df0f8479SAnirudh Venkataramanan }
3363df0f8479SAnirudh Venkataramanan 
3364df0f8479SAnirudh Venkataramanan /**
3365462acf6aSTony Nguyen  * ice_set_ops - set netdev and ethtools ops for the given netdev
3366b6a4103cSLorenzo Bianconi  * @vsi: the VSI associated with the new netdev
33673a858ba3SAnirudh Venkataramanan  */
3368b6a4103cSLorenzo Bianconi static void ice_set_ops(struct ice_vsi *vsi)
33693a858ba3SAnirudh Venkataramanan {
3370b6a4103cSLorenzo Bianconi 	struct net_device *netdev = vsi->netdev;
3371462acf6aSTony Nguyen 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3372462acf6aSTony Nguyen 
3373462acf6aSTony Nguyen 	if (ice_is_safe_mode(pf)) {
3374462acf6aSTony Nguyen 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3375462acf6aSTony Nguyen 		ice_set_ethtool_safe_mode_ops(netdev);
3376462acf6aSTony Nguyen 		return;
3377462acf6aSTony Nguyen 	}
3378462acf6aSTony Nguyen 
3379462acf6aSTony Nguyen 	netdev->netdev_ops = &ice_netdev_ops;
3380b20e6c17SJakub Kicinski 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3381462acf6aSTony Nguyen 	ice_set_ethtool_ops(netdev);
3382b6a4103cSLorenzo Bianconi 
3383b6a4103cSLorenzo Bianconi 	if (vsi->type != ICE_VSI_PF)
3384b6a4103cSLorenzo Bianconi 		return;
3385b6a4103cSLorenzo Bianconi 
3386b6a4103cSLorenzo Bianconi 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3387b6a4103cSLorenzo Bianconi 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3388b6a4103cSLorenzo Bianconi 			       NETDEV_XDP_ACT_RX_SG;
3389462acf6aSTony Nguyen }
3390462acf6aSTony Nguyen 
/**
 * ice_set_netdev_features - set features for the given netdev
 * @netdev: netdev instance
 *
 * Builds the default, checksum-offload, VLAN-offload and TSO feature sets
 * and programs them into the netdev's feature words. In safe mode only a
 * minimal feature set (SG + HIGHDMA) is advertised.
 */
static void ice_set_netdev_features(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;

	if (ice_is_safe_mode(pf)) {
		/* safe mode */
		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
		netdev->hw_features = netdev->features;
		return;
	}

	/* baseline features common to all offload groups */
	dflt_features = NETIF_F_SG	|
			NETIF_F_HIGHDMA	|
			NETIF_F_NTUPLE	|
			NETIF_F_RXHASH;

	/* checksum offloads */
	csumo_features = NETIF_F_RXCSUM	  |
			 NETIF_F_IP_CSUM  |
			 NETIF_F_SCTP_CRC |
			 NETIF_F_IPV6_CSUM;

	/* CTAG VLAN offloads (STAG handled separately below) */
	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX     |
			 NETIF_F_HW_VLAN_CTAG_RX;

	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
	if (is_dvm_ena)
		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;

	/* segmentation offloads, including tunnel GSO variants */
	tso_features = NETIF_F_TSO			|
		       NETIF_F_TSO_ECN			|
		       NETIF_F_TSO6			|
		       NETIF_F_GSO_GRE			|
		       NETIF_F_GSO_UDP_TUNNEL		|
		       NETIF_F_GSO_GRE_CSUM		|
		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
		       NETIF_F_GSO_PARTIAL		|
		       NETIF_F_GSO_IPXIP4		|
		       NETIF_F_GSO_IPXIP6		|
		       NETIF_F_GSO_UDP_L4;

	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_GRE_CSUM;
	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* add support for HW_CSUM on packets with MPLS header */
	netdev->mpls_features =  NETIF_F_HW_CSUM |
				 NETIF_F_TSO     |
				 NETIF_F_TSO6;

	/* enable features */
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_HW_TC;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	/* advertise support but don't enable by default since only one type of
	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
	 * type turns on the other has to be turned off. This is enforced by the
	 * ice_fix_features() ndo callback.
	 */
	if (is_dvm_ena)
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
			NETIF_F_HW_VLAN_STAG_TX;

	/* Leave CRC / FCS stripping enabled by default, but allow the value to
	 * be changed at runtime
	 */
	netdev->hw_features |= NETIF_F_RXFCS;

	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
3480462acf6aSTony Nguyen 
3481462acf6aSTony Nguyen /**
3482d76a60baSAnirudh Venkataramanan  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3483d76a60baSAnirudh Venkataramanan  * @lut: Lookup table
3484d76a60baSAnirudh Venkataramanan  * @rss_table_size: Lookup table size
3485d76a60baSAnirudh Venkataramanan  * @rss_size: Range of queue number for hashing
3486d76a60baSAnirudh Venkataramanan  */
3487d76a60baSAnirudh Venkataramanan void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3488d76a60baSAnirudh Venkataramanan {
3489d76a60baSAnirudh Venkataramanan 	u16 i;
3490d76a60baSAnirudh Venkataramanan 
3491d76a60baSAnirudh Venkataramanan 	for (i = 0; i < rss_table_size; i++)
3492d76a60baSAnirudh Venkataramanan 		lut[i] = i % rss_size;
3493d76a60baSAnirudh Venkataramanan }
3494d76a60baSAnirudh Venkataramanan 
3495d76a60baSAnirudh Venkataramanan /**
34960f9d5027SAnirudh Venkataramanan  * ice_pf_vsi_setup - Set up a PF VSI
34970f9d5027SAnirudh Venkataramanan  * @pf: board private structure
34980f9d5027SAnirudh Venkataramanan  * @pi: pointer to the port_info instance
34990f9d5027SAnirudh Venkataramanan  *
35000e674aebSAnirudh Venkataramanan  * Returns pointer to the successfully allocated VSI software struct
35010e674aebSAnirudh Venkataramanan  * on success, otherwise returns NULL on failure.
35020f9d5027SAnirudh Venkataramanan  */
35030f9d5027SAnirudh Venkataramanan static struct ice_vsi *
35040f9d5027SAnirudh Venkataramanan ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
35050f9d5027SAnirudh Venkataramanan {
35065e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
35075e509ab2SJacob Keller 
35085e509ab2SJacob Keller 	params.type = ICE_VSI_PF;
35095e509ab2SJacob Keller 	params.pi = pi;
35105e509ab2SJacob Keller 	params.flags = ICE_VSI_FLAG_INIT;
35115e509ab2SJacob Keller 
35125e509ab2SJacob Keller 	return ice_vsi_setup(pf, &params);
35130f9d5027SAnirudh Venkataramanan }
35140f9d5027SAnirudh Venkataramanan 
3515fbc7b27aSKiran Patil static struct ice_vsi *
3516fbc7b27aSKiran Patil ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3517fbc7b27aSKiran Patil 		   struct ice_channel *ch)
3518fbc7b27aSKiran Patil {
35195e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
35205e509ab2SJacob Keller 
35215e509ab2SJacob Keller 	params.type = ICE_VSI_CHNL;
35225e509ab2SJacob Keller 	params.pi = pi;
35235e509ab2SJacob Keller 	params.ch = ch;
35245e509ab2SJacob Keller 	params.flags = ICE_VSI_FLAG_INIT;
35255e509ab2SJacob Keller 
35265e509ab2SJacob Keller 	return ice_vsi_setup(pf, &params);
3527fbc7b27aSKiran Patil }
3528fbc7b27aSKiran Patil 
35290f9d5027SAnirudh Venkataramanan /**
3530148beb61SHenry Tieman  * ice_ctrl_vsi_setup - Set up a control VSI
3531148beb61SHenry Tieman  * @pf: board private structure
3532148beb61SHenry Tieman  * @pi: pointer to the port_info instance
3533148beb61SHenry Tieman  *
3534148beb61SHenry Tieman  * Returns pointer to the successfully allocated VSI software struct
3535148beb61SHenry Tieman  * on success, otherwise returns NULL on failure.
3536148beb61SHenry Tieman  */
3537148beb61SHenry Tieman static struct ice_vsi *
3538148beb61SHenry Tieman ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3539148beb61SHenry Tieman {
35405e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
35415e509ab2SJacob Keller 
35425e509ab2SJacob Keller 	params.type = ICE_VSI_CTRL;
35435e509ab2SJacob Keller 	params.pi = pi;
35445e509ab2SJacob Keller 	params.flags = ICE_VSI_FLAG_INIT;
35455e509ab2SJacob Keller 
35465e509ab2SJacob Keller 	return ice_vsi_setup(pf, &params);
3547148beb61SHenry Tieman }
3548148beb61SHenry Tieman 
3549148beb61SHenry Tieman /**
35500e674aebSAnirudh Venkataramanan  * ice_lb_vsi_setup - Set up a loopback VSI
35510e674aebSAnirudh Venkataramanan  * @pf: board private structure
35520e674aebSAnirudh Venkataramanan  * @pi: pointer to the port_info instance
35530e674aebSAnirudh Venkataramanan  *
35540e674aebSAnirudh Venkataramanan  * Returns pointer to the successfully allocated VSI software struct
35550e674aebSAnirudh Venkataramanan  * on success, otherwise returns NULL on failure.
35560e674aebSAnirudh Venkataramanan  */
35570e674aebSAnirudh Venkataramanan struct ice_vsi *
35580e674aebSAnirudh Venkataramanan ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
35590e674aebSAnirudh Venkataramanan {
35605e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
35615e509ab2SJacob Keller 
35625e509ab2SJacob Keller 	params.type = ICE_VSI_LB;
35635e509ab2SJacob Keller 	params.pi = pi;
35645e509ab2SJacob Keller 	params.flags = ICE_VSI_FLAG_INIT;
35655e509ab2SJacob Keller 
35665e509ab2SJacob Keller 	return ice_vsi_setup(pf, &params);
35670e674aebSAnirudh Venkataramanan }
35680e674aebSAnirudh Venkataramanan 
/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 *
 * Return: 0 on success, negative error code on failure.
 */
static int
ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* VLAN 0 is added by default during load/reset */
	if (!vid)
		return 0;

	/* serialize against concurrent VLAN/promisc reconfiguration */
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* Add multicast promisc rule for the VLAN ID to be added if
	 * all-multicast is currently enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					       ICE_MCAST_VLAN_PROMISC_BITS,
					       vid);
		if (ret)
			goto finish;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
	 * packets aren't pruned by the device's internal switch on Rx
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->add_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* If all-multicast is currently enabled and this VLAN ID is only one
	 * besides VLAN-0 we have to update look-up type of multicast promisc
	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
	 */
	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_PROMISC_BITS, 0);
		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
}
3631d76a60baSAnirudh Venkataramanan 
/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 *
 * Return: 0 on success, negative error code on failure.
 */
static int
ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* don't allow removal of VLAN 0 */
	if (!vid)
		return 0;

	/* serialize against concurrent VLAN/promisc reconfiguration */
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* best-effort clear of the per-VID multicast promisc rule; failure
	 * is logged and all-multicast is re-flagged rather than aborting.
	 * NOTE(review): presumably the IFF_ALLMULTI re-flag lets a later
	 * rx-mode pass retry the promisc programming -- confirm with callers.
	 */
	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
	if (ret) {
		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
			   vsi->vsi_num);
		vsi->current_netdev_flags |= IFF_ALLMULTI;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Make sure VLAN delete is successful before updating VLAN
	 * information
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->del_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* Remove multicast promisc rule for the removed VLAN ID if
	 * all-multicast is enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI)
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_VLAN_PROMISC_BITS, vid);

	if (!ice_vsi_has_non_zero_vlans(vsi)) {
		/* Update look-up type of multicast promisc rule for VLAN 0
		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
		 */
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						   ICE_MCAST_VLAN_PROMISC_BITS,
						   0);
			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						 ICE_MCAST_PROMISC_BITS, 0);
		}
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
}
3700d76a60baSAnirudh Venkataramanan 
3701d76a60baSAnirudh Venkataramanan /**
3702195bb48fSMichal Swiatkowski  * ice_rep_indr_tc_block_unbind
3703195bb48fSMichal Swiatkowski  * @cb_priv: indirection block private data
3704195bb48fSMichal Swiatkowski  */
3705195bb48fSMichal Swiatkowski static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3706195bb48fSMichal Swiatkowski {
3707195bb48fSMichal Swiatkowski 	struct ice_indr_block_priv *indr_priv = cb_priv;
3708195bb48fSMichal Swiatkowski 
3709195bb48fSMichal Swiatkowski 	list_del(&indr_priv->list);
3710195bb48fSMichal Swiatkowski 	kfree(indr_priv);
3711195bb48fSMichal Swiatkowski }
3712195bb48fSMichal Swiatkowski 
3713195bb48fSMichal Swiatkowski /**
3714195bb48fSMichal Swiatkowski  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3715195bb48fSMichal Swiatkowski  * @vsi: VSI struct which has the netdev
3716195bb48fSMichal Swiatkowski  */
3717195bb48fSMichal Swiatkowski static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3718195bb48fSMichal Swiatkowski {
3719195bb48fSMichal Swiatkowski 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3720195bb48fSMichal Swiatkowski 
3721195bb48fSMichal Swiatkowski 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3722195bb48fSMichal Swiatkowski 				 ice_rep_indr_tc_block_unbind);
3723195bb48fSMichal Swiatkowski }
3724195bb48fSMichal Swiatkowski 
3725195bb48fSMichal Swiatkowski /**
3726195bb48fSMichal Swiatkowski  * ice_tc_indir_block_register - Register TC indirect block notifications
3727195bb48fSMichal Swiatkowski  * @vsi: VSI struct which has the netdev
3728195bb48fSMichal Swiatkowski  *
3729195bb48fSMichal Swiatkowski  * Returns 0 on success, negative value on failure
3730195bb48fSMichal Swiatkowski  */
3731195bb48fSMichal Swiatkowski static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3732195bb48fSMichal Swiatkowski {
3733195bb48fSMichal Swiatkowski 	struct ice_netdev_priv *np;
3734195bb48fSMichal Swiatkowski 
3735195bb48fSMichal Swiatkowski 	if (!vsi || !vsi->netdev)
3736195bb48fSMichal Swiatkowski 		return -EINVAL;
3737195bb48fSMichal Swiatkowski 
3738195bb48fSMichal Swiatkowski 	np = netdev_priv(vsi->netdev);
3739195bb48fSMichal Swiatkowski 
3740195bb48fSMichal Swiatkowski 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3741195bb48fSMichal Swiatkowski 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3742195bb48fSMichal Swiatkowski }
3743195bb48fSMichal Swiatkowski 
3744195bb48fSMichal Swiatkowski /**
37458c243700SAnirudh Venkataramanan  * ice_get_avail_q_count - Get count of queues in use
37468c243700SAnirudh Venkataramanan  * @pf_qmap: bitmap to get queue use count from
37478c243700SAnirudh Venkataramanan  * @lock: pointer to a mutex that protects access to pf_qmap
37488c243700SAnirudh Venkataramanan  * @size: size of the bitmap
3749940b61afSAnirudh Venkataramanan  */
37508c243700SAnirudh Venkataramanan static u16
37518c243700SAnirudh Venkataramanan ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3752940b61afSAnirudh Venkataramanan {
375388865fc4SKarol Kolacinski 	unsigned long bit;
375488865fc4SKarol Kolacinski 	u16 count = 0;
3755940b61afSAnirudh Venkataramanan 
37568c243700SAnirudh Venkataramanan 	mutex_lock(lock);
37578c243700SAnirudh Venkataramanan 	for_each_clear_bit(bit, pf_qmap, size)
37588c243700SAnirudh Venkataramanan 		count++;
37598c243700SAnirudh Venkataramanan 	mutex_unlock(lock);
3760940b61afSAnirudh Venkataramanan 
37618c243700SAnirudh Venkataramanan 	return count;
37628c243700SAnirudh Venkataramanan }
3763d76a60baSAnirudh Venkataramanan 
37648c243700SAnirudh Venkataramanan /**
37658c243700SAnirudh Venkataramanan  * ice_get_avail_txq_count - Get count of Tx queues in use
37668c243700SAnirudh Venkataramanan  * @pf: pointer to an ice_pf instance
37678c243700SAnirudh Venkataramanan  */
37688c243700SAnirudh Venkataramanan u16 ice_get_avail_txq_count(struct ice_pf *pf)
37698c243700SAnirudh Venkataramanan {
37708c243700SAnirudh Venkataramanan 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
37718c243700SAnirudh Venkataramanan 				     pf->max_pf_txqs);
37728c243700SAnirudh Venkataramanan }
3773940b61afSAnirudh Venkataramanan 
37748c243700SAnirudh Venkataramanan /**
37758c243700SAnirudh Venkataramanan  * ice_get_avail_rxq_count - Get count of Rx queues in use
37768c243700SAnirudh Venkataramanan  * @pf: pointer to an ice_pf instance
37778c243700SAnirudh Venkataramanan  */
37788c243700SAnirudh Venkataramanan u16 ice_get_avail_rxq_count(struct ice_pf *pf)
37798c243700SAnirudh Venkataramanan {
37808c243700SAnirudh Venkataramanan 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
37818c243700SAnirudh Venkataramanan 				     pf->max_pf_rxqs);
3782940b61afSAnirudh Venkataramanan }
3783940b61afSAnirudh Venkataramanan 
/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 *
 * Stops the service task, destroys the PF mutexes, frees the queue-tracking
 * bitmaps and unregisters the PTP clock if one was created.
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	/* stop the service task before tearing down what it may touch */
	ice_service_task_stop(pf);
	mutex_destroy(&pf->adev_mutex);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->tc_mutex);
	mutex_destroy(&pf->avail_q_mutex);
	mutex_destroy(&pf->vfs.table_lock);

	/* release the Tx/Rx queue-tracking bitmaps and NULL the pointers */
	if (pf->avail_txqs) {
		bitmap_free(pf->avail_txqs);
		pf->avail_txqs = NULL;
	}

	if (pf->avail_rxqs) {
		bitmap_free(pf->avail_rxqs);
		pf->avail_rxqs = NULL;
	}

	/* unregister the PTP clock if one was registered */
	if (pf->ptp.clock)
		ptp_clock_unregister(pf->ptp.clock);
}
3810940b61afSAnirudh Venkataramanan 
3811940b61afSAnirudh Venkataramanan /**
3812462acf6aSTony Nguyen  * ice_set_pf_caps - set PFs capability flags
3813462acf6aSTony Nguyen  * @pf: pointer to the PF instance
3814462acf6aSTony Nguyen  */
3815462acf6aSTony Nguyen static void ice_set_pf_caps(struct ice_pf *pf)
3816462acf6aSTony Nguyen {
3817462acf6aSTony Nguyen 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3818462acf6aSTony Nguyen 
3819d25a0fc4SDave Ertman 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
382088f62aeaSDave Ertman 	if (func_caps->common_cap.rdma)
3821d25a0fc4SDave Ertman 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3822462acf6aSTony Nguyen 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3823462acf6aSTony Nguyen 	if (func_caps->common_cap.dcb)
3824462acf6aSTony Nguyen 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3825462acf6aSTony Nguyen 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3826462acf6aSTony Nguyen 	if (func_caps->common_cap.sr_iov_1_1) {
3827462acf6aSTony Nguyen 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3828000773c0SJacob Keller 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3829dc36796eSJacob Keller 					      ICE_MAX_SRIOV_VFS);
3830462acf6aSTony Nguyen 	}
3831462acf6aSTony Nguyen 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3832462acf6aSTony Nguyen 	if (func_caps->common_cap.rss_table_size)
3833462acf6aSTony Nguyen 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3834462acf6aSTony Nguyen 
3835148beb61SHenry Tieman 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3836148beb61SHenry Tieman 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3837148beb61SHenry Tieman 		u16 unused;
3838148beb61SHenry Tieman 
3839148beb61SHenry Tieman 		/* ctrl_vsi_idx will be set to a valid value when flow director
3840148beb61SHenry Tieman 		 * is setup by ice_init_fdir
3841148beb61SHenry Tieman 		 */
3842148beb61SHenry Tieman 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3843148beb61SHenry Tieman 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3844148beb61SHenry Tieman 		/* force guaranteed filter pool for PF */
3845148beb61SHenry Tieman 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3846148beb61SHenry Tieman 				       func_caps->fd_fltr_guar);
3847148beb61SHenry Tieman 		/* force shared filter pool for PF */
3848148beb61SHenry Tieman 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3849148beb61SHenry Tieman 				       func_caps->fd_fltr_best_effort);
3850148beb61SHenry Tieman 	}
3851148beb61SHenry Tieman 
385206c16d89SJacob Keller 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
385306c16d89SJacob Keller 	if (func_caps->common_cap.ieee_1588)
385406c16d89SJacob Keller 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
385506c16d89SJacob Keller 
3856462acf6aSTony Nguyen 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3857462acf6aSTony Nguyen 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3858462acf6aSTony Nguyen }
3859462acf6aSTony Nguyen 
3860462acf6aSTony Nguyen /**
3861940b61afSAnirudh Venkataramanan  * ice_init_pf - Initialize general software structures (struct ice_pf)
3862940b61afSAnirudh Venkataramanan  * @pf: board private structure to initialize
3863940b61afSAnirudh Venkataramanan  */
386478b5713aSAnirudh Venkataramanan static int ice_init_pf(struct ice_pf *pf)
3865940b61afSAnirudh Venkataramanan {
3866462acf6aSTony Nguyen 	ice_set_pf_caps(pf);
3867940b61afSAnirudh Venkataramanan 
3868940b61afSAnirudh Venkataramanan 	mutex_init(&pf->sw_mutex);
3869b94b013eSDave Ertman 	mutex_init(&pf->tc_mutex);
3870486b9eeeSIvan Vecera 	mutex_init(&pf->adev_mutex);
3871d76a60baSAnirudh Venkataramanan 
3872d69ea414SJacob Keller 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3873d69ea414SJacob Keller 	spin_lock_init(&pf->aq_wait_lock);
3874d69ea414SJacob Keller 	init_waitqueue_head(&pf->aq_wait_queue);
3875d69ea414SJacob Keller 
38761c08052eSJacob Keller 	init_waitqueue_head(&pf->reset_wait_queue);
38771c08052eSJacob Keller 
3878940b61afSAnirudh Venkataramanan 	/* setup service timer and periodic service task */
3879940b61afSAnirudh Venkataramanan 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3880940b61afSAnirudh Venkataramanan 	pf->serv_tmr_period = HZ;
3881940b61afSAnirudh Venkataramanan 	INIT_WORK(&pf->serv_task, ice_service_task);
38827e408e07SAnirudh Venkataramanan 	clear_bit(ICE_SERVICE_SCHED, pf->state);
388378b5713aSAnirudh Venkataramanan 
3884462acf6aSTony Nguyen 	mutex_init(&pf->avail_q_mutex);
388578b5713aSAnirudh Venkataramanan 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
388678b5713aSAnirudh Venkataramanan 	if (!pf->avail_txqs)
388778b5713aSAnirudh Venkataramanan 		return -ENOMEM;
388878b5713aSAnirudh Venkataramanan 
388978b5713aSAnirudh Venkataramanan 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
389078b5713aSAnirudh Venkataramanan 	if (!pf->avail_rxqs) {
389159ac3255SMichal Swiatkowski 		bitmap_free(pf->avail_txqs);
389278b5713aSAnirudh Venkataramanan 		pf->avail_txqs = NULL;
389378b5713aSAnirudh Venkataramanan 		return -ENOMEM;
389478b5713aSAnirudh Venkataramanan 	}
389578b5713aSAnirudh Venkataramanan 
38963d5985a1SJacob Keller 	mutex_init(&pf->vfs.table_lock);
38973d5985a1SJacob Keller 	hash_init(pf->vfs.table);
3898dde7db63SJacob Keller 	ice_mbx_init_snapshot(&pf->hw);
38993d5985a1SJacob Keller 
390078b5713aSAnirudh Venkataramanan 	return 0;
3901940b61afSAnirudh Venkataramanan }
3902940b61afSAnirudh Venkataramanan 
3903940b61afSAnirudh Venkataramanan /**
3904ce462613STony Nguyen  * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
3905ce462613STony Nguyen  * @pf: board private structure
3906ce462613STony Nguyen  * @v_remain: number of remaining MSI-X vectors to be distributed
3907ce462613STony Nguyen  *
3908ce462613STony Nguyen  * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
3909ce462613STony Nguyen  * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
3910ce462613STony Nguyen  * remaining vectors.
3911ce462613STony Nguyen  */
3912ce462613STony Nguyen static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
3913ce462613STony Nguyen {
3914ce462613STony Nguyen 	int v_rdma;
3915ce462613STony Nguyen 
3916ce462613STony Nguyen 	if (!ice_is_rdma_ena(pf)) {
3917ce462613STony Nguyen 		pf->num_lan_msix = v_remain;
3918ce462613STony Nguyen 		return;
3919ce462613STony Nguyen 	}
3920ce462613STony Nguyen 
3921ce462613STony Nguyen 	/* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
3922ce462613STony Nguyen 	v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3923ce462613STony Nguyen 
3924ce462613STony Nguyen 	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
3925ce462613STony Nguyen 		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
3926ce462613STony Nguyen 		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3927ce462613STony Nguyen 
3928ce462613STony Nguyen 		pf->num_rdma_msix = 0;
3929ce462613STony Nguyen 		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3930ce462613STony Nguyen 	} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3931ce462613STony Nguyen 		   (v_remain - v_rdma < v_rdma)) {
3932ce462613STony Nguyen 		/* Support minimum RDMA and give remaining vectors to LAN MSIX */
3933ce462613STony Nguyen 		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
3934ce462613STony Nguyen 		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
3935ce462613STony Nguyen 	} else {
3936ce462613STony Nguyen 		/* Split remaining MSIX with RDMA after accounting for AEQ MSIX
3937ce462613STony Nguyen 		 */
3938ce462613STony Nguyen 		pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3939ce462613STony Nguyen 				    ICE_RDMA_NUM_AEQ_MSIX;
3940ce462613STony Nguyen 		pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3941ce462613STony Nguyen 	}
3942ce462613STony Nguyen }
3943ce462613STony Nguyen 
/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSIX vectors wanted and request from the OS. Adjust
 * device usage if there are not enough vectors. Return the number of vectors
 * reserved or negative on failure.
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
	struct device *dev = ice_pf_to_dev(pf);
	int err, i;

	/* total vectors the device exposes for this function */
	hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
	num_cpus = num_online_cpus();

	/* LAN miscellaneous handler (OICR) */
	v_other = ICE_MIN_LAN_OICR_MSIX;

	/* Flow Director */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		v_other += ICE_FDIR_MSIX;

	/* switchdev */
	v_other += ICE_ESWITCH_MSIX;

	v_wanted = v_other;

	/* LAN traffic: one vector per online CPU */
	pf->num_lan_msix = num_cpus;
	v_wanted += pf->num_lan_msix;

	/* RDMA auxiliary driver: per-CPU completion vectors plus AEQ */
	if (ice_is_rdma_ena(pf)) {
		pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
		v_wanted += pf->num_rdma_msix;
	}

	/* first trim against what the DEVICE can provide */
	if (v_wanted > hw_num_msix) {
		int v_remain;

		dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
			 v_wanted, hw_num_msix);

		if (hw_num_msix < ICE_MIN_MSIX) {
			err = -ERANGE;
			goto exit_err;
		}

		v_remain = hw_num_msix - v_other;
		if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
			/* squeeze "other" down to the bare minimum so LAN
			 * can still get its minimum Tx/Rx pair
			 */
			v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
			v_remain = ICE_MIN_LAN_TXRX_MSIX;
		}

		ice_reduce_msix_usage(pf, v_remain);
		v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;

		dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
			   pf->num_lan_msix);
		if (ice_is_rdma_ena(pf))
			dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
				   pf->num_rdma_msix);
	}

	pf->msix_entries = devm_kcalloc(dev, v_wanted,
					sizeof(*pf->msix_entries), GFP_KERNEL);
	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_wanted; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_wanted);
	if (v_actual < 0) {
		dev_err(dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	/* the OS may grant fewer than requested - redistribute again */
	if (v_actual < v_wanted) {
		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
			 v_wanted, v_actual);

		if (v_actual < ICE_MIN_MSIX) {
			/* error if we can't get minimum vectors */
			pci_disable_msix(pf->pdev);
			err = -ERANGE;
			goto msix_err;
		} else {
			int v_remain = v_actual - v_other;

			if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
				v_remain = ICE_MIN_LAN_TXRX_MSIX;

			ice_reduce_msix_usage(pf, v_remain);

			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
				   pf->num_lan_msix);

			if (ice_is_rdma_ena(pf))
				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
					   pf->num_rdma_msix);
		}
	}

	return v_actual;

msix_err:
	devm_kfree(dev, pf->msix_entries);

exit_err:
	pf->num_rdma_msix = 0;
	pf->num_lan_msix = 0;
	return err;
}
4065940b61afSAnirudh Venkataramanan 
4066940b61afSAnirudh Venkataramanan /**
4067940b61afSAnirudh Venkataramanan  * ice_dis_msix - Disable MSI-X interrupt setup in OS
4068940b61afSAnirudh Venkataramanan  * @pf: board private structure
4069940b61afSAnirudh Venkataramanan  */
4070940b61afSAnirudh Venkataramanan static void ice_dis_msix(struct ice_pf *pf)
4071940b61afSAnirudh Venkataramanan {
4072940b61afSAnirudh Venkataramanan 	pci_disable_msix(pf->pdev);
40734015d11eSBrett Creeley 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
4074940b61afSAnirudh Venkataramanan 	pf->msix_entries = NULL;
4075940b61afSAnirudh Venkataramanan }
4076940b61afSAnirudh Venkataramanan 
4077940b61afSAnirudh Venkataramanan /**
4078eb0208ecSPreethi Banala  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4079eb0208ecSPreethi Banala  * @pf: board private structure
4080eb0208ecSPreethi Banala  */
4081eb0208ecSPreethi Banala static void ice_clear_interrupt_scheme(struct ice_pf *pf)
4082eb0208ecSPreethi Banala {
4083eb0208ecSPreethi Banala 	ice_dis_msix(pf);
4084eb0208ecSPreethi Banala 
4085cbe66bfeSBrett Creeley 	if (pf->irq_tracker) {
40864015d11eSBrett Creeley 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
4087cbe66bfeSBrett Creeley 		pf->irq_tracker = NULL;
4088eb0208ecSPreethi Banala 	}
4089eb0208ecSPreethi Banala }
4090eb0208ecSPreethi Banala 
4091eb0208ecSPreethi Banala /**
4092940b61afSAnirudh Venkataramanan  * ice_init_interrupt_scheme - Determine proper interrupt scheme
4093940b61afSAnirudh Venkataramanan  * @pf: board private structure to initialize
4094940b61afSAnirudh Venkataramanan  */
4095940b61afSAnirudh Venkataramanan static int ice_init_interrupt_scheme(struct ice_pf *pf)
4096940b61afSAnirudh Venkataramanan {
4097cbe66bfeSBrett Creeley 	int vectors;
4098940b61afSAnirudh Venkataramanan 
4099940b61afSAnirudh Venkataramanan 	vectors = ice_ena_msix_range(pf);
4100940b61afSAnirudh Venkataramanan 
4101940b61afSAnirudh Venkataramanan 	if (vectors < 0)
4102940b61afSAnirudh Venkataramanan 		return vectors;
4103940b61afSAnirudh Venkataramanan 
4104940b61afSAnirudh Venkataramanan 	/* set up vector assignment tracking */
4105e94c0df9SGustavo A. R. Silva 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
4106e94c0df9SGustavo A. R. Silva 				       struct_size(pf->irq_tracker, list, vectors),
4107e94c0df9SGustavo A. R. Silva 				       GFP_KERNEL);
4108cbe66bfeSBrett Creeley 	if (!pf->irq_tracker) {
4109940b61afSAnirudh Venkataramanan 		ice_dis_msix(pf);
4110940b61afSAnirudh Venkataramanan 		return -ENOMEM;
4111940b61afSAnirudh Venkataramanan 	}
4112940b61afSAnirudh Venkataramanan 
4113eb0208ecSPreethi Banala 	/* populate SW interrupts pool with number of OS granted IRQs. */
411488865fc4SKarol Kolacinski 	pf->num_avail_sw_msix = (u16)vectors;
411588865fc4SKarol Kolacinski 	pf->irq_tracker->num_entries = (u16)vectors;
4116cbe66bfeSBrett Creeley 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
4117940b61afSAnirudh Venkataramanan 
4118940b61afSAnirudh Venkataramanan 	return 0;
4119940b61afSAnirudh Venkataramanan }
4120940b61afSAnirudh Venkataramanan 
4121940b61afSAnirudh Venkataramanan /**
412231765519SAnirudh Venkataramanan  * ice_is_wol_supported - check if WoL is supported
412331765519SAnirudh Venkataramanan  * @hw: pointer to hardware info
4124769c500dSAkeem G Abodunrin  *
4125769c500dSAkeem G Abodunrin  * Check if WoL is supported based on the HW configuration.
4126769c500dSAkeem G Abodunrin  * Returns true if NVM supports and enables WoL for this port, false otherwise
4127769c500dSAkeem G Abodunrin  */
412831765519SAnirudh Venkataramanan bool ice_is_wol_supported(struct ice_hw *hw)
4129769c500dSAkeem G Abodunrin {
4130769c500dSAkeem G Abodunrin 	u16 wol_ctrl;
4131769c500dSAkeem G Abodunrin 
4132769c500dSAkeem G Abodunrin 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4133769c500dSAkeem G Abodunrin 	 * word) indicates WoL is not supported on the corresponding PF ID.
4134769c500dSAkeem G Abodunrin 	 */
4135769c500dSAkeem G Abodunrin 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4136769c500dSAkeem G Abodunrin 		return false;
4137769c500dSAkeem G Abodunrin 
413831765519SAnirudh Venkataramanan 	return !(BIT(hw->port_info->lport) & wol_ctrl);
4139769c500dSAkeem G Abodunrin }
4140769c500dSAkeem G Abodunrin 
4141769c500dSAkeem G Abodunrin /**
414287324e74SHenry Tieman  * ice_vsi_recfg_qs - Change the number of queues on a VSI
414387324e74SHenry Tieman  * @vsi: VSI being changed
414487324e74SHenry Tieman  * @new_rx: new number of Rx queues
414587324e74SHenry Tieman  * @new_tx: new number of Tx queues
4146a6a0974aSDave Ertman  * @locked: is adev device_lock held
414787324e74SHenry Tieman  *
414887324e74SHenry Tieman  * Only change the number of queues if new_tx, or new_rx is non-0.
414987324e74SHenry Tieman  *
415087324e74SHenry Tieman  * Returns 0 on success.
415187324e74SHenry Tieman  */
4152a6a0974aSDave Ertman int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
415387324e74SHenry Tieman {
415487324e74SHenry Tieman 	struct ice_pf *pf = vsi->back;
415587324e74SHenry Tieman 	int err = 0, timeout = 50;
415687324e74SHenry Tieman 
415787324e74SHenry Tieman 	if (!new_rx && !new_tx)
415887324e74SHenry Tieman 		return -EINVAL;
415987324e74SHenry Tieman 
41607e408e07SAnirudh Venkataramanan 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
416187324e74SHenry Tieman 		timeout--;
416287324e74SHenry Tieman 		if (!timeout)
416387324e74SHenry Tieman 			return -EBUSY;
416487324e74SHenry Tieman 		usleep_range(1000, 2000);
416587324e74SHenry Tieman 	}
416687324e74SHenry Tieman 
416787324e74SHenry Tieman 	if (new_tx)
416888865fc4SKarol Kolacinski 		vsi->req_txq = (u16)new_tx;
416987324e74SHenry Tieman 	if (new_rx)
417088865fc4SKarol Kolacinski 		vsi->req_rxq = (u16)new_rx;
417187324e74SHenry Tieman 
417287324e74SHenry Tieman 	/* set for the next time the netdev is started */
417387324e74SHenry Tieman 	if (!netif_running(vsi->netdev)) {
41746624e780SMichal Swiatkowski 		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
417587324e74SHenry Tieman 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
417687324e74SHenry Tieman 		goto done;
417787324e74SHenry Tieman 	}
417887324e74SHenry Tieman 
417987324e74SHenry Tieman 	ice_vsi_close(vsi);
41806624e780SMichal Swiatkowski 	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4181a6a0974aSDave Ertman 	ice_pf_dcb_recfg(pf, locked);
418287324e74SHenry Tieman 	ice_vsi_open(vsi);
418387324e74SHenry Tieman done:
41847e408e07SAnirudh Venkataramanan 	clear_bit(ICE_CFG_BUSY, pf->state);
418587324e74SHenry Tieman 	return err;
418687324e74SHenry Tieman }
418787324e74SHenry Tieman 
418887324e74SHenry Tieman /**
4189cd1f56f4SBrett Creeley  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4190cd1f56f4SBrett Creeley  * @pf: PF to configure
4191cd1f56f4SBrett Creeley  *
4192cd1f56f4SBrett Creeley  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4193cd1f56f4SBrett Creeley  * VSI can still Tx/Rx VLAN tagged packets.
4194cd1f56f4SBrett Creeley  */
4195cd1f56f4SBrett Creeley static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4196cd1f56f4SBrett Creeley {
4197cd1f56f4SBrett Creeley 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4198cd1f56f4SBrett Creeley 	struct ice_vsi_ctx *ctxt;
4199cd1f56f4SBrett Creeley 	struct ice_hw *hw;
42005518ac2aSTony Nguyen 	int status;
4201cd1f56f4SBrett Creeley 
4202cd1f56f4SBrett Creeley 	if (!vsi)
4203cd1f56f4SBrett Creeley 		return;
4204cd1f56f4SBrett Creeley 
4205cd1f56f4SBrett Creeley 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4206cd1f56f4SBrett Creeley 	if (!ctxt)
4207cd1f56f4SBrett Creeley 		return;
4208cd1f56f4SBrett Creeley 
4209cd1f56f4SBrett Creeley 	hw = &pf->hw;
4210cd1f56f4SBrett Creeley 	ctxt->info = vsi->info;
4211cd1f56f4SBrett Creeley 
4212cd1f56f4SBrett Creeley 	ctxt->info.valid_sections =
4213cd1f56f4SBrett Creeley 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4214cd1f56f4SBrett Creeley 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4215cd1f56f4SBrett Creeley 			    ICE_AQ_VSI_PROP_SW_VALID);
4216cd1f56f4SBrett Creeley 
4217cd1f56f4SBrett Creeley 	/* disable VLAN anti-spoof */
4218cd1f56f4SBrett Creeley 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4219cd1f56f4SBrett Creeley 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4220cd1f56f4SBrett Creeley 
4221cd1f56f4SBrett Creeley 	/* disable VLAN pruning and keep all other settings */
4222cd1f56f4SBrett Creeley 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4223cd1f56f4SBrett Creeley 
4224cd1f56f4SBrett Creeley 	/* allow all VLANs on Tx and don't strip on Rx */
42257bd527aaSBrett Creeley 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
42267bd527aaSBrett Creeley 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4227cd1f56f4SBrett Creeley 
4228cd1f56f4SBrett Creeley 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4229cd1f56f4SBrett Creeley 	if (status) {
42305f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
42315518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
4232cd1f56f4SBrett Creeley 	} else {
4233cd1f56f4SBrett Creeley 		vsi->info.sec_flags = ctxt->info.sec_flags;
4234cd1f56f4SBrett Creeley 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
42357bd527aaSBrett Creeley 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4236cd1f56f4SBrett Creeley 	}
4237cd1f56f4SBrett Creeley 
4238cd1f56f4SBrett Creeley 	kfree(ctxt);
4239cd1f56f4SBrett Creeley }
4240cd1f56f4SBrett Creeley 
/**
 * ice_log_pkg_init - log result of DDP package load
 * @hw: pointer to hardware info
 * @state: state of package load
 *
 * Translate the DDP load result into a user-facing kernel log message.
 * Purely informational except for ICE_DDP_PKG_LOAD_ERROR, where it also
 * polls for the device reset triggered by the failed load to complete.
 */
static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
	struct ice_pf *pf = hw->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
			hw->active_pkg_name,
			hw->active_pkg_ver.major,
			hw->active_pkg_ver.minor,
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft,
			 hw->pkg_name,
			 hw->pkg_ver.major,
			 hw->pkg_ver.minor,
			 hw->pkg_ver.update,
			 hw->pkg_ver.draft);
		break;
	case ICE_DDP_PKG_FW_MISMATCH:
		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
		break;
	case ICE_DDP_PKG_INVALID_FILE:
		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_LOAD_ERROR:
		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
		/* poll for reset to complete */
		if (ice_check_reset(hw))
			dev_err(dev, "Error resetting device. Please reload the driver\n");
		break;
	case ICE_DDP_PKG_ERR:
	default:
		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
		break;
	}
}
4321462acf6aSTony Nguyen 
4322462acf6aSTony Nguyen /**
4323462acf6aSTony Nguyen  * ice_load_pkg - load/reload the DDP Package file
4324462acf6aSTony Nguyen  * @firmware: firmware structure when firmware requested or NULL for reload
4325462acf6aSTony Nguyen  * @pf: pointer to the PF instance
4326462acf6aSTony Nguyen  *
4327462acf6aSTony Nguyen  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4328462acf6aSTony Nguyen  * initialize HW tables.
4329462acf6aSTony Nguyen  */
4330462acf6aSTony Nguyen static void
4331462acf6aSTony Nguyen ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4332462acf6aSTony Nguyen {
4333247dd97dSWojciech Drewek 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
43344015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
4335462acf6aSTony Nguyen 	struct ice_hw *hw = &pf->hw;
4336462acf6aSTony Nguyen 
4337462acf6aSTony Nguyen 	/* Load DDP Package */
4338462acf6aSTony Nguyen 	if (firmware && !hw->pkg_copy) {
4339247dd97dSWojciech Drewek 		state = ice_copy_and_init_pkg(hw, firmware->data,
4340462acf6aSTony Nguyen 					      firmware->size);
4341247dd97dSWojciech Drewek 		ice_log_pkg_init(hw, state);
4342462acf6aSTony Nguyen 	} else if (!firmware && hw->pkg_copy) {
4343462acf6aSTony Nguyen 		/* Reload package during rebuild after CORER/GLOBR reset */
4344247dd97dSWojciech Drewek 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4345247dd97dSWojciech Drewek 		ice_log_pkg_init(hw, state);
4346462acf6aSTony Nguyen 	} else {
434719cce2c6SAnirudh Venkataramanan 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4348462acf6aSTony Nguyen 	}
4349462acf6aSTony Nguyen 
4350247dd97dSWojciech Drewek 	if (!ice_is_init_pkg_successful(state)) {
4351462acf6aSTony Nguyen 		/* Safe Mode */
4352462acf6aSTony Nguyen 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4353462acf6aSTony Nguyen 		return;
4354462acf6aSTony Nguyen 	}
4355462acf6aSTony Nguyen 
4356462acf6aSTony Nguyen 	/* Successful download package is the precondition for advanced
4357462acf6aSTony Nguyen 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4358462acf6aSTony Nguyen 	 */
4359462acf6aSTony Nguyen 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4360462acf6aSTony Nguyen }
4361462acf6aSTony Nguyen 
4362462acf6aSTony Nguyen /**
4363c585ea42SBrett Creeley  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4364c585ea42SBrett Creeley  * @pf: pointer to the PF structure
4365c585ea42SBrett Creeley  *
4366c585ea42SBrett Creeley  * There is no error returned here because the driver should be able to handle
4367c585ea42SBrett Creeley  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4368c585ea42SBrett Creeley  * specifically with Tx.
4369c585ea42SBrett Creeley  */
4370c585ea42SBrett Creeley static void ice_verify_cacheline_size(struct ice_pf *pf)
4371c585ea42SBrett Creeley {
4372c585ea42SBrett Creeley 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
437319cce2c6SAnirudh Venkataramanan 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4374c585ea42SBrett Creeley 			 ICE_CACHE_LINE_BYTES);
4375c585ea42SBrett Creeley }
4376c585ea42SBrett Creeley 
4377c585ea42SBrett Creeley /**
4378e3710a01SPaul M Stillwell Jr  * ice_send_version - update firmware with driver version
4379e3710a01SPaul M Stillwell Jr  * @pf: PF struct
4380e3710a01SPaul M Stillwell Jr  *
4381d54699e2STony Nguyen  * Returns 0 on success, else error code
4382e3710a01SPaul M Stillwell Jr  */
43835e24d598STony Nguyen static int ice_send_version(struct ice_pf *pf)
4384e3710a01SPaul M Stillwell Jr {
4385e3710a01SPaul M Stillwell Jr 	struct ice_driver_ver dv;
4386e3710a01SPaul M Stillwell Jr 
438734a2a3b8SJeff Kirsher 	dv.major_ver = 0xff;
438834a2a3b8SJeff Kirsher 	dv.minor_ver = 0xff;
438934a2a3b8SJeff Kirsher 	dv.build_ver = 0xff;
4390e3710a01SPaul M Stillwell Jr 	dv.subbuild_ver = 0;
439134a2a3b8SJeff Kirsher 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4392e3710a01SPaul M Stillwell Jr 		sizeof(dv.driver_string));
4393e3710a01SPaul M Stillwell Jr 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4394e3710a01SPaul M Stillwell Jr }
4395e3710a01SPaul M Stillwell Jr 
4396e3710a01SPaul M Stillwell Jr /**
4397148beb61SHenry Tieman  * ice_init_fdir - Initialize flow director VSI and configuration
4398148beb61SHenry Tieman  * @pf: pointer to the PF instance
4399148beb61SHenry Tieman  *
4400148beb61SHenry Tieman  * returns 0 on success, negative on error
4401148beb61SHenry Tieman  */
4402148beb61SHenry Tieman static int ice_init_fdir(struct ice_pf *pf)
4403148beb61SHenry Tieman {
4404148beb61SHenry Tieman 	struct device *dev = ice_pf_to_dev(pf);
4405148beb61SHenry Tieman 	struct ice_vsi *ctrl_vsi;
4406148beb61SHenry Tieman 	int err;
4407148beb61SHenry Tieman 
4408148beb61SHenry Tieman 	/* Side Band Flow Director needs to have a control VSI.
4409148beb61SHenry Tieman 	 * Allocate it and store it in the PF.
4410148beb61SHenry Tieman 	 */
4411148beb61SHenry Tieman 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4412148beb61SHenry Tieman 	if (!ctrl_vsi) {
4413148beb61SHenry Tieman 		dev_dbg(dev, "could not create control VSI\n");
4414148beb61SHenry Tieman 		return -ENOMEM;
4415148beb61SHenry Tieman 	}
4416148beb61SHenry Tieman 
4417148beb61SHenry Tieman 	err = ice_vsi_open_ctrl(ctrl_vsi);
4418148beb61SHenry Tieman 	if (err) {
4419148beb61SHenry Tieman 		dev_dbg(dev, "could not open control VSI\n");
4420148beb61SHenry Tieman 		goto err_vsi_open;
4421148beb61SHenry Tieman 	}
4422148beb61SHenry Tieman 
4423148beb61SHenry Tieman 	mutex_init(&pf->hw.fdir_fltr_lock);
4424148beb61SHenry Tieman 
4425148beb61SHenry Tieman 	err = ice_fdir_create_dflt_rules(pf);
4426148beb61SHenry Tieman 	if (err)
4427148beb61SHenry Tieman 		goto err_fdir_rule;
4428148beb61SHenry Tieman 
4429148beb61SHenry Tieman 	return 0;
4430148beb61SHenry Tieman 
4431148beb61SHenry Tieman err_fdir_rule:
4432148beb61SHenry Tieman 	ice_fdir_release_flows(&pf->hw);
4433148beb61SHenry Tieman 	ice_vsi_close(ctrl_vsi);
4434148beb61SHenry Tieman err_vsi_open:
4435148beb61SHenry Tieman 	ice_vsi_release(ctrl_vsi);
4436148beb61SHenry Tieman 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4437148beb61SHenry Tieman 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4438148beb61SHenry Tieman 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4439148beb61SHenry Tieman 	}
4440148beb61SHenry Tieman 	return err;
4441148beb61SHenry Tieman }
4442148beb61SHenry Tieman 
44435b246e53SMichal Swiatkowski static void ice_deinit_fdir(struct ice_pf *pf)
44445b246e53SMichal Swiatkowski {
44455b246e53SMichal Swiatkowski 	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
44465b246e53SMichal Swiatkowski 
44475b246e53SMichal Swiatkowski 	if (!vsi)
44485b246e53SMichal Swiatkowski 		return;
44495b246e53SMichal Swiatkowski 
44505b246e53SMichal Swiatkowski 	ice_vsi_manage_fdir(vsi, false);
44515b246e53SMichal Swiatkowski 	ice_vsi_release(vsi);
44525b246e53SMichal Swiatkowski 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
44535b246e53SMichal Swiatkowski 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
44545b246e53SMichal Swiatkowski 		pf->ctrl_vsi_idx = ICE_NO_VSI;
44555b246e53SMichal Swiatkowski 	}
44565b246e53SMichal Swiatkowski 
44575b246e53SMichal Swiatkowski 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
44585b246e53SMichal Swiatkowski }
44595b246e53SMichal Swiatkowski 
4460148beb61SHenry Tieman /**
4461462acf6aSTony Nguyen  * ice_get_opt_fw_name - return optional firmware file name or NULL
4462462acf6aSTony Nguyen  * @pf: pointer to the PF instance
4463462acf6aSTony Nguyen  */
4464462acf6aSTony Nguyen static char *ice_get_opt_fw_name(struct ice_pf *pf)
4465462acf6aSTony Nguyen {
4466462acf6aSTony Nguyen 	/* Optional firmware name same as default with additional dash
4467462acf6aSTony Nguyen 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4468462acf6aSTony Nguyen 	 */
4469462acf6aSTony Nguyen 	struct pci_dev *pdev = pf->pdev;
4470ceb2f007SJacob Keller 	char *opt_fw_filename;
4471ceb2f007SJacob Keller 	u64 dsn;
4472462acf6aSTony Nguyen 
4473462acf6aSTony Nguyen 	/* Determine the name of the optional file using the DSN (two
4474462acf6aSTony Nguyen 	 * dwords following the start of the DSN Capability).
4475462acf6aSTony Nguyen 	 */
4476ceb2f007SJacob Keller 	dsn = pci_get_dsn(pdev);
4477ceb2f007SJacob Keller 	if (!dsn)
4478ceb2f007SJacob Keller 		return NULL;
4479ceb2f007SJacob Keller 
4480462acf6aSTony Nguyen 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4481462acf6aSTony Nguyen 	if (!opt_fw_filename)
4482462acf6aSTony Nguyen 		return NULL;
4483462acf6aSTony Nguyen 
44841a9c561aSPaul M Stillwell Jr 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4485ceb2f007SJacob Keller 		 ICE_DDP_PKG_PATH, dsn);
4486462acf6aSTony Nguyen 
4487462acf6aSTony Nguyen 	return opt_fw_filename;
4488462acf6aSTony Nguyen }
4489462acf6aSTony Nguyen 
4490462acf6aSTony Nguyen /**
4491462acf6aSTony Nguyen  * ice_request_fw - Device initialization routine
4492462acf6aSTony Nguyen  * @pf: pointer to the PF instance
4493462acf6aSTony Nguyen  */
4494462acf6aSTony Nguyen static void ice_request_fw(struct ice_pf *pf)
4495462acf6aSTony Nguyen {
4496462acf6aSTony Nguyen 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4497462acf6aSTony Nguyen 	const struct firmware *firmware = NULL;
44984015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
4499462acf6aSTony Nguyen 	int err = 0;
4500462acf6aSTony Nguyen 
4501462acf6aSTony Nguyen 	/* optional device-specific DDP (if present) overrides the default DDP
4502462acf6aSTony Nguyen 	 * package file. kernel logs a debug message if the file doesn't exist,
4503462acf6aSTony Nguyen 	 * and warning messages for other errors.
4504462acf6aSTony Nguyen 	 */
4505462acf6aSTony Nguyen 	if (opt_fw_filename) {
4506462acf6aSTony Nguyen 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4507462acf6aSTony Nguyen 		if (err) {
4508462acf6aSTony Nguyen 			kfree(opt_fw_filename);
4509462acf6aSTony Nguyen 			goto dflt_pkg_load;
4510462acf6aSTony Nguyen 		}
4511462acf6aSTony Nguyen 
4512462acf6aSTony Nguyen 		/* request for firmware was successful. Download to device */
4513462acf6aSTony Nguyen 		ice_load_pkg(firmware, pf);
4514462acf6aSTony Nguyen 		kfree(opt_fw_filename);
4515462acf6aSTony Nguyen 		release_firmware(firmware);
4516462acf6aSTony Nguyen 		return;
4517462acf6aSTony Nguyen 	}
4518462acf6aSTony Nguyen 
4519462acf6aSTony Nguyen dflt_pkg_load:
4520462acf6aSTony Nguyen 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4521462acf6aSTony Nguyen 	if (err) {
452219cce2c6SAnirudh Venkataramanan 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4523462acf6aSTony Nguyen 		return;
4524462acf6aSTony Nguyen 	}
4525462acf6aSTony Nguyen 
4526462acf6aSTony Nguyen 	/* request for firmware was successful. Download to device */
4527462acf6aSTony Nguyen 	ice_load_pkg(firmware, pf);
4528462acf6aSTony Nguyen 	release_firmware(firmware);
4529462acf6aSTony Nguyen }
4530462acf6aSTony Nguyen 
4531462acf6aSTony Nguyen /**
4532769c500dSAkeem G Abodunrin  * ice_print_wake_reason - show the wake up cause in the log
4533769c500dSAkeem G Abodunrin  * @pf: pointer to the PF struct
4534769c500dSAkeem G Abodunrin  */
4535769c500dSAkeem G Abodunrin static void ice_print_wake_reason(struct ice_pf *pf)
4536769c500dSAkeem G Abodunrin {
4537769c500dSAkeem G Abodunrin 	u32 wus = pf->wakeup_reason;
4538769c500dSAkeem G Abodunrin 	const char *wake_str;
4539769c500dSAkeem G Abodunrin 
4540769c500dSAkeem G Abodunrin 	/* if no wake event, nothing to print */
4541769c500dSAkeem G Abodunrin 	if (!wus)
4542769c500dSAkeem G Abodunrin 		return;
4543769c500dSAkeem G Abodunrin 
4544769c500dSAkeem G Abodunrin 	if (wus & PFPM_WUS_LNKC_M)
4545769c500dSAkeem G Abodunrin 		wake_str = "Link\n";
4546769c500dSAkeem G Abodunrin 	else if (wus & PFPM_WUS_MAG_M)
4547769c500dSAkeem G Abodunrin 		wake_str = "Magic Packet\n";
4548769c500dSAkeem G Abodunrin 	else if (wus & PFPM_WUS_MNG_M)
4549769c500dSAkeem G Abodunrin 		wake_str = "Management\n";
4550769c500dSAkeem G Abodunrin 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4551769c500dSAkeem G Abodunrin 		wake_str = "Firmware Reset\n";
4552769c500dSAkeem G Abodunrin 	else
4553769c500dSAkeem G Abodunrin 		wake_str = "Unknown\n";
4554769c500dSAkeem G Abodunrin 
4555769c500dSAkeem G Abodunrin 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4556769c500dSAkeem G Abodunrin }
4557769c500dSAkeem G Abodunrin 
4558769c500dSAkeem G Abodunrin /**
4559418e5340SPaul M Stillwell Jr  * ice_register_netdev - register netdev
45605b246e53SMichal Swiatkowski  * @vsi: pointer to the VSI struct
45611e23f076SAnirudh Venkataramanan  */
45625b246e53SMichal Swiatkowski static int ice_register_netdev(struct ice_vsi *vsi)
45631e23f076SAnirudh Venkataramanan {
45645b246e53SMichal Swiatkowski 	int err;
45651e23f076SAnirudh Venkataramanan 
45661e23f076SAnirudh Venkataramanan 	if (!vsi || !vsi->netdev)
45671e23f076SAnirudh Venkataramanan 		return -EIO;
45681e23f076SAnirudh Venkataramanan 
45691e23f076SAnirudh Venkataramanan 	err = register_netdev(vsi->netdev);
45701e23f076SAnirudh Venkataramanan 	if (err)
45715b246e53SMichal Swiatkowski 		return err;
45721e23f076SAnirudh Venkataramanan 
4573a476d72aSAnirudh Venkataramanan 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
45741e23f076SAnirudh Venkataramanan 	netif_carrier_off(vsi->netdev);
45751e23f076SAnirudh Venkataramanan 	netif_tx_stop_all_queues(vsi->netdev);
45761e23f076SAnirudh Venkataramanan 
45771e23f076SAnirudh Venkataramanan 	return 0;
45785b246e53SMichal Swiatkowski }
45795b246e53SMichal Swiatkowski 
45805b246e53SMichal Swiatkowski static void ice_unregister_netdev(struct ice_vsi *vsi)
45815b246e53SMichal Swiatkowski {
45825b246e53SMichal Swiatkowski 	if (!vsi || !vsi->netdev)
45835b246e53SMichal Swiatkowski 		return;
45845b246e53SMichal Swiatkowski 
45855b246e53SMichal Swiatkowski 	unregister_netdev(vsi->netdev);
45865b246e53SMichal Swiatkowski 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
45875b246e53SMichal Swiatkowski }
45885b246e53SMichal Swiatkowski 
45895b246e53SMichal Swiatkowski /**
45905b246e53SMichal Swiatkowski  * ice_cfg_netdev - Allocate, configure and register a netdev
45915b246e53SMichal Swiatkowski  * @vsi: the VSI associated with the new netdev
45925b246e53SMichal Swiatkowski  *
45935b246e53SMichal Swiatkowski  * Returns 0 on success, negative value on failure
45945b246e53SMichal Swiatkowski  */
45955b246e53SMichal Swiatkowski static int ice_cfg_netdev(struct ice_vsi *vsi)
45965b246e53SMichal Swiatkowski {
45975b246e53SMichal Swiatkowski 	struct ice_netdev_priv *np;
45985b246e53SMichal Swiatkowski 	struct net_device *netdev;
45995b246e53SMichal Swiatkowski 	u8 mac_addr[ETH_ALEN];
46005b246e53SMichal Swiatkowski 
46015b246e53SMichal Swiatkowski 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
46025b246e53SMichal Swiatkowski 				    vsi->alloc_rxq);
46035b246e53SMichal Swiatkowski 	if (!netdev)
46045b246e53SMichal Swiatkowski 		return -ENOMEM;
46055b246e53SMichal Swiatkowski 
46065b246e53SMichal Swiatkowski 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
46075b246e53SMichal Swiatkowski 	vsi->netdev = netdev;
46085b246e53SMichal Swiatkowski 	np = netdev_priv(netdev);
46095b246e53SMichal Swiatkowski 	np->vsi = vsi;
46105b246e53SMichal Swiatkowski 
46115b246e53SMichal Swiatkowski 	ice_set_netdev_features(netdev);
4612b6a4103cSLorenzo Bianconi 	ice_set_ops(vsi);
46135b246e53SMichal Swiatkowski 
46145b246e53SMichal Swiatkowski 	if (vsi->type == ICE_VSI_PF) {
46155b246e53SMichal Swiatkowski 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
46165b246e53SMichal Swiatkowski 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
46175b246e53SMichal Swiatkowski 		eth_hw_addr_set(netdev, mac_addr);
46185b246e53SMichal Swiatkowski 	}
46195b246e53SMichal Swiatkowski 
46205b246e53SMichal Swiatkowski 	netdev->priv_flags |= IFF_UNICAST_FLT;
46215b246e53SMichal Swiatkowski 
46225b246e53SMichal Swiatkowski 	/* Setup netdev TC information */
46235b246e53SMichal Swiatkowski 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
46245b246e53SMichal Swiatkowski 
46255b246e53SMichal Swiatkowski 	netdev->max_mtu = ICE_MAX_MTU;
46265b246e53SMichal Swiatkowski 
46275b246e53SMichal Swiatkowski 	return 0;
46285b246e53SMichal Swiatkowski }
46295b246e53SMichal Swiatkowski 
46305b246e53SMichal Swiatkowski static void ice_decfg_netdev(struct ice_vsi *vsi)
46315b246e53SMichal Swiatkowski {
46325b246e53SMichal Swiatkowski 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
46331e23f076SAnirudh Venkataramanan 	free_netdev(vsi->netdev);
46341e23f076SAnirudh Venkataramanan 	vsi->netdev = NULL;
46355b246e53SMichal Swiatkowski }
46365b246e53SMichal Swiatkowski 
46375b246e53SMichal Swiatkowski static int ice_start_eth(struct ice_vsi *vsi)
46385b246e53SMichal Swiatkowski {
46395b246e53SMichal Swiatkowski 	int err;
46405b246e53SMichal Swiatkowski 
46415b246e53SMichal Swiatkowski 	err = ice_init_mac_fltr(vsi->back);
46425b246e53SMichal Swiatkowski 	if (err)
46431e23f076SAnirudh Venkataramanan 		return err;
46445b246e53SMichal Swiatkowski 
46455b246e53SMichal Swiatkowski 	rtnl_lock();
46465b246e53SMichal Swiatkowski 	err = ice_vsi_open(vsi);
46475b246e53SMichal Swiatkowski 	rtnl_unlock();
46485b246e53SMichal Swiatkowski 
46495b246e53SMichal Swiatkowski 	return err;
46505b246e53SMichal Swiatkowski }
46515b246e53SMichal Swiatkowski 
46527d46c0e6SMichal Swiatkowski static void ice_stop_eth(struct ice_vsi *vsi)
46537d46c0e6SMichal Swiatkowski {
46547d46c0e6SMichal Swiatkowski 	ice_fltr_remove_all(vsi);
46557d46c0e6SMichal Swiatkowski 	ice_vsi_close(vsi);
46567d46c0e6SMichal Swiatkowski }
46577d46c0e6SMichal Swiatkowski 
46585b246e53SMichal Swiatkowski static int ice_init_eth(struct ice_pf *pf)
46595b246e53SMichal Swiatkowski {
46605b246e53SMichal Swiatkowski 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
46615b246e53SMichal Swiatkowski 	int err;
46625b246e53SMichal Swiatkowski 
46635b246e53SMichal Swiatkowski 	if (!vsi)
46645b246e53SMichal Swiatkowski 		return -EINVAL;
46655b246e53SMichal Swiatkowski 
46665b246e53SMichal Swiatkowski 	/* init channel list */
46675b246e53SMichal Swiatkowski 	INIT_LIST_HEAD(&vsi->ch_list);
46685b246e53SMichal Swiatkowski 
46695b246e53SMichal Swiatkowski 	err = ice_cfg_netdev(vsi);
46705b246e53SMichal Swiatkowski 	if (err)
46715b246e53SMichal Swiatkowski 		return err;
46725b246e53SMichal Swiatkowski 	/* Setup DCB netlink interface */
46735b246e53SMichal Swiatkowski 	ice_dcbnl_setup(vsi);
46745b246e53SMichal Swiatkowski 
46755b246e53SMichal Swiatkowski 	err = ice_init_mac_fltr(pf);
46765b246e53SMichal Swiatkowski 	if (err)
46775b246e53SMichal Swiatkowski 		goto err_init_mac_fltr;
46785b246e53SMichal Swiatkowski 
46795b246e53SMichal Swiatkowski 	err = ice_devlink_create_pf_port(pf);
46805b246e53SMichal Swiatkowski 	if (err)
46815b246e53SMichal Swiatkowski 		goto err_devlink_create_pf_port;
46825b246e53SMichal Swiatkowski 
46835b246e53SMichal Swiatkowski 	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
46845b246e53SMichal Swiatkowski 
46855b246e53SMichal Swiatkowski 	err = ice_register_netdev(vsi);
46865b246e53SMichal Swiatkowski 	if (err)
46875b246e53SMichal Swiatkowski 		goto err_register_netdev;
46885b246e53SMichal Swiatkowski 
46895b246e53SMichal Swiatkowski 	err = ice_tc_indir_block_register(vsi);
46905b246e53SMichal Swiatkowski 	if (err)
46915b246e53SMichal Swiatkowski 		goto err_tc_indir_block_register;
46925b246e53SMichal Swiatkowski 
46935b246e53SMichal Swiatkowski 	ice_napi_add(vsi);
46945b246e53SMichal Swiatkowski 
46955b246e53SMichal Swiatkowski 	return 0;
46965b246e53SMichal Swiatkowski 
46975b246e53SMichal Swiatkowski err_tc_indir_block_register:
46985b246e53SMichal Swiatkowski 	ice_unregister_netdev(vsi);
46995b246e53SMichal Swiatkowski err_register_netdev:
47005b246e53SMichal Swiatkowski 	ice_devlink_destroy_pf_port(pf);
47015b246e53SMichal Swiatkowski err_devlink_create_pf_port:
47025b246e53SMichal Swiatkowski err_init_mac_fltr:
47035b246e53SMichal Swiatkowski 	ice_decfg_netdev(vsi);
47045b246e53SMichal Swiatkowski 	return err;
47055b246e53SMichal Swiatkowski }
47065b246e53SMichal Swiatkowski 
47075b246e53SMichal Swiatkowski static void ice_deinit_eth(struct ice_pf *pf)
47085b246e53SMichal Swiatkowski {
47095b246e53SMichal Swiatkowski 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
47105b246e53SMichal Swiatkowski 
47115b246e53SMichal Swiatkowski 	if (!vsi)
47125b246e53SMichal Swiatkowski 		return;
47135b246e53SMichal Swiatkowski 
47145b246e53SMichal Swiatkowski 	ice_vsi_close(vsi);
47155b246e53SMichal Swiatkowski 	ice_unregister_netdev(vsi);
47165b246e53SMichal Swiatkowski 	ice_devlink_destroy_pf_port(pf);
47175b246e53SMichal Swiatkowski 	ice_tc_indir_block_unregister(vsi);
47185b246e53SMichal Swiatkowski 	ice_decfg_netdev(vsi);
47195b246e53SMichal Swiatkowski }
47205b246e53SMichal Swiatkowski 
47215b246e53SMichal Swiatkowski static int ice_init_dev(struct ice_pf *pf)
47225b246e53SMichal Swiatkowski {
47235b246e53SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(pf);
47245b246e53SMichal Swiatkowski 	struct ice_hw *hw = &pf->hw;
47255b246e53SMichal Swiatkowski 	int err;
47265b246e53SMichal Swiatkowski 
47275b246e53SMichal Swiatkowski 	err = ice_init_hw(hw);
47285b246e53SMichal Swiatkowski 	if (err) {
47295b246e53SMichal Swiatkowski 		dev_err(dev, "ice_init_hw failed: %d\n", err);
47305b246e53SMichal Swiatkowski 		return err;
47315b246e53SMichal Swiatkowski 	}
47325b246e53SMichal Swiatkowski 
47335b246e53SMichal Swiatkowski 	ice_init_feature_support(pf);
47345b246e53SMichal Swiatkowski 
47355b246e53SMichal Swiatkowski 	ice_request_fw(pf);
47365b246e53SMichal Swiatkowski 
47375b246e53SMichal Swiatkowski 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
47385b246e53SMichal Swiatkowski 	 * set in pf->state, which will cause ice_is_safe_mode to return
47395b246e53SMichal Swiatkowski 	 * true
47405b246e53SMichal Swiatkowski 	 */
47415b246e53SMichal Swiatkowski 	if (ice_is_safe_mode(pf)) {
47425b246e53SMichal Swiatkowski 		/* we already got function/device capabilities but these don't
47435b246e53SMichal Swiatkowski 		 * reflect what the driver needs to do in safe mode. Instead of
47445b246e53SMichal Swiatkowski 		 * adding conditional logic everywhere to ignore these
47455b246e53SMichal Swiatkowski 		 * device/function capabilities, override them.
47465b246e53SMichal Swiatkowski 		 */
47475b246e53SMichal Swiatkowski 		ice_set_safe_mode_caps(hw);
47485b246e53SMichal Swiatkowski 	}
47495b246e53SMichal Swiatkowski 
47505b246e53SMichal Swiatkowski 	err = ice_init_pf(pf);
47515b246e53SMichal Swiatkowski 	if (err) {
47525b246e53SMichal Swiatkowski 		dev_err(dev, "ice_init_pf failed: %d\n", err);
47535b246e53SMichal Swiatkowski 		goto err_init_pf;
47545b246e53SMichal Swiatkowski 	}
47555b246e53SMichal Swiatkowski 
47565b246e53SMichal Swiatkowski 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
47575b246e53SMichal Swiatkowski 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
47585b246e53SMichal Swiatkowski 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
47595b246e53SMichal Swiatkowski 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
47605b246e53SMichal Swiatkowski 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
47615b246e53SMichal Swiatkowski 		pf->hw.udp_tunnel_nic.tables[0].n_entries =
47625b246e53SMichal Swiatkowski 			pf->hw.tnl.valid_count[TNL_VXLAN];
47635b246e53SMichal Swiatkowski 		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
47645b246e53SMichal Swiatkowski 			UDP_TUNNEL_TYPE_VXLAN;
47655b246e53SMichal Swiatkowski 	}
47665b246e53SMichal Swiatkowski 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
47675b246e53SMichal Swiatkowski 		pf->hw.udp_tunnel_nic.tables[1].n_entries =
47685b246e53SMichal Swiatkowski 			pf->hw.tnl.valid_count[TNL_GENEVE];
47695b246e53SMichal Swiatkowski 		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
47705b246e53SMichal Swiatkowski 			UDP_TUNNEL_TYPE_GENEVE;
47715b246e53SMichal Swiatkowski 	}
47725b246e53SMichal Swiatkowski 
47735b246e53SMichal Swiatkowski 	err = ice_init_interrupt_scheme(pf);
47745b246e53SMichal Swiatkowski 	if (err) {
47755b246e53SMichal Swiatkowski 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
47765b246e53SMichal Swiatkowski 		err = -EIO;
47775b246e53SMichal Swiatkowski 		goto err_init_interrupt_scheme;
47785b246e53SMichal Swiatkowski 	}
47795b246e53SMichal Swiatkowski 
47805b246e53SMichal Swiatkowski 	/* In case of MSIX we are going to setup the misc vector right here
47815b246e53SMichal Swiatkowski 	 * to handle admin queue events etc. In case of legacy and MSI
47825b246e53SMichal Swiatkowski 	 * the misc functionality and queue processing is combined in
47835b246e53SMichal Swiatkowski 	 * the same vector and that gets setup at open.
47845b246e53SMichal Swiatkowski 	 */
47855b246e53SMichal Swiatkowski 	err = ice_req_irq_msix_misc(pf);
47865b246e53SMichal Swiatkowski 	if (err) {
47875b246e53SMichal Swiatkowski 		dev_err(dev, "setup of misc vector failed: %d\n", err);
47885b246e53SMichal Swiatkowski 		goto err_req_irq_msix_misc;
47895b246e53SMichal Swiatkowski 	}
47905b246e53SMichal Swiatkowski 
47915b246e53SMichal Swiatkowski 	return 0;
47925b246e53SMichal Swiatkowski 
47935b246e53SMichal Swiatkowski err_req_irq_msix_misc:
47945b246e53SMichal Swiatkowski 	ice_clear_interrupt_scheme(pf);
47955b246e53SMichal Swiatkowski err_init_interrupt_scheme:
47965b246e53SMichal Swiatkowski 	ice_deinit_pf(pf);
47975b246e53SMichal Swiatkowski err_init_pf:
47985b246e53SMichal Swiatkowski 	ice_deinit_hw(hw);
47995b246e53SMichal Swiatkowski 	return err;
48005b246e53SMichal Swiatkowski }
48015b246e53SMichal Swiatkowski 
48025b246e53SMichal Swiatkowski static void ice_deinit_dev(struct ice_pf *pf)
48035b246e53SMichal Swiatkowski {
48045b246e53SMichal Swiatkowski 	ice_free_irq_msix_misc(pf);
48055b246e53SMichal Swiatkowski 	ice_deinit_pf(pf);
48065b246e53SMichal Swiatkowski 	ice_deinit_hw(&pf->hw);
4807*24b454bcSJakub Buchocki 
4808*24b454bcSJakub Buchocki 	/* Service task is already stopped, so call reset directly. */
4809*24b454bcSJakub Buchocki 	ice_reset(&pf->hw, ICE_RESET_PFR);
4810*24b454bcSJakub Buchocki 	pci_wait_for_pending_transaction(pf->pdev);
4811*24b454bcSJakub Buchocki 	ice_clear_interrupt_scheme(pf);
48125b246e53SMichal Swiatkowski }
48135b246e53SMichal Swiatkowski 
48145b246e53SMichal Swiatkowski static void ice_init_features(struct ice_pf *pf)
48155b246e53SMichal Swiatkowski {
48165b246e53SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(pf);
48175b246e53SMichal Swiatkowski 
48185b246e53SMichal Swiatkowski 	if (ice_is_safe_mode(pf))
48195b246e53SMichal Swiatkowski 		return;
48205b246e53SMichal Swiatkowski 
48215b246e53SMichal Swiatkowski 	/* initialize DDP driven features */
48225b246e53SMichal Swiatkowski 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
48235b246e53SMichal Swiatkowski 		ice_ptp_init(pf);
48245b246e53SMichal Swiatkowski 
48255b246e53SMichal Swiatkowski 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
48265b246e53SMichal Swiatkowski 		ice_gnss_init(pf);
48275b246e53SMichal Swiatkowski 
48285b246e53SMichal Swiatkowski 	/* Note: Flow director init failure is non-fatal to load */
48295b246e53SMichal Swiatkowski 	if (ice_init_fdir(pf))
48305b246e53SMichal Swiatkowski 		dev_err(dev, "could not initialize flow director\n");
48315b246e53SMichal Swiatkowski 
48325b246e53SMichal Swiatkowski 	/* Note: DCB init failure is non-fatal to load */
48335b246e53SMichal Swiatkowski 	if (ice_init_pf_dcb(pf, false)) {
48345b246e53SMichal Swiatkowski 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
48355b246e53SMichal Swiatkowski 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
48365b246e53SMichal Swiatkowski 	} else {
48375b246e53SMichal Swiatkowski 		ice_cfg_lldp_mib_change(&pf->hw, true);
48385b246e53SMichal Swiatkowski 	}
48395b246e53SMichal Swiatkowski 
48405b246e53SMichal Swiatkowski 	if (ice_init_lag(pf))
48415b246e53SMichal Swiatkowski 		dev_warn(dev, "Failed to init link aggregation support\n");
48425b246e53SMichal Swiatkowski }
48435b246e53SMichal Swiatkowski 
48445b246e53SMichal Swiatkowski static void ice_deinit_features(struct ice_pf *pf)
48455b246e53SMichal Swiatkowski {
48465b246e53SMichal Swiatkowski 	ice_deinit_lag(pf);
48475b246e53SMichal Swiatkowski 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
48485b246e53SMichal Swiatkowski 		ice_cfg_lldp_mib_change(&pf->hw, false);
48495b246e53SMichal Swiatkowski 	ice_deinit_fdir(pf);
48505b246e53SMichal Swiatkowski 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
48515b246e53SMichal Swiatkowski 		ice_gnss_exit(pf);
48525b246e53SMichal Swiatkowski 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
48535b246e53SMichal Swiatkowski 		ice_ptp_release(pf);
48545b246e53SMichal Swiatkowski }
48555b246e53SMichal Swiatkowski 
48565b246e53SMichal Swiatkowski static void ice_init_wakeup(struct ice_pf *pf)
48575b246e53SMichal Swiatkowski {
48585b246e53SMichal Swiatkowski 	/* Save wakeup reason register for later use */
48595b246e53SMichal Swiatkowski 	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
48605b246e53SMichal Swiatkowski 
48615b246e53SMichal Swiatkowski 	/* check for a power management event */
48625b246e53SMichal Swiatkowski 	ice_print_wake_reason(pf);
48635b246e53SMichal Swiatkowski 
48645b246e53SMichal Swiatkowski 	/* clear wake status, all bits */
48655b246e53SMichal Swiatkowski 	wr32(&pf->hw, PFPM_WUS, U32_MAX);
48665b246e53SMichal Swiatkowski 
48675b246e53SMichal Swiatkowski 	/* Disable WoL at init, wait for user to enable */
48685b246e53SMichal Swiatkowski 	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
48695b246e53SMichal Swiatkowski }
48705b246e53SMichal Swiatkowski 
48715b246e53SMichal Swiatkowski static int ice_init_link(struct ice_pf *pf)
48725b246e53SMichal Swiatkowski {
48735b246e53SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(pf);
48745b246e53SMichal Swiatkowski 	int err;
48755b246e53SMichal Swiatkowski 
48765b246e53SMichal Swiatkowski 	err = ice_init_link_events(pf->hw.port_info);
48775b246e53SMichal Swiatkowski 	if (err) {
48785b246e53SMichal Swiatkowski 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
48795b246e53SMichal Swiatkowski 		return err;
48805b246e53SMichal Swiatkowski 	}
48815b246e53SMichal Swiatkowski 
48825b246e53SMichal Swiatkowski 	/* not a fatal error if this fails */
48835b246e53SMichal Swiatkowski 	err = ice_init_nvm_phy_type(pf->hw.port_info);
48845b246e53SMichal Swiatkowski 	if (err)
48855b246e53SMichal Swiatkowski 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
48865b246e53SMichal Swiatkowski 
48875b246e53SMichal Swiatkowski 	/* not a fatal error if this fails */
48885b246e53SMichal Swiatkowski 	err = ice_update_link_info(pf->hw.port_info);
48895b246e53SMichal Swiatkowski 	if (err)
48905b246e53SMichal Swiatkowski 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
48915b246e53SMichal Swiatkowski 
48925b246e53SMichal Swiatkowski 	ice_init_link_dflt_override(pf->hw.port_info);
48935b246e53SMichal Swiatkowski 
48945b246e53SMichal Swiatkowski 	ice_check_link_cfg_err(pf,
48955b246e53SMichal Swiatkowski 			       pf->hw.port_info->phy.link_info.link_cfg_err);
48965b246e53SMichal Swiatkowski 
48975b246e53SMichal Swiatkowski 	/* if media available, initialize PHY settings */
48985b246e53SMichal Swiatkowski 	if (pf->hw.port_info->phy.link_info.link_info &
48995b246e53SMichal Swiatkowski 	    ICE_AQ_MEDIA_AVAILABLE) {
49005b246e53SMichal Swiatkowski 		/* not a fatal error if this fails */
49015b246e53SMichal Swiatkowski 		err = ice_init_phy_user_cfg(pf->hw.port_info);
49025b246e53SMichal Swiatkowski 		if (err)
49035b246e53SMichal Swiatkowski 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
49045b246e53SMichal Swiatkowski 
49055b246e53SMichal Swiatkowski 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
49065b246e53SMichal Swiatkowski 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
49075b246e53SMichal Swiatkowski 
49085b246e53SMichal Swiatkowski 			if (vsi)
49095b246e53SMichal Swiatkowski 				ice_configure_phy(vsi);
49105b246e53SMichal Swiatkowski 		}
49115b246e53SMichal Swiatkowski 	} else {
49125b246e53SMichal Swiatkowski 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
49135b246e53SMichal Swiatkowski 	}
49145b246e53SMichal Swiatkowski 
49155b246e53SMichal Swiatkowski 	return err;
49165b246e53SMichal Swiatkowski }
49175b246e53SMichal Swiatkowski 
49185b246e53SMichal Swiatkowski static int ice_init_pf_sw(struct ice_pf *pf)
49195b246e53SMichal Swiatkowski {
49205b246e53SMichal Swiatkowski 	bool dvm = ice_is_dvm_ena(&pf->hw);
49215b246e53SMichal Swiatkowski 	struct ice_vsi *vsi;
49225b246e53SMichal Swiatkowski 	int err;
49235b246e53SMichal Swiatkowski 
49245b246e53SMichal Swiatkowski 	/* create switch struct for the switch element created by FW on boot */
49255b246e53SMichal Swiatkowski 	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
49265b246e53SMichal Swiatkowski 	if (!pf->first_sw)
49275b246e53SMichal Swiatkowski 		return -ENOMEM;
49285b246e53SMichal Swiatkowski 
49295b246e53SMichal Swiatkowski 	if (pf->hw.evb_veb)
49305b246e53SMichal Swiatkowski 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
49315b246e53SMichal Swiatkowski 	else
49325b246e53SMichal Swiatkowski 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
49335b246e53SMichal Swiatkowski 
49345b246e53SMichal Swiatkowski 	pf->first_sw->pf = pf;
49355b246e53SMichal Swiatkowski 
49365b246e53SMichal Swiatkowski 	/* record the sw_id available for later use */
49375b246e53SMichal Swiatkowski 	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
49385b246e53SMichal Swiatkowski 
49395b246e53SMichal Swiatkowski 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
49405b246e53SMichal Swiatkowski 	if (err)
49415b246e53SMichal Swiatkowski 		goto err_aq_set_port_params;
49425b246e53SMichal Swiatkowski 
49435b246e53SMichal Swiatkowski 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
49445b246e53SMichal Swiatkowski 	if (!vsi) {
49455b246e53SMichal Swiatkowski 		err = -ENOMEM;
49465b246e53SMichal Swiatkowski 		goto err_pf_vsi_setup;
49475b246e53SMichal Swiatkowski 	}
49485b246e53SMichal Swiatkowski 
49495b246e53SMichal Swiatkowski 	return 0;
49505b246e53SMichal Swiatkowski 
49515b246e53SMichal Swiatkowski err_pf_vsi_setup:
49525b246e53SMichal Swiatkowski err_aq_set_port_params:
49535b246e53SMichal Swiatkowski 	kfree(pf->first_sw);
49545b246e53SMichal Swiatkowski 	return err;
49555b246e53SMichal Swiatkowski }
49565b246e53SMichal Swiatkowski 
49575b246e53SMichal Swiatkowski static void ice_deinit_pf_sw(struct ice_pf *pf)
49585b246e53SMichal Swiatkowski {
49595b246e53SMichal Swiatkowski 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
49605b246e53SMichal Swiatkowski 
49615b246e53SMichal Swiatkowski 	if (!vsi)
49625b246e53SMichal Swiatkowski 		return;
49635b246e53SMichal Swiatkowski 
49645b246e53SMichal Swiatkowski 	ice_vsi_release(vsi);
49655b246e53SMichal Swiatkowski 	kfree(pf->first_sw);
49665b246e53SMichal Swiatkowski }
49675b246e53SMichal Swiatkowski 
49685b246e53SMichal Swiatkowski static int ice_alloc_vsis(struct ice_pf *pf)
49695b246e53SMichal Swiatkowski {
49705b246e53SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(pf);
49715b246e53SMichal Swiatkowski 
49725b246e53SMichal Swiatkowski 	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
49735b246e53SMichal Swiatkowski 	if (!pf->num_alloc_vsi)
49745b246e53SMichal Swiatkowski 		return -EIO;
49755b246e53SMichal Swiatkowski 
49765b246e53SMichal Swiatkowski 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
49775b246e53SMichal Swiatkowski 		dev_warn(dev,
49785b246e53SMichal Swiatkowski 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
49795b246e53SMichal Swiatkowski 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
49805b246e53SMichal Swiatkowski 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
49815b246e53SMichal Swiatkowski 	}
49825b246e53SMichal Swiatkowski 
49835b246e53SMichal Swiatkowski 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
49845b246e53SMichal Swiatkowski 			       GFP_KERNEL);
49855b246e53SMichal Swiatkowski 	if (!pf->vsi)
49865b246e53SMichal Swiatkowski 		return -ENOMEM;
49875b246e53SMichal Swiatkowski 
49885b246e53SMichal Swiatkowski 	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
49895b246e53SMichal Swiatkowski 				     sizeof(*pf->vsi_stats), GFP_KERNEL);
49905b246e53SMichal Swiatkowski 	if (!pf->vsi_stats) {
49915b246e53SMichal Swiatkowski 		devm_kfree(dev, pf->vsi);
49925b246e53SMichal Swiatkowski 		return -ENOMEM;
49935b246e53SMichal Swiatkowski 	}
49945b246e53SMichal Swiatkowski 
49955b246e53SMichal Swiatkowski 	return 0;
49965b246e53SMichal Swiatkowski }
49975b246e53SMichal Swiatkowski 
49985b246e53SMichal Swiatkowski static void ice_dealloc_vsis(struct ice_pf *pf)
49995b246e53SMichal Swiatkowski {
50005b246e53SMichal Swiatkowski 	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
50015b246e53SMichal Swiatkowski 	pf->vsi_stats = NULL;
50025b246e53SMichal Swiatkowski 
50035b246e53SMichal Swiatkowski 	pf->num_alloc_vsi = 0;
50045b246e53SMichal Swiatkowski 	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
50055b246e53SMichal Swiatkowski 	pf->vsi = NULL;
50065b246e53SMichal Swiatkowski }
50075b246e53SMichal Swiatkowski 
/**
 * ice_init_devlink - register devlink params, regions and the instance
 * @pf: board private structure
 *
 * Return: 0 on success, the error from parameter registration otherwise.
 */
static int ice_init_devlink(struct ice_pf *pf)
{
	int err = ice_devlink_register_params(pf);

	if (err)
		return err;

	ice_devlink_init_regions(pf);
	ice_devlink_register(pf);

	return 0;
}
50215b246e53SMichal Swiatkowski 
/**
 * ice_deinit_devlink - tear down devlink state
 * @pf: board private structure
 *
 * Unwinds ice_init_devlink() in exact reverse order: instance, regions,
 * then parameters.
 */
static void ice_deinit_devlink(struct ice_pf *pf)
{
	ice_devlink_unregister(pf);
	ice_devlink_destroy_regions(pf);
	ice_devlink_unregister_params(pf);
}
50285b246e53SMichal Swiatkowski 
/**
 * ice_init - one-time PF bring-up: device, VSIs, switch, link, service task
 * @pf: board private structure
 *
 * Runs the core initialization sequence. On success the DOWN and
 * SERVICE_DIS state bits are cleared and the service timer is armed;
 * on failure every completed step is unwound through the goto chain.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ice_init(struct ice_pf *pf)
{
	int err;

	err = ice_init_dev(pf);
	if (err)
		return err;

	err = ice_alloc_vsis(pf);
	if (err)
		goto err_alloc_vsis;

	err = ice_init_pf_sw(pf);
	if (err)
		goto err_init_pf_sw;

	ice_init_wakeup(pf);

	err = ice_init_link(pf);
	if (err)
		goto err_init_link;

	/* report the running driver version to firmware; nothing new was
	 * set up since link init, so failure unwinds through the same label
	 */
	err = ice_send_version(pf);
	if (err)
		goto err_init_link;

	ice_verify_cacheline_size(pf);

	if (ice_is_safe_mode(pf))
		ice_set_safe_mode_vlan_cfg(pf);
	else
		/* print PCI link speed and width */
		pcie_print_link_status(pf->pdev);

	/* ready to go, so clear down state bit */
	clear_bit(ICE_DOWN, pf->state);
	clear_bit(ICE_SERVICE_DIS, pf->state);

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;

err_init_link:
	ice_deinit_pf_sw(pf);
err_init_pf_sw:
	ice_dealloc_vsis(pf);
err_alloc_vsis:
	ice_deinit_dev(pf);
	return err;
}
50805b246e53SMichal Swiatkowski 
/**
 * ice_deinit - unwind everything done by ice_init()
 * @pf: board private structure
 *
 * Marks the PF down and disables the service task first so no new work is
 * scheduled, then tears down the switch, VSI arrays and device in reverse
 * order of ice_init().
 */
static void ice_deinit(struct ice_pf *pf)
{
	set_bit(ICE_SERVICE_DIS, pf->state);
	set_bit(ICE_DOWN, pf->state);

	ice_deinit_pf_sw(pf);
	ice_dealloc_vsis(pf);
	ice_deinit_dev(pf);
}
50905b246e53SMichal Swiatkowski 
50915b246e53SMichal Swiatkowski /**
50925b246e53SMichal Swiatkowski  * ice_load - load pf by init hw and starting VSI
50935b246e53SMichal Swiatkowski  * @pf: pointer to the pf instance
50945b246e53SMichal Swiatkowski  */
50955b246e53SMichal Swiatkowski int ice_load(struct ice_pf *pf)
50965b246e53SMichal Swiatkowski {
50975e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
50985b246e53SMichal Swiatkowski 	struct ice_vsi *vsi;
50995b246e53SMichal Swiatkowski 	int err;
51005b246e53SMichal Swiatkowski 
51015b246e53SMichal Swiatkowski 	err = ice_init_dev(pf);
51025b246e53SMichal Swiatkowski 	if (err)
51035b246e53SMichal Swiatkowski 		return err;
51045b246e53SMichal Swiatkowski 
51055b246e53SMichal Swiatkowski 	vsi = ice_get_main_vsi(pf);
51065e509ab2SJacob Keller 
51075e509ab2SJacob Keller 	params = ice_vsi_to_params(vsi);
51085e509ab2SJacob Keller 	params.flags = ICE_VSI_FLAG_INIT;
51095e509ab2SJacob Keller 
51105e509ab2SJacob Keller 	err = ice_vsi_cfg(vsi, &params);
51115b246e53SMichal Swiatkowski 	if (err)
51125b246e53SMichal Swiatkowski 		goto err_vsi_cfg;
51135b246e53SMichal Swiatkowski 
51145b246e53SMichal Swiatkowski 	err = ice_start_eth(ice_get_main_vsi(pf));
51155b246e53SMichal Swiatkowski 	if (err)
51165b246e53SMichal Swiatkowski 		goto err_start_eth;
51175b246e53SMichal Swiatkowski 
51185b246e53SMichal Swiatkowski 	err = ice_init_rdma(pf);
51195b246e53SMichal Swiatkowski 	if (err)
51205b246e53SMichal Swiatkowski 		goto err_init_rdma;
51215b246e53SMichal Swiatkowski 
51225b246e53SMichal Swiatkowski 	ice_init_features(pf);
51235b246e53SMichal Swiatkowski 	ice_service_task_restart(pf);
51245b246e53SMichal Swiatkowski 
51255b246e53SMichal Swiatkowski 	clear_bit(ICE_DOWN, pf->state);
51265b246e53SMichal Swiatkowski 
51275b246e53SMichal Swiatkowski 	return 0;
51285b246e53SMichal Swiatkowski 
51295b246e53SMichal Swiatkowski err_init_rdma:
51305b246e53SMichal Swiatkowski 	ice_vsi_close(ice_get_main_vsi(pf));
51315b246e53SMichal Swiatkowski err_start_eth:
51325b246e53SMichal Swiatkowski 	ice_vsi_decfg(ice_get_main_vsi(pf));
51335b246e53SMichal Swiatkowski err_vsi_cfg:
51345b246e53SMichal Swiatkowski 	ice_deinit_dev(pf);
51355b246e53SMichal Swiatkowski 	return err;
51365b246e53SMichal Swiatkowski }
51375b246e53SMichal Swiatkowski 
/**
 * ice_unload - unload pf by stopping VSI and deinit hw
 * @pf: pointer to the pf instance
 *
 * Reverse of ice_load(): features, RDMA, main-VSI traffic and config,
 * then the device itself.
 */
void ice_unload(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

	ice_deinit_features(pf);
	ice_deinit_rdma(pf);
	ice_stop_eth(vsi);
	ice_vsi_decfg(vsi);
	ice_deinit_dev(pf);
}
51501e23f076SAnirudh Venkataramanan 
/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	/* refuse to bind to a VF; VFs are driven by iavf, not this driver */
	if (pdev->is_virtfn) {
		dev_err(dev, "can't probe a virtual function\n");
		return -EINVAL;
	}

	/* this driver uses devres, see
	 * Documentation/driver-api/driver-model/devres.rst
	 */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
	if (err) {
		dev_err(dev, "BAR0 I/O map error %d\n", err);
		return err;
	}

	/* pf is devres-managed; no explicit free needed on later failures */
	pf = ice_allocate_pf(dev);
	if (!pf)
		return -ENOMEM;

	/* initialize Auxiliary index to invalid value */
	pf->aux_idx = -1;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_set_master(pdev);

	pf->pdev = pdev;
	pci_set_drvdata(pdev, pf);
	set_bit(ICE_DOWN, pf->state);
	/* Disable service task until DOWN bit is cleared */
	set_bit(ICE_SERVICE_DIS, pf->state);

	/* mirror the PCI identity into the hw struct used by the shared code */
	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	pci_save_state(pdev);

	hw->back = pf;
	hw->port_info = NULL;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	/* bring-up order: core init, ethernet, RDMA, devlink, features;
	 * the goto chain below unwinds in exact reverse
	 */
	err = ice_init(pf);
	if (err)
		goto err_init;

	err = ice_init_eth(pf);
	if (err)
		goto err_init_eth;

	err = ice_init_rdma(pf);
	if (err)
		goto err_init_rdma;

	err = ice_init_devlink(pf);
	if (err)
		goto err_init_devlink;

	ice_init_features(pf);

	return 0;

err_init_devlink:
	ice_deinit_rdma(pf);
err_init_rdma:
	ice_deinit_eth(pf);
err_init_eth:
	ice_deinit(pf);
err_init:
	/* NOTE(review): the device was enabled via devres
	 * (pcim_enable_device); confirm this explicit disable on the error
	 * path does not conflict with the devres-managed disable at detach.
	 */
	pci_disable_device(pdev);
	return err;
}
5258837f08fdSAnirudh Venkataramanan 
5259837f08fdSAnirudh Venkataramanan /**
5260769c500dSAkeem G Abodunrin  * ice_set_wake - enable or disable Wake on LAN
5261769c500dSAkeem G Abodunrin  * @pf: pointer to the PF struct
5262769c500dSAkeem G Abodunrin  *
5263769c500dSAkeem G Abodunrin  * Simple helper for WoL control
5264769c500dSAkeem G Abodunrin  */
5265769c500dSAkeem G Abodunrin static void ice_set_wake(struct ice_pf *pf)
5266769c500dSAkeem G Abodunrin {
5267769c500dSAkeem G Abodunrin 	struct ice_hw *hw = &pf->hw;
5268769c500dSAkeem G Abodunrin 	bool wol = pf->wol_ena;
5269769c500dSAkeem G Abodunrin 
5270769c500dSAkeem G Abodunrin 	/* clear wake state, otherwise new wake events won't fire */
5271769c500dSAkeem G Abodunrin 	wr32(hw, PFPM_WUS, U32_MAX);
5272769c500dSAkeem G Abodunrin 
5273769c500dSAkeem G Abodunrin 	/* enable / disable APM wake up, no RMW needed */
5274769c500dSAkeem G Abodunrin 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5275769c500dSAkeem G Abodunrin 
5276769c500dSAkeem G Abodunrin 	/* set magic packet filter enabled */
5277769c500dSAkeem G Abodunrin 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5278769c500dSAkeem G Abodunrin }
5279769c500dSAkeem G Abodunrin 
5280769c500dSAkeem G Abodunrin /**
5281ef860480STony Nguyen  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5282769c500dSAkeem G Abodunrin  * @pf: pointer to the PF struct
5283769c500dSAkeem G Abodunrin  *
5284769c500dSAkeem G Abodunrin  * Issue firmware command to enable multicast magic wake, making
5285769c500dSAkeem G Abodunrin  * sure that any locally administered address (LAA) is used for
5286769c500dSAkeem G Abodunrin  * wake, and that PF reset doesn't undo the LAA.
5287769c500dSAkeem G Abodunrin  */
5288769c500dSAkeem G Abodunrin static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5289769c500dSAkeem G Abodunrin {
5290769c500dSAkeem G Abodunrin 	struct device *dev = ice_pf_to_dev(pf);
5291769c500dSAkeem G Abodunrin 	struct ice_hw *hw = &pf->hw;
5292769c500dSAkeem G Abodunrin 	u8 mac_addr[ETH_ALEN];
5293769c500dSAkeem G Abodunrin 	struct ice_vsi *vsi;
52945518ac2aSTony Nguyen 	int status;
5295769c500dSAkeem G Abodunrin 	u8 flags;
5296769c500dSAkeem G Abodunrin 
5297769c500dSAkeem G Abodunrin 	if (!pf->wol_ena)
5298769c500dSAkeem G Abodunrin 		return;
5299769c500dSAkeem G Abodunrin 
5300769c500dSAkeem G Abodunrin 	vsi = ice_get_main_vsi(pf);
5301769c500dSAkeem G Abodunrin 	if (!vsi)
5302769c500dSAkeem G Abodunrin 		return;
5303769c500dSAkeem G Abodunrin 
5304769c500dSAkeem G Abodunrin 	/* Get current MAC address in case it's an LAA */
5305769c500dSAkeem G Abodunrin 	if (vsi->netdev)
5306769c500dSAkeem G Abodunrin 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5307769c500dSAkeem G Abodunrin 	else
5308769c500dSAkeem G Abodunrin 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5309769c500dSAkeem G Abodunrin 
5310769c500dSAkeem G Abodunrin 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5311769c500dSAkeem G Abodunrin 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5312769c500dSAkeem G Abodunrin 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5313769c500dSAkeem G Abodunrin 
5314769c500dSAkeem G Abodunrin 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5315769c500dSAkeem G Abodunrin 	if (status)
53165f87ec48STony Nguyen 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
53175518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
5318769c500dSAkeem G Abodunrin }
5319769c500dSAkeem G Abodunrin 
/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i;

	/* give an in-flight reset up to ICE_MAX_RESET_WAIT * 100 ms to
	 * finish before tearing the device down
	 */
	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
		if (!ice_is_reset_in_progress(pf->state))
			break;
		msleep(100);
	}

	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
		/* block further VF resets while the VFs are being freed */
		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
		ice_free_vfs(pf);
	}

	/* quiesce the service task and any admin-queue waiters first */
	ice_service_task_stop(pf);
	ice_aq_cancel_waiting_tasks(pf);
	set_bit(ICE_DOWN, pf->state);

	if (!ice_is_safe_mode(pf))
		ice_remove_arfs(pf);
	/* unwind in reverse order of ice_probe(): features, devlink, RDMA,
	 * ethernet, then the core init
	 */
	ice_deinit_features(pf);
	ice_deinit_devlink(pf);
	ice_deinit_rdma(pf);
	ice_deinit_eth(pf);
	ice_deinit(pf);

	ice_vsi_release_all(pf);

	/* arm (or disarm) Wake-on-LAN before the device is powered down */
	ice_setup_mc_magic_wake(pf);
	ice_set_wake(pf);

	pci_disable_device(pdev);
}
5359837f08fdSAnirudh Venkataramanan 
53605995b6d0SBrett Creeley /**
5361769c500dSAkeem G Abodunrin  * ice_shutdown - PCI callback for shutting down device
5362769c500dSAkeem G Abodunrin  * @pdev: PCI device information struct
5363769c500dSAkeem G Abodunrin  */
5364769c500dSAkeem G Abodunrin static void ice_shutdown(struct pci_dev *pdev)
5365769c500dSAkeem G Abodunrin {
5366769c500dSAkeem G Abodunrin 	struct ice_pf *pf = pci_get_drvdata(pdev);
5367769c500dSAkeem G Abodunrin 
5368769c500dSAkeem G Abodunrin 	ice_remove(pdev);
5369769c500dSAkeem G Abodunrin 
5370769c500dSAkeem G Abodunrin 	if (system_state == SYSTEM_POWER_OFF) {
5371769c500dSAkeem G Abodunrin 		pci_wake_from_d3(pdev, pf->wol_ena);
5372769c500dSAkeem G Abodunrin 		pci_set_power_state(pdev, PCI_D3hot);
5373769c500dSAkeem G Abodunrin 	}
5374769c500dSAkeem G Abodunrin }
5375769c500dSAkeem G Abodunrin 
5376769c500dSAkeem G Abodunrin #ifdef CONFIG_PM
/**
 * ice_prepare_for_shutdown - prep for PCI shutdown
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for PCI device shutdown
 */
static void ice_prepare_for_shutdown(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 v;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	/* NOTE(review): vsi_num is zeroed for every VSI, presumably so stale
	 * hardware VSI ids are not reused after resume/rebuild — confirm.
	 */
	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			pf->vsi[v]->vsi_num = 0;

	/* no more firmware interaction after this point */
	ice_shutdown_all_ctrlq(hw);
}
5403769c500dSAkeem G Abodunrin 
/**
 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
 * @pf: board private structure to reinitialize
 *
 * This routine reinitialize interrupt scheme that was cleared during
 * power management suspend callback.
 *
 * This should be called during resume routine to re-allocate the q_vectors
 * and reacquire interrupts.
 *
 * Return: 0 on success, negative error code on failure (all q_vectors
 * allocated so far are freed again on failure).
 */
static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret, v;

	/* Since we clear MSIX flag during suspend, we need to
	 * set it back during resume...
	 */

	ret = ice_init_interrupt_scheme(pf);
	if (ret) {
		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
		return ret;
	}

	/* Remap vectors and rings, after successful re-init interrupts */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
		if (ret)
			goto err_reinit;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
	}

	ret = ice_req_irq_msix_misc(pf);
	if (ret) {
		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
			ret);
		goto err_reinit;
	}

	return 0;

err_reinit:
	/* unwind only the VSIs whose q_vectors were allocated above;
	 * v still indexes the first VSI that failed (or one past the last)
	 */
	while (v--)
		if (pf->vsi[v])
			ice_vsi_free_q_vectors(pf->vsi[v]);

	return ret;
}
5456769c500dSAkeem G Abodunrin 
/**
 * ice_suspend
 * @dev: generic device information structure
 *
 * Power Management callback to quiesce the device and prepare
 * for D3 transition.
 *
 * Return: 0 on success (including the already-suspended and in-reset
 * no-op cases), -EBUSY if the device is not in a nominal state.
 */
static int __maybe_unused ice_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ice_pf *pf;
	int disabled, v;

	pf = pci_get_drvdata(pdev);

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Device is not ready, no need to suspend it\n");
		return -EBUSY;
	}

	/* Stop watchdog tasks until resume completion.
	 * Even though it is most likely that the service task is
	 * disabled if the device is suspended or down, the service task's
	 * state is controlled by a different state bit, and we should
	 * store and honor whatever state that bit is in at this point.
	 */
	disabled = ice_service_task_stop(pf);

	/* detach auxiliary (RDMA) devices before the hardware goes away */
	ice_unplug_aux_dev(pf);

	/* Already suspended?, then there is nothing to do */
	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	if (test_bit(ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "can't suspend device in reset or already down\n");
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	/* arm wake-up sources before tearing the datapath down */
	ice_setup_mc_magic_wake(pf);

	ice_prepare_for_shutdown(pf);

	ice_set_wake(pf);

	/* Free vectors, clear the interrupt scheme and release IRQs
	 * for proper hibernation, especially with large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_clear_interrupt_scheme(pf);

	pci_save_state(pdev);
	pci_wake_from_d3(pdev, pf->wol_ena);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
5526769c500dSAkeem G Abodunrin 
/**
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 *
 * Return: 0 on success, -ENODEV if the device vanished while suspended,
 * or the error from re-enabling the PCI device.
 */
static int __maybe_unused ice_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	enum ice_reset_req reset_type;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;

	ret = pci_enable_device_mem(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable device after suspend\n");
		return ret;
	}

	pf = pci_get_drvdata(pdev);
	hw = &pf->hw;

	/* record why we woke up (register was latched across D3) */
	pf->wakeup_reason = rd32(hw, PFPM_WUS);
	ice_print_wake_reason(pf);

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	ret = ice_reinit_interrupt_scheme(pf);
	if (ret)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	clear_bit(ICE_DOWN, pf->state);
	/* Now perform PF reset and rebuild */
	reset_type = ICE_RESET_PFR;
	/* re-enable service task for reset, but allow reset to schedule it */
	clear_bit(ICE_SERVICE_DIS, pf->state);

	if (ice_schedule_reset(pf, reset_type))
		dev_err(dev, "Reset during resume failed.\n");

	clear_bit(ICE_SUSPENDED, pf->state);
	ice_service_task_restart(pf);

	/* Restart the service task */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;
}
5582769c500dSAkeem G Abodunrin #endif /* CONFIG_PM */
5583769c500dSAkeem G Abodunrin 
5584769c500dSAkeem G Abodunrin /**
55855995b6d0SBrett Creeley  * ice_pci_err_detected - warning that PCI error has been detected
55865995b6d0SBrett Creeley  * @pdev: PCI device information struct
55875995b6d0SBrett Creeley  * @err: the type of PCI error
55885995b6d0SBrett Creeley  *
55895995b6d0SBrett Creeley  * Called to warn that something happened on the PCI bus and the error handling
55905995b6d0SBrett Creeley  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
55915995b6d0SBrett Creeley  */
55925995b6d0SBrett Creeley static pci_ers_result_t
559316d79cd4SLuc Van Oostenryck ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
55945995b6d0SBrett Creeley {
55955995b6d0SBrett Creeley 	struct ice_pf *pf = pci_get_drvdata(pdev);
55965995b6d0SBrett Creeley 
55975995b6d0SBrett Creeley 	if (!pf) {
55985995b6d0SBrett Creeley 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
55995995b6d0SBrett Creeley 			__func__, err);
56005995b6d0SBrett Creeley 		return PCI_ERS_RESULT_DISCONNECT;
56015995b6d0SBrett Creeley 	}
56025995b6d0SBrett Creeley 
56037e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
56045995b6d0SBrett Creeley 		ice_service_task_stop(pf);
56055995b6d0SBrett Creeley 
56067e408e07SAnirudh Venkataramanan 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
56077e408e07SAnirudh Venkataramanan 			set_bit(ICE_PFR_REQ, pf->state);
5608fbc7b27aSKiran Patil 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
56095995b6d0SBrett Creeley 		}
56105995b6d0SBrett Creeley 	}
56115995b6d0SBrett Creeley 
56125995b6d0SBrett Creeley 	return PCI_ERS_RESULT_NEED_RESET;
56135995b6d0SBrett Creeley }
56145995b6d0SBrett Creeley 
56155995b6d0SBrett Creeley /**
56165995b6d0SBrett Creeley  * ice_pci_err_slot_reset - a PCI slot reset has just happened
56175995b6d0SBrett Creeley  * @pdev: PCI device information struct
56185995b6d0SBrett Creeley  *
56195995b6d0SBrett Creeley  * Called to determine if the driver can recover from the PCI slot reset by
56205995b6d0SBrett Creeley  * using a register read to determine if the device is recoverable.
56215995b6d0SBrett Creeley  */
56225995b6d0SBrett Creeley static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
56235995b6d0SBrett Creeley {
56245995b6d0SBrett Creeley 	struct ice_pf *pf = pci_get_drvdata(pdev);
56255995b6d0SBrett Creeley 	pci_ers_result_t result;
56265995b6d0SBrett Creeley 	int err;
56275995b6d0SBrett Creeley 	u32 reg;
56285995b6d0SBrett Creeley 
56295995b6d0SBrett Creeley 	err = pci_enable_device_mem(pdev);
56305995b6d0SBrett Creeley 	if (err) {
563119cce2c6SAnirudh Venkataramanan 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
56325995b6d0SBrett Creeley 			err);
56335995b6d0SBrett Creeley 		result = PCI_ERS_RESULT_DISCONNECT;
56345995b6d0SBrett Creeley 	} else {
56355995b6d0SBrett Creeley 		pci_set_master(pdev);
56365995b6d0SBrett Creeley 		pci_restore_state(pdev);
56375995b6d0SBrett Creeley 		pci_save_state(pdev);
56385995b6d0SBrett Creeley 		pci_wake_from_d3(pdev, false);
56395995b6d0SBrett Creeley 
56405995b6d0SBrett Creeley 		/* Check for life */
56415995b6d0SBrett Creeley 		reg = rd32(&pf->hw, GLGEN_RTRIG);
56425995b6d0SBrett Creeley 		if (!reg)
56435995b6d0SBrett Creeley 			result = PCI_ERS_RESULT_RECOVERED;
56445995b6d0SBrett Creeley 		else
56455995b6d0SBrett Creeley 			result = PCI_ERS_RESULT_DISCONNECT;
56465995b6d0SBrett Creeley 	}
56475995b6d0SBrett Creeley 
56485995b6d0SBrett Creeley 	return result;
56495995b6d0SBrett Creeley }
56505995b6d0SBrett Creeley 
/**
 * ice_pci_err_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error and/or
 * reset recovery have finished
 */
static void ice_pci_err_resume(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
			__func__);
		return;
	}

	/* If the device is still suspended, PM resume (not error recovery)
	 * is responsible for bringing it back up.
	 */
	if (test_bit(ICE_SUSPENDED, pf->state)) {
		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
			__func__);
		return;
	}

	/* Restore VF MSI state before the PF reset so VFs recover too */
	ice_restore_all_vfs_msi_state(pdev);

	ice_do_reset(pf, ICE_RESET_PFR);
	ice_service_task_restart(pf);
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
}
56805995b6d0SBrett Creeley 
56815995b6d0SBrett Creeley /**
56825995b6d0SBrett Creeley  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
56835995b6d0SBrett Creeley  * @pdev: PCI device information struct
56845995b6d0SBrett Creeley  */
56855995b6d0SBrett Creeley static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
56865995b6d0SBrett Creeley {
56875995b6d0SBrett Creeley 	struct ice_pf *pf = pci_get_drvdata(pdev);
56885995b6d0SBrett Creeley 
56897e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
56905995b6d0SBrett Creeley 		ice_service_task_stop(pf);
56915995b6d0SBrett Creeley 
56927e408e07SAnirudh Venkataramanan 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
56937e408e07SAnirudh Venkataramanan 			set_bit(ICE_PFR_REQ, pf->state);
5694fbc7b27aSKiran Patil 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
56955995b6d0SBrett Creeley 		}
56965995b6d0SBrett Creeley 	}
56975995b6d0SBrett Creeley }
56985995b6d0SBrett Creeley 
/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_done(struct pci_dev *pdev)
{
	/* Recovery after a requested reset is identical to error resume */
	ice_pci_err_resume(pdev);
}
57075995b6d0SBrett Creeley 
/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ice_pci_tbl[] = {
	/* E810 family */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
	/* E823C family */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
	/* E822C family */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
	/* E822L family */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
	/* E823L family */
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5747837f08fdSAnirudh Venkataramanan 
/* Suspend/resume callbacks; __maybe_unused since PM support is optional */
static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);

/* PCI AER/EEH error recovery callbacks */
static const struct pci_error_handlers ice_pci_err_handler = {
	.error_detected = ice_pci_err_detected,
	.slot_reset = ice_pci_err_slot_reset,
	.reset_prepare = ice_pci_err_reset_prepare,
	.reset_done = ice_pci_err_reset_done,
	.resume = ice_pci_err_resume
};
57575995b6d0SBrett Creeley 
/* PCI driver definition tying device IDs to the probe/remove/PM/error
 * handling entry points above.
 */
static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
#ifdef CONFIG_PM
	.driver.pm = &ice_pm_ops,
#endif /* CONFIG_PM */
	.shutdown = ice_shutdown,
	.sriov_configure = ice_sriov_configure,
	.err_handler = &ice_pci_err_handler
};
5770837f08fdSAnirudh Venkataramanan 
5771837f08fdSAnirudh Venkataramanan /**
5772837f08fdSAnirudh Venkataramanan  * ice_module_init - Driver registration routine
5773837f08fdSAnirudh Venkataramanan  *
5774837f08fdSAnirudh Venkataramanan  * ice_module_init is the first routine called when the driver is
5775837f08fdSAnirudh Venkataramanan  * loaded. All it does is register with the PCI subsystem.
5776837f08fdSAnirudh Venkataramanan  */
5777837f08fdSAnirudh Venkataramanan static int __init ice_module_init(void)
5778837f08fdSAnirudh Venkataramanan {
5779837f08fdSAnirudh Venkataramanan 	int status;
5780837f08fdSAnirudh Venkataramanan 
578134a2a3b8SJeff Kirsher 	pr_info("%s\n", ice_driver_string);
5782837f08fdSAnirudh Venkataramanan 	pr_info("%s\n", ice_copyright);
5783837f08fdSAnirudh Venkataramanan 
57844d159f78SAnirudh Venkataramanan 	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5785940b61afSAnirudh Venkataramanan 	if (!ice_wq) {
5786940b61afSAnirudh Venkataramanan 		pr_err("Failed to create workqueue\n");
5787940b61afSAnirudh Venkataramanan 		return -ENOMEM;
5788940b61afSAnirudh Venkataramanan 	}
5789940b61afSAnirudh Venkataramanan 
5790837f08fdSAnirudh Venkataramanan 	status = pci_register_driver(&ice_driver);
5791940b61afSAnirudh Venkataramanan 	if (status) {
57922f2da36eSAnirudh Venkataramanan 		pr_err("failed to register PCI driver, err %d\n", status);
5793940b61afSAnirudh Venkataramanan 		destroy_workqueue(ice_wq);
5794940b61afSAnirudh Venkataramanan 	}
5795837f08fdSAnirudh Venkataramanan 
5796837f08fdSAnirudh Venkataramanan 	return status;
5797837f08fdSAnirudh Venkataramanan }
5798837f08fdSAnirudh Venkataramanan module_init(ice_module_init);
5799837f08fdSAnirudh Venkataramanan 
/**
 * ice_module_exit - Driver exit cleanup routine
 *
 * ice_module_exit is called just before the driver is removed
 * from memory.
 */
static void __exit ice_module_exit(void)
{
	/* Unregister first so no new work can be queued, then tear down
	 * the workqueue created in ice_module_init().
	 */
	pci_unregister_driver(&ice_driver);
	destroy_workqueue(ice_wq);
	pr_info("module unloaded\n");
}
module_exit(ice_module_exit);
58133a858ba3SAnirudh Venkataramanan 
/**
 * ice_set_mac_address - NDO callback to set MAC address
 * @netdev: network interface device structure
 * @pi: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int ice_set_mac_address(struct net_device *netdev, void *pi)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct sockaddr *addr = pi;
	u8 old_mac[ETH_ALEN];
	u8 flags = 0;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	/* No-op if the requested address is already in use */
	if (ether_addr_equal(netdev->dev_addr, mac)) {
		netdev_dbg(netdev, "already using mac %pM\n", mac);
		return 0;
	}

	/* Refuse while the device is down or a reset is in flight */
	if (test_bit(ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't set mac %pM. device not ready\n",
			   mac);
		return -EBUSY;
	}

	/* tc-flower filters reference the destination MAC; changing it
	 * underneath them is not supported.
	 */
	if (ice_chnl_dmac_fltr_cnt(pf)) {
		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
			   mac);
		return -EAGAIN;
	}

	/* Update the netdev address under the addr lock, remembering the
	 * old one in case the filter update below fails and we roll back.
	 */
	netif_addr_lock_bh(netdev);
	ether_addr_copy(old_mac, netdev->dev_addr);
	/* change the netdev's MAC address */
	eth_hw_addr_set(netdev, mac);
	netif_addr_unlock_bh(netdev);

	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
	if (err && err != -ENOENT) {
		err = -EADDRNOTAVAIL;
		goto err_update_filters;
	}

	/* Add filter for new MAC. If filter exists, return success */
	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
	if (err == -EEXIST) {
		/* Although this MAC filter is already present in hardware it's
		 * possible in some cases (e.g. bonding) that dev_addr was
		 * modified outside of the driver and needs to be restored back
		 * to this value.
		 */
		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);

		/* NOTE: early success return skips the firmware LAA write
		 * below, so only the netdev/filter state is refreshed here.
		 */
		return 0;
	} else if (err) {
		/* error if the new filter addition failed */
		err = -EADDRNOTAVAIL;
	}

err_update_filters:
	if (err) {
		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
			   mac);
		/* Roll the netdev address back to the previous MAC */
		netif_addr_lock_bh(netdev);
		eth_hw_addr_set(netdev, old_mac);
		netif_addr_unlock_bh(netdev);
		return err;
	}

	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
		   netdev->dev_addr);

	/* write new MAC address to the firmware */
	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
	if (err) {
		/* Firmware write failure is only logged; the software MAC
		 * and filters were already updated successfully above.
		 */
		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
			   mac, err);
	}
	return 0;
}
5907e94d4478SAnirudh Venkataramanan 
5908e94d4478SAnirudh Venkataramanan /**
5909e94d4478SAnirudh Venkataramanan  * ice_set_rx_mode - NDO callback to set the netdev filters
5910e94d4478SAnirudh Venkataramanan  * @netdev: network interface device structure
5911e94d4478SAnirudh Venkataramanan  */
5912e94d4478SAnirudh Venkataramanan static void ice_set_rx_mode(struct net_device *netdev)
5913e94d4478SAnirudh Venkataramanan {
5914e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
5915e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
5916e94d4478SAnirudh Venkataramanan 
5917e94d4478SAnirudh Venkataramanan 	if (!vsi)
5918e94d4478SAnirudh Venkataramanan 		return;
5919e94d4478SAnirudh Venkataramanan 
5920e94d4478SAnirudh Venkataramanan 	/* Set the flags to synchronize filters
5921e94d4478SAnirudh Venkataramanan 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5922e94d4478SAnirudh Venkataramanan 	 * flags
5923e94d4478SAnirudh Venkataramanan 	 */
5924e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5925e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5926e94d4478SAnirudh Venkataramanan 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5927e94d4478SAnirudh Venkataramanan 
5928e94d4478SAnirudh Venkataramanan 	/* schedule our worker thread which will take care of
5929e94d4478SAnirudh Venkataramanan 	 * applying the new filter changes
5930e94d4478SAnirudh Venkataramanan 	 */
5931e94d4478SAnirudh Venkataramanan 	ice_service_task_schedule(vsi->back);
5932e94d4478SAnirudh Venkataramanan }
5933e94d4478SAnirudh Venkataramanan 
5934e94d4478SAnirudh Venkataramanan /**
59351ddef455SUsha Ketineni  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
59361ddef455SUsha Ketineni  * @netdev: network interface device structure
59371ddef455SUsha Ketineni  * @queue_index: Queue ID
59381ddef455SUsha Ketineni  * @maxrate: maximum bandwidth in Mbps
59391ddef455SUsha Ketineni  */
59401ddef455SUsha Ketineni static int
59411ddef455SUsha Ketineni ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
59421ddef455SUsha Ketineni {
59431ddef455SUsha Ketineni 	struct ice_netdev_priv *np = netdev_priv(netdev);
59441ddef455SUsha Ketineni 	struct ice_vsi *vsi = np->vsi;
59451ddef455SUsha Ketineni 	u16 q_handle;
59465518ac2aSTony Nguyen 	int status;
59471ddef455SUsha Ketineni 	u8 tc;
59481ddef455SUsha Ketineni 
59491ddef455SUsha Ketineni 	/* Validate maxrate requested is within permitted range */
59501ddef455SUsha Ketineni 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
595119cce2c6SAnirudh Venkataramanan 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
59521ddef455SUsha Ketineni 			   maxrate, queue_index);
59531ddef455SUsha Ketineni 		return -EINVAL;
59541ddef455SUsha Ketineni 	}
59551ddef455SUsha Ketineni 
59561ddef455SUsha Ketineni 	q_handle = vsi->tx_rings[queue_index]->q_handle;
59571ddef455SUsha Ketineni 	tc = ice_dcb_get_tc(vsi, queue_index);
59581ddef455SUsha Ketineni 
59591ddef455SUsha Ketineni 	/* Set BW back to default, when user set maxrate to 0 */
59601ddef455SUsha Ketineni 	if (!maxrate)
59611ddef455SUsha Ketineni 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
59621ddef455SUsha Ketineni 					       q_handle, ICE_MAX_BW);
59631ddef455SUsha Ketineni 	else
59641ddef455SUsha Ketineni 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
59651ddef455SUsha Ketineni 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5966c1484691STony Nguyen 	if (status)
59675f87ec48STony Nguyen 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
59685f87ec48STony Nguyen 			   status);
59691ddef455SUsha Ketineni 
5970c1484691STony Nguyen 	return status;
59711ddef455SUsha Ketineni }
59721ddef455SUsha Ketineni 
59731ddef455SUsha Ketineni /**
5974e94d4478SAnirudh Venkataramanan  * ice_fdb_add - add an entry to the hardware database
5975e94d4478SAnirudh Venkataramanan  * @ndm: the input from the stack
5976e94d4478SAnirudh Venkataramanan  * @tb: pointer to array of nladdr (unused)
5977e94d4478SAnirudh Venkataramanan  * @dev: the net device pointer
5978e94d4478SAnirudh Venkataramanan  * @addr: the MAC address entry being added
5979f9867df6SAnirudh Venkataramanan  * @vid: VLAN ID
5980e94d4478SAnirudh Venkataramanan  * @flags: instructions from stack about fdb operation
598199be37edSBruce Allan  * @extack: netlink extended ack
5982e94d4478SAnirudh Venkataramanan  */
598399be37edSBruce Allan static int
598499be37edSBruce Allan ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
598599be37edSBruce Allan 	    struct net_device *dev, const unsigned char *addr, u16 vid,
598699be37edSBruce Allan 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5987e94d4478SAnirudh Venkataramanan {
5988e94d4478SAnirudh Venkataramanan 	int err;
5989e94d4478SAnirudh Venkataramanan 
5990e94d4478SAnirudh Venkataramanan 	if (vid) {
5991e94d4478SAnirudh Venkataramanan 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5992e94d4478SAnirudh Venkataramanan 		return -EINVAL;
5993e94d4478SAnirudh Venkataramanan 	}
5994e94d4478SAnirudh Venkataramanan 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5995e94d4478SAnirudh Venkataramanan 		netdev_err(dev, "FDB only supports static addresses\n");
5996e94d4478SAnirudh Venkataramanan 		return -EINVAL;
5997e94d4478SAnirudh Venkataramanan 	}
5998e94d4478SAnirudh Venkataramanan 
5999e94d4478SAnirudh Venkataramanan 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6000e94d4478SAnirudh Venkataramanan 		err = dev_uc_add_excl(dev, addr);
6001e94d4478SAnirudh Venkataramanan 	else if (is_multicast_ether_addr(addr))
6002e94d4478SAnirudh Venkataramanan 		err = dev_mc_add_excl(dev, addr);
6003e94d4478SAnirudh Venkataramanan 	else
6004e94d4478SAnirudh Venkataramanan 		err = -EINVAL;
6005e94d4478SAnirudh Venkataramanan 
6006e94d4478SAnirudh Venkataramanan 	/* Only return duplicate errors if NLM_F_EXCL is set */
6007e94d4478SAnirudh Venkataramanan 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
6008e94d4478SAnirudh Venkataramanan 		err = 0;
6009e94d4478SAnirudh Venkataramanan 
6010e94d4478SAnirudh Venkataramanan 	return err;
6011e94d4478SAnirudh Venkataramanan }
6012e94d4478SAnirudh Venkataramanan 
6013e94d4478SAnirudh Venkataramanan /**
6014e94d4478SAnirudh Venkataramanan  * ice_fdb_del - delete an entry from the hardware database
6015e94d4478SAnirudh Venkataramanan  * @ndm: the input from the stack
6016e94d4478SAnirudh Venkataramanan  * @tb: pointer to array of nladdr (unused)
6017e94d4478SAnirudh Venkataramanan  * @dev: the net device pointer
6018e94d4478SAnirudh Venkataramanan  * @addr: the MAC address entry being added
6019f9867df6SAnirudh Venkataramanan  * @vid: VLAN ID
6020ca4567f1SAlaa Mohamed  * @extack: netlink extended ack
6021e94d4478SAnirudh Venkataramanan  */
6022c8b7abddSBruce Allan static int
6023c8b7abddSBruce Allan ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6024e94d4478SAnirudh Venkataramanan 	    struct net_device *dev, const unsigned char *addr,
6025ca4567f1SAlaa Mohamed 	    __always_unused u16 vid, struct netlink_ext_ack *extack)
6026e94d4478SAnirudh Venkataramanan {
6027e94d4478SAnirudh Venkataramanan 	int err;
6028e94d4478SAnirudh Venkataramanan 
6029e94d4478SAnirudh Venkataramanan 	if (ndm->ndm_state & NUD_PERMANENT) {
6030e94d4478SAnirudh Venkataramanan 		netdev_err(dev, "FDB only supports static addresses\n");
6031e94d4478SAnirudh Venkataramanan 		return -EINVAL;
6032e94d4478SAnirudh Venkataramanan 	}
6033e94d4478SAnirudh Venkataramanan 
6034e94d4478SAnirudh Venkataramanan 	if (is_unicast_ether_addr(addr))
6035e94d4478SAnirudh Venkataramanan 		err = dev_uc_del(dev, addr);
6036e94d4478SAnirudh Venkataramanan 	else if (is_multicast_ether_addr(addr))
6037e94d4478SAnirudh Venkataramanan 		err = dev_mc_del(dev, addr);
6038e94d4478SAnirudh Venkataramanan 	else
6039e94d4478SAnirudh Venkataramanan 		err = -EINVAL;
6040e94d4478SAnirudh Venkataramanan 
6041e94d4478SAnirudh Venkataramanan 	return err;
6042e94d4478SAnirudh Venkataramanan }
6043e94d4478SAnirudh Venkataramanan 
/* All CTAG/STAG stripping and insertion offload feature bits */
#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_CTAG_TX | \
					 NETIF_F_HW_VLAN_STAG_RX | \
					 NETIF_F_HW_VLAN_STAG_TX)

/* Rx (stripping) offload feature bits only */
#define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_STAG_RX)

/* CTAG/STAG filtering feature bits; must be toggled together in DVM */
#define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
					 NETIF_F_HW_VLAN_STAG_FILTER)
60541babaf77SBrett Creeley 
60551babaf77SBrett Creeley /**
60561babaf77SBrett Creeley  * ice_fix_features - fix the netdev features flags based on device limitations
60571babaf77SBrett Creeley  * @netdev: ptr to the netdev that flags are being fixed on
60581babaf77SBrett Creeley  * @features: features that need to be checked and possibly fixed
60591babaf77SBrett Creeley  *
60601babaf77SBrett Creeley  * Make sure any fixups are made to features in this callback. This enables the
60611babaf77SBrett Creeley  * driver to not have to check unsupported configurations throughout the driver
60621babaf77SBrett Creeley  * because that's the responsiblity of this callback.
60631babaf77SBrett Creeley  *
60641babaf77SBrett Creeley  * Single VLAN Mode (SVM) Supported Features:
60651babaf77SBrett Creeley  *	NETIF_F_HW_VLAN_CTAG_FILTER
60661babaf77SBrett Creeley  *	NETIF_F_HW_VLAN_CTAG_RX
60671babaf77SBrett Creeley  *	NETIF_F_HW_VLAN_CTAG_TX
60681babaf77SBrett Creeley  *
60691babaf77SBrett Creeley  * Double VLAN Mode (DVM) Supported Features:
60701babaf77SBrett Creeley  *	NETIF_F_HW_VLAN_CTAG_FILTER
60711babaf77SBrett Creeley  *	NETIF_F_HW_VLAN_CTAG_RX
60721babaf77SBrett Creeley  *	NETIF_F_HW_VLAN_CTAG_TX
60731babaf77SBrett Creeley  *
60741babaf77SBrett Creeley  *	NETIF_F_HW_VLAN_STAG_FILTER
60751babaf77SBrett Creeley  *	NETIF_HW_VLAN_STAG_RX
60761babaf77SBrett Creeley  *	NETIF_HW_VLAN_STAG_TX
60771babaf77SBrett Creeley  *
60781babaf77SBrett Creeley  * Features that need fixing:
60791babaf77SBrett Creeley  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
60801babaf77SBrett Creeley  *	These are mutually exlusive as the VSI context cannot support multiple
60811babaf77SBrett Creeley  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
60821babaf77SBrett Creeley  *	is not done, then default to clearing the requested STAG offload
60831babaf77SBrett Creeley  *	settings.
60841babaf77SBrett Creeley  *
60851babaf77SBrett Creeley  *	All supported filtering has to be enabled or disabled together. For
60861babaf77SBrett Creeley  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
60871babaf77SBrett Creeley  *	together. If this is not done, then default to VLAN filtering disabled.
60881babaf77SBrett Creeley  *	These are mutually exclusive as there is currently no way to
60891babaf77SBrett Creeley  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
60901babaf77SBrett Creeley  *	prune rules.
60911babaf77SBrett Creeley  */
static netdev_features_t
ice_fix_features(struct net_device *netdev, netdev_features_t features)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
	bool cur_ctag, cur_stag, req_ctag, req_stag;

	/* snapshot the current CTAG/STAG filtering state ... */
	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;

	/* ... and the requested state from the stack */
	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;

	if (req_vlan_fltr != cur_vlan_fltr) {
		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
			/* in DVM, CTAG and STAG filtering can only be toggled
			 * together (see the function comment above); coerce a
			 * split request to both-on or both-off
			 */
			if (req_ctag && req_stag) {
				features |= NETIF_VLAN_FILTERING_FEATURES;
			} else if (!req_ctag && !req_stag) {
				features &= ~NETIF_VLAN_FILTERING_FEATURES;
			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
				   (!cur_stag && req_stag && !cur_ctag)) {
				/* enabling one type from the both-off state:
				 * pull the other one up with it
				 */
				features |= NETIF_VLAN_FILTERING_FEATURES;
				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
			} else if ((cur_ctag && !req_ctag && cur_stag) ||
				   (cur_stag && !req_stag && cur_ctag)) {
				/* disabling one type from the both-on state:
				 * pull the other one down with it
				 */
				features &= ~NETIF_VLAN_FILTERING_FEATURES;
				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
			}
		} else {
			/* SVM supports only 802.1Q (CTAG) filtering */
			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");

			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		}
	}

	/* CTAG and STAG stripping/insertion are mutually exclusive in the VSI
	 * context; when both are requested, keep CTAG and drop STAG
	 */
	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
			      NETIF_F_HW_VLAN_STAG_TX);
	}

	/* NETIF_F_RXFCS being requested means FCS/CRC stripping is being
	 * turned off; with no non-zero VLAN configured, VLAN stripping cannot
	 * stay enabled in that case, so drop it
	 */
	if (!(netdev->features & NETIF_F_RXFCS) &&
	    (features & NETIF_F_RXFCS) &&
	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
	}

	return features;
}
61481babaf77SBrett Creeley 
61491babaf77SBrett Creeley /**
61501babaf77SBrett Creeley  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
61511babaf77SBrett Creeley  * @vsi: PF's VSI
61521babaf77SBrett Creeley  * @features: features used to determine VLAN offload settings
61531babaf77SBrett Creeley  *
61541babaf77SBrett Creeley  * First, determine the vlan_ethertype based on the VLAN offload bits in
61551babaf77SBrett Creeley  * features. Then determine if stripping and insertion should be enabled or
61561babaf77SBrett Creeley  * disabled. Finally enable or disable VLAN stripping and insertion.
61571babaf77SBrett Creeley  */
61581babaf77SBrett Creeley static int
61591babaf77SBrett Creeley ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
61601babaf77SBrett Creeley {
61611babaf77SBrett Creeley 	bool enable_stripping = true, enable_insertion = true;
61621babaf77SBrett Creeley 	struct ice_vsi_vlan_ops *vlan_ops;
61631babaf77SBrett Creeley 	int strip_err = 0, insert_err = 0;
61641babaf77SBrett Creeley 	u16 vlan_ethertype = 0;
61651babaf77SBrett Creeley 
61661babaf77SBrett Creeley 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
61671babaf77SBrett Creeley 
61681babaf77SBrett Creeley 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
61691babaf77SBrett Creeley 		vlan_ethertype = ETH_P_8021AD;
61701babaf77SBrett Creeley 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
61711babaf77SBrett Creeley 		vlan_ethertype = ETH_P_8021Q;
61721babaf77SBrett Creeley 
61731babaf77SBrett Creeley 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
61741babaf77SBrett Creeley 		enable_stripping = false;
61751babaf77SBrett Creeley 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
61761babaf77SBrett Creeley 		enable_insertion = false;
61771babaf77SBrett Creeley 
61781babaf77SBrett Creeley 	if (enable_stripping)
61791babaf77SBrett Creeley 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
61801babaf77SBrett Creeley 	else
61811babaf77SBrett Creeley 		strip_err = vlan_ops->dis_stripping(vsi);
61821babaf77SBrett Creeley 
61831babaf77SBrett Creeley 	if (enable_insertion)
61841babaf77SBrett Creeley 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
61851babaf77SBrett Creeley 	else
61861babaf77SBrett Creeley 		insert_err = vlan_ops->dis_insertion(vsi);
61871babaf77SBrett Creeley 
61881babaf77SBrett Creeley 	if (strip_err || insert_err)
61891babaf77SBrett Creeley 		return -EIO;
61901babaf77SBrett Creeley 
61911babaf77SBrett Creeley 	return 0;
61921babaf77SBrett Creeley }
61931babaf77SBrett Creeley 
61941babaf77SBrett Creeley /**
61951babaf77SBrett Creeley  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
61961babaf77SBrett Creeley  * @vsi: PF's VSI
61971babaf77SBrett Creeley  * @features: features used to determine VLAN filtering settings
61981babaf77SBrett Creeley  *
61991babaf77SBrett Creeley  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
62001babaf77SBrett Creeley  * features.
62011babaf77SBrett Creeley  */
62021babaf77SBrett Creeley static int
62031babaf77SBrett Creeley ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
62041babaf77SBrett Creeley {
62051babaf77SBrett Creeley 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
62061babaf77SBrett Creeley 	int err = 0;
62071babaf77SBrett Creeley 
62081babaf77SBrett Creeley 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
62091babaf77SBrett Creeley 	 * if either bit is set
62101babaf77SBrett Creeley 	 */
62111babaf77SBrett Creeley 	if (features &
62121babaf77SBrett Creeley 	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
62131babaf77SBrett Creeley 		err = vlan_ops->ena_rx_filtering(vsi);
62141babaf77SBrett Creeley 	else
62151babaf77SBrett Creeley 		err = vlan_ops->dis_rx_filtering(vsi);
62161babaf77SBrett Creeley 
62171babaf77SBrett Creeley 	return err;
62181babaf77SBrett Creeley }
62191babaf77SBrett Creeley 
62201babaf77SBrett Creeley /**
62211babaf77SBrett Creeley  * ice_set_vlan_features - set VLAN settings based on suggested feature set
62221babaf77SBrett Creeley  * @netdev: ptr to the netdev being adjusted
62231babaf77SBrett Creeley  * @features: the feature set that the stack is suggesting
62241babaf77SBrett Creeley  *
62251babaf77SBrett Creeley  * Only update VLAN settings if the requested_vlan_features are different than
62261babaf77SBrett Creeley  * the current_vlan_features.
62271babaf77SBrett Creeley  */
62281babaf77SBrett Creeley static int
62291babaf77SBrett Creeley ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
62301babaf77SBrett Creeley {
62311babaf77SBrett Creeley 	netdev_features_t current_vlan_features, requested_vlan_features;
62321babaf77SBrett Creeley 	struct ice_netdev_priv *np = netdev_priv(netdev);
62331babaf77SBrett Creeley 	struct ice_vsi *vsi = np->vsi;
62341babaf77SBrett Creeley 	int err;
62351babaf77SBrett Creeley 
62361babaf77SBrett Creeley 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
62371babaf77SBrett Creeley 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
62381babaf77SBrett Creeley 	if (current_vlan_features ^ requested_vlan_features) {
6239affa1029SAnatolii Gerasymenko 		if ((features & NETIF_F_RXFCS) &&
6240affa1029SAnatolii Gerasymenko 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6241affa1029SAnatolii Gerasymenko 			dev_err(ice_pf_to_dev(vsi->back),
6242affa1029SAnatolii Gerasymenko 				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6243affa1029SAnatolii Gerasymenko 			return -EIO;
6244affa1029SAnatolii Gerasymenko 		}
6245affa1029SAnatolii Gerasymenko 
62461babaf77SBrett Creeley 		err = ice_set_vlan_offload_features(vsi, features);
62471babaf77SBrett Creeley 		if (err)
62481babaf77SBrett Creeley 			return err;
62491babaf77SBrett Creeley 	}
62501babaf77SBrett Creeley 
62511babaf77SBrett Creeley 	current_vlan_features = netdev->features &
62521babaf77SBrett Creeley 		NETIF_VLAN_FILTERING_FEATURES;
62531babaf77SBrett Creeley 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
62541babaf77SBrett Creeley 	if (current_vlan_features ^ requested_vlan_features) {
62551babaf77SBrett Creeley 		err = ice_set_vlan_filtering_features(vsi, features);
62561babaf77SBrett Creeley 		if (err)
62571babaf77SBrett Creeley 			return err;
62581babaf77SBrett Creeley 	}
62591babaf77SBrett Creeley 
62601babaf77SBrett Creeley 	return 0;
62611babaf77SBrett Creeley }
62621babaf77SBrett Creeley 
/**
 * ice_set_loopback - turn on/off loopback mode on underlying PF
 * @vsi: ptr to VSI
 * @ena: flag to indicate the on/off setting
 *
 * Returns 0 on success, negative error code on failure. NOTE(review): when
 * the AQ command fails but the interface was running, the returned value is
 * that of ice_up(), not of the failed AQ command.
 */
static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
{
	bool if_running = netif_running(vsi->netdev);
	int ret;

	/* quiesce the interface first; test_and_set_bit() skips ice_down()
	 * when the VSI is already marked down
	 */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		ret = ice_down(vsi);
		if (ret) {
			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
			return ret;
		}
	}
	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
	if (ret)
		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
	/* bring the interface back up even if the AQ command failed */
	if (if_running)
		ret = ice_up(vsi);

	return ret;
}
628844ece4e1SMaciej Fijalkowski 
628944ece4e1SMaciej Fijalkowski /**
6290d76a60baSAnirudh Venkataramanan  * ice_set_features - set the netdev feature flags
6291d76a60baSAnirudh Venkataramanan  * @netdev: ptr to the netdev being adjusted
6292d76a60baSAnirudh Venkataramanan  * @features: the feature set that the stack is suggesting
6293d76a60baSAnirudh Venkataramanan  */
6294c8b7abddSBruce Allan static int
6295c8b7abddSBruce Allan ice_set_features(struct net_device *netdev, netdev_features_t features)
6296d76a60baSAnirudh Venkataramanan {
6297c67672faSMaciej Fijalkowski 	netdev_features_t changed = netdev->features ^ features;
6298d76a60baSAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
6299d76a60baSAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
63005f8cc355SHenry Tieman 	struct ice_pf *pf = vsi->back;
6301d76a60baSAnirudh Venkataramanan 	int ret = 0;
6302d76a60baSAnirudh Venkataramanan 
6303462acf6aSTony Nguyen 	/* Don't set any netdev advanced features with device in Safe Mode */
6304c67672faSMaciej Fijalkowski 	if (ice_is_safe_mode(pf)) {
6305c67672faSMaciej Fijalkowski 		dev_err(ice_pf_to_dev(pf),
6306c67672faSMaciej Fijalkowski 			"Device is in Safe Mode - not enabling advanced netdev features\n");
6307462acf6aSTony Nguyen 		return ret;
6308462acf6aSTony Nguyen 	}
6309462acf6aSTony Nguyen 
63105f8cc355SHenry Tieman 	/* Do not change setting during reset */
63115f8cc355SHenry Tieman 	if (ice_is_reset_in_progress(pf->state)) {
6312c67672faSMaciej Fijalkowski 		dev_err(ice_pf_to_dev(pf),
6313c67672faSMaciej Fijalkowski 			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
63145f8cc355SHenry Tieman 		return -EBUSY;
63155f8cc355SHenry Tieman 	}
63165f8cc355SHenry Tieman 
63178f529ff9STony Nguyen 	/* Multiple features can be changed in one call so keep features in
63188f529ff9STony Nguyen 	 * separate if/else statements to guarantee each feature is checked
63198f529ff9STony Nguyen 	 */
6320c67672faSMaciej Fijalkowski 	if (changed & NETIF_F_RXHASH)
6321c67672faSMaciej Fijalkowski 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6322492af0abSMd Fahad Iqbal Polash 
63231babaf77SBrett Creeley 	ret = ice_set_vlan_features(netdev, features);
63241babaf77SBrett Creeley 	if (ret)
63251babaf77SBrett Creeley 		return ret;
63263171948eSTony Nguyen 
6327dddd406dSJesse Brandeburg 	/* Turn on receive of FCS aka CRC, and after setting this
6328dddd406dSJesse Brandeburg 	 * flag the packet data will have the 4 byte CRC appended
6329dddd406dSJesse Brandeburg 	 */
6330dddd406dSJesse Brandeburg 	if (changed & NETIF_F_RXFCS) {
6331affa1029SAnatolii Gerasymenko 		if ((features & NETIF_F_RXFCS) &&
6332affa1029SAnatolii Gerasymenko 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6333affa1029SAnatolii Gerasymenko 			dev_err(ice_pf_to_dev(vsi->back),
6334affa1029SAnatolii Gerasymenko 				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6335affa1029SAnatolii Gerasymenko 			return -EIO;
6336affa1029SAnatolii Gerasymenko 		}
6337affa1029SAnatolii Gerasymenko 
6338dddd406dSJesse Brandeburg 		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6339dddd406dSJesse Brandeburg 		ret = ice_down_up(vsi);
6340dddd406dSJesse Brandeburg 		if (ret)
6341dddd406dSJesse Brandeburg 			return ret;
6342dddd406dSJesse Brandeburg 	}
6343dddd406dSJesse Brandeburg 
6344c67672faSMaciej Fijalkowski 	if (changed & NETIF_F_NTUPLE) {
6345c67672faSMaciej Fijalkowski 		bool ena = !!(features & NETIF_F_NTUPLE);
6346c67672faSMaciej Fijalkowski 
6347c67672faSMaciej Fijalkowski 		ice_vsi_manage_fdir(vsi, ena);
6348c67672faSMaciej Fijalkowski 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
634928bf2672SBrett Creeley 	}
6350148beb61SHenry Tieman 
6351fbc7b27aSKiran Patil 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6352fbc7b27aSKiran Patil 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6353fbc7b27aSKiran Patil 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6354fbc7b27aSKiran Patil 		return -EACCES;
6355fbc7b27aSKiran Patil 	}
63569fea7498SKiran Patil 
6357c67672faSMaciej Fijalkowski 	if (changed & NETIF_F_HW_TC) {
6358c67672faSMaciej Fijalkowski 		bool ena = !!(features & NETIF_F_HW_TC);
63599fea7498SKiran Patil 
6360c67672faSMaciej Fijalkowski 		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6361f9867df6SAnirudh Venkataramanan 		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6362c67672faSMaciej Fijalkowski 	}
6363d76a60baSAnirudh Venkataramanan 
636444ece4e1SMaciej Fijalkowski 	if (changed & NETIF_F_LOOPBACK)
636544ece4e1SMaciej Fijalkowski 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
636644ece4e1SMaciej Fijalkowski 
636744ece4e1SMaciej Fijalkowski 	return ret;
6368d76a60baSAnirudh Venkataramanan }
6369d76a60baSAnirudh Venkataramanan 
6370d76a60baSAnirudh Venkataramanan /**
6371c31af68aSBrett Creeley  * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6372d76a60baSAnirudh Venkataramanan  * @vsi: VSI to setup VLAN properties for
6373d76a60baSAnirudh Venkataramanan  */
6374d76a60baSAnirudh Venkataramanan static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6375d76a60baSAnirudh Venkataramanan {
63761babaf77SBrett Creeley 	int err;
6377d76a60baSAnirudh Venkataramanan 
63781babaf77SBrett Creeley 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
63791babaf77SBrett Creeley 	if (err)
63801babaf77SBrett Creeley 		return err;
6381d76a60baSAnirudh Venkataramanan 
63821babaf77SBrett Creeley 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
63831babaf77SBrett Creeley 	if (err)
63841babaf77SBrett Creeley 		return err;
6385d76a60baSAnirudh Venkataramanan 
6386c31af68aSBrett Creeley 	return ice_vsi_add_vlan_zero(vsi);
6387d76a60baSAnirudh Venkataramanan }
6388d76a60baSAnirudh Venkataramanan 
6389d76a60baSAnirudh Venkataramanan /**
63900db66d20SMichal Swiatkowski  * ice_vsi_cfg_lan - Setup the VSI lan related config
6391cdedef59SAnirudh Venkataramanan  * @vsi: the VSI being configured
6392cdedef59SAnirudh Venkataramanan  *
6393cdedef59SAnirudh Venkataramanan  * Return 0 on success and negative value on error
6394cdedef59SAnirudh Venkataramanan  */
63950db66d20SMichal Swiatkowski int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6396cdedef59SAnirudh Venkataramanan {
6397cdedef59SAnirudh Venkataramanan 	int err;
6398cdedef59SAnirudh Venkataramanan 
63996a8d013eSJesse Brandeburg 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6400e94d4478SAnirudh Venkataramanan 		ice_set_rx_mode(vsi->netdev);
64019ecd25c2SAnirudh Venkataramanan 
64029ecd25c2SAnirudh Venkataramanan 		err = ice_vsi_vlan_setup(vsi);
6403d76a60baSAnirudh Venkataramanan 		if (err)
6404d76a60baSAnirudh Venkataramanan 			return err;
6405c7f2c42bSAnirudh Venkataramanan 	}
6406a629cf0aSAnirudh Venkataramanan 	ice_vsi_cfg_dcb_rings(vsi);
640703f7a986SAnirudh Venkataramanan 
640803f7a986SAnirudh Venkataramanan 	err = ice_vsi_cfg_lan_txqs(vsi);
6409efc2214bSMaciej Fijalkowski 	if (!err && ice_is_xdp_ena_vsi(vsi))
6410efc2214bSMaciej Fijalkowski 		err = ice_vsi_cfg_xdp_txqs(vsi);
6411cdedef59SAnirudh Venkataramanan 	if (!err)
6412cdedef59SAnirudh Venkataramanan 		err = ice_vsi_cfg_rxqs(vsi);
6413cdedef59SAnirudh Venkataramanan 
6414cdedef59SAnirudh Venkataramanan 	return err;
6415cdedef59SAnirudh Venkataramanan }
6416cdedef59SAnirudh Venkataramanan 
/* THEORY OF MODERATION:
 * The ice driver hardware works differently than the hardware that DIMLIB was
 * originally made for. ice hardware doesn't have packet count limits that
 * can trigger an interrupt, but it *does* have interrupt rate limit support,
 * which is hard-coded to a limit of 250,000 ints/second.
 * If not using dynamic moderation, the INTRL value can be modified
 * by ethtool rx-usecs-high.
 */
struct ice_dim {
	/* the throttle rate for interrupts, basically worst case delay before
	 * an initial interrupt fires, value is stored in microseconds.
	 */
	u16 itr;
};

/* Make a different profile for Rx that doesn't allow quite so aggressive
 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
 * second).
 */
static const struct ice_dim rx_profile[] = {
	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
	{8},    /* 125,000 ints/s */
	{16},   /*  62,500 ints/s */
	{62},   /*  16,129 ints/s */
	{126}   /*   7,936 ints/s */
};

/* The transmit profile, which has the same sorts of values
 * as the previous struct
 */
static const struct ice_dim tx_profile[] = {
	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
	{8},    /* 125,000 ints/s */
	{40},   /*  25,000 ints/s (1,000,000us / 40us) */
	{128},  /*   7,812 ints/s */
	{256}   /*   3,906 ints/s */
};
6454cdf1f1f1SJacob Keller 
6455cdf1f1f1SJacob Keller static void ice_tx_dim_work(struct work_struct *work)
6456cdf1f1f1SJacob Keller {
6457cdf1f1f1SJacob Keller 	struct ice_ring_container *rc;
6458cdf1f1f1SJacob Keller 	struct dim *dim;
6459d8eb7ad5SJesse Brandeburg 	u16 itr;
6460cdf1f1f1SJacob Keller 
6461cdf1f1f1SJacob Keller 	dim = container_of(work, struct dim, work);
6462d8eb7ad5SJesse Brandeburg 	rc = (struct ice_ring_container *)dim->priv;
6463cdf1f1f1SJacob Keller 
6464d8eb7ad5SJesse Brandeburg 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6465cdf1f1f1SJacob Keller 
6466cdf1f1f1SJacob Keller 	/* look up the values in our local table */
6467cdf1f1f1SJacob Keller 	itr = tx_profile[dim->profile_ix].itr;
6468cdf1f1f1SJacob Keller 
6469d8eb7ad5SJesse Brandeburg 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6470cdf1f1f1SJacob Keller 	ice_write_itr(rc, itr);
6471cdf1f1f1SJacob Keller 
6472cdf1f1f1SJacob Keller 	dim->state = DIM_START_MEASURE;
6473cdf1f1f1SJacob Keller }
6474cdf1f1f1SJacob Keller 
6475cdf1f1f1SJacob Keller static void ice_rx_dim_work(struct work_struct *work)
6476cdf1f1f1SJacob Keller {
6477cdf1f1f1SJacob Keller 	struct ice_ring_container *rc;
6478cdf1f1f1SJacob Keller 	struct dim *dim;
6479d8eb7ad5SJesse Brandeburg 	u16 itr;
6480cdf1f1f1SJacob Keller 
6481cdf1f1f1SJacob Keller 	dim = container_of(work, struct dim, work);
6482d8eb7ad5SJesse Brandeburg 	rc = (struct ice_ring_container *)dim->priv;
6483cdf1f1f1SJacob Keller 
6484d8eb7ad5SJesse Brandeburg 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6485cdf1f1f1SJacob Keller 
6486cdf1f1f1SJacob Keller 	/* look up the values in our local table */
6487cdf1f1f1SJacob Keller 	itr = rx_profile[dim->profile_ix].itr;
6488cdf1f1f1SJacob Keller 
6489d8eb7ad5SJesse Brandeburg 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6490cdf1f1f1SJacob Keller 	ice_write_itr(rc, itr);
6491cdf1f1f1SJacob Keller 
6492cdf1f1f1SJacob Keller 	dim->state = DIM_START_MEASURE;
6493cdf1f1f1SJacob Keller }
6494cdf1f1f1SJacob Keller 
6495d8eb7ad5SJesse Brandeburg #define ICE_DIM_DEFAULT_PROFILE_IX 1
6496d8eb7ad5SJesse Brandeburg 
6497d8eb7ad5SJesse Brandeburg /**
6498d8eb7ad5SJesse Brandeburg  * ice_init_moderation - set up interrupt moderation
6499d8eb7ad5SJesse Brandeburg  * @q_vector: the vector containing rings to be configured
6500d8eb7ad5SJesse Brandeburg  *
6501d8eb7ad5SJesse Brandeburg  * Set up interrupt moderation registers, with the intent to do the right thing
6502d8eb7ad5SJesse Brandeburg  * when called from reset or from probe, and whether or not dynamic moderation
6503d8eb7ad5SJesse Brandeburg  * is enabled or not. Take special care to write all the registers in both
6504d8eb7ad5SJesse Brandeburg  * dynamic moderation mode or not in order to make sure hardware is in a known
6505d8eb7ad5SJesse Brandeburg  * state.
6506d8eb7ad5SJesse Brandeburg  */
6507d8eb7ad5SJesse Brandeburg static void ice_init_moderation(struct ice_q_vector *q_vector)
6508d8eb7ad5SJesse Brandeburg {
6509d8eb7ad5SJesse Brandeburg 	struct ice_ring_container *rc;
6510d8eb7ad5SJesse Brandeburg 	bool tx_dynamic, rx_dynamic;
6511d8eb7ad5SJesse Brandeburg 
6512d8eb7ad5SJesse Brandeburg 	rc = &q_vector->tx;
6513d8eb7ad5SJesse Brandeburg 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6514d8eb7ad5SJesse Brandeburg 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6515d8eb7ad5SJesse Brandeburg 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6516d8eb7ad5SJesse Brandeburg 	rc->dim.priv = rc;
6517d8eb7ad5SJesse Brandeburg 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6518d8eb7ad5SJesse Brandeburg 
6519d8eb7ad5SJesse Brandeburg 	/* set the initial TX ITR to match the above */
6520d8eb7ad5SJesse Brandeburg 	ice_write_itr(rc, tx_dynamic ?
6521d8eb7ad5SJesse Brandeburg 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6522d8eb7ad5SJesse Brandeburg 
6523d8eb7ad5SJesse Brandeburg 	rc = &q_vector->rx;
6524d8eb7ad5SJesse Brandeburg 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6525d8eb7ad5SJesse Brandeburg 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6526d8eb7ad5SJesse Brandeburg 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6527d8eb7ad5SJesse Brandeburg 	rc->dim.priv = rc;
6528d8eb7ad5SJesse Brandeburg 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6529d8eb7ad5SJesse Brandeburg 
6530d8eb7ad5SJesse Brandeburg 	/* set the initial RX ITR to match the above */
6531d8eb7ad5SJesse Brandeburg 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6532d8eb7ad5SJesse Brandeburg 				       rc->itr_setting);
6533d8eb7ad5SJesse Brandeburg 
6534d8eb7ad5SJesse Brandeburg 	ice_set_q_vector_intrl(q_vector);
6535d8eb7ad5SJesse Brandeburg }
6536d8eb7ad5SJesse Brandeburg 
6537cdedef59SAnirudh Venkataramanan /**
65382b245cb2SAnirudh Venkataramanan  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
65392b245cb2SAnirudh Venkataramanan  * @vsi: the VSI being configured
65402b245cb2SAnirudh Venkataramanan  */
65412b245cb2SAnirudh Venkataramanan static void ice_napi_enable_all(struct ice_vsi *vsi)
65422b245cb2SAnirudh Venkataramanan {
65432b245cb2SAnirudh Venkataramanan 	int q_idx;
65442b245cb2SAnirudh Venkataramanan 
65452b245cb2SAnirudh Venkataramanan 	if (!vsi->netdev)
65462b245cb2SAnirudh Venkataramanan 		return;
65472b245cb2SAnirudh Venkataramanan 
65480c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, q_idx) {
6549eec90376SYoung Xiao 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6550eec90376SYoung Xiao 
6551d8eb7ad5SJesse Brandeburg 		ice_init_moderation(q_vector);
6552cdf1f1f1SJacob Keller 
6553e72bba21SMaciej Fijalkowski 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6554eec90376SYoung Xiao 			napi_enable(&q_vector->napi);
6555eec90376SYoung Xiao 	}
65562b245cb2SAnirudh Venkataramanan }
65572b245cb2SAnirudh Venkataramanan 
/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
 *
 * Return 0 on success and negative value on error
 */
static int ice_up_complete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int err;

	ice_vsi_cfg_msix(vsi);

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	err = ice_vsi_start_all_rx_rings(vsi);
	if (err)
		return err;

	/* clear DOWN before enabling NAPI/IRQs so the data path runs */
	clear_bit(ICE_VSI_DOWN, vsi->state);
	ice_napi_enable_all(vsi);
	ice_vsi_ena_irq(vsi);

	/* only a link-up PF VSI with a netdev owns queue state/carrier */
	if (vsi->port_info &&
	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
	    vsi->netdev && vsi->type == ICE_VSI_PF) {
		ice_print_link_msg(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
		ice_ptp_link_change(pf, pf->hw.pf_id, true);
	}

	/* Perform an initial read of the statistics registers now to
	 * set the baseline so counters are ready when interface is up
	 */
	ice_update_eth_stats(vsi);

	if (vsi->type == ICE_VSI_PF)
		ice_service_task_schedule(pf);

	return 0;
}
6602cdedef59SAnirudh Venkataramanan 
/**
 * ice_up - Bring the connection back up after being down
 * @vsi: VSI being configured
 *
 * Returns 0 on success, negative error code on failure.
 */
int ice_up(struct ice_vsi *vsi)
{
	int err = ice_vsi_cfg_lan(vsi);

	return err ? err : ice_up_complete(vsi);
}
6617fcea6f3dSAnirudh Venkataramanan 
/**
 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
 * @syncp: pointer to u64_stats_sync
 * @stats: stats that pkts and bytes count will be taken from
 * @pkts: packets stats counter
 * @bytes: bytes stats counter
 *
 * This function fetches stats from the ring considering the atomic operations
 * that needs to be performed to read u64 values in 32 bit machine.
 */
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	/* standard u64_stats fetch/retry loop: re-read if a writer updated
	 * the counters while we were sampling them
	 */
	do {
		start = u64_stats_fetch_begin(syncp);
		*pkts = stats.pkts;
		*bytes = stats.bytes;
	} while (u64_stats_fetch_retry(syncp, start));
}
6640fcea6f3dSAnirudh Venkataramanan 
6641fcea6f3dSAnirudh Venkataramanan /**
664249d358e0SMarta Plantykow  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
664349d358e0SMarta Plantykow  * @vsi: the VSI to be updated
66441a0f25a5SJesse Brandeburg  * @vsi_stats: the stats struct to be updated
664549d358e0SMarta Plantykow  * @rings: rings to work on
664649d358e0SMarta Plantykow  * @count: number of rings
664749d358e0SMarta Plantykow  */
664849d358e0SMarta Plantykow static void
66491a0f25a5SJesse Brandeburg ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
66501a0f25a5SJesse Brandeburg 			     struct rtnl_link_stats64 *vsi_stats,
66511a0f25a5SJesse Brandeburg 			     struct ice_tx_ring **rings, u16 count)
665249d358e0SMarta Plantykow {
665349d358e0SMarta Plantykow 	u16 i;
665449d358e0SMarta Plantykow 
665549d358e0SMarta Plantykow 	for (i = 0; i < count; i++) {
6656e72bba21SMaciej Fijalkowski 		struct ice_tx_ring *ring;
6657e72bba21SMaciej Fijalkowski 		u64 pkts = 0, bytes = 0;
665849d358e0SMarta Plantykow 
665949d358e0SMarta Plantykow 		ring = READ_ONCE(rings[i]);
6660288ecf49SBenjamin Mikailenko 		if (!ring || !ring->ring_stats)
6661f1535469SMaciej Fijalkowski 			continue;
6662288ecf49SBenjamin Mikailenko 		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6663288ecf49SBenjamin Mikailenko 					     ring->ring_stats->stats, &pkts,
6664288ecf49SBenjamin Mikailenko 					     &bytes);
666549d358e0SMarta Plantykow 		vsi_stats->tx_packets += pkts;
666649d358e0SMarta Plantykow 		vsi_stats->tx_bytes += bytes;
6667288ecf49SBenjamin Mikailenko 		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6668288ecf49SBenjamin Mikailenko 		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6669288ecf49SBenjamin Mikailenko 		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
667049d358e0SMarta Plantykow 	}
667149d358e0SMarta Plantykow }
667249d358e0SMarta Plantykow 
667349d358e0SMarta Plantykow /**
6674fcea6f3dSAnirudh Venkataramanan  * ice_update_vsi_ring_stats - Update VSI stats counters
6675fcea6f3dSAnirudh Venkataramanan  * @vsi: the VSI to be updated
6676fcea6f3dSAnirudh Venkataramanan  */
6677fcea6f3dSAnirudh Venkataramanan static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6678fcea6f3dSAnirudh Venkataramanan {
66792fd5e433SBenjamin Mikailenko 	struct rtnl_link_stats64 *net_stats, *stats_prev;
66801a0f25a5SJesse Brandeburg 	struct rtnl_link_stats64 *vsi_stats;
6681fcea6f3dSAnirudh Venkataramanan 	u64 pkts, bytes;
6682fcea6f3dSAnirudh Venkataramanan 	int i;
6683fcea6f3dSAnirudh Venkataramanan 
66841a0f25a5SJesse Brandeburg 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
66851a0f25a5SJesse Brandeburg 	if (!vsi_stats)
66861a0f25a5SJesse Brandeburg 		return;
6687fcea6f3dSAnirudh Venkataramanan 
6688fcea6f3dSAnirudh Venkataramanan 	/* reset non-netdev (extended) stats */
6689fcea6f3dSAnirudh Venkataramanan 	vsi->tx_restart = 0;
6690fcea6f3dSAnirudh Venkataramanan 	vsi->tx_busy = 0;
6691fcea6f3dSAnirudh Venkataramanan 	vsi->tx_linearize = 0;
6692fcea6f3dSAnirudh Venkataramanan 	vsi->rx_buf_failed = 0;
6693fcea6f3dSAnirudh Venkataramanan 	vsi->rx_page_failed = 0;
6694fcea6f3dSAnirudh Venkataramanan 
6695fcea6f3dSAnirudh Venkataramanan 	rcu_read_lock();
6696fcea6f3dSAnirudh Venkataramanan 
6697fcea6f3dSAnirudh Venkataramanan 	/* update Tx rings counters */
66981a0f25a5SJesse Brandeburg 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
66991a0f25a5SJesse Brandeburg 				     vsi->num_txq);
6700fcea6f3dSAnirudh Venkataramanan 
6701fcea6f3dSAnirudh Venkataramanan 	/* update Rx rings counters */
6702fcea6f3dSAnirudh Venkataramanan 	ice_for_each_rxq(vsi, i) {
6703e72bba21SMaciej Fijalkowski 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6704288ecf49SBenjamin Mikailenko 		struct ice_ring_stats *ring_stats;
6705b6b0501dSPaul M Stillwell Jr 
6706288ecf49SBenjamin Mikailenko 		ring_stats = ring->ring_stats;
6707288ecf49SBenjamin Mikailenko 		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6708288ecf49SBenjamin Mikailenko 					     ring_stats->stats, &pkts,
6709288ecf49SBenjamin Mikailenko 					     &bytes);
6710fcea6f3dSAnirudh Venkataramanan 		vsi_stats->rx_packets += pkts;
6711fcea6f3dSAnirudh Venkataramanan 		vsi_stats->rx_bytes += bytes;
6712288ecf49SBenjamin Mikailenko 		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6713288ecf49SBenjamin Mikailenko 		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6714fcea6f3dSAnirudh Venkataramanan 	}
6715fcea6f3dSAnirudh Venkataramanan 
671649d358e0SMarta Plantykow 	/* update XDP Tx rings counters */
671749d358e0SMarta Plantykow 	if (ice_is_xdp_ena_vsi(vsi))
67181a0f25a5SJesse Brandeburg 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
671949d358e0SMarta Plantykow 					     vsi->num_xdp_txq);
672049d358e0SMarta Plantykow 
6721fcea6f3dSAnirudh Venkataramanan 	rcu_read_unlock();
67221a0f25a5SJesse Brandeburg 
67232fd5e433SBenjamin Mikailenko 	net_stats = &vsi->net_stats;
67242fd5e433SBenjamin Mikailenko 	stats_prev = &vsi->net_stats_prev;
67252fd5e433SBenjamin Mikailenko 
67262fd5e433SBenjamin Mikailenko 	/* clear prev counters after reset */
67272fd5e433SBenjamin Mikailenko 	if (vsi_stats->tx_packets < stats_prev->tx_packets ||
67282fd5e433SBenjamin Mikailenko 	    vsi_stats->rx_packets < stats_prev->rx_packets) {
67292fd5e433SBenjamin Mikailenko 		stats_prev->tx_packets = 0;
67302fd5e433SBenjamin Mikailenko 		stats_prev->tx_bytes = 0;
67312fd5e433SBenjamin Mikailenko 		stats_prev->rx_packets = 0;
67322fd5e433SBenjamin Mikailenko 		stats_prev->rx_bytes = 0;
67332fd5e433SBenjamin Mikailenko 	}
67342fd5e433SBenjamin Mikailenko 
67352fd5e433SBenjamin Mikailenko 	/* update netdev counters */
67362fd5e433SBenjamin Mikailenko 	net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
67372fd5e433SBenjamin Mikailenko 	net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
67382fd5e433SBenjamin Mikailenko 	net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
67392fd5e433SBenjamin Mikailenko 	net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
67402fd5e433SBenjamin Mikailenko 
67412fd5e433SBenjamin Mikailenko 	stats_prev->tx_packets = vsi_stats->tx_packets;
67422fd5e433SBenjamin Mikailenko 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
67432fd5e433SBenjamin Mikailenko 	stats_prev->rx_packets = vsi_stats->rx_packets;
67442fd5e433SBenjamin Mikailenko 	stats_prev->rx_bytes = vsi_stats->rx_bytes;
67451a0f25a5SJesse Brandeburg 
67461a0f25a5SJesse Brandeburg 	kfree(vsi_stats);
6747fcea6f3dSAnirudh Venkataramanan }
6748fcea6f3dSAnirudh Venkataramanan 
6749fcea6f3dSAnirudh Venkataramanan /**
6750fcea6f3dSAnirudh Venkataramanan  * ice_update_vsi_stats - Update VSI stats counters
6751fcea6f3dSAnirudh Venkataramanan  * @vsi: the VSI to be updated
6752fcea6f3dSAnirudh Venkataramanan  */
67535a4a8673SBruce Allan void ice_update_vsi_stats(struct ice_vsi *vsi)
6754fcea6f3dSAnirudh Venkataramanan {
6755fcea6f3dSAnirudh Venkataramanan 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6756fcea6f3dSAnirudh Venkataramanan 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6757fcea6f3dSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
6758fcea6f3dSAnirudh Venkataramanan 
6759e97fb1aeSAnirudh Venkataramanan 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
67607e408e07SAnirudh Venkataramanan 	    test_bit(ICE_CFG_BUSY, pf->state))
6761fcea6f3dSAnirudh Venkataramanan 		return;
6762fcea6f3dSAnirudh Venkataramanan 
6763fcea6f3dSAnirudh Venkataramanan 	/* get stats as recorded by Tx/Rx rings */
6764fcea6f3dSAnirudh Venkataramanan 	ice_update_vsi_ring_stats(vsi);
6765fcea6f3dSAnirudh Venkataramanan 
6766fcea6f3dSAnirudh Venkataramanan 	/* get VSI stats as recorded by the hardware */
6767fcea6f3dSAnirudh Venkataramanan 	ice_update_eth_stats(vsi);
6768fcea6f3dSAnirudh Venkataramanan 
6769fcea6f3dSAnirudh Venkataramanan 	cur_ns->tx_errors = cur_es->tx_errors;
677051fe27e1SAnirudh Venkataramanan 	cur_ns->rx_dropped = cur_es->rx_discards;
6771fcea6f3dSAnirudh Venkataramanan 	cur_ns->tx_dropped = cur_es->tx_discards;
6772fcea6f3dSAnirudh Venkataramanan 	cur_ns->multicast = cur_es->rx_multicast;
6773fcea6f3dSAnirudh Venkataramanan 
6774fcea6f3dSAnirudh Venkataramanan 	/* update some more netdev stats if this is main VSI */
6775fcea6f3dSAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_PF) {
6776fcea6f3dSAnirudh Venkataramanan 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6777fcea6f3dSAnirudh Venkataramanan 		cur_ns->rx_errors = pf->stats.crc_errors +
67784f1fe43cSBrett Creeley 				    pf->stats.illegal_bytes +
67794f1fe43cSBrett Creeley 				    pf->stats.rx_len_errors +
67804f1fe43cSBrett Creeley 				    pf->stats.rx_undersize +
67814f1fe43cSBrett Creeley 				    pf->hw_csum_rx_error +
67824f1fe43cSBrett Creeley 				    pf->stats.rx_jabber +
67834f1fe43cSBrett Creeley 				    pf->stats.rx_fragments +
67844f1fe43cSBrett Creeley 				    pf->stats.rx_oversize;
6785fcea6f3dSAnirudh Venkataramanan 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
678656923ab6SBrett Creeley 		/* record drops from the port level */
678756923ab6SBrett Creeley 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6788fcea6f3dSAnirudh Venkataramanan 	}
6789fcea6f3dSAnirudh Venkataramanan }
6790fcea6f3dSAnirudh Venkataramanan 
/**
 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats needs to be updated
 *
 * Snapshot every port-level HW statistics register into pf->stats.
 * ice_stat_update40()/ice_stat_update32() read the 40-bit/32-bit HW
 * counters and use the previous snapshot in pf->stats_prev (valid only
 * when pf->stat_prev_loaded is set) to accumulate deltas.
 */
void ice_update_pf_stats(struct ice_pf *pf)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &pf->hw;
	u16 fd_ctr_base;
	u8 port;

	port = hw->port_info->lport;
	prev_ps = &pf->stats_prev;
	cur_ps = &pf->stats;

	/* HW counters are cleared by reset; invalidate the old baseline so
	 * the first post-reset read does not produce a huge bogus delta
	 */
	if (ice_is_reset_in_progress(pf->state))
		pf->stat_prev_loaded = false;

	/* Rx byte/packet counters, per address type */
	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_bytes,
			  &cur_ps->eth.rx_bytes);

	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_unicast,
			  &cur_ps->eth.rx_unicast);

	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_multicast,
			  &cur_ps->eth.rx_multicast);

	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_broadcast,
			  &cur_ps->eth.rx_broadcast);

	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
			  &prev_ps->eth.rx_discards,
			  &cur_ps->eth.rx_discards);

	/* Tx byte/packet counters, per address type */
	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_bytes,
			  &cur_ps->eth.tx_bytes);

	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_unicast,
			  &cur_ps->eth.tx_unicast);

	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_multicast,
			  &cur_ps->eth.tx_multicast);

	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_broadcast,
			  &cur_ps->eth.tx_broadcast);

	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
			  &prev_ps->tx_dropped_link_down,
			  &cur_ps->tx_dropped_link_down);

	/* Rx packet size histogram */
	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);

	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);

	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);

	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);

	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);

	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);

	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);

	/* Tx packet size histogram */
	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);

	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);

	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);

	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);

	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);

	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);

	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);

	/* Flow Director sideband filter match counter */
	fd_ctr_base = hw->fd_ctr_base;

	ice_stat_update40(hw,
			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
			  &cur_ps->fd_sb_match);
	/* link-level flow control (XON/XOFF pause frames) */
	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);

	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);

	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);

	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);

	ice_update_dcb_stats(pf);

	/* error counters */
	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
			  &prev_ps->crc_errors, &cur_ps->crc_errors);

	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);

	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
			  &prev_ps->mac_local_faults,
			  &cur_ps->mac_local_faults);

	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
			  &prev_ps->mac_remote_faults,
			  &cur_ps->mac_remote_faults);

	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);

	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);

	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);

	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);

	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);

	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;

	/* baseline is now valid for subsequent delta computations */
	pf->stat_prev_loaded = true;
}
6944fcea6f3dSAnirudh Venkataramanan 
6945fcea6f3dSAnirudh Venkataramanan /**
6946fcea6f3dSAnirudh Venkataramanan  * ice_get_stats64 - get statistics for network device structure
6947fcea6f3dSAnirudh Venkataramanan  * @netdev: network interface device structure
6948fcea6f3dSAnirudh Venkataramanan  * @stats: main device statistics structure
6949fcea6f3dSAnirudh Venkataramanan  */
6950fcea6f3dSAnirudh Venkataramanan static
6951fcea6f3dSAnirudh Venkataramanan void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6952fcea6f3dSAnirudh Venkataramanan {
6953fcea6f3dSAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
6954fcea6f3dSAnirudh Venkataramanan 	struct rtnl_link_stats64 *vsi_stats;
6955fcea6f3dSAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
6956fcea6f3dSAnirudh Venkataramanan 
6957fcea6f3dSAnirudh Venkataramanan 	vsi_stats = &vsi->net_stats;
6958fcea6f3dSAnirudh Venkataramanan 
69593d57fd10SDave Ertman 	if (!vsi->num_txq || !vsi->num_rxq)
6960fcea6f3dSAnirudh Venkataramanan 		return;
69613d57fd10SDave Ertman 
6962fcea6f3dSAnirudh Venkataramanan 	/* netdev packet/byte stats come from ring counter. These are obtained
6963fcea6f3dSAnirudh Venkataramanan 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
69643d57fd10SDave Ertman 	 * But, only call the update routine and read the registers if VSI is
69653d57fd10SDave Ertman 	 * not down.
6966fcea6f3dSAnirudh Venkataramanan 	 */
6967e97fb1aeSAnirudh Venkataramanan 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6968fcea6f3dSAnirudh Venkataramanan 		ice_update_vsi_ring_stats(vsi);
6969fcea6f3dSAnirudh Venkataramanan 	stats->tx_packets = vsi_stats->tx_packets;
6970fcea6f3dSAnirudh Venkataramanan 	stats->tx_bytes = vsi_stats->tx_bytes;
6971fcea6f3dSAnirudh Venkataramanan 	stats->rx_packets = vsi_stats->rx_packets;
6972fcea6f3dSAnirudh Venkataramanan 	stats->rx_bytes = vsi_stats->rx_bytes;
6973fcea6f3dSAnirudh Venkataramanan 
6974fcea6f3dSAnirudh Venkataramanan 	/* The rest of the stats can be read from the hardware but instead we
6975fcea6f3dSAnirudh Venkataramanan 	 * just return values that the watchdog task has already obtained from
6976fcea6f3dSAnirudh Venkataramanan 	 * the hardware.
6977fcea6f3dSAnirudh Venkataramanan 	 */
6978fcea6f3dSAnirudh Venkataramanan 	stats->multicast = vsi_stats->multicast;
6979fcea6f3dSAnirudh Venkataramanan 	stats->tx_errors = vsi_stats->tx_errors;
6980fcea6f3dSAnirudh Venkataramanan 	stats->tx_dropped = vsi_stats->tx_dropped;
6981fcea6f3dSAnirudh Venkataramanan 	stats->rx_errors = vsi_stats->rx_errors;
6982fcea6f3dSAnirudh Venkataramanan 	stats->rx_dropped = vsi_stats->rx_dropped;
6983fcea6f3dSAnirudh Venkataramanan 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6984fcea6f3dSAnirudh Venkataramanan 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6985fcea6f3dSAnirudh Venkataramanan }
6986fcea6f3dSAnirudh Venkataramanan 
6987fcea6f3dSAnirudh Venkataramanan /**
69882b245cb2SAnirudh Venkataramanan  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
69892b245cb2SAnirudh Venkataramanan  * @vsi: VSI having NAPI disabled
69902b245cb2SAnirudh Venkataramanan  */
69912b245cb2SAnirudh Venkataramanan static void ice_napi_disable_all(struct ice_vsi *vsi)
69922b245cb2SAnirudh Venkataramanan {
69932b245cb2SAnirudh Venkataramanan 	int q_idx;
69942b245cb2SAnirudh Venkataramanan 
69952b245cb2SAnirudh Venkataramanan 	if (!vsi->netdev)
69962b245cb2SAnirudh Venkataramanan 		return;
69972b245cb2SAnirudh Venkataramanan 
69980c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, q_idx) {
6999eec90376SYoung Xiao 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7000eec90376SYoung Xiao 
7001e72bba21SMaciej Fijalkowski 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7002eec90376SYoung Xiao 			napi_disable(&q_vector->napi);
7003cdf1f1f1SJacob Keller 
7004cdf1f1f1SJacob Keller 		cancel_work_sync(&q_vector->tx.dim.work);
7005cdf1f1f1SJacob Keller 		cancel_work_sync(&q_vector->rx.dim.work);
7006eec90376SYoung Xiao 	}
70072b245cb2SAnirudh Venkataramanan }
70082b245cb2SAnirudh Venkataramanan 
70092b245cb2SAnirudh Venkataramanan /**
7010cdedef59SAnirudh Venkataramanan  * ice_down - Shutdown the connection
7011cdedef59SAnirudh Venkataramanan  * @vsi: The VSI being stopped
701221c6e36bSJesse Brandeburg  *
701321c6e36bSJesse Brandeburg  * Caller of this function is expected to set the vsi->state ICE_DOWN bit
7014cdedef59SAnirudh Venkataramanan  */
7015fcea6f3dSAnirudh Venkataramanan int ice_down(struct ice_vsi *vsi)
7016cdedef59SAnirudh Venkataramanan {
70178ac71327SMateusz Palczewski 	int i, tx_err, rx_err, vlan_err = 0;
7018cdedef59SAnirudh Venkataramanan 
701921c6e36bSJesse Brandeburg 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
702021c6e36bSJesse Brandeburg 
7021b3be918dSGrzegorz Nitka 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
7022c31af68aSBrett Creeley 		vlan_err = ice_vsi_del_vlan_zero(vsi);
70233a749623SJacob Keller 		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
7024cdedef59SAnirudh Venkataramanan 		netif_carrier_off(vsi->netdev);
7025cdedef59SAnirudh Venkataramanan 		netif_tx_disable(vsi->netdev);
7026b3be918dSGrzegorz Nitka 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
7027b3be918dSGrzegorz Nitka 		ice_eswitch_stop_all_tx_queues(vsi->back);
7028cdedef59SAnirudh Venkataramanan 	}
7029cdedef59SAnirudh Venkataramanan 
7030cdedef59SAnirudh Venkataramanan 	ice_vsi_dis_irq(vsi);
703103f7a986SAnirudh Venkataramanan 
703203f7a986SAnirudh Venkataramanan 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
703372adf242SAnirudh Venkataramanan 	if (tx_err)
703419cce2c6SAnirudh Venkataramanan 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
703572adf242SAnirudh Venkataramanan 			   vsi->vsi_num, tx_err);
7036efc2214bSMaciej Fijalkowski 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
7037efc2214bSMaciej Fijalkowski 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7038efc2214bSMaciej Fijalkowski 		if (tx_err)
703919cce2c6SAnirudh Venkataramanan 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7040efc2214bSMaciej Fijalkowski 				   vsi->vsi_num, tx_err);
7041efc2214bSMaciej Fijalkowski 	}
704272adf242SAnirudh Venkataramanan 
704313a6233bSBrett Creeley 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
704472adf242SAnirudh Venkataramanan 	if (rx_err)
704519cce2c6SAnirudh Venkataramanan 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
704672adf242SAnirudh Venkataramanan 			   vsi->vsi_num, rx_err);
704772adf242SAnirudh Venkataramanan 
70482b245cb2SAnirudh Venkataramanan 	ice_napi_disable_all(vsi);
7049cdedef59SAnirudh Venkataramanan 
7050cdedef59SAnirudh Venkataramanan 	ice_for_each_txq(vsi, i)
7051cdedef59SAnirudh Venkataramanan 		ice_clean_tx_ring(vsi->tx_rings[i]);
7052cdedef59SAnirudh Venkataramanan 
705378c50d69SKamil Maziarz 	if (ice_is_xdp_ena_vsi(vsi))
705478c50d69SKamil Maziarz 		ice_for_each_xdp_txq(vsi, i)
705578c50d69SKamil Maziarz 			ice_clean_tx_ring(vsi->xdp_rings[i]);
705678c50d69SKamil Maziarz 
7057cdedef59SAnirudh Venkataramanan 	ice_for_each_rxq(vsi, i)
7058cdedef59SAnirudh Venkataramanan 		ice_clean_rx_ring(vsi->rx_rings[i]);
7059cdedef59SAnirudh Venkataramanan 
70608ac71327SMateusz Palczewski 	if (tx_err || rx_err || vlan_err) {
706119cce2c6SAnirudh Venkataramanan 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7062cdedef59SAnirudh Venkataramanan 			   vsi->vsi_num, vsi->vsw->sw_id);
706372adf242SAnirudh Venkataramanan 		return -EIO;
706472adf242SAnirudh Venkataramanan 	}
706572adf242SAnirudh Venkataramanan 
706672adf242SAnirudh Venkataramanan 	return 0;
7067cdedef59SAnirudh Venkataramanan }
7068cdedef59SAnirudh Venkataramanan 
7069cdedef59SAnirudh Venkataramanan /**
7070dddd406dSJesse Brandeburg  * ice_down_up - shutdown the VSI connection and bring it up
7071dddd406dSJesse Brandeburg  * @vsi: the VSI to be reconnected
7072dddd406dSJesse Brandeburg  */
7073dddd406dSJesse Brandeburg int ice_down_up(struct ice_vsi *vsi)
7074dddd406dSJesse Brandeburg {
7075dddd406dSJesse Brandeburg 	int ret;
7076dddd406dSJesse Brandeburg 
7077dddd406dSJesse Brandeburg 	/* if DOWN already set, nothing to do */
7078dddd406dSJesse Brandeburg 	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7079dddd406dSJesse Brandeburg 		return 0;
7080dddd406dSJesse Brandeburg 
7081dddd406dSJesse Brandeburg 	ret = ice_down(vsi);
7082dddd406dSJesse Brandeburg 	if (ret)
7083dddd406dSJesse Brandeburg 		return ret;
7084dddd406dSJesse Brandeburg 
7085dddd406dSJesse Brandeburg 	ret = ice_up(vsi);
7086dddd406dSJesse Brandeburg 	if (ret) {
7087dddd406dSJesse Brandeburg 		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7088dddd406dSJesse Brandeburg 		return ret;
7089dddd406dSJesse Brandeburg 	}
7090dddd406dSJesse Brandeburg 
7091dddd406dSJesse Brandeburg 	return 0;
7092dddd406dSJesse Brandeburg }
7093dddd406dSJesse Brandeburg 
7094dddd406dSJesse Brandeburg /**
7095cdedef59SAnirudh Venkataramanan  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7096cdedef59SAnirudh Venkataramanan  * @vsi: VSI having resources allocated
7097cdedef59SAnirudh Venkataramanan  *
7098cdedef59SAnirudh Venkataramanan  * Return 0 on success, negative on failure
7099cdedef59SAnirudh Venkataramanan  */
71000e674aebSAnirudh Venkataramanan int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7101cdedef59SAnirudh Venkataramanan {
7102dab0588fSJesse Brandeburg 	int i, err = 0;
7103cdedef59SAnirudh Venkataramanan 
7104cdedef59SAnirudh Venkataramanan 	if (!vsi->num_txq) {
71059a946843SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7106cdedef59SAnirudh Venkataramanan 			vsi->vsi_num);
7107cdedef59SAnirudh Venkataramanan 		return -EINVAL;
7108cdedef59SAnirudh Venkataramanan 	}
7109cdedef59SAnirudh Venkataramanan 
7110cdedef59SAnirudh Venkataramanan 	ice_for_each_txq(vsi, i) {
7111e72bba21SMaciej Fijalkowski 		struct ice_tx_ring *ring = vsi->tx_rings[i];
7112eb0ee8abSMichal Swiatkowski 
7113eb0ee8abSMichal Swiatkowski 		if (!ring)
7114eb0ee8abSMichal Swiatkowski 			return -EINVAL;
7115eb0ee8abSMichal Swiatkowski 
71161c54c839SGrzegorz Nitka 		if (vsi->netdev)
7117eb0ee8abSMichal Swiatkowski 			ring->netdev = vsi->netdev;
7118eb0ee8abSMichal Swiatkowski 		err = ice_setup_tx_ring(ring);
7119cdedef59SAnirudh Venkataramanan 		if (err)
7120cdedef59SAnirudh Venkataramanan 			break;
7121cdedef59SAnirudh Venkataramanan 	}
7122cdedef59SAnirudh Venkataramanan 
7123cdedef59SAnirudh Venkataramanan 	return err;
7124cdedef59SAnirudh Venkataramanan }
7125cdedef59SAnirudh Venkataramanan 
7126cdedef59SAnirudh Venkataramanan /**
7127cdedef59SAnirudh Venkataramanan  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7128cdedef59SAnirudh Venkataramanan  * @vsi: VSI having resources allocated
7129cdedef59SAnirudh Venkataramanan  *
7130cdedef59SAnirudh Venkataramanan  * Return 0 on success, negative on failure
7131cdedef59SAnirudh Venkataramanan  */
71320e674aebSAnirudh Venkataramanan int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7133cdedef59SAnirudh Venkataramanan {
7134dab0588fSJesse Brandeburg 	int i, err = 0;
7135cdedef59SAnirudh Venkataramanan 
7136cdedef59SAnirudh Venkataramanan 	if (!vsi->num_rxq) {
71379a946843SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7138cdedef59SAnirudh Venkataramanan 			vsi->vsi_num);
7139cdedef59SAnirudh Venkataramanan 		return -EINVAL;
7140cdedef59SAnirudh Venkataramanan 	}
7141cdedef59SAnirudh Venkataramanan 
7142cdedef59SAnirudh Venkataramanan 	ice_for_each_rxq(vsi, i) {
7143e72bba21SMaciej Fijalkowski 		struct ice_rx_ring *ring = vsi->rx_rings[i];
7144eb0ee8abSMichal Swiatkowski 
7145eb0ee8abSMichal Swiatkowski 		if (!ring)
7146eb0ee8abSMichal Swiatkowski 			return -EINVAL;
7147eb0ee8abSMichal Swiatkowski 
71481c54c839SGrzegorz Nitka 		if (vsi->netdev)
7149eb0ee8abSMichal Swiatkowski 			ring->netdev = vsi->netdev;
7150eb0ee8abSMichal Swiatkowski 		err = ice_setup_rx_ring(ring);
7151cdedef59SAnirudh Venkataramanan 		if (err)
7152cdedef59SAnirudh Venkataramanan 			break;
7153cdedef59SAnirudh Venkataramanan 	}
7154cdedef59SAnirudh Venkataramanan 
7155cdedef59SAnirudh Venkataramanan 	return err;
7156cdedef59SAnirudh Venkataramanan }
7157cdedef59SAnirudh Venkataramanan 
/**
 * ice_vsi_open_ctrl - open control VSI for use
 * @vsi: the VSI to open
 *
 * Initialization of the Control VSI
 *
 * Allocates Tx/Rx descriptors, configures the VSI and its MSI-X
 * interrupts, starts the Rx rings and enables interrupts. On failure,
 * resources acquired so far are released via the goto cleanup ladder.
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open_ctrl(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg_lan(vsi);
	if (err)
		goto err_setup_rx;

	/* interrupt name is "<driver>-<device>:ctrl" */
	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
		 dev_driver_string(dev), dev_name(dev));
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	ice_vsi_cfg_msix(vsi);

	err = ice_vsi_start_all_rx_rings(vsi);
	if (err)
		goto err_up_complete;

	clear_bit(ICE_VSI_DOWN, vsi->state);
	ice_vsi_ena_irq(vsi);

	return 0;

err_up_complete:
	ice_down(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
7213148beb61SHenry Tieman 
7214148beb61SHenry Tieman /**
7215cdedef59SAnirudh Venkataramanan  * ice_vsi_open - Called when a network interface is made active
7216cdedef59SAnirudh Venkataramanan  * @vsi: the VSI to open
7217cdedef59SAnirudh Venkataramanan  *
7218cdedef59SAnirudh Venkataramanan  * Initialization of the VSI
7219cdedef59SAnirudh Venkataramanan  *
7220cdedef59SAnirudh Venkataramanan  * Returns 0 on success, negative value on error
7221cdedef59SAnirudh Venkataramanan  */
72221a1c40dfSGrzegorz Nitka int ice_vsi_open(struct ice_vsi *vsi)
7223cdedef59SAnirudh Venkataramanan {
7224cdedef59SAnirudh Venkataramanan 	char int_name[ICE_INT_NAME_STR_LEN];
7225cdedef59SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
7226cdedef59SAnirudh Venkataramanan 	int err;
7227cdedef59SAnirudh Venkataramanan 
7228cdedef59SAnirudh Venkataramanan 	/* allocate descriptors */
7229cdedef59SAnirudh Venkataramanan 	err = ice_vsi_setup_tx_rings(vsi);
7230cdedef59SAnirudh Venkataramanan 	if (err)
7231cdedef59SAnirudh Venkataramanan 		goto err_setup_tx;
7232cdedef59SAnirudh Venkataramanan 
7233cdedef59SAnirudh Venkataramanan 	err = ice_vsi_setup_rx_rings(vsi);
7234cdedef59SAnirudh Venkataramanan 	if (err)
7235cdedef59SAnirudh Venkataramanan 		goto err_setup_rx;
7236cdedef59SAnirudh Venkataramanan 
72370db66d20SMichal Swiatkowski 	err = ice_vsi_cfg_lan(vsi);
7238cdedef59SAnirudh Venkataramanan 	if (err)
7239cdedef59SAnirudh Venkataramanan 		goto err_setup_rx;
7240cdedef59SAnirudh Venkataramanan 
7241cdedef59SAnirudh Venkataramanan 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
72424015d11eSBrett Creeley 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7243ba880734SBrett Creeley 	err = ice_vsi_req_irq_msix(vsi, int_name);
7244cdedef59SAnirudh Venkataramanan 	if (err)
7245cdedef59SAnirudh Venkataramanan 		goto err_setup_rx;
7246cdedef59SAnirudh Venkataramanan 
7247122045caSMichal Swiatkowski 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7248122045caSMichal Swiatkowski 
72491a1c40dfSGrzegorz Nitka 	if (vsi->type == ICE_VSI_PF) {
7250cdedef59SAnirudh Venkataramanan 		/* Notify the stack of the actual queue counts. */
7251cdedef59SAnirudh Venkataramanan 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7252cdedef59SAnirudh Venkataramanan 		if (err)
7253cdedef59SAnirudh Venkataramanan 			goto err_set_qs;
7254cdedef59SAnirudh Venkataramanan 
7255cdedef59SAnirudh Venkataramanan 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7256cdedef59SAnirudh Venkataramanan 		if (err)
7257cdedef59SAnirudh Venkataramanan 			goto err_set_qs;
72581a1c40dfSGrzegorz Nitka 	}
7259cdedef59SAnirudh Venkataramanan 
7260cdedef59SAnirudh Venkataramanan 	err = ice_up_complete(vsi);
7261cdedef59SAnirudh Venkataramanan 	if (err)
7262cdedef59SAnirudh Venkataramanan 		goto err_up_complete;
7263cdedef59SAnirudh Venkataramanan 
7264cdedef59SAnirudh Venkataramanan 	return 0;
7265cdedef59SAnirudh Venkataramanan 
7266cdedef59SAnirudh Venkataramanan err_up_complete:
7267cdedef59SAnirudh Venkataramanan 	ice_down(vsi);
7268cdedef59SAnirudh Venkataramanan err_set_qs:
7269cdedef59SAnirudh Venkataramanan 	ice_vsi_free_irq(vsi);
7270cdedef59SAnirudh Venkataramanan err_setup_rx:
7271cdedef59SAnirudh Venkataramanan 	ice_vsi_free_rx_rings(vsi);
7272cdedef59SAnirudh Venkataramanan err_setup_tx:
7273cdedef59SAnirudh Venkataramanan 	ice_vsi_free_tx_rings(vsi);
7274cdedef59SAnirudh Venkataramanan 
7275cdedef59SAnirudh Venkataramanan 	return err;
7276cdedef59SAnirudh Venkataramanan }
7277cdedef59SAnirudh Venkataramanan 
7278cdedef59SAnirudh Venkataramanan /**
72790f9d5027SAnirudh Venkataramanan  * ice_vsi_release_all - Delete all VSIs
72800f9d5027SAnirudh Venkataramanan  * @pf: PF from which all VSIs are being removed
72810f9d5027SAnirudh Venkataramanan  */
72820f9d5027SAnirudh Venkataramanan static void ice_vsi_release_all(struct ice_pf *pf)
72830f9d5027SAnirudh Venkataramanan {
72840f9d5027SAnirudh Venkataramanan 	int err, i;
72850f9d5027SAnirudh Venkataramanan 
72860f9d5027SAnirudh Venkataramanan 	if (!pf->vsi)
72870f9d5027SAnirudh Venkataramanan 		return;
72880f9d5027SAnirudh Venkataramanan 
728980ed404aSBrett Creeley 	ice_for_each_vsi(pf, i) {
72900f9d5027SAnirudh Venkataramanan 		if (!pf->vsi[i])
72910f9d5027SAnirudh Venkataramanan 			continue;
72920f9d5027SAnirudh Venkataramanan 
7293fbc7b27aSKiran Patil 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7294fbc7b27aSKiran Patil 			continue;
7295fbc7b27aSKiran Patil 
72960f9d5027SAnirudh Venkataramanan 		err = ice_vsi_release(pf->vsi[i]);
72970f9d5027SAnirudh Venkataramanan 		if (err)
729819cce2c6SAnirudh Venkataramanan 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
72990f9d5027SAnirudh Venkataramanan 				i, err, pf->vsi[i]->vsi_num);
73000f9d5027SAnirudh Venkataramanan 	}
73010f9d5027SAnirudh Venkataramanan }
73020f9d5027SAnirudh Venkataramanan 
73030f9d5027SAnirudh Venkataramanan /**
7304462acf6aSTony Nguyen  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7305462acf6aSTony Nguyen  * @pf: pointer to the PF instance
7306462acf6aSTony Nguyen  * @type: VSI type to rebuild
7307462acf6aSTony Nguyen  *
7308462acf6aSTony Nguyen  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
73090f9d5027SAnirudh Venkataramanan  */
7310462acf6aSTony Nguyen static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
73110f9d5027SAnirudh Venkataramanan {
73124015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
7313462acf6aSTony Nguyen 	int i, err;
73140f9d5027SAnirudh Venkataramanan 
731580ed404aSBrett Creeley 	ice_for_each_vsi(pf, i) {
73164425e053SKrzysztof Kazimierczak 		struct ice_vsi *vsi = pf->vsi[i];
73170f9d5027SAnirudh Venkataramanan 
7318462acf6aSTony Nguyen 		if (!vsi || vsi->type != type)
73190f9d5027SAnirudh Venkataramanan 			continue;
73200f9d5027SAnirudh Venkataramanan 
7321462acf6aSTony Nguyen 		/* rebuild the VSI */
73226624e780SMichal Swiatkowski 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
73230f9d5027SAnirudh Venkataramanan 		if (err) {
732419cce2c6SAnirudh Venkataramanan 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7325964674f1SAnirudh Venkataramanan 				err, vsi->idx, ice_vsi_type_str(type));
73260f9d5027SAnirudh Venkataramanan 			return err;
73270f9d5027SAnirudh Venkataramanan 		}
73280f9d5027SAnirudh Venkataramanan 
7329462acf6aSTony Nguyen 		/* replay filters for the VSI */
73302ccc1c1cSTony Nguyen 		err = ice_replay_vsi(&pf->hw, vsi->idx);
73312ccc1c1cSTony Nguyen 		if (err) {
73325f87ec48STony Nguyen 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
73332ccc1c1cSTony Nguyen 				err, vsi->idx, ice_vsi_type_str(type));
7334c1484691STony Nguyen 			return err;
7335334cb062SAnirudh Venkataramanan 		}
7336334cb062SAnirudh Venkataramanan 
7337334cb062SAnirudh Venkataramanan 		/* Re-map HW VSI number, using VSI handle that has been
7338334cb062SAnirudh Venkataramanan 		 * previously validated in ice_replay_vsi() call above
7339334cb062SAnirudh Venkataramanan 		 */
7340462acf6aSTony Nguyen 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7341334cb062SAnirudh Venkataramanan 
7342462acf6aSTony Nguyen 		/* enable the VSI */
7343462acf6aSTony Nguyen 		err = ice_ena_vsi(vsi, false);
7344462acf6aSTony Nguyen 		if (err) {
734519cce2c6SAnirudh Venkataramanan 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7346964674f1SAnirudh Venkataramanan 				err, vsi->idx, ice_vsi_type_str(type));
7347462acf6aSTony Nguyen 			return err;
7348334cb062SAnirudh Venkataramanan 		}
7349334cb062SAnirudh Venkataramanan 
73504015d11eSBrett Creeley 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
73514015d11eSBrett Creeley 			 ice_vsi_type_str(type));
7352462acf6aSTony Nguyen 	}
7353462acf6aSTony Nguyen 
7354334cb062SAnirudh Venkataramanan 	return 0;
7355334cb062SAnirudh Venkataramanan }
7356334cb062SAnirudh Venkataramanan 
7357334cb062SAnirudh Venkataramanan /**
7358462acf6aSTony Nguyen  * ice_update_pf_netdev_link - Update PF netdev link status
7359462acf6aSTony Nguyen  * @pf: pointer to the PF instance
7360462acf6aSTony Nguyen  */
7361462acf6aSTony Nguyen static void ice_update_pf_netdev_link(struct ice_pf *pf)
7362462acf6aSTony Nguyen {
7363462acf6aSTony Nguyen 	bool link_up;
7364462acf6aSTony Nguyen 	int i;
7365462acf6aSTony Nguyen 
7366462acf6aSTony Nguyen 	ice_for_each_vsi(pf, i) {
7367462acf6aSTony Nguyen 		struct ice_vsi *vsi = pf->vsi[i];
7368462acf6aSTony Nguyen 
7369462acf6aSTony Nguyen 		if (!vsi || vsi->type != ICE_VSI_PF)
7370462acf6aSTony Nguyen 			return;
7371462acf6aSTony Nguyen 
7372462acf6aSTony Nguyen 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7373462acf6aSTony Nguyen 		if (link_up) {
7374462acf6aSTony Nguyen 			netif_carrier_on(pf->vsi[i]->netdev);
7375462acf6aSTony Nguyen 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7376462acf6aSTony Nguyen 		} else {
7377462acf6aSTony Nguyen 			netif_carrier_off(pf->vsi[i]->netdev);
7378462acf6aSTony Nguyen 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7379462acf6aSTony Nguyen 		}
7380462acf6aSTony Nguyen 	}
7381462acf6aSTony Nguyen }
7382462acf6aSTony Nguyen 
/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 * @reset_type: type of reset
 *
 * Do not rebuild VF VSI in this flow because that is already handled via
 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
 * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
 * to reset/rebuild all the VF VSI twice.
 */
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool dvm;
	int err;

	/* if the driver is going down anyway, skip the rebuild entirely */
	if (test_bit(ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);

#define ICE_EMP_RESET_SLEEP_MS 5000
	if (reset_type == ICE_RESET_EMPR) {
		/* If an EMP reset has occurred, any previously pending flash
		 * update will have completed. We no longer know whether or
		 * not the NVM update EMP reset is restricted.
		 */
		pf->fw_emp_reset_disabled = false;

		/* allow firmware time to finish EMP reset processing --
		 * NOTE(review): confirm the 5s delay against FW requirements
		 */
		msleep(ICE_EMP_RESET_SLEEP_MS);
	}

	/* re-initialize the control queues first; all firmware interaction
	 * below goes through them
	 */
	err = ice_init_all_ctrlq(hw);
	if (err) {
		dev_err(dev, "control queues init failed %d\n", err);
		goto err_init_ctrlq;
	}

	/* if DDP was previously loaded successfully */
	if (!ice_is_safe_mode(pf)) {
		/* reload the SW DB of filter tables */
		if (reset_type == ICE_RESET_PFR)
			ice_fill_blk_tbls(hw);
		else
			/* Reload DDP Package after CORER/GLOBR reset */
			ice_load_pkg(NULL, pf);
	}

	err = ice_clear_pf_cfg(hw);
	if (err) {
		dev_err(dev, "clear PF configuration failed %d\n", err);
		goto err_init_ctrlq;
	}

	ice_clear_pxe_mode(hw);

	err = ice_init_nvm(hw);
	if (err) {
		dev_err(dev, "ice_init_nvm failed %d\n", err);
		goto err_init_ctrlq;
	}

	err = ice_get_caps(hw);
	if (err) {
		dev_err(dev, "ice_get_caps failed %d\n", err);
		goto err_init_ctrlq;
	}

	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (err) {
		dev_err(dev, "set_mac_cfg failed %d\n", err);
		goto err_init_ctrlq;
	}

	/* restore single/double VLAN mode port configuration */
	dvm = ice_is_dvm_ena(hw);

	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
	if (err)
		goto err_init_ctrlq;

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	/* start misc vector */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "misc vector setup failed: %d\n", err);
		goto err_sched_init_port;
	}

	/* re-enable Flow Director and restore its filter pools if it was on */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
		if (!rd32(hw, PFQF_FD_SIZE)) {
			u16 unused, guar, b_effort;

			guar = hw->func_caps.fd_fltr_guar;
			b_effort = hw->func_caps.fd_fltr_best_effort;

			/* force guaranteed filter pool for PF */
			ice_alloc_fd_guar_item(hw, &unused, guar);
			/* force shared filter pool for PF */
			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
		}
	}

	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		ice_dcb_rebuild(pf);

	/* If the PF previously had enabled PTP, PTP init needs to happen before
	 * the VSI rebuild. If not, this causes the PTP link status events to
	 * fail.
	 */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_init(pf);

	/* rebuild PF VSI */
	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
	if (err) {
		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	/* configure PTP timestamping after VSI rebuild */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_cfg_timestamp(pf, false);

	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
	if (err) {
		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	/* ADQ channel VSIs only need a replay after a PF reset */
	if (reset_type == ICE_RESET_PFR) {
		err = ice_rebuild_channels(pf);
		if (err) {
			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
				err);
			goto err_vsi_rebuild;
		}
	}

	/* If Flow Director is active */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
		if (err) {
			dev_err(dev, "control VSI rebuild failed: %d\n", err);
			goto err_vsi_rebuild;
		}

		/* replay HW Flow Director recipes */
		if (hw->fdir_prof)
			ice_fdir_replay_flows(hw);

		/* replay Flow Director filters */
		ice_fdir_replay_fltrs(pf);

		ice_rebuild_arfs(pf);
	}

	ice_update_pf_netdev_link(pf);

	/* tell the firmware we are up */
	err = ice_send_version(pf);
	if (err) {
		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
			err);
		goto err_vsi_rebuild;
	}

	ice_replay_post(hw);

	/* if we get here, reset flow is successful */
	clear_bit(ICE_RESET_FAILED, pf->state);

	ice_plug_aux_dev(pf);
	return;

	/* error cleanup: labels fall through from most- to least-initialized */
err_vsi_rebuild:
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}
75760b28b702SAnirudh Venkataramanan 
75770b28b702SAnirudh Venkataramanan /**
7578e94d4478SAnirudh Venkataramanan  * ice_change_mtu - NDO callback to change the MTU
7579e94d4478SAnirudh Venkataramanan  * @netdev: network interface device structure
7580e94d4478SAnirudh Venkataramanan  * @new_mtu: new value for maximum frame size
7581e94d4478SAnirudh Venkataramanan  *
7582e94d4478SAnirudh Venkataramanan  * Returns 0 on success, negative on failure
7583e94d4478SAnirudh Venkataramanan  */
7584e94d4478SAnirudh Venkataramanan static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7585e94d4478SAnirudh Venkataramanan {
7586e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
7587e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
7588e94d4478SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
75892fba7dc5SMaciej Fijalkowski 	struct bpf_prog *prog;
7590e94d4478SAnirudh Venkataramanan 	u8 count = 0;
7591348048e7SDave Ertman 	int err = 0;
7592e94d4478SAnirudh Venkataramanan 
759322bef5e7SJesse Brandeburg 	if (new_mtu == (int)netdev->mtu) {
75942f2da36eSAnirudh Venkataramanan 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7595e94d4478SAnirudh Venkataramanan 		return 0;
7596e94d4478SAnirudh Venkataramanan 	}
7597e94d4478SAnirudh Venkataramanan 
75982fba7dc5SMaciej Fijalkowski 	prog = vsi->xdp_prog;
75992fba7dc5SMaciej Fijalkowski 	if (prog && !prog->aux->xdp_has_frags) {
760023b44513SMaciej Fijalkowski 		int frame_size = ice_max_xdp_frame_size(vsi);
7601efc2214bSMaciej Fijalkowski 
7602efc2214bSMaciej Fijalkowski 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7603efc2214bSMaciej Fijalkowski 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
760423b44513SMaciej Fijalkowski 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7605efc2214bSMaciej Fijalkowski 			return -EINVAL;
7606efc2214bSMaciej Fijalkowski 		}
7607c61bcebdSMaciej Fijalkowski 	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7608c61bcebdSMaciej Fijalkowski 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7609c61bcebdSMaciej Fijalkowski 			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7610c61bcebdSMaciej Fijalkowski 				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7611c61bcebdSMaciej Fijalkowski 			return -EINVAL;
7612c61bcebdSMaciej Fijalkowski 		}
7613efc2214bSMaciej Fijalkowski 	}
7614efc2214bSMaciej Fijalkowski 
7615e94d4478SAnirudh Venkataramanan 	/* if a reset is in progress, wait for some time for it to complete */
7616e94d4478SAnirudh Venkataramanan 	do {
76175df7e45dSDave Ertman 		if (ice_is_reset_in_progress(pf->state)) {
7618e94d4478SAnirudh Venkataramanan 			count++;
7619e94d4478SAnirudh Venkataramanan 			usleep_range(1000, 2000);
7620e94d4478SAnirudh Venkataramanan 		} else {
7621e94d4478SAnirudh Venkataramanan 			break;
7622e94d4478SAnirudh Venkataramanan 		}
7623e94d4478SAnirudh Venkataramanan 
7624e94d4478SAnirudh Venkataramanan 	} while (count < 100);
7625e94d4478SAnirudh Venkataramanan 
7626e94d4478SAnirudh Venkataramanan 	if (count == 100) {
76272f2da36eSAnirudh Venkataramanan 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7628e94d4478SAnirudh Venkataramanan 		return -EBUSY;
7629e94d4478SAnirudh Venkataramanan 	}
7630e94d4478SAnirudh Venkataramanan 
763122bef5e7SJesse Brandeburg 	netdev->mtu = (unsigned int)new_mtu;
7632e94d4478SAnirudh Venkataramanan 
7633e94d4478SAnirudh Venkataramanan 	/* if VSI is up, bring it down and then back up */
7634e97fb1aeSAnirudh Venkataramanan 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7635e94d4478SAnirudh Venkataramanan 		err = ice_down(vsi);
7636e94d4478SAnirudh Venkataramanan 		if (err) {
7637fe6cd890SMitch Williams 			netdev_err(netdev, "change MTU if_down err %d\n", err);
763897b01291SDave Ertman 			return err;
7639e94d4478SAnirudh Venkataramanan 		}
7640e94d4478SAnirudh Venkataramanan 
7641e94d4478SAnirudh Venkataramanan 		err = ice_up(vsi);
7642e94d4478SAnirudh Venkataramanan 		if (err) {
76432f2da36eSAnirudh Venkataramanan 			netdev_err(netdev, "change MTU if_up err %d\n", err);
764497b01291SDave Ertman 			return err;
7645e94d4478SAnirudh Venkataramanan 		}
7646e94d4478SAnirudh Venkataramanan 	}
7647e94d4478SAnirudh Venkataramanan 
7648bda5b7dbSTony Nguyen 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
764997b01291SDave Ertman 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7650348048e7SDave Ertman 
7651348048e7SDave Ertman 	return err;
7652e94d4478SAnirudh Venkataramanan }
7653e94d4478SAnirudh Venkataramanan 
7654e94d4478SAnirudh Venkataramanan /**
7655a7605370SArnd Bergmann  * ice_eth_ioctl - Access the hwtstamp interface
765677a78115SJacob Keller  * @netdev: network interface device structure
765777a78115SJacob Keller  * @ifr: interface request data
765877a78115SJacob Keller  * @cmd: ioctl command
765977a78115SJacob Keller  */
7660a7605370SArnd Bergmann static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
766177a78115SJacob Keller {
766277a78115SJacob Keller 	struct ice_netdev_priv *np = netdev_priv(netdev);
766377a78115SJacob Keller 	struct ice_pf *pf = np->vsi->back;
766477a78115SJacob Keller 
766577a78115SJacob Keller 	switch (cmd) {
766677a78115SJacob Keller 	case SIOCGHWTSTAMP:
766777a78115SJacob Keller 		return ice_ptp_get_ts_config(pf, ifr);
766877a78115SJacob Keller 	case SIOCSHWTSTAMP:
766977a78115SJacob Keller 		return ice_ptp_set_ts_config(pf, ifr);
767077a78115SJacob Keller 	default:
767177a78115SJacob Keller 		return -EOPNOTSUPP;
767277a78115SJacob Keller 	}
767377a78115SJacob Keller }
767477a78115SJacob Keller 
767577a78115SJacob Keller /**
76760fee3577SLihong Yang  * ice_aq_str - convert AQ err code to a string
76770fee3577SLihong Yang  * @aq_err: the AQ error code to convert
76780fee3577SLihong Yang  */
76790fee3577SLihong Yang const char *ice_aq_str(enum ice_aq_err aq_err)
76800fee3577SLihong Yang {
76810fee3577SLihong Yang 	switch (aq_err) {
76820fee3577SLihong Yang 	case ICE_AQ_RC_OK:
76830fee3577SLihong Yang 		return "OK";
76840fee3577SLihong Yang 	case ICE_AQ_RC_EPERM:
76850fee3577SLihong Yang 		return "ICE_AQ_RC_EPERM";
76860fee3577SLihong Yang 	case ICE_AQ_RC_ENOENT:
76870fee3577SLihong Yang 		return "ICE_AQ_RC_ENOENT";
76880fee3577SLihong Yang 	case ICE_AQ_RC_ENOMEM:
76890fee3577SLihong Yang 		return "ICE_AQ_RC_ENOMEM";
76900fee3577SLihong Yang 	case ICE_AQ_RC_EBUSY:
76910fee3577SLihong Yang 		return "ICE_AQ_RC_EBUSY";
76920fee3577SLihong Yang 	case ICE_AQ_RC_EEXIST:
76930fee3577SLihong Yang 		return "ICE_AQ_RC_EEXIST";
76940fee3577SLihong Yang 	case ICE_AQ_RC_EINVAL:
76950fee3577SLihong Yang 		return "ICE_AQ_RC_EINVAL";
76960fee3577SLihong Yang 	case ICE_AQ_RC_ENOSPC:
76970fee3577SLihong Yang 		return "ICE_AQ_RC_ENOSPC";
76980fee3577SLihong Yang 	case ICE_AQ_RC_ENOSYS:
76990fee3577SLihong Yang 		return "ICE_AQ_RC_ENOSYS";
7700b5e19a64SChinh T Cao 	case ICE_AQ_RC_EMODE:
7701b5e19a64SChinh T Cao 		return "ICE_AQ_RC_EMODE";
77020fee3577SLihong Yang 	case ICE_AQ_RC_ENOSEC:
77030fee3577SLihong Yang 		return "ICE_AQ_RC_ENOSEC";
77040fee3577SLihong Yang 	case ICE_AQ_RC_EBADSIG:
77050fee3577SLihong Yang 		return "ICE_AQ_RC_EBADSIG";
77060fee3577SLihong Yang 	case ICE_AQ_RC_ESVN:
77070fee3577SLihong Yang 		return "ICE_AQ_RC_ESVN";
77080fee3577SLihong Yang 	case ICE_AQ_RC_EBADMAN:
77090fee3577SLihong Yang 		return "ICE_AQ_RC_EBADMAN";
77100fee3577SLihong Yang 	case ICE_AQ_RC_EBADBUF:
77110fee3577SLihong Yang 		return "ICE_AQ_RC_EBADBUF";
77120fee3577SLihong Yang 	}
77130fee3577SLihong Yang 
77140fee3577SLihong Yang 	return "ICE_AQ_RC_UNKNOWN";
77150fee3577SLihong Yang }
77160fee3577SLihong Yang 
77170fee3577SLihong Yang /**
7718b66a972aSBrett Creeley  * ice_set_rss_lut - Set RSS LUT
7719d76a60baSAnirudh Venkataramanan  * @vsi: Pointer to VSI structure
7720d76a60baSAnirudh Venkataramanan  * @lut: Lookup table
7721d76a60baSAnirudh Venkataramanan  * @lut_size: Lookup table size
7722d76a60baSAnirudh Venkataramanan  *
7723d76a60baSAnirudh Venkataramanan  * Returns 0 on success, negative on failure
7724d76a60baSAnirudh Venkataramanan  */
7725b66a972aSBrett Creeley int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7726d76a60baSAnirudh Venkataramanan {
7727b66a972aSBrett Creeley 	struct ice_aq_get_set_rss_lut_params params = {};
7728b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
77295e24d598STony Nguyen 	int status;
7730d76a60baSAnirudh Venkataramanan 
7731b66a972aSBrett Creeley 	if (!lut)
7732b66a972aSBrett Creeley 		return -EINVAL;
7733d76a60baSAnirudh Venkataramanan 
7734b66a972aSBrett Creeley 	params.vsi_handle = vsi->idx;
7735b66a972aSBrett Creeley 	params.lut_size = lut_size;
7736b66a972aSBrett Creeley 	params.lut_type = vsi->rss_lut_type;
7737b66a972aSBrett Creeley 	params.lut = lut;
7738d76a60baSAnirudh Venkataramanan 
7739b66a972aSBrett Creeley 	status = ice_aq_set_rss_lut(hw, &params);
7740c1484691STony Nguyen 	if (status)
77415f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
77425518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
7743d76a60baSAnirudh Venkataramanan 
7744c1484691STony Nguyen 	return status;
7745d76a60baSAnirudh Venkataramanan }
7746d76a60baSAnirudh Venkataramanan 
7747d76a60baSAnirudh Venkataramanan /**
7748b66a972aSBrett Creeley  * ice_set_rss_key - Set RSS key
7749b66a972aSBrett Creeley  * @vsi: Pointer to the VSI structure
7750b66a972aSBrett Creeley  * @seed: RSS hash seed
7751b66a972aSBrett Creeley  *
7752b66a972aSBrett Creeley  * Returns 0 on success, negative on failure
7753b66a972aSBrett Creeley  */
7754b66a972aSBrett Creeley int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7755b66a972aSBrett Creeley {
7756b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
77575e24d598STony Nguyen 	int status;
7758b66a972aSBrett Creeley 
7759b66a972aSBrett Creeley 	if (!seed)
7760b66a972aSBrett Creeley 		return -EINVAL;
7761b66a972aSBrett Creeley 
7762b66a972aSBrett Creeley 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7763c1484691STony Nguyen 	if (status)
77645f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
77655518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
7766b66a972aSBrett Creeley 
7767c1484691STony Nguyen 	return status;
7768b66a972aSBrett Creeley }
7769b66a972aSBrett Creeley 
7770b66a972aSBrett Creeley /**
7771b66a972aSBrett Creeley  * ice_get_rss_lut - Get RSS LUT
7772d76a60baSAnirudh Venkataramanan  * @vsi: Pointer to VSI structure
7773d76a60baSAnirudh Venkataramanan  * @lut: Buffer to store the lookup table entries
7774d76a60baSAnirudh Venkataramanan  * @lut_size: Size of buffer to store the lookup table entries
7775d76a60baSAnirudh Venkataramanan  *
7776d76a60baSAnirudh Venkataramanan  * Returns 0 on success, negative on failure
7777d76a60baSAnirudh Venkataramanan  */
7778b66a972aSBrett Creeley int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7779d76a60baSAnirudh Venkataramanan {
7780b66a972aSBrett Creeley 	struct ice_aq_get_set_rss_lut_params params = {};
7781b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
77825e24d598STony Nguyen 	int status;
7783d76a60baSAnirudh Venkataramanan 
7784b66a972aSBrett Creeley 	if (!lut)
7785b66a972aSBrett Creeley 		return -EINVAL;
7786d76a60baSAnirudh Venkataramanan 
7787b66a972aSBrett Creeley 	params.vsi_handle = vsi->idx;
7788b66a972aSBrett Creeley 	params.lut_size = lut_size;
7789b66a972aSBrett Creeley 	params.lut_type = vsi->rss_lut_type;
7790b66a972aSBrett Creeley 	params.lut = lut;
7791b66a972aSBrett Creeley 
7792b66a972aSBrett Creeley 	status = ice_aq_get_rss_lut(hw, &params);
7793c1484691STony Nguyen 	if (status)
77945f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
77955518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
7796b66a972aSBrett Creeley 
7797c1484691STony Nguyen 	return status;
7798d76a60baSAnirudh Venkataramanan }
7799d76a60baSAnirudh Venkataramanan 
7800b66a972aSBrett Creeley /**
7801b66a972aSBrett Creeley  * ice_get_rss_key - Get RSS key
7802b66a972aSBrett Creeley  * @vsi: Pointer to VSI structure
7803b66a972aSBrett Creeley  * @seed: Buffer to store the key in
7804b66a972aSBrett Creeley  *
7805b66a972aSBrett Creeley  * Returns 0 on success, negative on failure
7806b66a972aSBrett Creeley  */
7807b66a972aSBrett Creeley int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7808b66a972aSBrett Creeley {
7809b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
78105e24d598STony Nguyen 	int status;
7811e3c53928SBrett Creeley 
7812b66a972aSBrett Creeley 	if (!seed)
7813b66a972aSBrett Creeley 		return -EINVAL;
7814b66a972aSBrett Creeley 
7815b66a972aSBrett Creeley 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7816c1484691STony Nguyen 	if (status)
78175f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
78185518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
7819d76a60baSAnirudh Venkataramanan 
7820c1484691STony Nguyen 	return status;
7821d76a60baSAnirudh Venkataramanan }
7822d76a60baSAnirudh Venkataramanan 
7823d76a60baSAnirudh Venkataramanan /**
7824b1edc14aSMd Fahad Iqbal Polash  * ice_bridge_getlink - Get the hardware bridge mode
7825b1edc14aSMd Fahad Iqbal Polash  * @skb: skb buff
7826f9867df6SAnirudh Venkataramanan  * @pid: process ID
7827b1edc14aSMd Fahad Iqbal Polash  * @seq: RTNL message seq
7828b1edc14aSMd Fahad Iqbal Polash  * @dev: the netdev being configured
7829b1edc14aSMd Fahad Iqbal Polash  * @filter_mask: filter mask passed in
7830b1edc14aSMd Fahad Iqbal Polash  * @nlflags: netlink flags passed in
7831b1edc14aSMd Fahad Iqbal Polash  *
7832b1edc14aSMd Fahad Iqbal Polash  * Return the bridge mode (VEB/VEPA)
7833b1edc14aSMd Fahad Iqbal Polash  */
7834b1edc14aSMd Fahad Iqbal Polash static int
7835b1edc14aSMd Fahad Iqbal Polash ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7836b1edc14aSMd Fahad Iqbal Polash 		   struct net_device *dev, u32 filter_mask, int nlflags)
7837b1edc14aSMd Fahad Iqbal Polash {
7838b1edc14aSMd Fahad Iqbal Polash 	struct ice_netdev_priv *np = netdev_priv(dev);
7839b1edc14aSMd Fahad Iqbal Polash 	struct ice_vsi *vsi = np->vsi;
7840b1edc14aSMd Fahad Iqbal Polash 	struct ice_pf *pf = vsi->back;
7841b1edc14aSMd Fahad Iqbal Polash 	u16 bmode;
7842b1edc14aSMd Fahad Iqbal Polash 
7843b1edc14aSMd Fahad Iqbal Polash 	bmode = pf->first_sw->bridge_mode;
7844b1edc14aSMd Fahad Iqbal Polash 
7845b1edc14aSMd Fahad Iqbal Polash 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7846b1edc14aSMd Fahad Iqbal Polash 				       filter_mask, NULL);
7847b1edc14aSMd Fahad Iqbal Polash }
7848b1edc14aSMd Fahad Iqbal Polash 
7849b1edc14aSMd Fahad Iqbal Polash /**
7850b1edc14aSMd Fahad Iqbal Polash  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7851b1edc14aSMd Fahad Iqbal Polash  * @vsi: Pointer to VSI structure
7852b1edc14aSMd Fahad Iqbal Polash  * @bmode: Hardware bridge mode (VEB/VEPA)
7853b1edc14aSMd Fahad Iqbal Polash  *
7854b1edc14aSMd Fahad Iqbal Polash  * Returns 0 on success, negative on failure
7855b1edc14aSMd Fahad Iqbal Polash  */
7856b1edc14aSMd Fahad Iqbal Polash static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7857b1edc14aSMd Fahad Iqbal Polash {
7858b1edc14aSMd Fahad Iqbal Polash 	struct ice_aqc_vsi_props *vsi_props;
7859b1edc14aSMd Fahad Iqbal Polash 	struct ice_hw *hw = &vsi->back->hw;
7860198a666aSBruce Allan 	struct ice_vsi_ctx *ctxt;
78612ccc1c1cSTony Nguyen 	int ret;
7862b1edc14aSMd Fahad Iqbal Polash 
7863b1edc14aSMd Fahad Iqbal Polash 	vsi_props = &vsi->info;
7864198a666aSBruce Allan 
78659efe35d0STony Nguyen 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7866198a666aSBruce Allan 	if (!ctxt)
7867198a666aSBruce Allan 		return -ENOMEM;
7868198a666aSBruce Allan 
7869198a666aSBruce Allan 	ctxt->info = vsi->info;
7870b1edc14aSMd Fahad Iqbal Polash 
7871b1edc14aSMd Fahad Iqbal Polash 	if (bmode == BRIDGE_MODE_VEB)
7872b1edc14aSMd Fahad Iqbal Polash 		/* change from VEPA to VEB mode */
7873198a666aSBruce Allan 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7874b1edc14aSMd Fahad Iqbal Polash 	else
7875b1edc14aSMd Fahad Iqbal Polash 		/* change from VEB to VEPA mode */
7876198a666aSBruce Allan 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7877198a666aSBruce Allan 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
78785726ca0eSAnirudh Venkataramanan 
78792ccc1c1cSTony Nguyen 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
78802ccc1c1cSTony Nguyen 	if (ret) {
78815f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
78822ccc1c1cSTony Nguyen 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7883198a666aSBruce Allan 		goto out;
7884b1edc14aSMd Fahad Iqbal Polash 	}
7885b1edc14aSMd Fahad Iqbal Polash 	/* Update sw flags for book keeping */
7886198a666aSBruce Allan 	vsi_props->sw_flags = ctxt->info.sw_flags;
7887b1edc14aSMd Fahad Iqbal Polash 
7888198a666aSBruce Allan out:
78899efe35d0STony Nguyen 	kfree(ctxt);
7890198a666aSBruce Allan 	return ret;
7891b1edc14aSMd Fahad Iqbal Polash }
7892b1edc14aSMd Fahad Iqbal Polash 
7893b1edc14aSMd Fahad Iqbal Polash /**
7894b1edc14aSMd Fahad Iqbal Polash  * ice_bridge_setlink - Set the hardware bridge mode
7895b1edc14aSMd Fahad Iqbal Polash  * @dev: the netdev being configured
7896b1edc14aSMd Fahad Iqbal Polash  * @nlh: RTNL message
7897b1edc14aSMd Fahad Iqbal Polash  * @flags: bridge setlink flags
78982fd527b7SPetr Machata  * @extack: netlink extended ack
7899b1edc14aSMd Fahad Iqbal Polash  *
7900b1edc14aSMd Fahad Iqbal Polash  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7901b1edc14aSMd Fahad Iqbal Polash  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
7902b1edc14aSMd Fahad Iqbal Polash  * not already set for all VSIs connected to this switch. And also update the
7903b1edc14aSMd Fahad Iqbal Polash  * unicast switch filter rules for the corresponding switch of the netdev.
7904b1edc14aSMd Fahad Iqbal Polash  */
7905b1edc14aSMd Fahad Iqbal Polash static int
7906b1edc14aSMd Fahad Iqbal Polash ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
79073d505147SBruce Allan 		   u16 __always_unused flags,
79083d505147SBruce Allan 		   struct netlink_ext_ack __always_unused *extack)
7909b1edc14aSMd Fahad Iqbal Polash {
7910b1edc14aSMd Fahad Iqbal Polash 	struct ice_netdev_priv *np = netdev_priv(dev);
7911b1edc14aSMd Fahad Iqbal Polash 	struct ice_pf *pf = np->vsi->back;
7912b1edc14aSMd Fahad Iqbal Polash 	struct nlattr *attr, *br_spec;
7913b1edc14aSMd Fahad Iqbal Polash 	struct ice_hw *hw = &pf->hw;
7914b1edc14aSMd Fahad Iqbal Polash 	struct ice_sw *pf_sw;
7915b1edc14aSMd Fahad Iqbal Polash 	int rem, v, err = 0;
7916b1edc14aSMd Fahad Iqbal Polash 
7917b1edc14aSMd Fahad Iqbal Polash 	pf_sw = pf->first_sw;
7918b1edc14aSMd Fahad Iqbal Polash 	/* find the attribute in the netlink message */
7919b1edc14aSMd Fahad Iqbal Polash 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7920b1edc14aSMd Fahad Iqbal Polash 
7921b1edc14aSMd Fahad Iqbal Polash 	nla_for_each_nested(attr, br_spec, rem) {
7922b1edc14aSMd Fahad Iqbal Polash 		__u16 mode;
7923b1edc14aSMd Fahad Iqbal Polash 
7924b1edc14aSMd Fahad Iqbal Polash 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7925b1edc14aSMd Fahad Iqbal Polash 			continue;
7926b1edc14aSMd Fahad Iqbal Polash 		mode = nla_get_u16(attr);
7927b1edc14aSMd Fahad Iqbal Polash 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7928b1edc14aSMd Fahad Iqbal Polash 			return -EINVAL;
7929b1edc14aSMd Fahad Iqbal Polash 		/* Continue  if bridge mode is not being flipped */
7930b1edc14aSMd Fahad Iqbal Polash 		if (mode == pf_sw->bridge_mode)
7931b1edc14aSMd Fahad Iqbal Polash 			continue;
7932b1edc14aSMd Fahad Iqbal Polash 		/* Iterates through the PF VSI list and update the loopback
7933b1edc14aSMd Fahad Iqbal Polash 		 * mode of the VSI
7934b1edc14aSMd Fahad Iqbal Polash 		 */
7935b1edc14aSMd Fahad Iqbal Polash 		ice_for_each_vsi(pf, v) {
7936b1edc14aSMd Fahad Iqbal Polash 			if (!pf->vsi[v])
7937b1edc14aSMd Fahad Iqbal Polash 				continue;
7938b1edc14aSMd Fahad Iqbal Polash 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7939b1edc14aSMd Fahad Iqbal Polash 			if (err)
7940b1edc14aSMd Fahad Iqbal Polash 				return err;
7941b1edc14aSMd Fahad Iqbal Polash 		}
7942b1edc14aSMd Fahad Iqbal Polash 
7943b1edc14aSMd Fahad Iqbal Polash 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7944b1edc14aSMd Fahad Iqbal Polash 		/* Update the unicast switch filter rules for the corresponding
7945b1edc14aSMd Fahad Iqbal Polash 		 * switch of the netdev
7946b1edc14aSMd Fahad Iqbal Polash 		 */
79472ccc1c1cSTony Nguyen 		err = ice_update_sw_rule_bridge_mode(hw);
79482ccc1c1cSTony Nguyen 		if (err) {
79495f87ec48STony Nguyen 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
79502ccc1c1cSTony Nguyen 				   mode, err,
79510fee3577SLihong Yang 				   ice_aq_str(hw->adminq.sq_last_status));
7952b1edc14aSMd Fahad Iqbal Polash 			/* revert hw->evb_veb */
7953b1edc14aSMd Fahad Iqbal Polash 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7954c1484691STony Nguyen 			return err;
7955b1edc14aSMd Fahad Iqbal Polash 		}
7956b1edc14aSMd Fahad Iqbal Polash 
7957b1edc14aSMd Fahad Iqbal Polash 		pf_sw->bridge_mode = mode;
7958b1edc14aSMd Fahad Iqbal Polash 	}
7959b1edc14aSMd Fahad Iqbal Polash 
7960b1edc14aSMd Fahad Iqbal Polash 	return 0;
7961b1edc14aSMd Fahad Iqbal Polash }
7962b1edc14aSMd Fahad Iqbal Polash 
/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 *
 * Invoked by the networking core when @txqueue's watchdog fires. Logs
 * diagnostic ring/interrupt state and escalates through progressively
 * stronger resets (PF -> CORE -> GLOBAL) on repeated timeouts; once all
 * levels are exhausted the device is marked down and unrecoverable.
 */
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_tx_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs
	 * to. If yes then Tx timeout is not caused by a hung queue, no
	 * need to reset and rebuild
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		/* read the hardware head pointer of the hung queue for the log */
		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

	/* escalate reset severity with each consecutive timeout */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	/* the service task performs the actual reset requested above */
	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
8044b3969fd7SSudheer Mogilappagari 
8045b3969fd7SSudheer Mogilappagari /**
80460d08a441SKiran Patil  * ice_setup_tc_cls_flower - flower classifier offloads
80470d08a441SKiran Patil  * @np: net device to configure
80480d08a441SKiran Patil  * @filter_dev: device on which filter is added
80490d08a441SKiran Patil  * @cls_flower: offload data
80500d08a441SKiran Patil  */
80510d08a441SKiran Patil static int
80520d08a441SKiran Patil ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
80530d08a441SKiran Patil 			struct net_device *filter_dev,
80540d08a441SKiran Patil 			struct flow_cls_offload *cls_flower)
80550d08a441SKiran Patil {
80560d08a441SKiran Patil 	struct ice_vsi *vsi = np->vsi;
80570d08a441SKiran Patil 
80580d08a441SKiran Patil 	if (cls_flower->common.chain_index)
80590d08a441SKiran Patil 		return -EOPNOTSUPP;
80600d08a441SKiran Patil 
80610d08a441SKiran Patil 	switch (cls_flower->command) {
80620d08a441SKiran Patil 	case FLOW_CLS_REPLACE:
80630d08a441SKiran Patil 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
80640d08a441SKiran Patil 	case FLOW_CLS_DESTROY:
80650d08a441SKiran Patil 		return ice_del_cls_flower(vsi, cls_flower);
80660d08a441SKiran Patil 	default:
80670d08a441SKiran Patil 		return -EINVAL;
80680d08a441SKiran Patil 	}
80690d08a441SKiran Patil }
80700d08a441SKiran Patil 
80710d08a441SKiran Patil /**
80720d08a441SKiran Patil  * ice_setup_tc_block_cb - callback handler registered for TC block
80730d08a441SKiran Patil  * @type: TC SETUP type
80740d08a441SKiran Patil  * @type_data: TC flower offload data that contains user input
80750d08a441SKiran Patil  * @cb_priv: netdev private data
80760d08a441SKiran Patil  */
80770d08a441SKiran Patil static int
80780d08a441SKiran Patil ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
80790d08a441SKiran Patil {
80800d08a441SKiran Patil 	struct ice_netdev_priv *np = cb_priv;
80810d08a441SKiran Patil 
80820d08a441SKiran Patil 	switch (type) {
80830d08a441SKiran Patil 	case TC_SETUP_CLSFLOWER:
80840d08a441SKiran Patil 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
80850d08a441SKiran Patil 					       type_data);
80860d08a441SKiran Patil 	default:
80870d08a441SKiran Patil 		return -EOPNOTSUPP;
80880d08a441SKiran Patil 	}
80890d08a441SKiran Patil }
80900d08a441SKiran Patil 
/**
 * ice_validate_mqprio_qopt - Validate TCF input parameters
 * @vsi: Pointer to VSI
 * @mqprio_qopt: input parameters for mqprio queue configuration
 *
 * This function validates MQPRIO params, such as qcount (power of 2 wherever
 * needed), and make sure user doesn't specify qcount and BW rate limit
 * for TCs, which are more than "num_tc"
 *
 * Side effect: on success, vsi->ch_rss_size is set to the largest
 * power-of-2 qcount seen across TCs.
 */
static int
ice_validate_mqprio_qopt(struct ice_vsi *vsi,
			 struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0, sum_min_rate = 0;
	int non_power_of_2_qcount = 0;
	struct ice_pf *pf = vsi->back;
	int max_rss_q_cnt = 0;
	struct device *dev;
	int i, speed;
	u8 num_tc;

	/* channel configuration is only supported on the main PF VSI */
	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	/* TC0 must start at queue offset 0 and num_tc must be in range */
	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	vsi->ch_rss_size = 0;
	num_tc = mqprio_qopt->qopt.num_tc;

	/* NOTE: num_tc >= 1 here, so the loop condition is always true;
	 * the loop terminates via the "i >= num_tc - 1" break below
	 */
	for (i = 0; num_tc; i++) {
		int qcount = mqprio_qopt->qopt.count[i];
		u64 max_rate, min_rate, rem;

		if (!qcount)
			return -EINVAL;

		/* Invariant enforced below: at most one TC may have a
		 * non-power-of-2 qcount, and that qcount must be >= every
		 * power-of-2 qcount of the other TCs
		 */
		if (is_power_of_2(qcount)) {
			if (non_power_of_2_qcount &&
			    qcount > non_power_of_2_qcount) {
				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount > max_rss_q_cnt)
				max_rss_q_cnt = qcount;
		} else {
			if (non_power_of_2_qcount &&
			    qcount != non_power_of_2_qcount) {
				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount < max_rss_q_cnt) {
				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
					qcount, max_rss_q_cnt);
				return -EINVAL;
			}
			max_rss_q_cnt = qcount;
			non_power_of_2_qcount = qcount;
		}

		/* TC command takes input in K/N/Gbps or K/M/Gbit etc but
		 * converts the bandwidth rate limit into Bytes/s when
		 * passing it down to the driver. So convert input bandwidth
		 * from Bytes/s to Kbps
		 */
		max_rate = mqprio_qopt->max_rate[i];
		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
		sum_max_rate += max_rate;

		/* min_rate is minimum guaranteed rate and it can't be zero */
		min_rate = mqprio_qopt->min_rate[i];
		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
		sum_min_rate += min_rate;

		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
				min_rate, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* both rates must be whole multiples of the minimum BW unit */
		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* min_rate can't be more than max_rate, except when max_rate
		 * is zero (implies max_rate sought is max line rate). In such
		 * a case min_rate can be more than max.
		 */
		if (max_rate && min_rate > max_rate) {
			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
				min_rate, max_rate);
			return -EINVAL;
		}

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		/* TC queue ranges must be contiguous: next offset follows
		 * directly after this TC's queues
		 */
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + qcount))
			return -EINVAL;
	}
	/* the last TC's range must fit within the VSI's Rx and Tx queues */
	if (vsi->num_rxq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;
	if (vsi->num_txq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;

	/* aggregate rate limits cannot exceed the current link speed */
	speed = ice_get_link_speed_kbps(vsi);
	if (sum_max_rate && sum_max_rate > (u64)speed) {
		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
			sum_max_rate, speed);
		return -EINVAL;
	}
	if (sum_min_rate && sum_min_rate > (u64)speed) {
		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
			sum_min_rate, speed);
		return -EINVAL;
	}

	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
	vsi->ch_rss_size = max_rss_q_cnt;

	return 0;
}
8230fbc7b27aSKiran Patil 
/**
 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 *
 * Walks every flow-director flow type that already has a profile on the PF
 * and adds @vsi (a channel VSI) as a member, so FD filters created for the
 * PF also apply to the channel's queues. Failures for individual flows are
 * logged and skipped; the function itself only fails when the VSI has no
 * FD filter resources at all.
 */
static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(pf);
	bool added = false;
	struct ice_hw *hw;
	int flow;

	/* nothing to do if the VSI owns neither guaranteed nor best-effort
	 * flow-director filter entries
	 */
	if (!(vsi->num_gfltr || vsi->num_bfltr))
		return -EINVAL;

	hw = &pf->hw;
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof;
		int tun, status;
		u64 entry_h;

		/* skip flow types with no configured FD profile */
		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
		      hw->fdir_prof[flow]->cnt))
			continue;

		/* add the VSI for both the non-tunneled and tunneled segment */
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			enum ice_flow_priority prio;
			u64 prof_id;

			/* add this VSI to FDir profile for this flow */
			prio = ICE_FLOW_PRIO_NORMAL;
			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
						    prof->vsi_h[0], vsi->idx,
						    prio, prof->fdir_seg[tun],
						    &entry_h);
			if (status) {
				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
					vsi->idx, flow);
				continue;
			}

			/* remember entry handle so it can be removed later */
			prof->entry_h[prof->cnt][tun] = entry_h;
		}

		/* store VSI for filter replay and delete */
		prof->vsi_h[prof->cnt] = vsi->idx;
		prof->cnt++;

		added = true;
		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
			flow);
	}

	if (!added)
		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);

	return 0;
}
829140319796SKiran Patil 
829240319796SKiran Patil /**
8293fbc7b27aSKiran Patil  * ice_add_channel - add a channel by adding VSI
8294fbc7b27aSKiran Patil  * @pf: ptr to PF device
8295fbc7b27aSKiran Patil  * @sw_id: underlying HW switching element ID
8296fbc7b27aSKiran Patil  * @ch: ptr to channel structure
8297fbc7b27aSKiran Patil  *
8298fbc7b27aSKiran Patil  * Add a channel (VSI) using add_vsi and queue_map
8299fbc7b27aSKiran Patil  */
8300fbc7b27aSKiran Patil static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8301fbc7b27aSKiran Patil {
8302fbc7b27aSKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
8303fbc7b27aSKiran Patil 	struct ice_vsi *vsi;
8304fbc7b27aSKiran Patil 
8305fbc7b27aSKiran Patil 	if (ch->type != ICE_VSI_CHNL) {
8306fbc7b27aSKiran Patil 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8307fbc7b27aSKiran Patil 		return -EINVAL;
8308fbc7b27aSKiran Patil 	}
8309fbc7b27aSKiran Patil 
8310fbc7b27aSKiran Patil 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8311fbc7b27aSKiran Patil 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8312fbc7b27aSKiran Patil 		dev_err(dev, "create chnl VSI failure\n");
8313fbc7b27aSKiran Patil 		return -EINVAL;
8314fbc7b27aSKiran Patil 	}
8315fbc7b27aSKiran Patil 
831640319796SKiran Patil 	ice_add_vsi_to_fdir(pf, vsi);
831740319796SKiran Patil 
8318fbc7b27aSKiran Patil 	ch->sw_id = sw_id;
8319fbc7b27aSKiran Patil 	ch->vsi_num = vsi->vsi_num;
8320fbc7b27aSKiran Patil 	ch->info.mapping_flags = vsi->info.mapping_flags;
8321fbc7b27aSKiran Patil 	ch->ch_vsi = vsi;
8322fbc7b27aSKiran Patil 	/* set the back pointer of channel for newly created VSI */
8323fbc7b27aSKiran Patil 	vsi->ch = ch;
8324fbc7b27aSKiran Patil 
8325fbc7b27aSKiran Patil 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8326fbc7b27aSKiran Patil 	       sizeof(vsi->info.q_mapping));
8327fbc7b27aSKiran Patil 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8328fbc7b27aSKiran Patil 	       sizeof(vsi->info.tc_mapping));
8329fbc7b27aSKiran Patil 
8330fbc7b27aSKiran Patil 	return 0;
8331fbc7b27aSKiran Patil }
8332fbc7b27aSKiran Patil 
8333fbc7b27aSKiran Patil /**
8334fbc7b27aSKiran Patil  * ice_chnl_cfg_res
8335fbc7b27aSKiran Patil  * @vsi: the VSI being setup
8336fbc7b27aSKiran Patil  * @ch: ptr to channel structure
8337fbc7b27aSKiran Patil  *
8338fbc7b27aSKiran Patil  * Configure channel specific resources such as rings, vector.
8339fbc7b27aSKiran Patil  */
8340fbc7b27aSKiran Patil static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8341fbc7b27aSKiran Patil {
8342fbc7b27aSKiran Patil 	int i;
8343fbc7b27aSKiran Patil 
8344fbc7b27aSKiran Patil 	for (i = 0; i < ch->num_txq; i++) {
8345fbc7b27aSKiran Patil 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8346fbc7b27aSKiran Patil 		struct ice_ring_container *rc;
8347fbc7b27aSKiran Patil 		struct ice_tx_ring *tx_ring;
8348fbc7b27aSKiran Patil 		struct ice_rx_ring *rx_ring;
8349fbc7b27aSKiran Patil 
8350fbc7b27aSKiran Patil 		tx_ring = vsi->tx_rings[ch->base_q + i];
8351fbc7b27aSKiran Patil 		rx_ring = vsi->rx_rings[ch->base_q + i];
8352fbc7b27aSKiran Patil 		if (!tx_ring || !rx_ring)
8353fbc7b27aSKiran Patil 			continue;
8354fbc7b27aSKiran Patil 
8355fbc7b27aSKiran Patil 		/* setup ring being channel enabled */
8356fbc7b27aSKiran Patil 		tx_ring->ch = ch;
8357fbc7b27aSKiran Patil 		rx_ring->ch = ch;
8358fbc7b27aSKiran Patil 
8359fbc7b27aSKiran Patil 		/* following code block sets up vector specific attributes */
8360fbc7b27aSKiran Patil 		tx_q_vector = tx_ring->q_vector;
8361fbc7b27aSKiran Patil 		rx_q_vector = rx_ring->q_vector;
8362fbc7b27aSKiran Patil 		if (!tx_q_vector && !rx_q_vector)
8363fbc7b27aSKiran Patil 			continue;
8364fbc7b27aSKiran Patil 
8365fbc7b27aSKiran Patil 		if (tx_q_vector) {
8366fbc7b27aSKiran Patil 			tx_q_vector->ch = ch;
8367fbc7b27aSKiran Patil 			/* setup Tx and Rx ITR setting if DIM is off */
8368fbc7b27aSKiran Patil 			rc = &tx_q_vector->tx;
8369fbc7b27aSKiran Patil 			if (!ITR_IS_DYNAMIC(rc))
8370fbc7b27aSKiran Patil 				ice_write_itr(rc, rc->itr_setting);
8371fbc7b27aSKiran Patil 		}
8372fbc7b27aSKiran Patil 		if (rx_q_vector) {
8373fbc7b27aSKiran Patil 			rx_q_vector->ch = ch;
8374fbc7b27aSKiran Patil 			/* setup Tx and Rx ITR setting if DIM is off */
8375fbc7b27aSKiran Patil 			rc = &rx_q_vector->rx;
8376fbc7b27aSKiran Patil 			if (!ITR_IS_DYNAMIC(rc))
8377fbc7b27aSKiran Patil 				ice_write_itr(rc, rc->itr_setting);
8378fbc7b27aSKiran Patil 		}
8379fbc7b27aSKiran Patil 	}
8380fbc7b27aSKiran Patil 
8381fbc7b27aSKiran Patil 	/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
8382fbc7b27aSKiran Patil 	 * GLINT_ITR register would have written to perform in-context
8383fbc7b27aSKiran Patil 	 * update, hence perform flush
8384fbc7b27aSKiran Patil 	 */
8385fbc7b27aSKiran Patil 	if (ch->num_txq || ch->num_rxq)
8386fbc7b27aSKiran Patil 		ice_flush(&vsi->back->hw);
8387fbc7b27aSKiran Patil }
8388fbc7b27aSKiran Patil 
8389fbc7b27aSKiran Patil /**
8390fbc7b27aSKiran Patil  * ice_cfg_chnl_all_res - configure channel resources
8391fbc7b27aSKiran Patil  * @vsi: pte to main_vsi
8392fbc7b27aSKiran Patil  * @ch: ptr to channel structure
8393fbc7b27aSKiran Patil  *
8394fbc7b27aSKiran Patil  * This function configures channel specific resources such as flow-director
8395fbc7b27aSKiran Patil  * counter index, and other resources such as queues, vectors, ITR settings
8396fbc7b27aSKiran Patil  */
8397fbc7b27aSKiran Patil static void
8398fbc7b27aSKiran Patil ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8399fbc7b27aSKiran Patil {
8400fbc7b27aSKiran Patil 	/* configure channel (aka ADQ) resources such as queues, vectors,
8401fbc7b27aSKiran Patil 	 * ITR settings for channel specific vectors and anything else
8402fbc7b27aSKiran Patil 	 */
8403fbc7b27aSKiran Patil 	ice_chnl_cfg_res(vsi, ch);
8404fbc7b27aSKiran Patil }
8405fbc7b27aSKiran Patil 
8406fbc7b27aSKiran Patil /**
8407fbc7b27aSKiran Patil  * ice_setup_hw_channel - setup new channel
8408fbc7b27aSKiran Patil  * @pf: ptr to PF device
8409fbc7b27aSKiran Patil  * @vsi: the VSI being setup
8410fbc7b27aSKiran Patil  * @ch: ptr to channel structure
8411fbc7b27aSKiran Patil  * @sw_id: underlying HW switching element ID
8412fbc7b27aSKiran Patil  * @type: type of channel to be created (VMDq2/VF)
8413fbc7b27aSKiran Patil  *
8414fbc7b27aSKiran Patil  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8415fbc7b27aSKiran Patil  * and configures Tx rings accordingly
8416fbc7b27aSKiran Patil  */
8417fbc7b27aSKiran Patil static int
8418fbc7b27aSKiran Patil ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8419fbc7b27aSKiran Patil 		     struct ice_channel *ch, u16 sw_id, u8 type)
8420fbc7b27aSKiran Patil {
8421fbc7b27aSKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
8422fbc7b27aSKiran Patil 	int ret;
8423fbc7b27aSKiran Patil 
8424fbc7b27aSKiran Patil 	ch->base_q = vsi->next_base_q;
8425fbc7b27aSKiran Patil 	ch->type = type;
8426fbc7b27aSKiran Patil 
8427fbc7b27aSKiran Patil 	ret = ice_add_channel(pf, sw_id, ch);
8428fbc7b27aSKiran Patil 	if (ret) {
8429fbc7b27aSKiran Patil 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8430fbc7b27aSKiran Patil 		return ret;
8431fbc7b27aSKiran Patil 	}
8432fbc7b27aSKiran Patil 
8433fbc7b27aSKiran Patil 	/* configure/setup ADQ specific resources */
8434fbc7b27aSKiran Patil 	ice_cfg_chnl_all_res(vsi, ch);
8435fbc7b27aSKiran Patil 
8436fbc7b27aSKiran Patil 	/* make sure to update the next_base_q so that subsequent channel's
8437fbc7b27aSKiran Patil 	 * (aka ADQ) VSI queue map is correct
8438fbc7b27aSKiran Patil 	 */
8439fbc7b27aSKiran Patil 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8440fbc7b27aSKiran Patil 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8441fbc7b27aSKiran Patil 		ch->num_rxq);
8442fbc7b27aSKiran Patil 
8443fbc7b27aSKiran Patil 	return 0;
8444fbc7b27aSKiran Patil }
8445fbc7b27aSKiran Patil 
8446fbc7b27aSKiran Patil /**
8447fbc7b27aSKiran Patil  * ice_setup_channel - setup new channel using uplink element
8448fbc7b27aSKiran Patil  * @pf: ptr to PF device
8449fbc7b27aSKiran Patil  * @vsi: the VSI being setup
8450fbc7b27aSKiran Patil  * @ch: ptr to channel structure
8451fbc7b27aSKiran Patil  *
8452fbc7b27aSKiran Patil  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8453fbc7b27aSKiran Patil  * and uplink switching element
8454fbc7b27aSKiran Patil  */
8455fbc7b27aSKiran Patil static bool
8456fbc7b27aSKiran Patil ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8457fbc7b27aSKiran Patil 		  struct ice_channel *ch)
8458fbc7b27aSKiran Patil {
8459fbc7b27aSKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
8460fbc7b27aSKiran Patil 	u16 sw_id;
8461fbc7b27aSKiran Patil 	int ret;
8462fbc7b27aSKiran Patil 
8463fbc7b27aSKiran Patil 	if (vsi->type != ICE_VSI_PF) {
8464fbc7b27aSKiran Patil 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8465fbc7b27aSKiran Patil 		return false;
8466fbc7b27aSKiran Patil 	}
8467fbc7b27aSKiran Patil 
8468fbc7b27aSKiran Patil 	sw_id = pf->first_sw->sw_id;
8469fbc7b27aSKiran Patil 
8470fbc7b27aSKiran Patil 	/* create channel (VSI) */
8471fbc7b27aSKiran Patil 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8472fbc7b27aSKiran Patil 	if (ret) {
8473fbc7b27aSKiran Patil 		dev_err(dev, "failed to setup hw_channel\n");
8474fbc7b27aSKiran Patil 		return false;
8475fbc7b27aSKiran Patil 	}
8476fbc7b27aSKiran Patil 	dev_dbg(dev, "successfully created channel()\n");
8477fbc7b27aSKiran Patil 
8478fbc7b27aSKiran Patil 	return ch->ch_vsi ? true : false;
8479fbc7b27aSKiran Patil }
8480fbc7b27aSKiran Patil 
8481fbc7b27aSKiran Patil /**
8482fbc7b27aSKiran Patil  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8483fbc7b27aSKiran Patil  * @vsi: VSI to be configured
8484fbc7b27aSKiran Patil  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8485fbc7b27aSKiran Patil  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8486fbc7b27aSKiran Patil  */
8487fbc7b27aSKiran Patil static int
8488fbc7b27aSKiran Patil ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8489fbc7b27aSKiran Patil {
8490fbc7b27aSKiran Patil 	int err;
8491fbc7b27aSKiran Patil 
8492fbc7b27aSKiran Patil 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8493fbc7b27aSKiran Patil 	if (err)
8494fbc7b27aSKiran Patil 		return err;
8495fbc7b27aSKiran Patil 
8496fbc7b27aSKiran Patil 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8497fbc7b27aSKiran Patil }
8498fbc7b27aSKiran Patil 
8499fbc7b27aSKiran Patil /**
8500fbc7b27aSKiran Patil  * ice_create_q_channel - function to create channel
8501fbc7b27aSKiran Patil  * @vsi: VSI to be configured
8502fbc7b27aSKiran Patil  * @ch: ptr to channel (it contains channel specific params)
8503fbc7b27aSKiran Patil  *
8504fbc7b27aSKiran Patil  * This function creates channel (VSI) using num_queues specified by user,
8505fbc7b27aSKiran Patil  * reconfigs RSS if needed.
8506fbc7b27aSKiran Patil  */
8507fbc7b27aSKiran Patil static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8508fbc7b27aSKiran Patil {
8509fbc7b27aSKiran Patil 	struct ice_pf *pf = vsi->back;
8510fbc7b27aSKiran Patil 	struct device *dev;
8511fbc7b27aSKiran Patil 
8512fbc7b27aSKiran Patil 	if (!ch)
8513fbc7b27aSKiran Patil 		return -EINVAL;
8514fbc7b27aSKiran Patil 
8515fbc7b27aSKiran Patil 	dev = ice_pf_to_dev(pf);
8516fbc7b27aSKiran Patil 	if (!ch->num_txq || !ch->num_rxq) {
8517fbc7b27aSKiran Patil 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8518fbc7b27aSKiran Patil 		return -EINVAL;
8519fbc7b27aSKiran Patil 	}
8520fbc7b27aSKiran Patil 
8521fbc7b27aSKiran Patil 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8522fbc7b27aSKiran Patil 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8523fbc7b27aSKiran Patil 			vsi->cnt_q_avail, ch->num_txq);
8524fbc7b27aSKiran Patil 		return -EINVAL;
8525fbc7b27aSKiran Patil 	}
8526fbc7b27aSKiran Patil 
8527fbc7b27aSKiran Patil 	if (!ice_setup_channel(pf, vsi, ch)) {
8528fbc7b27aSKiran Patil 		dev_info(dev, "Failed to setup channel\n");
8529fbc7b27aSKiran Patil 		return -EINVAL;
8530fbc7b27aSKiran Patil 	}
8531fbc7b27aSKiran Patil 	/* configure BW rate limit */
8532fbc7b27aSKiran Patil 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8533fbc7b27aSKiran Patil 		int ret;
8534fbc7b27aSKiran Patil 
8535fbc7b27aSKiran Patil 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8536fbc7b27aSKiran Patil 				       ch->min_tx_rate);
8537fbc7b27aSKiran Patil 		if (ret)
8538fbc7b27aSKiran Patil 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8539fbc7b27aSKiran Patil 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8540fbc7b27aSKiran Patil 		else
8541fbc7b27aSKiran Patil 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8542fbc7b27aSKiran Patil 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8543fbc7b27aSKiran Patil 	}
8544fbc7b27aSKiran Patil 
8545fbc7b27aSKiran Patil 	vsi->cnt_q_avail -= ch->num_txq;
8546fbc7b27aSKiran Patil 
8547fbc7b27aSKiran Patil 	return 0;
8548fbc7b27aSKiran Patil }
8549fbc7b27aSKiran Patil 
8550fbc7b27aSKiran Patil /**
85519fea7498SKiran Patil  * ice_rem_all_chnl_fltrs - removes all channel filters
85529fea7498SKiran Patil  * @pf: ptr to PF, TC-flower based filter are tracked at PF level
85539fea7498SKiran Patil  *
85549fea7498SKiran Patil  * Remove all advanced switch filters only if they are channel specific
85559fea7498SKiran Patil  * tc-flower based filter
85569fea7498SKiran Patil  */
85579fea7498SKiran Patil static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
85589fea7498SKiran Patil {
85599fea7498SKiran Patil 	struct ice_tc_flower_fltr *fltr;
85609fea7498SKiran Patil 	struct hlist_node *node;
85619fea7498SKiran Patil 
85629fea7498SKiran Patil 	/* to remove all channel filters, iterate an ordered list of filters */
85639fea7498SKiran Patil 	hlist_for_each_entry_safe(fltr, node,
85649fea7498SKiran Patil 				  &pf->tc_flower_fltr_list,
85659fea7498SKiran Patil 				  tc_flower_node) {
85669fea7498SKiran Patil 		struct ice_rule_query_data rule;
85679fea7498SKiran Patil 		int status;
85689fea7498SKiran Patil 
85699fea7498SKiran Patil 		/* for now process only channel specific filters */
85709fea7498SKiran Patil 		if (!ice_is_chnl_fltr(fltr))
85719fea7498SKiran Patil 			continue;
85729fea7498SKiran Patil 
85739fea7498SKiran Patil 		rule.rid = fltr->rid;
85749fea7498SKiran Patil 		rule.rule_id = fltr->rule_id;
8575143b86f3SAmritha Nambiar 		rule.vsi_handle = fltr->dest_vsi_handle;
85769fea7498SKiran Patil 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
85779fea7498SKiran Patil 		if (status) {
85789fea7498SKiran Patil 			if (status == -ENOENT)
85799fea7498SKiran Patil 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
85809fea7498SKiran Patil 					rule.rule_id);
85819fea7498SKiran Patil 			else
85829fea7498SKiran Patil 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
85839fea7498SKiran Patil 					status);
85849fea7498SKiran Patil 		} else if (fltr->dest_vsi) {
85859fea7498SKiran Patil 			/* update advanced switch filter count */
85869fea7498SKiran Patil 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
85879fea7498SKiran Patil 				u32 flags = fltr->flags;
85889fea7498SKiran Patil 
85899fea7498SKiran Patil 				fltr->dest_vsi->num_chnl_fltr--;
85909fea7498SKiran Patil 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
85919fea7498SKiran Patil 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
85929fea7498SKiran Patil 					pf->num_dmac_chnl_fltrs--;
85939fea7498SKiran Patil 			}
85949fea7498SKiran Patil 		}
85959fea7498SKiran Patil 
85969fea7498SKiran Patil 		hlist_del(&fltr->tc_flower_node);
85979fea7498SKiran Patil 		kfree(fltr);
85989fea7498SKiran Patil 	}
85999fea7498SKiran Patil }
86009fea7498SKiran Patil 
86019fea7498SKiran Patil /**
8602fbc7b27aSKiran Patil  * ice_remove_q_channels - Remove queue channels for the TCs
8603fbc7b27aSKiran Patil  * @vsi: VSI to be configured
8604fbc7b27aSKiran Patil  * @rem_fltr: delete advanced switch filter or not
8605fbc7b27aSKiran Patil  *
8606fbc7b27aSKiran Patil  * Remove queue channels for the TCs
8607fbc7b27aSKiran Patil  */
86089fea7498SKiran Patil static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8609fbc7b27aSKiran Patil {
8610fbc7b27aSKiran Patil 	struct ice_channel *ch, *ch_tmp;
86119fea7498SKiran Patil 	struct ice_pf *pf = vsi->back;
8612fbc7b27aSKiran Patil 	int i;
8613fbc7b27aSKiran Patil 
86149fea7498SKiran Patil 	/* remove all tc-flower based filter if they are channel filters only */
86159fea7498SKiran Patil 	if (rem_fltr)
86169fea7498SKiran Patil 		ice_rem_all_chnl_fltrs(pf);
86179fea7498SKiran Patil 
861840319796SKiran Patil 	/* remove ntuple filters since queue configuration is being changed */
861940319796SKiran Patil 	if  (vsi->netdev->features & NETIF_F_NTUPLE) {
862040319796SKiran Patil 		struct ice_hw *hw = &pf->hw;
862140319796SKiran Patil 
862240319796SKiran Patil 		mutex_lock(&hw->fdir_fltr_lock);
862340319796SKiran Patil 		ice_fdir_del_all_fltrs(vsi);
862440319796SKiran Patil 		mutex_unlock(&hw->fdir_fltr_lock);
862540319796SKiran Patil 	}
862640319796SKiran Patil 
8627fbc7b27aSKiran Patil 	/* perform cleanup for channels if they exist */
8628fbc7b27aSKiran Patil 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8629fbc7b27aSKiran Patil 		struct ice_vsi *ch_vsi;
8630fbc7b27aSKiran Patil 
8631fbc7b27aSKiran Patil 		list_del(&ch->list);
8632fbc7b27aSKiran Patil 		ch_vsi = ch->ch_vsi;
8633fbc7b27aSKiran Patil 		if (!ch_vsi) {
8634fbc7b27aSKiran Patil 			kfree(ch);
8635fbc7b27aSKiran Patil 			continue;
8636fbc7b27aSKiran Patil 		}
8637fbc7b27aSKiran Patil 
8638fbc7b27aSKiran Patil 		/* Reset queue contexts */
8639fbc7b27aSKiran Patil 		for (i = 0; i < ch->num_rxq; i++) {
8640fbc7b27aSKiran Patil 			struct ice_tx_ring *tx_ring;
8641fbc7b27aSKiran Patil 			struct ice_rx_ring *rx_ring;
8642fbc7b27aSKiran Patil 
8643fbc7b27aSKiran Patil 			tx_ring = vsi->tx_rings[ch->base_q + i];
8644fbc7b27aSKiran Patil 			rx_ring = vsi->rx_rings[ch->base_q + i];
8645fbc7b27aSKiran Patil 			if (tx_ring) {
8646fbc7b27aSKiran Patil 				tx_ring->ch = NULL;
8647fbc7b27aSKiran Patil 				if (tx_ring->q_vector)
8648fbc7b27aSKiran Patil 					tx_ring->q_vector->ch = NULL;
8649fbc7b27aSKiran Patil 			}
8650fbc7b27aSKiran Patil 			if (rx_ring) {
8651fbc7b27aSKiran Patil 				rx_ring->ch = NULL;
8652fbc7b27aSKiran Patil 				if (rx_ring->q_vector)
8653fbc7b27aSKiran Patil 					rx_ring->q_vector->ch = NULL;
8654fbc7b27aSKiran Patil 			}
8655fbc7b27aSKiran Patil 		}
8656fbc7b27aSKiran Patil 
865740319796SKiran Patil 		/* Release FD resources for the channel VSI */
865840319796SKiran Patil 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
865940319796SKiran Patil 
8660fbc7b27aSKiran Patil 		/* clear the VSI from scheduler tree */
8661fbc7b27aSKiran Patil 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8662fbc7b27aSKiran Patil 
8663227bf450SMichal Swiatkowski 		/* Delete VSI from FW, PF and HW VSI arrays */
8664fbc7b27aSKiran Patil 		ice_vsi_delete(ch->ch_vsi);
8665fbc7b27aSKiran Patil 
8666fbc7b27aSKiran Patil 		/* free the channel */
8667fbc7b27aSKiran Patil 		kfree(ch);
8668fbc7b27aSKiran Patil 	}
8669fbc7b27aSKiran Patil 
8670fbc7b27aSKiran Patil 	/* clear the channel VSI map which is stored in main VSI */
8671fbc7b27aSKiran Patil 	ice_for_each_chnl_tc(i)
8672fbc7b27aSKiran Patil 		vsi->tc_map_vsi[i] = NULL;
8673fbc7b27aSKiran Patil 
8674fbc7b27aSKiran Patil 	/* reset main VSI's all TC information */
8675fbc7b27aSKiran Patil 	vsi->all_enatc = 0;
8676fbc7b27aSKiran Patil 	vsi->all_numtc = 0;
8677fbc7b27aSKiran Patil }
8678fbc7b27aSKiran Patil 
8679fbc7b27aSKiran Patil /**
8680fbc7b27aSKiran Patil  * ice_rebuild_channels - rebuild channel
8681fbc7b27aSKiran Patil  * @pf: ptr to PF
8682fbc7b27aSKiran Patil  *
8683fbc7b27aSKiran Patil  * Recreate channel VSIs and replay filters
8684fbc7b27aSKiran Patil  */
8685fbc7b27aSKiran Patil static int ice_rebuild_channels(struct ice_pf *pf)
8686fbc7b27aSKiran Patil {
8687fbc7b27aSKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
8688fbc7b27aSKiran Patil 	struct ice_vsi *main_vsi;
8689fbc7b27aSKiran Patil 	bool rem_adv_fltr = true;
8690fbc7b27aSKiran Patil 	struct ice_channel *ch;
8691fbc7b27aSKiran Patil 	struct ice_vsi *vsi;
8692fbc7b27aSKiran Patil 	int tc_idx = 1;
8693fbc7b27aSKiran Patil 	int i, err;
8694fbc7b27aSKiran Patil 
8695fbc7b27aSKiran Patil 	main_vsi = ice_get_main_vsi(pf);
8696fbc7b27aSKiran Patil 	if (!main_vsi)
8697fbc7b27aSKiran Patil 		return 0;
8698fbc7b27aSKiran Patil 
8699fbc7b27aSKiran Patil 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8700fbc7b27aSKiran Patil 	    main_vsi->old_numtc == 1)
8701fbc7b27aSKiran Patil 		return 0; /* nothing to be done */
8702fbc7b27aSKiran Patil 
8703fbc7b27aSKiran Patil 	/* reconfigure main VSI based on old value of TC and cached values
8704fbc7b27aSKiran Patil 	 * for MQPRIO opts
8705fbc7b27aSKiran Patil 	 */
8706fbc7b27aSKiran Patil 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8707fbc7b27aSKiran Patil 	if (err) {
8708fbc7b27aSKiran Patil 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8709fbc7b27aSKiran Patil 			main_vsi->old_ena_tc, main_vsi->vsi_num);
8710fbc7b27aSKiran Patil 		return err;
8711fbc7b27aSKiran Patil 	}
8712fbc7b27aSKiran Patil 
8713fbc7b27aSKiran Patil 	/* rebuild ADQ VSIs */
8714fbc7b27aSKiran Patil 	ice_for_each_vsi(pf, i) {
8715fbc7b27aSKiran Patil 		enum ice_vsi_type type;
8716fbc7b27aSKiran Patil 
8717fbc7b27aSKiran Patil 		vsi = pf->vsi[i];
8718fbc7b27aSKiran Patil 		if (!vsi || vsi->type != ICE_VSI_CHNL)
8719fbc7b27aSKiran Patil 			continue;
8720fbc7b27aSKiran Patil 
8721fbc7b27aSKiran Patil 		type = vsi->type;
8722fbc7b27aSKiran Patil 
8723fbc7b27aSKiran Patil 		/* rebuild ADQ VSI */
87246624e780SMichal Swiatkowski 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8725fbc7b27aSKiran Patil 		if (err) {
8726fbc7b27aSKiran Patil 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8727fbc7b27aSKiran Patil 				ice_vsi_type_str(type), vsi->idx, err);
8728fbc7b27aSKiran Patil 			goto cleanup;
8729fbc7b27aSKiran Patil 		}
8730fbc7b27aSKiran Patil 
8731fbc7b27aSKiran Patil 		/* Re-map HW VSI number, using VSI handle that has been
8732fbc7b27aSKiran Patil 		 * previously validated in ice_replay_vsi() call above
8733fbc7b27aSKiran Patil 		 */
8734fbc7b27aSKiran Patil 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8735fbc7b27aSKiran Patil 
8736fbc7b27aSKiran Patil 		/* replay filters for the VSI */
8737fbc7b27aSKiran Patil 		err = ice_replay_vsi(&pf->hw, vsi->idx);
8738fbc7b27aSKiran Patil 		if (err) {
8739fbc7b27aSKiran Patil 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8740fbc7b27aSKiran Patil 				ice_vsi_type_str(type), err, vsi->idx);
8741fbc7b27aSKiran Patil 			rem_adv_fltr = false;
8742fbc7b27aSKiran Patil 			goto cleanup;
8743fbc7b27aSKiran Patil 		}
8744fbc7b27aSKiran Patil 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8745fbc7b27aSKiran Patil 			 ice_vsi_type_str(type), vsi->idx);
8746fbc7b27aSKiran Patil 
8747fbc7b27aSKiran Patil 		/* store ADQ VSI at correct TC index in main VSI's
8748fbc7b27aSKiran Patil 		 * map of TC to VSI
8749fbc7b27aSKiran Patil 		 */
8750fbc7b27aSKiran Patil 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
8751fbc7b27aSKiran Patil 	}
8752fbc7b27aSKiran Patil 
8753fbc7b27aSKiran Patil 	/* ADQ VSI(s) has been rebuilt successfully, so setup
8754fbc7b27aSKiran Patil 	 * channel for main VSI's Tx and Rx rings
8755fbc7b27aSKiran Patil 	 */
8756fbc7b27aSKiran Patil 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
8757fbc7b27aSKiran Patil 		struct ice_vsi *ch_vsi;
8758fbc7b27aSKiran Patil 
8759fbc7b27aSKiran Patil 		ch_vsi = ch->ch_vsi;
8760fbc7b27aSKiran Patil 		if (!ch_vsi)
8761fbc7b27aSKiran Patil 			continue;
8762fbc7b27aSKiran Patil 
8763fbc7b27aSKiran Patil 		/* reconfig channel resources */
8764fbc7b27aSKiran Patil 		ice_cfg_chnl_all_res(main_vsi, ch);
8765fbc7b27aSKiran Patil 
8766fbc7b27aSKiran Patil 		/* replay BW rate limit if it is non-zero */
8767fbc7b27aSKiran Patil 		if (!ch->max_tx_rate && !ch->min_tx_rate)
8768fbc7b27aSKiran Patil 			continue;
8769fbc7b27aSKiran Patil 
8770fbc7b27aSKiran Patil 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8771fbc7b27aSKiran Patil 				       ch->min_tx_rate);
8772fbc7b27aSKiran Patil 		if (err)
8773fbc7b27aSKiran Patil 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8774fbc7b27aSKiran Patil 				err, ch->max_tx_rate, ch->min_tx_rate,
8775fbc7b27aSKiran Patil 				ch_vsi->vsi_num);
8776fbc7b27aSKiran Patil 		else
8777fbc7b27aSKiran Patil 			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8778fbc7b27aSKiran Patil 				ch->max_tx_rate, ch->min_tx_rate,
8779fbc7b27aSKiran Patil 				ch_vsi->vsi_num);
8780fbc7b27aSKiran Patil 	}
8781fbc7b27aSKiran Patil 
8782fbc7b27aSKiran Patil 	/* reconfig RSS for main VSI */
8783fbc7b27aSKiran Patil 	if (main_vsi->ch_rss_size)
8784fbc7b27aSKiran Patil 		ice_vsi_cfg_rss_lut_key(main_vsi);
8785fbc7b27aSKiran Patil 
8786fbc7b27aSKiran Patil 	return 0;
8787fbc7b27aSKiran Patil 
8788fbc7b27aSKiran Patil cleanup:
8789fbc7b27aSKiran Patil 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
8790fbc7b27aSKiran Patil 	return err;
8791fbc7b27aSKiran Patil }
8792fbc7b27aSKiran Patil 
8793fbc7b27aSKiran Patil /**
8794fbc7b27aSKiran Patil  * ice_create_q_channels - Add queue channel for the given TCs
8795fbc7b27aSKiran Patil  * @vsi: VSI to be configured
8796fbc7b27aSKiran Patil  *
8797fbc7b27aSKiran Patil  * Configures queue channel mapping to the given TCs
8798fbc7b27aSKiran Patil  */
8799fbc7b27aSKiran Patil static int ice_create_q_channels(struct ice_vsi *vsi)
8800fbc7b27aSKiran Patil {
8801fbc7b27aSKiran Patil 	struct ice_pf *pf = vsi->back;
8802fbc7b27aSKiran Patil 	struct ice_channel *ch;
8803fbc7b27aSKiran Patil 	int ret = 0, i;
8804fbc7b27aSKiran Patil 
8805fbc7b27aSKiran Patil 	ice_for_each_chnl_tc(i) {
8806fbc7b27aSKiran Patil 		if (!(vsi->all_enatc & BIT(i)))
8807fbc7b27aSKiran Patil 			continue;
8808fbc7b27aSKiran Patil 
8809fbc7b27aSKiran Patil 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8810fbc7b27aSKiran Patil 		if (!ch) {
8811fbc7b27aSKiran Patil 			ret = -ENOMEM;
8812fbc7b27aSKiran Patil 			goto err_free;
8813fbc7b27aSKiran Patil 		}
8814fbc7b27aSKiran Patil 		INIT_LIST_HEAD(&ch->list);
8815fbc7b27aSKiran Patil 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8816fbc7b27aSKiran Patil 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8817fbc7b27aSKiran Patil 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8818fbc7b27aSKiran Patil 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8819fbc7b27aSKiran Patil 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8820fbc7b27aSKiran Patil 
8821fbc7b27aSKiran Patil 		/* convert to Kbits/s */
8822fbc7b27aSKiran Patil 		if (ch->max_tx_rate)
8823fbc7b27aSKiran Patil 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8824fbc7b27aSKiran Patil 						  ICE_BW_KBPS_DIVISOR);
8825fbc7b27aSKiran Patil 		if (ch->min_tx_rate)
8826fbc7b27aSKiran Patil 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8827fbc7b27aSKiran Patil 						  ICE_BW_KBPS_DIVISOR);
8828fbc7b27aSKiran Patil 
8829fbc7b27aSKiran Patil 		ret = ice_create_q_channel(vsi, ch);
8830fbc7b27aSKiran Patil 		if (ret) {
8831fbc7b27aSKiran Patil 			dev_err(ice_pf_to_dev(pf),
8832fbc7b27aSKiran Patil 				"failed creating channel TC:%d\n", i);
8833fbc7b27aSKiran Patil 			kfree(ch);
8834fbc7b27aSKiran Patil 			goto err_free;
8835fbc7b27aSKiran Patil 		}
8836fbc7b27aSKiran Patil 		list_add_tail(&ch->list, &vsi->ch_list);
8837fbc7b27aSKiran Patil 		vsi->tc_map_vsi[i] = ch->ch_vsi;
8838fbc7b27aSKiran Patil 		dev_dbg(ice_pf_to_dev(pf),
8839fbc7b27aSKiran Patil 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8840fbc7b27aSKiran Patil 	}
8841fbc7b27aSKiran Patil 	return 0;
8842fbc7b27aSKiran Patil 
8843fbc7b27aSKiran Patil err_free:
8844fbc7b27aSKiran Patil 	ice_remove_q_channels(vsi, false);
8845fbc7b27aSKiran Patil 
8846fbc7b27aSKiran Patil 	return ret;
8847fbc7b27aSKiran Patil }
8848fbc7b27aSKiran Patil 
8849fbc7b27aSKiran Patil /**
8850fbc7b27aSKiran Patil  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8851fbc7b27aSKiran Patil  * @netdev: net device to configure
8852fbc7b27aSKiran Patil  * @type_data: TC offload data
8853fbc7b27aSKiran Patil  */
8854fbc7b27aSKiran Patil static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8855fbc7b27aSKiran Patil {
8856fbc7b27aSKiran Patil 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8857fbc7b27aSKiran Patil 	struct ice_netdev_priv *np = netdev_priv(netdev);
8858fbc7b27aSKiran Patil 	struct ice_vsi *vsi = np->vsi;
8859fbc7b27aSKiran Patil 	struct ice_pf *pf = vsi->back;
8860fbc7b27aSKiran Patil 	u16 mode, ena_tc_qdisc = 0;
8861fbc7b27aSKiran Patil 	int cur_txq, cur_rxq;
8862fbc7b27aSKiran Patil 	u8 hw = 0, num_tcf;
8863fbc7b27aSKiran Patil 	struct device *dev;
8864fbc7b27aSKiran Patil 	int ret, i;
8865fbc7b27aSKiran Patil 
8866fbc7b27aSKiran Patil 	dev = ice_pf_to_dev(pf);
8867fbc7b27aSKiran Patil 	num_tcf = mqprio_qopt->qopt.num_tc;
8868fbc7b27aSKiran Patil 	hw = mqprio_qopt->qopt.hw;
8869fbc7b27aSKiran Patil 	mode = mqprio_qopt->mode;
8870fbc7b27aSKiran Patil 	if (!hw) {
8871fbc7b27aSKiran Patil 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8872fbc7b27aSKiran Patil 		vsi->ch_rss_size = 0;
8873fbc7b27aSKiran Patil 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8874fbc7b27aSKiran Patil 		goto config_tcf;
8875fbc7b27aSKiran Patil 	}
8876fbc7b27aSKiran Patil 
8877fbc7b27aSKiran Patil 	/* Generate queue region map for number of TCF requested */
8878fbc7b27aSKiran Patil 	for (i = 0; i < num_tcf; i++)
8879fbc7b27aSKiran Patil 		ena_tc_qdisc |= BIT(i);
8880fbc7b27aSKiran Patil 
8881fbc7b27aSKiran Patil 	switch (mode) {
8882fbc7b27aSKiran Patil 	case TC_MQPRIO_MODE_CHANNEL:
8883fbc7b27aSKiran Patil 
888480fe30a8SMichal Wilczynski 		if (pf->hw.port_info->is_custom_tx_enabled) {
888580fe30a8SMichal Wilczynski 			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
888680fe30a8SMichal Wilczynski 			return -EBUSY;
888780fe30a8SMichal Wilczynski 		}
888880fe30a8SMichal Wilczynski 		ice_tear_down_devlink_rate_tree(pf);
888980fe30a8SMichal Wilczynski 
8890fbc7b27aSKiran Patil 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8891fbc7b27aSKiran Patil 		if (ret) {
8892fbc7b27aSKiran Patil 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8893fbc7b27aSKiran Patil 				   ret);
8894fbc7b27aSKiran Patil 			return ret;
8895fbc7b27aSKiran Patil 		}
8896fbc7b27aSKiran Patil 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8897fbc7b27aSKiran Patil 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
88989fea7498SKiran Patil 		/* don't assume state of hw_tc_offload during driver load
88999fea7498SKiran Patil 		 * and set the flag for TC flower filter if hw_tc_offload
89009fea7498SKiran Patil 		 * already ON
89019fea7498SKiran Patil 		 */
89029fea7498SKiran Patil 		if (vsi->netdev->features & NETIF_F_HW_TC)
89039fea7498SKiran Patil 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8904fbc7b27aSKiran Patil 		break;
8905fbc7b27aSKiran Patil 	default:
8906fbc7b27aSKiran Patil 		return -EINVAL;
8907fbc7b27aSKiran Patil 	}
8908fbc7b27aSKiran Patil 
8909fbc7b27aSKiran Patil config_tcf:
8910fbc7b27aSKiran Patil 
8911fbc7b27aSKiran Patil 	/* Requesting same TCF configuration as already enabled */
8912fbc7b27aSKiran Patil 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8913fbc7b27aSKiran Patil 	    mode != TC_MQPRIO_MODE_CHANNEL)
8914fbc7b27aSKiran Patil 		return 0;
8915fbc7b27aSKiran Patil 
8916fbc7b27aSKiran Patil 	/* Pause VSI queues */
8917fbc7b27aSKiran Patil 	ice_dis_vsi(vsi, true);
8918fbc7b27aSKiran Patil 
8919fbc7b27aSKiran Patil 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8920fbc7b27aSKiran Patil 		ice_remove_q_channels(vsi, true);
8921fbc7b27aSKiran Patil 
8922fbc7b27aSKiran Patil 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8923fbc7b27aSKiran Patil 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8924fbc7b27aSKiran Patil 				     num_online_cpus());
8925fbc7b27aSKiran Patil 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8926fbc7b27aSKiran Patil 				     num_online_cpus());
8927fbc7b27aSKiran Patil 	} else {
8928fbc7b27aSKiran Patil 		/* logic to rebuild VSI, same like ethtool -L */
8929fbc7b27aSKiran Patil 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8930fbc7b27aSKiran Patil 
8931fbc7b27aSKiran Patil 		for (i = 0; i < num_tcf; i++) {
8932fbc7b27aSKiran Patil 			if (!(ena_tc_qdisc & BIT(i)))
8933fbc7b27aSKiran Patil 				continue;
8934fbc7b27aSKiran Patil 
8935fbc7b27aSKiran Patil 			offset = vsi->mqprio_qopt.qopt.offset[i];
8936fbc7b27aSKiran Patil 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8937fbc7b27aSKiran Patil 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8938fbc7b27aSKiran Patil 		}
8939fbc7b27aSKiran Patil 		vsi->req_txq = offset + qcount_tx;
8940fbc7b27aSKiran Patil 		vsi->req_rxq = offset + qcount_rx;
8941fbc7b27aSKiran Patil 
8942fbc7b27aSKiran Patil 		/* store away original rss_size info, so that it gets reused
8943fbc7b27aSKiran Patil 		 * form ice_vsi_rebuild during tc-qdisc delete stage - to
8944fbc7b27aSKiran Patil 		 * determine, what should be the rss_sizefor main VSI
8945fbc7b27aSKiran Patil 		 */
8946fbc7b27aSKiran Patil 		vsi->orig_rss_size = vsi->rss_size;
8947fbc7b27aSKiran Patil 	}
8948fbc7b27aSKiran Patil 
8949fbc7b27aSKiran Patil 	/* save current values of Tx and Rx queues before calling VSI rebuild
8950fbc7b27aSKiran Patil 	 * for fallback option
8951fbc7b27aSKiran Patil 	 */
8952fbc7b27aSKiran Patil 	cur_txq = vsi->num_txq;
8953fbc7b27aSKiran Patil 	cur_rxq = vsi->num_rxq;
8954fbc7b27aSKiran Patil 
8955fbc7b27aSKiran Patil 	/* proceed with rebuild main VSI using correct number of queues */
89566624e780SMichal Swiatkowski 	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8957fbc7b27aSKiran Patil 	if (ret) {
8958fbc7b27aSKiran Patil 		/* fallback to current number of queues */
8959fbc7b27aSKiran Patil 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8960fbc7b27aSKiran Patil 		vsi->req_txq = cur_txq;
8961fbc7b27aSKiran Patil 		vsi->req_rxq = cur_rxq;
8962fbc7b27aSKiran Patil 		clear_bit(ICE_RESET_FAILED, pf->state);
89636624e780SMichal Swiatkowski 		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8964fbc7b27aSKiran Patil 			dev_err(dev, "Rebuild of main VSI failed again\n");
8965fbc7b27aSKiran Patil 			return ret;
8966fbc7b27aSKiran Patil 		}
8967fbc7b27aSKiran Patil 	}
8968fbc7b27aSKiran Patil 
8969fbc7b27aSKiran Patil 	vsi->all_numtc = num_tcf;
8970fbc7b27aSKiran Patil 	vsi->all_enatc = ena_tc_qdisc;
8971fbc7b27aSKiran Patil 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8972fbc7b27aSKiran Patil 	if (ret) {
8973fbc7b27aSKiran Patil 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8974fbc7b27aSKiran Patil 			   vsi->vsi_num);
8975fbc7b27aSKiran Patil 		goto exit;
8976fbc7b27aSKiran Patil 	}
8977fbc7b27aSKiran Patil 
8978fbc7b27aSKiran Patil 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8979fbc7b27aSKiran Patil 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8980fbc7b27aSKiran Patil 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8981fbc7b27aSKiran Patil 
8982fbc7b27aSKiran Patil 		/* set TC0 rate limit if specified */
8983fbc7b27aSKiran Patil 		if (max_tx_rate || min_tx_rate) {
8984fbc7b27aSKiran Patil 			/* convert to Kbits/s */
8985fbc7b27aSKiran Patil 			if (max_tx_rate)
8986fbc7b27aSKiran Patil 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8987fbc7b27aSKiran Patil 			if (min_tx_rate)
8988fbc7b27aSKiran Patil 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8989fbc7b27aSKiran Patil 
8990fbc7b27aSKiran Patil 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8991fbc7b27aSKiran Patil 			if (!ret) {
8992fbc7b27aSKiran Patil 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8993fbc7b27aSKiran Patil 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8994fbc7b27aSKiran Patil 			} else {
8995fbc7b27aSKiran Patil 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8996fbc7b27aSKiran Patil 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8997fbc7b27aSKiran Patil 				goto exit;
8998fbc7b27aSKiran Patil 			}
8999fbc7b27aSKiran Patil 		}
9000fbc7b27aSKiran Patil 		ret = ice_create_q_channels(vsi);
9001fbc7b27aSKiran Patil 		if (ret) {
9002fbc7b27aSKiran Patil 			netdev_err(netdev, "failed configuring queue channels\n");
9003fbc7b27aSKiran Patil 			goto exit;
9004fbc7b27aSKiran Patil 		} else {
9005fbc7b27aSKiran Patil 			netdev_dbg(netdev, "successfully configured channels\n");
9006fbc7b27aSKiran Patil 		}
9007fbc7b27aSKiran Patil 	}
9008fbc7b27aSKiran Patil 
9009fbc7b27aSKiran Patil 	if (vsi->ch_rss_size)
9010fbc7b27aSKiran Patil 		ice_vsi_cfg_rss_lut_key(vsi);
9011fbc7b27aSKiran Patil 
9012fbc7b27aSKiran Patil exit:
9013fbc7b27aSKiran Patil 	/* if error, reset the all_numtc and all_enatc */
9014fbc7b27aSKiran Patil 	if (ret) {
9015fbc7b27aSKiran Patil 		vsi->all_numtc = 0;
9016fbc7b27aSKiran Patil 		vsi->all_enatc = 0;
9017fbc7b27aSKiran Patil 	}
9018fbc7b27aSKiran Patil 	/* resume VSI */
9019fbc7b27aSKiran Patil 	ice_ena_vsi(vsi, true);
9020fbc7b27aSKiran Patil 
9021fbc7b27aSKiran Patil 	return ret;
9022fbc7b27aSKiran Patil }
9023fbc7b27aSKiran Patil 
90240d08a441SKiran Patil static LIST_HEAD(ice_block_cb_list);
90250d08a441SKiran Patil 
90260d08a441SKiran Patil static int
90270d08a441SKiran Patil ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
90280d08a441SKiran Patil 	     void *type_data)
90290d08a441SKiran Patil {
90300d08a441SKiran Patil 	struct ice_netdev_priv *np = netdev_priv(netdev);
9031fbc7b27aSKiran Patil 	struct ice_pf *pf = np->vsi->back;
9032fbc7b27aSKiran Patil 	int err;
90330d08a441SKiran Patil 
90340d08a441SKiran Patil 	switch (type) {
90350d08a441SKiran Patil 	case TC_SETUP_BLOCK:
90360d08a441SKiran Patil 		return flow_block_cb_setup_simple(type_data,
90370d08a441SKiran Patil 						  &ice_block_cb_list,
90380d08a441SKiran Patil 						  ice_setup_tc_block_cb,
90390d08a441SKiran Patil 						  np, np, true);
9040fbc7b27aSKiran Patil 	case TC_SETUP_QDISC_MQPRIO:
9041fbc7b27aSKiran Patil 		/* setup traffic classifier for receive side */
9042fbc7b27aSKiran Patil 		mutex_lock(&pf->tc_mutex);
9043fbc7b27aSKiran Patil 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9044fbc7b27aSKiran Patil 		mutex_unlock(&pf->tc_mutex);
9045fbc7b27aSKiran Patil 		return err;
90460d08a441SKiran Patil 	default:
90470d08a441SKiran Patil 		return -EOPNOTSUPP;
90480d08a441SKiran Patil 	}
90490d08a441SKiran Patil 	return -EOPNOTSUPP;
90500d08a441SKiran Patil }
90510d08a441SKiran Patil 
9052195bb48fSMichal Swiatkowski static struct ice_indr_block_priv *
9053195bb48fSMichal Swiatkowski ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9054195bb48fSMichal Swiatkowski 			   struct net_device *netdev)
9055195bb48fSMichal Swiatkowski {
9056195bb48fSMichal Swiatkowski 	struct ice_indr_block_priv *cb_priv;
9057195bb48fSMichal Swiatkowski 
9058195bb48fSMichal Swiatkowski 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9059195bb48fSMichal Swiatkowski 		if (!cb_priv->netdev)
9060195bb48fSMichal Swiatkowski 			return NULL;
9061195bb48fSMichal Swiatkowski 		if (cb_priv->netdev == netdev)
9062195bb48fSMichal Swiatkowski 			return cb_priv;
9063195bb48fSMichal Swiatkowski 	}
9064195bb48fSMichal Swiatkowski 	return NULL;
9065195bb48fSMichal Swiatkowski }
9066195bb48fSMichal Swiatkowski 
9067195bb48fSMichal Swiatkowski static int
9068195bb48fSMichal Swiatkowski ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9069195bb48fSMichal Swiatkowski 			void *indr_priv)
9070195bb48fSMichal Swiatkowski {
9071195bb48fSMichal Swiatkowski 	struct ice_indr_block_priv *priv = indr_priv;
9072195bb48fSMichal Swiatkowski 	struct ice_netdev_priv *np = priv->np;
9073195bb48fSMichal Swiatkowski 
9074195bb48fSMichal Swiatkowski 	switch (type) {
9075195bb48fSMichal Swiatkowski 	case TC_SETUP_CLSFLOWER:
9076195bb48fSMichal Swiatkowski 		return ice_setup_tc_cls_flower(np, priv->netdev,
9077195bb48fSMichal Swiatkowski 					       (struct flow_cls_offload *)
9078195bb48fSMichal Swiatkowski 					       type_data);
9079195bb48fSMichal Swiatkowski 	default:
9080195bb48fSMichal Swiatkowski 		return -EOPNOTSUPP;
9081195bb48fSMichal Swiatkowski 	}
9082195bb48fSMichal Swiatkowski }
9083195bb48fSMichal Swiatkowski 
9084195bb48fSMichal Swiatkowski static int
9085195bb48fSMichal Swiatkowski ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9086195bb48fSMichal Swiatkowski 			struct ice_netdev_priv *np,
9087195bb48fSMichal Swiatkowski 			struct flow_block_offload *f, void *data,
9088195bb48fSMichal Swiatkowski 			void (*cleanup)(struct flow_block_cb *block_cb))
9089195bb48fSMichal Swiatkowski {
9090195bb48fSMichal Swiatkowski 	struct ice_indr_block_priv *indr_priv;
9091195bb48fSMichal Swiatkowski 	struct flow_block_cb *block_cb;
9092195bb48fSMichal Swiatkowski 
90939e300987SMichal Swiatkowski 	if (!ice_is_tunnel_supported(netdev) &&
90949e300987SMichal Swiatkowski 	    !(is_vlan_dev(netdev) &&
90959e300987SMichal Swiatkowski 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
90969e300987SMichal Swiatkowski 		return -EOPNOTSUPP;
90979e300987SMichal Swiatkowski 
9098195bb48fSMichal Swiatkowski 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9099195bb48fSMichal Swiatkowski 		return -EOPNOTSUPP;
9100195bb48fSMichal Swiatkowski 
9101195bb48fSMichal Swiatkowski 	switch (f->command) {
9102195bb48fSMichal Swiatkowski 	case FLOW_BLOCK_BIND:
9103195bb48fSMichal Swiatkowski 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9104195bb48fSMichal Swiatkowski 		if (indr_priv)
9105195bb48fSMichal Swiatkowski 			return -EEXIST;
9106195bb48fSMichal Swiatkowski 
9107195bb48fSMichal Swiatkowski 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9108195bb48fSMichal Swiatkowski 		if (!indr_priv)
9109195bb48fSMichal Swiatkowski 			return -ENOMEM;
9110195bb48fSMichal Swiatkowski 
9111195bb48fSMichal Swiatkowski 		indr_priv->netdev = netdev;
9112195bb48fSMichal Swiatkowski 		indr_priv->np = np;
9113195bb48fSMichal Swiatkowski 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9114195bb48fSMichal Swiatkowski 
9115195bb48fSMichal Swiatkowski 		block_cb =
9116195bb48fSMichal Swiatkowski 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9117195bb48fSMichal Swiatkowski 						 indr_priv, indr_priv,
9118195bb48fSMichal Swiatkowski 						 ice_rep_indr_tc_block_unbind,
9119195bb48fSMichal Swiatkowski 						 f, netdev, sch, data, np,
9120195bb48fSMichal Swiatkowski 						 cleanup);
9121195bb48fSMichal Swiatkowski 
9122195bb48fSMichal Swiatkowski 		if (IS_ERR(block_cb)) {
9123195bb48fSMichal Swiatkowski 			list_del(&indr_priv->list);
9124195bb48fSMichal Swiatkowski 			kfree(indr_priv);
9125195bb48fSMichal Swiatkowski 			return PTR_ERR(block_cb);
9126195bb48fSMichal Swiatkowski 		}
9127195bb48fSMichal Swiatkowski 		flow_block_cb_add(block_cb, f);
9128195bb48fSMichal Swiatkowski 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9129195bb48fSMichal Swiatkowski 		break;
9130195bb48fSMichal Swiatkowski 	case FLOW_BLOCK_UNBIND:
9131195bb48fSMichal Swiatkowski 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9132195bb48fSMichal Swiatkowski 		if (!indr_priv)
9133195bb48fSMichal Swiatkowski 			return -ENOENT;
9134195bb48fSMichal Swiatkowski 
9135195bb48fSMichal Swiatkowski 		block_cb = flow_block_cb_lookup(f->block,
9136195bb48fSMichal Swiatkowski 						ice_indr_setup_block_cb,
9137195bb48fSMichal Swiatkowski 						indr_priv);
9138195bb48fSMichal Swiatkowski 		if (!block_cb)
9139195bb48fSMichal Swiatkowski 			return -ENOENT;
9140195bb48fSMichal Swiatkowski 
9141195bb48fSMichal Swiatkowski 		flow_indr_block_cb_remove(block_cb, f);
9142195bb48fSMichal Swiatkowski 
9143195bb48fSMichal Swiatkowski 		list_del(&block_cb->driver_list);
9144195bb48fSMichal Swiatkowski 		break;
9145195bb48fSMichal Swiatkowski 	default:
9146195bb48fSMichal Swiatkowski 		return -EOPNOTSUPP;
9147195bb48fSMichal Swiatkowski 	}
9148195bb48fSMichal Swiatkowski 	return 0;
9149195bb48fSMichal Swiatkowski }
9150195bb48fSMichal Swiatkowski 
9151195bb48fSMichal Swiatkowski static int
9152195bb48fSMichal Swiatkowski ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9153195bb48fSMichal Swiatkowski 		     void *cb_priv, enum tc_setup_type type, void *type_data,
9154195bb48fSMichal Swiatkowski 		     void *data,
9155195bb48fSMichal Swiatkowski 		     void (*cleanup)(struct flow_block_cb *block_cb))
9156195bb48fSMichal Swiatkowski {
9157195bb48fSMichal Swiatkowski 	switch (type) {
9158195bb48fSMichal Swiatkowski 	case TC_SETUP_BLOCK:
9159195bb48fSMichal Swiatkowski 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9160195bb48fSMichal Swiatkowski 					       data, cleanup);
9161195bb48fSMichal Swiatkowski 
9162195bb48fSMichal Swiatkowski 	default:
9163195bb48fSMichal Swiatkowski 		return -EOPNOTSUPP;
9164195bb48fSMichal Swiatkowski 	}
9165195bb48fSMichal Swiatkowski }
9166195bb48fSMichal Swiatkowski 
91670d08a441SKiran Patil /**
9168cdedef59SAnirudh Venkataramanan  * ice_open - Called when a network interface becomes active
9169cdedef59SAnirudh Venkataramanan  * @netdev: network interface device structure
9170cdedef59SAnirudh Venkataramanan  *
9171cdedef59SAnirudh Venkataramanan  * The open entry point is called when a network interface is made
9172cdedef59SAnirudh Venkataramanan  * active by the system (IFF_UP). At this point all resources needed
9173cdedef59SAnirudh Venkataramanan  * for transmit and receive operations are allocated, the interrupt
9174cdedef59SAnirudh Venkataramanan  * handler is registered with the OS, the netdev watchdog is enabled,
9175cdedef59SAnirudh Venkataramanan  * and the stack is notified that the interface is ready.
9176cdedef59SAnirudh Venkataramanan  *
9177cdedef59SAnirudh Venkataramanan  * Returns 0 on success, negative value on failure
9178cdedef59SAnirudh Venkataramanan  */
91790e674aebSAnirudh Venkataramanan int ice_open(struct net_device *netdev)
9180cdedef59SAnirudh Venkataramanan {
9181cdedef59SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
9182e95fc857SKrzysztof Goreczny 	struct ice_pf *pf = np->vsi->back;
9183e95fc857SKrzysztof Goreczny 
9184e95fc857SKrzysztof Goreczny 	if (ice_is_reset_in_progress(pf->state)) {
9185e95fc857SKrzysztof Goreczny 		netdev_err(netdev, "can't open net device while reset is in progress");
9186e95fc857SKrzysztof Goreczny 		return -EBUSY;
9187e95fc857SKrzysztof Goreczny 	}
9188e95fc857SKrzysztof Goreczny 
9189e95fc857SKrzysztof Goreczny 	return ice_open_internal(netdev);
9190e95fc857SKrzysztof Goreczny }
9191e95fc857SKrzysztof Goreczny 
9192e95fc857SKrzysztof Goreczny /**
9193e95fc857SKrzysztof Goreczny  * ice_open_internal - Called when a network interface becomes active
9194e95fc857SKrzysztof Goreczny  * @netdev: network interface device structure
9195e95fc857SKrzysztof Goreczny  *
9196e95fc857SKrzysztof Goreczny  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
9197e95fc857SKrzysztof Goreczny  * handling routine
9198e95fc857SKrzysztof Goreczny  *
9199e95fc857SKrzysztof Goreczny  * Returns 0 on success, negative value on failure
9200e95fc857SKrzysztof Goreczny  */
9201e95fc857SKrzysztof Goreczny int ice_open_internal(struct net_device *netdev)
9202e95fc857SKrzysztof Goreczny {
9203e95fc857SKrzysztof Goreczny 	struct ice_netdev_priv *np = netdev_priv(netdev);
9204cdedef59SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
9205de75135bSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
92066d599946STony Nguyen 	struct ice_port_info *pi;
9207cdedef59SAnirudh Venkataramanan 	int err;
9208cdedef59SAnirudh Venkataramanan 
92097e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
92100f9d5027SAnirudh Venkataramanan 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
92110f9d5027SAnirudh Venkataramanan 		return -EIO;
92120f9d5027SAnirudh Venkataramanan 	}
92130f9d5027SAnirudh Venkataramanan 
9214cdedef59SAnirudh Venkataramanan 	netif_carrier_off(netdev);
9215cdedef59SAnirudh Venkataramanan 
92166d599946STony Nguyen 	pi = vsi->port_info;
92172ccc1c1cSTony Nguyen 	err = ice_update_link_info(pi);
92182ccc1c1cSTony Nguyen 	if (err) {
92192ccc1c1cSTony Nguyen 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9220c1484691STony Nguyen 		return err;
92216d599946STony Nguyen 	}
92226d599946STony Nguyen 
922399d40752SBrett Creeley 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9224c77849f5SAnirudh Venkataramanan 
92256d599946STony Nguyen 	/* Set PHY if there is media, otherwise, turn off PHY */
92266d599946STony Nguyen 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
92271a3571b5SPaul Greenwalt 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
92287e408e07SAnirudh Venkataramanan 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
92291a3571b5SPaul Greenwalt 			err = ice_init_phy_user_cfg(pi);
92301a3571b5SPaul Greenwalt 			if (err) {
92311a3571b5SPaul Greenwalt 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
92321a3571b5SPaul Greenwalt 					   err);
92331a3571b5SPaul Greenwalt 				return err;
92341a3571b5SPaul Greenwalt 			}
92351a3571b5SPaul Greenwalt 		}
92361a3571b5SPaul Greenwalt 
92371a3571b5SPaul Greenwalt 		err = ice_configure_phy(vsi);
9238b6f934f0SBrett Creeley 		if (err) {
923919cce2c6SAnirudh Venkataramanan 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
92406d599946STony Nguyen 				   err);
9241b6f934f0SBrett Creeley 			return err;
9242b6f934f0SBrett Creeley 		}
92436d599946STony Nguyen 	} else {
92441a3571b5SPaul Greenwalt 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9245d348d517SAnirudh Venkataramanan 		ice_set_link(vsi, false);
92466d599946STony Nguyen 	}
9247cdedef59SAnirudh Venkataramanan 
9248b6f934f0SBrett Creeley 	err = ice_vsi_open(vsi);
9249cdedef59SAnirudh Venkataramanan 	if (err)
9250cdedef59SAnirudh Venkataramanan 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9251cdedef59SAnirudh Venkataramanan 			   vsi->vsi_num, vsi->vsw->sw_id);
9252a4e82a81STony Nguyen 
9253a4e82a81STony Nguyen 	/* Update existing tunnels information */
9254a4e82a81STony Nguyen 	udp_tunnel_get_rx_info(netdev);
9255a4e82a81STony Nguyen 
9256cdedef59SAnirudh Venkataramanan 	return err;
9257cdedef59SAnirudh Venkataramanan }
9258cdedef59SAnirudh Venkataramanan 
9259cdedef59SAnirudh Venkataramanan /**
9260cdedef59SAnirudh Venkataramanan  * ice_stop - Disables a network interface
9261cdedef59SAnirudh Venkataramanan  * @netdev: network interface device structure
9262cdedef59SAnirudh Venkataramanan  *
9263cdedef59SAnirudh Venkataramanan  * The stop entry point is called when an interface is de-activated by the OS,
9264cdedef59SAnirudh Venkataramanan  * and the netdevice enters the DOWN state. The hardware is still under the
9265cdedef59SAnirudh Venkataramanan  * driver's control, but the netdev interface is disabled.
9266cdedef59SAnirudh Venkataramanan  *
9267cdedef59SAnirudh Venkataramanan  * Returns success only - not allowed to fail
9268cdedef59SAnirudh Venkataramanan  */
92690e674aebSAnirudh Venkataramanan int ice_stop(struct net_device *netdev)
9270cdedef59SAnirudh Venkataramanan {
9271cdedef59SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
9272cdedef59SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
9273e95fc857SKrzysztof Goreczny 	struct ice_pf *pf = vsi->back;
9274e95fc857SKrzysztof Goreczny 
9275e95fc857SKrzysztof Goreczny 	if (ice_is_reset_in_progress(pf->state)) {
9276e95fc857SKrzysztof Goreczny 		netdev_err(netdev, "can't stop net device while reset is in progress");
9277e95fc857SKrzysztof Goreczny 		return -EBUSY;
9278e95fc857SKrzysztof Goreczny 	}
9279cdedef59SAnirudh Venkataramanan 
92808ac71327SMateusz Palczewski 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
92818ac71327SMateusz Palczewski 		int link_err = ice_force_phys_link_state(vsi, false);
92828ac71327SMateusz Palczewski 
92838ac71327SMateusz Palczewski 		if (link_err) {
92848ac71327SMateusz Palczewski 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
92858ac71327SMateusz Palczewski 				   vsi->vsi_num, link_err);
92868ac71327SMateusz Palczewski 			return -EIO;
92878ac71327SMateusz Palczewski 		}
92888ac71327SMateusz Palczewski 	}
92898ac71327SMateusz Palczewski 
9290cdedef59SAnirudh Venkataramanan 	ice_vsi_close(vsi);
9291cdedef59SAnirudh Venkataramanan 
9292cdedef59SAnirudh Venkataramanan 	return 0;
9293cdedef59SAnirudh Venkataramanan }
9294cdedef59SAnirudh Venkataramanan 
9295e94d4478SAnirudh Venkataramanan /**
9296e94d4478SAnirudh Venkataramanan  * ice_features_check - Validate encapsulated packet conforms to limits
9297e94d4478SAnirudh Venkataramanan  * @skb: skb buffer
9298e94d4478SAnirudh Venkataramanan  * @netdev: This port's netdev
9299e94d4478SAnirudh Venkataramanan  * @features: Offload features that the stack believes apply
9300e94d4478SAnirudh Venkataramanan  */
9301e94d4478SAnirudh Venkataramanan static netdev_features_t
9302e94d4478SAnirudh Venkataramanan ice_features_check(struct sk_buff *skb,
9303e94d4478SAnirudh Venkataramanan 		   struct net_device __always_unused *netdev,
9304e94d4478SAnirudh Venkataramanan 		   netdev_features_t features)
9305e94d4478SAnirudh Venkataramanan {
930646b699c5SJesse Brandeburg 	bool gso = skb_is_gso(skb);
9307e94d4478SAnirudh Venkataramanan 	size_t len;
9308e94d4478SAnirudh Venkataramanan 
9309e94d4478SAnirudh Venkataramanan 	/* No point in doing any of this if neither checksum nor GSO are
9310e94d4478SAnirudh Venkataramanan 	 * being requested for this frame. We can rule out both by just
9311e94d4478SAnirudh Venkataramanan 	 * checking for CHECKSUM_PARTIAL
9312e94d4478SAnirudh Venkataramanan 	 */
9313e94d4478SAnirudh Venkataramanan 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9314e94d4478SAnirudh Venkataramanan 		return features;
9315e94d4478SAnirudh Venkataramanan 
9316e94d4478SAnirudh Venkataramanan 	/* We cannot support GSO if the MSS is going to be less than
9317e94d4478SAnirudh Venkataramanan 	 * 64 bytes. If it is then we need to drop support for GSO.
9318e94d4478SAnirudh Venkataramanan 	 */
931946b699c5SJesse Brandeburg 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9320e94d4478SAnirudh Venkataramanan 		features &= ~NETIF_F_GSO_MASK;
9321e94d4478SAnirudh Venkataramanan 
932246b699c5SJesse Brandeburg 	len = skb_network_offset(skb);
9323a4e82a81STony Nguyen 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9324e94d4478SAnirudh Venkataramanan 		goto out_rm_features;
9325e94d4478SAnirudh Venkataramanan 
932646b699c5SJesse Brandeburg 	len = skb_network_header_len(skb);
9327a4e82a81STony Nguyen 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9328e94d4478SAnirudh Venkataramanan 		goto out_rm_features;
9329e94d4478SAnirudh Venkataramanan 
9330e94d4478SAnirudh Venkataramanan 	if (skb->encapsulation) {
933146b699c5SJesse Brandeburg 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
933246b699c5SJesse Brandeburg 		 * the case of IPIP frames, the transport header pointer is
933346b699c5SJesse Brandeburg 		 * after the inner header! So check to make sure that this
933446b699c5SJesse Brandeburg 		 * is a GRE or UDP_TUNNEL frame before doing that math.
933546b699c5SJesse Brandeburg 		 */
933646b699c5SJesse Brandeburg 		if (gso && (skb_shinfo(skb)->gso_type &
933746b699c5SJesse Brandeburg 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
933846b699c5SJesse Brandeburg 			len = skb_inner_network_header(skb) -
933946b699c5SJesse Brandeburg 			      skb_transport_header(skb);
9340a4e82a81STony Nguyen 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9341e94d4478SAnirudh Venkataramanan 				goto out_rm_features;
934246b699c5SJesse Brandeburg 		}
9343e94d4478SAnirudh Venkataramanan 
934446b699c5SJesse Brandeburg 		len = skb_inner_network_header_len(skb);
9345a4e82a81STony Nguyen 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9346e94d4478SAnirudh Venkataramanan 			goto out_rm_features;
9347e94d4478SAnirudh Venkataramanan 	}
9348e94d4478SAnirudh Venkataramanan 
9349e94d4478SAnirudh Venkataramanan 	return features;
9350e94d4478SAnirudh Venkataramanan out_rm_features:
9351e94d4478SAnirudh Venkataramanan 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9352e94d4478SAnirudh Venkataramanan }
9353e94d4478SAnirudh Venkataramanan 
9354462acf6aSTony Nguyen static const struct net_device_ops ice_netdev_safe_mode_ops = {
9355462acf6aSTony Nguyen 	.ndo_open = ice_open,
9356462acf6aSTony Nguyen 	.ndo_stop = ice_stop,
9357462acf6aSTony Nguyen 	.ndo_start_xmit = ice_start_xmit,
9358462acf6aSTony Nguyen 	.ndo_set_mac_address = ice_set_mac_address,
9359462acf6aSTony Nguyen 	.ndo_validate_addr = eth_validate_addr,
9360462acf6aSTony Nguyen 	.ndo_change_mtu = ice_change_mtu,
9361462acf6aSTony Nguyen 	.ndo_get_stats64 = ice_get_stats64,
9362462acf6aSTony Nguyen 	.ndo_tx_timeout = ice_tx_timeout,
9363ebc5399eSMaciej Fijalkowski 	.ndo_bpf = ice_xdp_safe_mode,
9364462acf6aSTony Nguyen };
9365462acf6aSTony Nguyen 
9366cdedef59SAnirudh Venkataramanan static const struct net_device_ops ice_netdev_ops = {
9367cdedef59SAnirudh Venkataramanan 	.ndo_open = ice_open,
9368cdedef59SAnirudh Venkataramanan 	.ndo_stop = ice_stop,
93692b245cb2SAnirudh Venkataramanan 	.ndo_start_xmit = ice_start_xmit,
93702a87bd73SDave Ertman 	.ndo_select_queue = ice_select_queue,
9371e94d4478SAnirudh Venkataramanan 	.ndo_features_check = ice_features_check,
93721babaf77SBrett Creeley 	.ndo_fix_features = ice_fix_features,
9373e94d4478SAnirudh Venkataramanan 	.ndo_set_rx_mode = ice_set_rx_mode,
9374e94d4478SAnirudh Venkataramanan 	.ndo_set_mac_address = ice_set_mac_address,
9375e94d4478SAnirudh Venkataramanan 	.ndo_validate_addr = eth_validate_addr,
9376e94d4478SAnirudh Venkataramanan 	.ndo_change_mtu = ice_change_mtu,
9377fcea6f3dSAnirudh Venkataramanan 	.ndo_get_stats64 = ice_get_stats64,
93781ddef455SUsha Ketineni 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9379a7605370SArnd Bergmann 	.ndo_eth_ioctl = ice_eth_ioctl,
93807c710869SAnirudh Venkataramanan 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
93817c710869SAnirudh Venkataramanan 	.ndo_set_vf_mac = ice_set_vf_mac,
93827c710869SAnirudh Venkataramanan 	.ndo_get_vf_config = ice_get_vf_cfg,
93837c710869SAnirudh Venkataramanan 	.ndo_set_vf_trust = ice_set_vf_trust,
93847c710869SAnirudh Venkataramanan 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
93857c710869SAnirudh Venkataramanan 	.ndo_set_vf_link_state = ice_set_vf_link_state,
9386730fdea4SJesse Brandeburg 	.ndo_get_vf_stats = ice_get_vf_stats,
93874ecc8633SBrett Creeley 	.ndo_set_vf_rate = ice_set_vf_bw,
9388d76a60baSAnirudh Venkataramanan 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9389d76a60baSAnirudh Venkataramanan 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
93900d08a441SKiran Patil 	.ndo_setup_tc = ice_setup_tc,
9391d76a60baSAnirudh Venkataramanan 	.ndo_set_features = ice_set_features,
9392b1edc14aSMd Fahad Iqbal Polash 	.ndo_bridge_getlink = ice_bridge_getlink,
9393b1edc14aSMd Fahad Iqbal Polash 	.ndo_bridge_setlink = ice_bridge_setlink,
9394e94d4478SAnirudh Venkataramanan 	.ndo_fdb_add = ice_fdb_add,
9395e94d4478SAnirudh Venkataramanan 	.ndo_fdb_del = ice_fdb_del,
939628bf2672SBrett Creeley #ifdef CONFIG_RFS_ACCEL
939728bf2672SBrett Creeley 	.ndo_rx_flow_steer = ice_rx_flow_steer,
939828bf2672SBrett Creeley #endif
9399b3969fd7SSudheer Mogilappagari 	.ndo_tx_timeout = ice_tx_timeout,
9400efc2214bSMaciej Fijalkowski 	.ndo_bpf = ice_xdp,
9401efc2214bSMaciej Fijalkowski 	.ndo_xdp_xmit = ice_xdp_xmit,
94022d4238f5SKrzysztof Kazimierczak 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9403cdedef59SAnirudh Venkataramanan };
9404