1837f08fdSAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0
2837f08fdSAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */
3837f08fdSAnirudh Venkataramanan 
4837f08fdSAnirudh Venkataramanan /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5837f08fdSAnirudh Venkataramanan 
6837f08fdSAnirudh Venkataramanan #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7837f08fdSAnirudh Venkataramanan 
834a2a3b8SJeff Kirsher #include <generated/utsrelease.h>
9837f08fdSAnirudh Venkataramanan #include "ice.h"
10eff380aaSAnirudh Venkataramanan #include "ice_base.h"
1145d3d428SAnirudh Venkataramanan #include "ice_lib.h"
121b8f15b6SMichal Swiatkowski #include "ice_fltr.h"
1337b6f646SAnirudh Venkataramanan #include "ice_dcb_lib.h"
14b94b013eSDave Ertman #include "ice_dcb_nl.h"
151adf7eadSJacob Keller #include "ice_devlink.h"
163089cf6dSJesse Brandeburg /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
173089cf6dSJesse Brandeburg  * ice tracepoint functions. This must be done exactly once across the
183089cf6dSJesse Brandeburg  * ice driver.
193089cf6dSJesse Brandeburg  */
203089cf6dSJesse Brandeburg #define CREATE_TRACE_POINTS
213089cf6dSJesse Brandeburg #include "ice_trace.h"
22b3be918dSGrzegorz Nitka #include "ice_eswitch.h"
230d08a441SKiran Patil #include "ice_tc_lib.h"
24837f08fdSAnirudh Venkataramanan 
25837f08fdSAnirudh Venkataramanan #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
26837f08fdSAnirudh Venkataramanan static const char ice_driver_string[] = DRV_SUMMARY;
27837f08fdSAnirudh Venkataramanan static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
28837f08fdSAnirudh Venkataramanan 
29462acf6aSTony Nguyen /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
30462acf6aSTony Nguyen #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
31462acf6aSTony Nguyen #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
32462acf6aSTony Nguyen 
33837f08fdSAnirudh Venkataramanan MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
34837f08fdSAnirudh Venkataramanan MODULE_DESCRIPTION(DRV_SUMMARY);
3598674ebeSJesse Brandeburg MODULE_LICENSE("GPL v2");
36462acf6aSTony Nguyen MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
37837f08fdSAnirudh Venkataramanan 
38837f08fdSAnirudh Venkataramanan static int debug = -1;
39837f08fdSAnirudh Venkataramanan module_param(debug, int, 0644);
407ec59eeaSAnirudh Venkataramanan #ifndef CONFIG_DYNAMIC_DEBUG
417ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
427ec59eeaSAnirudh Venkataramanan #else
437ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
447ec59eeaSAnirudh Venkataramanan #endif /* !CONFIG_DYNAMIC_DEBUG */
45837f08fdSAnirudh Venkataramanan 
46d25a0fc4SDave Ertman static DEFINE_IDA(ice_aux_ida);
4722bf877eSMaciej Fijalkowski DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
4822bf877eSMaciej Fijalkowski EXPORT_SYMBOL(ice_xdp_locking_key);
49d25a0fc4SDave Ertman 
50940b61afSAnirudh Venkataramanan static struct workqueue_struct *ice_wq;
51462acf6aSTony Nguyen static const struct net_device_ops ice_netdev_safe_mode_ops;
52cdedef59SAnirudh Venkataramanan static const struct net_device_ops ice_netdev_ops;
53940b61afSAnirudh Venkataramanan 
54462acf6aSTony Nguyen static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
5528c2a645SAnirudh Venkataramanan 
560f9d5027SAnirudh Venkataramanan static void ice_vsi_release_all(struct ice_pf *pf);
573a858ba3SAnirudh Venkataramanan 
58fbc7b27aSKiran Patil static int ice_rebuild_channels(struct ice_pf *pf);
59fbc7b27aSKiran Patil static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
60fbc7b27aSKiran Patil 
61195bb48fSMichal Swiatkowski static int
62195bb48fSMichal Swiatkowski ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
63195bb48fSMichal Swiatkowski 		     void *cb_priv, enum tc_setup_type type, void *type_data,
64195bb48fSMichal Swiatkowski 		     void *data,
65195bb48fSMichal Swiatkowski 		     void (*cleanup)(struct flow_block_cb *block_cb));
66195bb48fSMichal Swiatkowski 
67df006dd4SDave Ertman bool netif_is_ice(struct net_device *dev)
68df006dd4SDave Ertman {
69df006dd4SDave Ertman 	return dev && (dev->netdev_ops == &ice_netdev_ops);
70df006dd4SDave Ertman }
71df006dd4SDave Ertman 
723a858ba3SAnirudh Venkataramanan /**
73b3969fd7SSudheer Mogilappagari  * ice_get_tx_pending - returns number of Tx descriptors not processed
74b3969fd7SSudheer Mogilappagari  * @ring: the ring of descriptors
75b3969fd7SSudheer Mogilappagari  */
76e72bba21SMaciej Fijalkowski static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
77b3969fd7SSudheer Mogilappagari {
78c1ddf1f5SBrett Creeley 	u16 head, tail;
79b3969fd7SSudheer Mogilappagari 
80b3969fd7SSudheer Mogilappagari 	head = ring->next_to_clean;
81c1ddf1f5SBrett Creeley 	tail = ring->next_to_use;
82b3969fd7SSudheer Mogilappagari 
83b3969fd7SSudheer Mogilappagari 	if (head != tail)
84b3969fd7SSudheer Mogilappagari 		return (head < tail) ?
85b3969fd7SSudheer Mogilappagari 			tail - head : (tail + ring->count - head);
86b3969fd7SSudheer Mogilappagari 	return 0;
87b3969fd7SSudheer Mogilappagari }
88b3969fd7SSudheer Mogilappagari 
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 *
 * Walks the Tx rings of the PF VSI. A ring whose packet counter has not
 * advanced since the previous pass, but which still had pending descriptors,
 * is treated as stalled and a software interrupt is triggered on its vector
 * to revive the cleanup path. Only runs while the VSI is up with carrier.
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	/* only the PF's own VSI is monitored for hangs here */
	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];

		if (!tx_ring)
			continue;
		/* rings attached to ADQ channels are skipped */
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
			    ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
147b3969fd7SSudheer Mogilappagari 
148b3969fd7SSudheer Mogilappagari /**
149561f4379STony Nguyen  * ice_init_mac_fltr - Set initial MAC filters
150561f4379STony Nguyen  * @pf: board private structure
151561f4379STony Nguyen  *
1522f2da36eSAnirudh Venkataramanan  * Set initial set of MAC filters for PF VSI; configure filters for permanent
153561f4379STony Nguyen  * address and broadcast address. If an error is encountered, netdevice will be
154561f4379STony Nguyen  * unregistered.
155561f4379STony Nguyen  */
156561f4379STony Nguyen static int ice_init_mac_fltr(struct ice_pf *pf)
157561f4379STony Nguyen {
158561f4379STony Nguyen 	struct ice_vsi *vsi;
1591b8f15b6SMichal Swiatkowski 	u8 *perm_addr;
160561f4379STony Nguyen 
161208ff751SAnirudh Venkataramanan 	vsi = ice_get_main_vsi(pf);
162561f4379STony Nguyen 	if (!vsi)
163561f4379STony Nguyen 		return -EINVAL;
164561f4379STony Nguyen 
1651b8f15b6SMichal Swiatkowski 	perm_addr = vsi->port_info->mac.perm_addr;
166c1484691STony Nguyen 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
167561f4379STony Nguyen }
168561f4379STony Nguyen 
169561f4379STony Nguyen /**
170f9867df6SAnirudh Venkataramanan  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
171e94d4478SAnirudh Venkataramanan  * @netdev: the net device on which the sync is happening
172f9867df6SAnirudh Venkataramanan  * @addr: MAC address to sync
173e94d4478SAnirudh Venkataramanan  *
174e94d4478SAnirudh Venkataramanan  * This is a callback function which is called by the in kernel device sync
175e94d4478SAnirudh Venkataramanan  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
176e94d4478SAnirudh Venkataramanan  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
177f9867df6SAnirudh Venkataramanan  * MAC filters from the hardware.
178e94d4478SAnirudh Venkataramanan  */
179e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
180e94d4478SAnirudh Venkataramanan {
181e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
182e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
183e94d4478SAnirudh Venkataramanan 
1841b8f15b6SMichal Swiatkowski 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
1851b8f15b6SMichal Swiatkowski 				     ICE_FWD_TO_VSI))
186e94d4478SAnirudh Venkataramanan 		return -EINVAL;
187e94d4478SAnirudh Venkataramanan 
188e94d4478SAnirudh Venkataramanan 	return 0;
189e94d4478SAnirudh Venkataramanan }
190e94d4478SAnirudh Venkataramanan 
191e94d4478SAnirudh Venkataramanan /**
192f9867df6SAnirudh Venkataramanan  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
193e94d4478SAnirudh Venkataramanan  * @netdev: the net device on which the unsync is happening
194f9867df6SAnirudh Venkataramanan  * @addr: MAC address to unsync
195e94d4478SAnirudh Venkataramanan  *
196e94d4478SAnirudh Venkataramanan  * This is a callback function which is called by the in kernel device unsync
197e94d4478SAnirudh Venkataramanan  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
198e94d4478SAnirudh Venkataramanan  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
199f9867df6SAnirudh Venkataramanan  * delete the MAC filters from the hardware.
200e94d4478SAnirudh Venkataramanan  */
201e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
202e94d4478SAnirudh Venkataramanan {
203e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
204e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
205e94d4478SAnirudh Venkataramanan 
2063ba7f53fSBrett Creeley 	/* Under some circumstances, we might receive a request to delete our
2073ba7f53fSBrett Creeley 	 * own device address from our uc list. Because we store the device
2083ba7f53fSBrett Creeley 	 * address in the VSI's MAC filter list, we need to ignore such
2093ba7f53fSBrett Creeley 	 * requests and not delete our device address from this list.
2103ba7f53fSBrett Creeley 	 */
2113ba7f53fSBrett Creeley 	if (ether_addr_equal(addr, netdev->dev_addr))
2123ba7f53fSBrett Creeley 		return 0;
2133ba7f53fSBrett Creeley 
2141b8f15b6SMichal Swiatkowski 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
2151b8f15b6SMichal Swiatkowski 				     ICE_FWD_TO_VSI))
216e94d4478SAnirudh Venkataramanan 		return -EINVAL;
217e94d4478SAnirudh Venkataramanan 
218e94d4478SAnirudh Venkataramanan 	return 0;
219e94d4478SAnirudh Venkataramanan }
220e94d4478SAnirudh Venkataramanan 
221e94d4478SAnirudh Venkataramanan /**
222e94d4478SAnirudh Venkataramanan  * ice_vsi_fltr_changed - check if filter state changed
223e94d4478SAnirudh Venkataramanan  * @vsi: VSI to be checked
224e94d4478SAnirudh Venkataramanan  *
225e94d4478SAnirudh Venkataramanan  * returns true if filter state has changed, false otherwise.
226e94d4478SAnirudh Venkataramanan  */
227e94d4478SAnirudh Venkataramanan static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
228e94d4478SAnirudh Venkataramanan {
229e97fb1aeSAnirudh Venkataramanan 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
230e97fb1aeSAnirudh Venkataramanan 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
231e97fb1aeSAnirudh Venkataramanan 	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
232e94d4478SAnirudh Venkataramanan }
233e94d4478SAnirudh Venkataramanan 
234e94d4478SAnirudh Venkataramanan /**
235fabf480bSBrett Creeley  * ice_set_promisc - Enable promiscuous mode for a given PF
2365eda8afdSAkeem G Abodunrin  * @vsi: the VSI being configured
2375eda8afdSAkeem G Abodunrin  * @promisc_m: mask of promiscuous config bits
2385eda8afdSAkeem G Abodunrin  *
2395eda8afdSAkeem G Abodunrin  */
240fabf480bSBrett Creeley static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
2415eda8afdSAkeem G Abodunrin {
2425e24d598STony Nguyen 	int status;
2435eda8afdSAkeem G Abodunrin 
2445eda8afdSAkeem G Abodunrin 	if (vsi->type != ICE_VSI_PF)
2455eda8afdSAkeem G Abodunrin 		return 0;
2465eda8afdSAkeem G Abodunrin 
247fabf480bSBrett Creeley 	if (vsi->num_vlan > 1)
248fabf480bSBrett Creeley 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
2495eda8afdSAkeem G Abodunrin 	else
250fabf480bSBrett Creeley 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
251c1484691STony Nguyen 	return status;
2525eda8afdSAkeem G Abodunrin }
2535eda8afdSAkeem G Abodunrin 
254fabf480bSBrett Creeley /**
255fabf480bSBrett Creeley  * ice_clear_promisc - Disable promiscuous mode for a given PF
256fabf480bSBrett Creeley  * @vsi: the VSI being configured
257fabf480bSBrett Creeley  * @promisc_m: mask of promiscuous config bits
258fabf480bSBrett Creeley  *
259fabf480bSBrett Creeley  */
260fabf480bSBrett Creeley static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
261fabf480bSBrett Creeley {
2625e24d598STony Nguyen 	int status;
263fabf480bSBrett Creeley 
264fabf480bSBrett Creeley 	if (vsi->type != ICE_VSI_PF)
265fabf480bSBrett Creeley 		return 0;
266fabf480bSBrett Creeley 
267fabf480bSBrett Creeley 	if (vsi->num_vlan > 1)
268fabf480bSBrett Creeley 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
269fabf480bSBrett Creeley 	else
270fabf480bSBrett Creeley 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
271c1484691STony Nguyen 	return status;
2725eda8afdSAkeem G Abodunrin }
2735eda8afdSAkeem G Abodunrin 
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Serializes on the ICE_CFG_BUSY state bit, snapshots the netdev flag delta,
 * collects unicast/multicast address changes into temporary lists under the
 * netdev address lock, programs MAC removals then additions, and finally
 * reconciles allmulti/promiscuous state. On MAC-filter overflow (ENOSPC)
 * the VSI is forced into promiscuous mode rather than failing.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	/* busy-wait for exclusive access to this VSI's config */
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* XOR yields the flags that flipped since the last sync */
	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			/* multiple VLANs require the VLAN-aware promisc mask */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_set_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				/* roll back the cached flag so a later sync retries */
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_clear_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				/* promisc implies seeing all traffic, so VLAN
				 * Rx filtering is disabled while it is on
				 */
				vsi->vlan_ops.dis_rx_filtering(vsi);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					vsi->vlan_ops.ena_rx_filtering(vsi);
			}
		}
	}
	goto exit;

out_promisc:
	/* retry promisc reconciliation on the next sync pass */
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
430e94d4478SAnirudh Venkataramanan 
431e94d4478SAnirudh Venkataramanan /**
432e94d4478SAnirudh Venkataramanan  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
433e94d4478SAnirudh Venkataramanan  * @pf: board private structure
434e94d4478SAnirudh Venkataramanan  */
435e94d4478SAnirudh Venkataramanan static void ice_sync_fltr_subtask(struct ice_pf *pf)
436e94d4478SAnirudh Venkataramanan {
437e94d4478SAnirudh Venkataramanan 	int v;
438e94d4478SAnirudh Venkataramanan 
439e94d4478SAnirudh Venkataramanan 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
440e94d4478SAnirudh Venkataramanan 		return;
441e94d4478SAnirudh Venkataramanan 
442e94d4478SAnirudh Venkataramanan 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
443e94d4478SAnirudh Venkataramanan 
44480ed404aSBrett Creeley 	ice_for_each_vsi(pf, v)
445e94d4478SAnirudh Venkataramanan 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
446e94d4478SAnirudh Venkataramanan 		    ice_vsi_sync_fltr(pf->vsi[v])) {
447e94d4478SAnirudh Venkataramanan 			/* come back and try again later */
448e94d4478SAnirudh Venkataramanan 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
449e94d4478SAnirudh Venkataramanan 			break;
450e94d4478SAnirudh Venkataramanan 		}
451e94d4478SAnirudh Venkataramanan }
452e94d4478SAnirudh Venkataramanan 
453e94d4478SAnirudh Venkataramanan /**
4547b9ffc76SAnirudh Venkataramanan  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
4557b9ffc76SAnirudh Venkataramanan  * @pf: the PF
4567b9ffc76SAnirudh Venkataramanan  * @locked: is the rtnl_lock already held
4577b9ffc76SAnirudh Venkataramanan  */
4587b9ffc76SAnirudh Venkataramanan static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
4597b9ffc76SAnirudh Venkataramanan {
460b126bd6bSKiran Patil 	int node;
4617b9ffc76SAnirudh Venkataramanan 	int v;
4627b9ffc76SAnirudh Venkataramanan 
4637b9ffc76SAnirudh Venkataramanan 	ice_for_each_vsi(pf, v)
4647b9ffc76SAnirudh Venkataramanan 		if (pf->vsi[v])
4657b9ffc76SAnirudh Venkataramanan 			ice_dis_vsi(pf->vsi[v], locked);
466b126bd6bSKiran Patil 
467b126bd6bSKiran Patil 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
468b126bd6bSKiran Patil 		pf->pf_agg_node[node].num_vsis = 0;
469b126bd6bSKiran Patil 
470b126bd6bSKiran Patil 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
471b126bd6bSKiran Patil 		pf->vf_agg_node[node].num_vsis = 0;
4727b9ffc76SAnirudh Venkataramanan }
4737b9ffc76SAnirudh Venkataramanan 
4747b9ffc76SAnirudh Venkataramanan /**
475c1e5da5dSWojciech Drewek  * ice_clear_sw_switch_recipes - clear switch recipes
476c1e5da5dSWojciech Drewek  * @pf: board private structure
477c1e5da5dSWojciech Drewek  *
478c1e5da5dSWojciech Drewek  * Mark switch recipes as not created in sw structures. There are cases where
479c1e5da5dSWojciech Drewek  * rules (especially advanced rules) need to be restored, either re-read from
480c1e5da5dSWojciech Drewek  * hardware or added again. For example after the reset. 'recp_created' flag
481c1e5da5dSWojciech Drewek  * prevents from doing that and need to be cleared upfront.
482c1e5da5dSWojciech Drewek  */
483c1e5da5dSWojciech Drewek static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
484c1e5da5dSWojciech Drewek {
485c1e5da5dSWojciech Drewek 	struct ice_sw_recipe *recp;
486c1e5da5dSWojciech Drewek 	u8 i;
487c1e5da5dSWojciech Drewek 
488c1e5da5dSWojciech Drewek 	recp = pf->hw.switch_info->recp_list;
489c1e5da5dSWojciech Drewek 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
490c1e5da5dSWojciech Drewek 		recp[i].recp_created = false;
491c1e5da5dSWojciech Drewek }
492c1e5da5dSWojciech Drewek 
/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 *
 * Idempotent via the ICE_PREPARED_FOR_RESET state bit. Order matters:
 * auxiliary device unplug and VF notification happen before queues are
 * torn down, and the control queues are shut down last.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	unsigned int i;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* detach the auxiliary (e.g. RDMA) device before HW goes away */
	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* in switchdev mode, advanced rules must be re-added after any
	 * reset other than PFR, so drop the cached recipe state
	 */
	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			/* PFR supports channel rebuild: stash TC state */
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
5750b28b702SAnirudh Venkataramanan 
5760b28b702SAnirudh Venkataramanan /**
5770b28b702SAnirudh Venkataramanan  * ice_do_reset - Initiate one of many types of resets
5780b28b702SAnirudh Venkataramanan  * @pf: board private structure
579fbc7b27aSKiran Patil  * @reset_type: reset type requested before this function was called.
5800b28b702SAnirudh Venkataramanan  */
5810b28b702SAnirudh Venkataramanan static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
5820b28b702SAnirudh Venkataramanan {
5834015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
5840b28b702SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
5850b28b702SAnirudh Venkataramanan 
5860b28b702SAnirudh Venkataramanan 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
5870b28b702SAnirudh Venkataramanan 
588fbc7b27aSKiran Patil 	ice_prepare_for_reset(pf, reset_type);
5890b28b702SAnirudh Venkataramanan 
5900b28b702SAnirudh Venkataramanan 	/* trigger the reset */
5910b28b702SAnirudh Venkataramanan 	if (ice_reset(hw, reset_type)) {
5920b28b702SAnirudh Venkataramanan 		dev_err(dev, "reset %d failed\n", reset_type);
5937e408e07SAnirudh Venkataramanan 		set_bit(ICE_RESET_FAILED, pf->state);
5947e408e07SAnirudh Venkataramanan 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
5957e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
5967e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PFR_REQ, pf->state);
5977e408e07SAnirudh Venkataramanan 		clear_bit(ICE_CORER_REQ, pf->state);
5987e408e07SAnirudh Venkataramanan 		clear_bit(ICE_GLOBR_REQ, pf->state);
5991c08052eSJacob Keller 		wake_up(&pf->reset_wait_queue);
6000b28b702SAnirudh Venkataramanan 		return;
6010b28b702SAnirudh Venkataramanan 	}
6020b28b702SAnirudh Venkataramanan 
6030f9d5027SAnirudh Venkataramanan 	/* PFR is a bit of a special case because it doesn't result in an OICR
6040f9d5027SAnirudh Venkataramanan 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
6050f9d5027SAnirudh Venkataramanan 	 * associated state bits.
6060f9d5027SAnirudh Venkataramanan 	 */
6070b28b702SAnirudh Venkataramanan 	if (reset_type == ICE_RESET_PFR) {
6080b28b702SAnirudh Venkataramanan 		pf->pfr_count++;
609462acf6aSTony Nguyen 		ice_rebuild(pf, reset_type);
6107e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
6117e408e07SAnirudh Venkataramanan 		clear_bit(ICE_PFR_REQ, pf->state);
6121c08052eSJacob Keller 		wake_up(&pf->reset_wait_queue);
6131c44e3bcSAkeem G Abodunrin 		ice_reset_all_vfs(pf, true);
6140b28b702SAnirudh Venkataramanan 	}
6150b28b702SAnirudh Venkataramanan }
6160b28b702SAnirudh Venkataramanan 
6170b28b702SAnirudh Venkataramanan /**
6180b28b702SAnirudh Venkataramanan  * ice_reset_subtask - Set up for resetting the device and driver
6190b28b702SAnirudh Venkataramanan  * @pf: board private structure
6200b28b702SAnirudh Venkataramanan  */
6210b28b702SAnirudh Venkataramanan static void ice_reset_subtask(struct ice_pf *pf)
6220b28b702SAnirudh Venkataramanan {
6230f9d5027SAnirudh Venkataramanan 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
6240b28b702SAnirudh Venkataramanan 
6250b28b702SAnirudh Venkataramanan 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
6260f9d5027SAnirudh Venkataramanan 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
6270f9d5027SAnirudh Venkataramanan 	 * of reset is pending and sets bits in pf->state indicating the reset
6287e408e07SAnirudh Venkataramanan 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
6290f9d5027SAnirudh Venkataramanan 	 * prepare for pending reset if not already (for PF software-initiated
6300f9d5027SAnirudh Venkataramanan 	 * global resets the software should already be prepared for it as
6317e408e07SAnirudh Venkataramanan 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
6320f9d5027SAnirudh Venkataramanan 	 * by firmware or software on other PFs, that bit is not set so prepare
6330f9d5027SAnirudh Venkataramanan 	 * for the reset now), poll for reset done, rebuild and return.
6340b28b702SAnirudh Venkataramanan 	 */
6357e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
6362ebd4428SDave Ertman 		/* Perform the largest reset requested */
6377e408e07SAnirudh Venkataramanan 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
6382ebd4428SDave Ertman 			reset_type = ICE_RESET_CORER;
6397e408e07SAnirudh Venkataramanan 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
6402ebd4428SDave Ertman 			reset_type = ICE_RESET_GLOBR;
6417e408e07SAnirudh Venkataramanan 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
64203af8406SAnirudh Venkataramanan 			reset_type = ICE_RESET_EMPR;
6432ebd4428SDave Ertman 		/* return if no valid reset type requested */
6442ebd4428SDave Ertman 		if (reset_type == ICE_RESET_INVAL)
6452ebd4428SDave Ertman 			return;
646fbc7b27aSKiran Patil 		ice_prepare_for_reset(pf, reset_type);
6470b28b702SAnirudh Venkataramanan 
6480b28b702SAnirudh Venkataramanan 		/* make sure we are ready to rebuild */
649fd2a9817SAnirudh Venkataramanan 		if (ice_check_reset(&pf->hw)) {
6507e408e07SAnirudh Venkataramanan 			set_bit(ICE_RESET_FAILED, pf->state);
651fd2a9817SAnirudh Venkataramanan 		} else {
652fd2a9817SAnirudh Venkataramanan 			/* done with reset. start rebuild */
653fd2a9817SAnirudh Venkataramanan 			pf->hw.reset_ongoing = false;
654462acf6aSTony Nguyen 			ice_rebuild(pf, reset_type);
6550f9d5027SAnirudh Venkataramanan 			/* clear bit to resume normal operations, but
6560f9d5027SAnirudh Venkataramanan 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
6570f9d5027SAnirudh Venkataramanan 			 */
6587e408e07SAnirudh Venkataramanan 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
6597e408e07SAnirudh Venkataramanan 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
6607e408e07SAnirudh Venkataramanan 			clear_bit(ICE_PFR_REQ, pf->state);
6617e408e07SAnirudh Venkataramanan 			clear_bit(ICE_CORER_REQ, pf->state);
6627e408e07SAnirudh Venkataramanan 			clear_bit(ICE_GLOBR_REQ, pf->state);
6631c08052eSJacob Keller 			wake_up(&pf->reset_wait_queue);
6641c44e3bcSAkeem G Abodunrin 			ice_reset_all_vfs(pf, true);
6650f9d5027SAnirudh Venkataramanan 		}
6660f9d5027SAnirudh Venkataramanan 
6670f9d5027SAnirudh Venkataramanan 		return;
6680b28b702SAnirudh Venkataramanan 	}
6690b28b702SAnirudh Venkataramanan 
6700b28b702SAnirudh Venkataramanan 	/* No pending resets to finish processing. Check for new resets */
6717e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_PFR_REQ, pf->state))
6720f9d5027SAnirudh Venkataramanan 		reset_type = ICE_RESET_PFR;
6737e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_CORER_REQ, pf->state))
6740f9d5027SAnirudh Venkataramanan 		reset_type = ICE_RESET_CORER;
6757e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_GLOBR_REQ, pf->state))
6760b28b702SAnirudh Venkataramanan 		reset_type = ICE_RESET_GLOBR;
6770f9d5027SAnirudh Venkataramanan 	/* If no valid reset type requested just return */
6780f9d5027SAnirudh Venkataramanan 	if (reset_type == ICE_RESET_INVAL)
6790f9d5027SAnirudh Venkataramanan 		return;
6800b28b702SAnirudh Venkataramanan 
6810f9d5027SAnirudh Venkataramanan 	/* reset if not already down or busy */
6827e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_DOWN, pf->state) &&
6837e408e07SAnirudh Venkataramanan 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
6840b28b702SAnirudh Venkataramanan 		ice_do_reset(pf, reset_type);
6850b28b702SAnirudh Venkataramanan 	}
6860b28b702SAnirudh Venkataramanan }
6870b28b702SAnirudh Venkataramanan 
6880b28b702SAnirudh Venkataramanan /**
6892e0ab37cSJesse Brandeburg  * ice_print_topo_conflict - print topology conflict message
6902e0ab37cSJesse Brandeburg  * @vsi: the VSI whose topology status is being checked
6912e0ab37cSJesse Brandeburg  */
6922e0ab37cSJesse Brandeburg static void ice_print_topo_conflict(struct ice_vsi *vsi)
6932e0ab37cSJesse Brandeburg {
6942e0ab37cSJesse Brandeburg 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
6952e0ab37cSJesse Brandeburg 	case ICE_AQ_LINK_TOPO_CONFLICT:
6962e0ab37cSJesse Brandeburg 	case ICE_AQ_LINK_MEDIA_CONFLICT:
6975878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
6985878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
6995878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
7005c57145aSPaul Greenwalt 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
7012e0ab37cSJesse Brandeburg 		break;
7025878589dSPaul Greenwalt 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
7034fc5fbeeSAnirudh Venkataramanan 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
7044fc5fbeeSAnirudh Venkataramanan 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
7054fc5fbeeSAnirudh Venkataramanan 		else
7064fc5fbeeSAnirudh Venkataramanan 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
7075878589dSPaul Greenwalt 		break;
7082e0ab37cSJesse Brandeburg 	default:
7092e0ab37cSJesse Brandeburg 		break;
7102e0ab37cSJesse Brandeburg 	}
7112e0ab37cSJesse Brandeburg }
7122e0ab37cSJesse Brandeburg 
7132e0ab37cSJesse Brandeburg /**
714cdedef59SAnirudh Venkataramanan  * ice_print_link_msg - print link up or down message
715cdedef59SAnirudh Venkataramanan  * @vsi: the VSI whose link status is being queried
716cdedef59SAnirudh Venkataramanan  * @isup: boolean for if the link is now up or down
717cdedef59SAnirudh Venkataramanan  */
718fcea6f3dSAnirudh Venkataramanan void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
719cdedef59SAnirudh Venkataramanan {
720f776b3acSPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *caps;
7215ee30564SPaul Greenwalt 	const char *an_advertised;
722f776b3acSPaul Greenwalt 	const char *fec_req;
723cdedef59SAnirudh Venkataramanan 	const char *speed;
724f776b3acSPaul Greenwalt 	const char *fec;
725cdedef59SAnirudh Venkataramanan 	const char *fc;
72643260988SJesse Brandeburg 	const char *an;
7275518ac2aSTony Nguyen 	int status;
728cdedef59SAnirudh Venkataramanan 
729c2a23e00SBrett Creeley 	if (!vsi)
730c2a23e00SBrett Creeley 		return;
731c2a23e00SBrett Creeley 
732cdedef59SAnirudh Venkataramanan 	if (vsi->current_isup == isup)
733cdedef59SAnirudh Venkataramanan 		return;
734cdedef59SAnirudh Venkataramanan 
735cdedef59SAnirudh Venkataramanan 	vsi->current_isup = isup;
736cdedef59SAnirudh Venkataramanan 
737cdedef59SAnirudh Venkataramanan 	if (!isup) {
738cdedef59SAnirudh Venkataramanan 		netdev_info(vsi->netdev, "NIC Link is Down\n");
739cdedef59SAnirudh Venkataramanan 		return;
740cdedef59SAnirudh Venkataramanan 	}
741cdedef59SAnirudh Venkataramanan 
742cdedef59SAnirudh Venkataramanan 	switch (vsi->port_info->phy.link_info.link_speed) {
743072efdf8SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_100GB:
744072efdf8SAnirudh Venkataramanan 		speed = "100 G";
745072efdf8SAnirudh Venkataramanan 		break;
746072efdf8SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_50GB:
747072efdf8SAnirudh Venkataramanan 		speed = "50 G";
748072efdf8SAnirudh Venkataramanan 		break;
749cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_40GB:
750cdedef59SAnirudh Venkataramanan 		speed = "40 G";
751cdedef59SAnirudh Venkataramanan 		break;
752cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_25GB:
753cdedef59SAnirudh Venkataramanan 		speed = "25 G";
754cdedef59SAnirudh Venkataramanan 		break;
755cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_20GB:
756cdedef59SAnirudh Venkataramanan 		speed = "20 G";
757cdedef59SAnirudh Venkataramanan 		break;
758cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_10GB:
759cdedef59SAnirudh Venkataramanan 		speed = "10 G";
760cdedef59SAnirudh Venkataramanan 		break;
761cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_5GB:
762cdedef59SAnirudh Venkataramanan 		speed = "5 G";
763cdedef59SAnirudh Venkataramanan 		break;
764cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_2500MB:
765cdedef59SAnirudh Venkataramanan 		speed = "2.5 G";
766cdedef59SAnirudh Venkataramanan 		break;
767cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_1000MB:
768cdedef59SAnirudh Venkataramanan 		speed = "1 G";
769cdedef59SAnirudh Venkataramanan 		break;
770cdedef59SAnirudh Venkataramanan 	case ICE_AQ_LINK_SPEED_100MB:
771cdedef59SAnirudh Venkataramanan 		speed = "100 M";
772cdedef59SAnirudh Venkataramanan 		break;
773cdedef59SAnirudh Venkataramanan 	default:
774cdedef59SAnirudh Venkataramanan 		speed = "Unknown ";
775cdedef59SAnirudh Venkataramanan 		break;
776cdedef59SAnirudh Venkataramanan 	}
777cdedef59SAnirudh Venkataramanan 
778cdedef59SAnirudh Venkataramanan 	switch (vsi->port_info->fc.current_mode) {
779cdedef59SAnirudh Venkataramanan 	case ICE_FC_FULL:
7802f2da36eSAnirudh Venkataramanan 		fc = "Rx/Tx";
781cdedef59SAnirudh Venkataramanan 		break;
782cdedef59SAnirudh Venkataramanan 	case ICE_FC_TX_PAUSE:
7832f2da36eSAnirudh Venkataramanan 		fc = "Tx";
784cdedef59SAnirudh Venkataramanan 		break;
785cdedef59SAnirudh Venkataramanan 	case ICE_FC_RX_PAUSE:
7862f2da36eSAnirudh Venkataramanan 		fc = "Rx";
787cdedef59SAnirudh Venkataramanan 		break;
788203a068aSBrett Creeley 	case ICE_FC_NONE:
789203a068aSBrett Creeley 		fc = "None";
790203a068aSBrett Creeley 		break;
791cdedef59SAnirudh Venkataramanan 	default:
792cdedef59SAnirudh Venkataramanan 		fc = "Unknown";
793cdedef59SAnirudh Venkataramanan 		break;
794cdedef59SAnirudh Venkataramanan 	}
795cdedef59SAnirudh Venkataramanan 
796f776b3acSPaul Greenwalt 	/* Get FEC mode based on negotiated link info */
797f776b3acSPaul Greenwalt 	switch (vsi->port_info->phy.link_info.fec_info) {
798f776b3acSPaul Greenwalt 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
799f776b3acSPaul Greenwalt 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
800f776b3acSPaul Greenwalt 		fec = "RS-FEC";
801f776b3acSPaul Greenwalt 		break;
802f776b3acSPaul Greenwalt 	case ICE_AQ_LINK_25G_KR_FEC_EN:
803f776b3acSPaul Greenwalt 		fec = "FC-FEC/BASE-R";
804f776b3acSPaul Greenwalt 		break;
805f776b3acSPaul Greenwalt 	default:
806f776b3acSPaul Greenwalt 		fec = "NONE";
807f776b3acSPaul Greenwalt 		break;
808f776b3acSPaul Greenwalt 	}
809f776b3acSPaul Greenwalt 
81043260988SJesse Brandeburg 	/* check if autoneg completed, might be false due to not supported */
81143260988SJesse Brandeburg 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
81243260988SJesse Brandeburg 		an = "True";
81343260988SJesse Brandeburg 	else
81443260988SJesse Brandeburg 		an = "False";
81543260988SJesse Brandeburg 
816f776b3acSPaul Greenwalt 	/* Get FEC mode requested based on PHY caps last SW configuration */
8179efe35d0STony Nguyen 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
818f776b3acSPaul Greenwalt 	if (!caps) {
819f776b3acSPaul Greenwalt 		fec_req = "Unknown";
8205ee30564SPaul Greenwalt 		an_advertised = "Unknown";
821f776b3acSPaul Greenwalt 		goto done;
822f776b3acSPaul Greenwalt 	}
823f776b3acSPaul Greenwalt 
824f776b3acSPaul Greenwalt 	status = ice_aq_get_phy_caps(vsi->port_info, false,
825d6730a87SAnirudh Venkataramanan 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
826f776b3acSPaul Greenwalt 	if (status)
827f776b3acSPaul Greenwalt 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
828f776b3acSPaul Greenwalt 
8295ee30564SPaul Greenwalt 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
8305ee30564SPaul Greenwalt 
831f776b3acSPaul Greenwalt 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
832f776b3acSPaul Greenwalt 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
833f776b3acSPaul Greenwalt 		fec_req = "RS-FEC";
834f776b3acSPaul Greenwalt 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
835f776b3acSPaul Greenwalt 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
836f776b3acSPaul Greenwalt 		fec_req = "FC-FEC/BASE-R";
837f776b3acSPaul Greenwalt 	else
838f776b3acSPaul Greenwalt 		fec_req = "NONE";
839f776b3acSPaul Greenwalt 
8409efe35d0STony Nguyen 	kfree(caps);
841f776b3acSPaul Greenwalt 
842f776b3acSPaul Greenwalt done:
8435ee30564SPaul Greenwalt 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
8445ee30564SPaul Greenwalt 		    speed, fec_req, fec, an_advertised, an, fc);
8452e0ab37cSJesse Brandeburg 	ice_print_topo_conflict(vsi);
846cdedef59SAnirudh Venkataramanan }
847cdedef59SAnirudh Venkataramanan 
848cdedef59SAnirudh Venkataramanan /**
849f9867df6SAnirudh Venkataramanan  * ice_vsi_link_event - update the VSI's netdev
850f9867df6SAnirudh Venkataramanan  * @vsi: the VSI on which the link event occurred
851f9867df6SAnirudh Venkataramanan  * @link_up: whether or not the VSI needs to be set up or down
8520b28b702SAnirudh Venkataramanan  */
8530b28b702SAnirudh Venkataramanan static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
8540b28b702SAnirudh Venkataramanan {
855c2a23e00SBrett Creeley 	if (!vsi)
856c2a23e00SBrett Creeley 		return;
857c2a23e00SBrett Creeley 
858e97fb1aeSAnirudh Venkataramanan 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
8590b28b702SAnirudh Venkataramanan 		return;
8600b28b702SAnirudh Venkataramanan 
8610b28b702SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_PF) {
862c2a23e00SBrett Creeley 		if (link_up == netif_carrier_ok(vsi->netdev))
8630b28b702SAnirudh Venkataramanan 			return;
864c2a23e00SBrett Creeley 
8650b28b702SAnirudh Venkataramanan 		if (link_up) {
8660b28b702SAnirudh Venkataramanan 			netif_carrier_on(vsi->netdev);
8670b28b702SAnirudh Venkataramanan 			netif_tx_wake_all_queues(vsi->netdev);
8680b28b702SAnirudh Venkataramanan 		} else {
8690b28b702SAnirudh Venkataramanan 			netif_carrier_off(vsi->netdev);
8700b28b702SAnirudh Venkataramanan 			netif_tx_stop_all_queues(vsi->netdev);
8710b28b702SAnirudh Venkataramanan 		}
8720b28b702SAnirudh Venkataramanan 	}
8730b28b702SAnirudh Venkataramanan }
8740b28b702SAnirudh Venkataramanan 
8750b28b702SAnirudh Venkataramanan /**
8767d9c9b79SDave Ertman  * ice_set_dflt_mib - send a default config MIB to the FW
8777d9c9b79SDave Ertman  * @pf: private PF struct
8787d9c9b79SDave Ertman  *
8797d9c9b79SDave Ertman  * This function sends a default configuration MIB to the FW.
8807d9c9b79SDave Ertman  *
8817d9c9b79SDave Ertman  * If this function errors out at any point, the driver is still able to
8827d9c9b79SDave Ertman  * function.  The main impact is that LFC may not operate as expected.
8837d9c9b79SDave Ertman  * Therefore an error state in this function should be treated with a DBG
8847d9c9b79SDave Ertman  * message and continue on with driver rebuild/reenable.
8857d9c9b79SDave Ertman  */
8867d9c9b79SDave Ertman static void ice_set_dflt_mib(struct ice_pf *pf)
8877d9c9b79SDave Ertman {
8887d9c9b79SDave Ertman 	struct device *dev = ice_pf_to_dev(pf);
8897d9c9b79SDave Ertman 	u8 mib_type, *buf, *lldpmib = NULL;
8907d9c9b79SDave Ertman 	u16 len, typelen, offset = 0;
8917d9c9b79SDave Ertman 	struct ice_lldp_org_tlv *tlv;
89212aae8f1SBruce Allan 	struct ice_hw *hw = &pf->hw;
8937d9c9b79SDave Ertman 	u32 ouisubtype;
8947d9c9b79SDave Ertman 
8957d9c9b79SDave Ertman 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
8967d9c9b79SDave Ertman 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
8977d9c9b79SDave Ertman 	if (!lldpmib) {
8987d9c9b79SDave Ertman 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
8997d9c9b79SDave Ertman 			__func__);
9007d9c9b79SDave Ertman 		return;
9017d9c9b79SDave Ertman 	}
9027d9c9b79SDave Ertman 
9037d9c9b79SDave Ertman 	/* Add ETS CFG TLV */
9047d9c9b79SDave Ertman 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
9057d9c9b79SDave Ertman 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
9067d9c9b79SDave Ertman 		   ICE_IEEE_ETS_TLV_LEN);
9077d9c9b79SDave Ertman 	tlv->typelen = htons(typelen);
9087d9c9b79SDave Ertman 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
9097d9c9b79SDave Ertman 		      ICE_IEEE_SUBTYPE_ETS_CFG);
9107d9c9b79SDave Ertman 	tlv->ouisubtype = htonl(ouisubtype);
9117d9c9b79SDave Ertman 
9127d9c9b79SDave Ertman 	buf = tlv->tlvinfo;
9137d9c9b79SDave Ertman 	buf[0] = 0;
9147d9c9b79SDave Ertman 
9157d9c9b79SDave Ertman 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
9167d9c9b79SDave Ertman 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
9177d9c9b79SDave Ertman 	 * Octets 13 - 20 are TSA values - leave as zeros
9187d9c9b79SDave Ertman 	 */
9197d9c9b79SDave Ertman 	buf[5] = 0x64;
9207d9c9b79SDave Ertman 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
9217d9c9b79SDave Ertman 	offset += len + 2;
9227d9c9b79SDave Ertman 	tlv = (struct ice_lldp_org_tlv *)
9237d9c9b79SDave Ertman 		((char *)tlv + sizeof(tlv->typelen) + len);
9247d9c9b79SDave Ertman 
9257d9c9b79SDave Ertman 	/* Add ETS REC TLV */
9267d9c9b79SDave Ertman 	buf = tlv->tlvinfo;
9277d9c9b79SDave Ertman 	tlv->typelen = htons(typelen);
9287d9c9b79SDave Ertman 
9297d9c9b79SDave Ertman 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
9307d9c9b79SDave Ertman 		      ICE_IEEE_SUBTYPE_ETS_REC);
9317d9c9b79SDave Ertman 	tlv->ouisubtype = htonl(ouisubtype);
9327d9c9b79SDave Ertman 
9337d9c9b79SDave Ertman 	/* First octet of buf is reserved
9347d9c9b79SDave Ertman 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
9357d9c9b79SDave Ertman 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
9367d9c9b79SDave Ertman 	 * Octets 13 - 20 are TSA value - leave as zeros
9377d9c9b79SDave Ertman 	 */
9387d9c9b79SDave Ertman 	buf[5] = 0x64;
9397d9c9b79SDave Ertman 	offset += len + 2;
9407d9c9b79SDave Ertman 	tlv = (struct ice_lldp_org_tlv *)
9417d9c9b79SDave Ertman 		((char *)tlv + sizeof(tlv->typelen) + len);
9427d9c9b79SDave Ertman 
9437d9c9b79SDave Ertman 	/* Add PFC CFG TLV */
9447d9c9b79SDave Ertman 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
9457d9c9b79SDave Ertman 		   ICE_IEEE_PFC_TLV_LEN);
9467d9c9b79SDave Ertman 	tlv->typelen = htons(typelen);
9477d9c9b79SDave Ertman 
9487d9c9b79SDave Ertman 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
9497d9c9b79SDave Ertman 		      ICE_IEEE_SUBTYPE_PFC_CFG);
9507d9c9b79SDave Ertman 	tlv->ouisubtype = htonl(ouisubtype);
9517d9c9b79SDave Ertman 
9527d9c9b79SDave Ertman 	/* Octet 1 left as all zeros - PFC disabled */
9537d9c9b79SDave Ertman 	buf[0] = 0x08;
9547d9c9b79SDave Ertman 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
9557d9c9b79SDave Ertman 	offset += len + 2;
9567d9c9b79SDave Ertman 
9577d9c9b79SDave Ertman 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
9587d9c9b79SDave Ertman 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
9597d9c9b79SDave Ertman 
9607d9c9b79SDave Ertman 	kfree(lldpmib);
9617d9c9b79SDave Ertman }
9627d9c9b79SDave Ertman 
9637d9c9b79SDave Ertman /**
96499d40752SBrett Creeley  * ice_check_phy_fw_load - check if PHY FW load failed
96599d40752SBrett Creeley  * @pf: pointer to PF struct
96699d40752SBrett Creeley  * @link_cfg_err: bitmap from the link info structure
96799d40752SBrett Creeley  *
96899d40752SBrett Creeley  * check if external PHY FW load failed and print an error message if it did
96999d40752SBrett Creeley  */
97099d40752SBrett Creeley static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
97199d40752SBrett Creeley {
97299d40752SBrett Creeley 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
97399d40752SBrett Creeley 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
97499d40752SBrett Creeley 		return;
97599d40752SBrett Creeley 	}
97699d40752SBrett Creeley 
97799d40752SBrett Creeley 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
97899d40752SBrett Creeley 		return;
97999d40752SBrett Creeley 
98099d40752SBrett Creeley 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
98199d40752SBrett Creeley 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
98299d40752SBrett Creeley 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
98399d40752SBrett Creeley 	}
98499d40752SBrett Creeley }
98599d40752SBrett Creeley 
98699d40752SBrett Creeley /**
987c77849f5SAnirudh Venkataramanan  * ice_check_module_power
988c77849f5SAnirudh Venkataramanan  * @pf: pointer to PF struct
989c77849f5SAnirudh Venkataramanan  * @link_cfg_err: bitmap from the link info structure
990c77849f5SAnirudh Venkataramanan  *
991c77849f5SAnirudh Venkataramanan  * check module power level returned by a previous call to aq_get_link_info
992c77849f5SAnirudh Venkataramanan  * and print error messages if module power level is not supported
993c77849f5SAnirudh Venkataramanan  */
994c77849f5SAnirudh Venkataramanan static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
995c77849f5SAnirudh Venkataramanan {
996c77849f5SAnirudh Venkataramanan 	/* if module power level is supported, clear the flag */
997c77849f5SAnirudh Venkataramanan 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
998c77849f5SAnirudh Venkataramanan 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
999c77849f5SAnirudh Venkataramanan 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1000c77849f5SAnirudh Venkataramanan 		return;
1001c77849f5SAnirudh Venkataramanan 	}
1002c77849f5SAnirudh Venkataramanan 
1003c77849f5SAnirudh Venkataramanan 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1004c77849f5SAnirudh Venkataramanan 	 * above block didn't clear this bit, there's nothing to do
1005c77849f5SAnirudh Venkataramanan 	 */
1006c77849f5SAnirudh Venkataramanan 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1007c77849f5SAnirudh Venkataramanan 		return;
1008c77849f5SAnirudh Venkataramanan 
1009c77849f5SAnirudh Venkataramanan 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1010c77849f5SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1011c77849f5SAnirudh Venkataramanan 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1012c77849f5SAnirudh Venkataramanan 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1013c77849f5SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1014c77849f5SAnirudh Venkataramanan 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1015c77849f5SAnirudh Venkataramanan 	}
1016c77849f5SAnirudh Venkataramanan }
1017c77849f5SAnirudh Venkataramanan 
1018c77849f5SAnirudh Venkataramanan /**
101999d40752SBrett Creeley  * ice_check_link_cfg_err - check if link configuration failed
102099d40752SBrett Creeley  * @pf: pointer to the PF struct
102199d40752SBrett Creeley  * @link_cfg_err: bitmap from the link info structure
102299d40752SBrett Creeley  *
102399d40752SBrett Creeley  * print if any link configuration failure happens due to the value in the
102499d40752SBrett Creeley  * link_cfg_err parameter in the link info structure
102599d40752SBrett Creeley  */
102699d40752SBrett Creeley static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
102799d40752SBrett Creeley {
102899d40752SBrett Creeley 	ice_check_module_power(pf, link_cfg_err);
102999d40752SBrett Creeley 	ice_check_phy_fw_load(pf, link_cfg_err);
103099d40752SBrett Creeley }
103199d40752SBrett Creeley 
103299d40752SBrett Creeley /**
10330b28b702SAnirudh Venkataramanan  * ice_link_event - process the link event
10342f2da36eSAnirudh Venkataramanan  * @pf: PF that the link event is associated with
10350b28b702SAnirudh Venkataramanan  * @pi: port_info for the port that the link event is associated with
1036c2a23e00SBrett Creeley  * @link_up: true if the physical link is up and false if it is down
1037c2a23e00SBrett Creeley  * @link_speed: current link speed received from the link event
10380b28b702SAnirudh Venkataramanan  *
1039c2a23e00SBrett Creeley  * Returns 0 on success and negative on failure
10400b28b702SAnirudh Venkataramanan  */
10410b28b702SAnirudh Venkataramanan static int
1042c2a23e00SBrett Creeley ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1043c2a23e00SBrett Creeley 	       u16 link_speed)
10440b28b702SAnirudh Venkataramanan {
10454015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
10460b28b702SAnirudh Venkataramanan 	struct ice_phy_info *phy_info;
1047c2a23e00SBrett Creeley 	struct ice_vsi *vsi;
1048c2a23e00SBrett Creeley 	u16 old_link_speed;
1049c2a23e00SBrett Creeley 	bool old_link;
10505518ac2aSTony Nguyen 	int status;
10510b28b702SAnirudh Venkataramanan 
10520b28b702SAnirudh Venkataramanan 	phy_info = &pi->phy;
10530b28b702SAnirudh Venkataramanan 	phy_info->link_info_old = phy_info->link_info;
10540b28b702SAnirudh Venkataramanan 
1055c2a23e00SBrett Creeley 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
10560b28b702SAnirudh Venkataramanan 	old_link_speed = phy_info->link_info_old.link_speed;
10570b28b702SAnirudh Venkataramanan 
1058c2a23e00SBrett Creeley 	/* update the link info structures and re-enable link events,
1059c2a23e00SBrett Creeley 	 * don't bail on failure due to other book keeping needed
1060c2a23e00SBrett Creeley 	 */
1061d348d517SAnirudh Venkataramanan 	status = ice_update_link_info(pi);
1062d348d517SAnirudh Venkataramanan 	if (status)
10635f87ec48STony Nguyen 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
10645f87ec48STony Nguyen 			pi->lport, status,
1065d348d517SAnirudh Venkataramanan 			ice_aq_str(pi->hw->adminq.sq_last_status));
10660b28b702SAnirudh Venkataramanan 
106799d40752SBrett Creeley 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1068c77849f5SAnirudh Venkataramanan 
10690ce6c34aSDave Ertman 	/* Check if the link state is up after updating link info, and treat
10700ce6c34aSDave Ertman 	 * this event as an UP event since the link is actually UP now.
10710ce6c34aSDave Ertman 	 */
10720ce6c34aSDave Ertman 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
10730ce6c34aSDave Ertman 		link_up = true;
10740ce6c34aSDave Ertman 
1075208ff751SAnirudh Venkataramanan 	vsi = ice_get_main_vsi(pf);
10760b28b702SAnirudh Venkataramanan 	if (!vsi || !vsi->port_info)
1077c2a23e00SBrett Creeley 		return -EINVAL;
10780b28b702SAnirudh Venkataramanan 
10796d599946STony Nguyen 	/* turn off PHY if media was removed */
10806d599946STony Nguyen 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
10816d599946STony Nguyen 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
10826d599946STony Nguyen 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1083d348d517SAnirudh Venkataramanan 		ice_set_link(vsi, false);
10846d599946STony Nguyen 	}
10856d599946STony Nguyen 
10861a3571b5SPaul Greenwalt 	/* if the old link up/down and speed is the same as the new */
10871a3571b5SPaul Greenwalt 	if (link_up == old_link && link_speed == old_link_speed)
1088d348d517SAnirudh Venkataramanan 		return 0;
10891a3571b5SPaul Greenwalt 
10903a749623SJacob Keller 	if (!ice_is_e810(&pf->hw))
10913a749623SJacob Keller 		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
10923a749623SJacob Keller 
10937d9c9b79SDave Ertman 	if (ice_is_dcb_active(pf)) {
10947d9c9b79SDave Ertman 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1095242b5e06SDave Ertman 			ice_dcb_rebuild(pf);
10967d9c9b79SDave Ertman 	} else {
10977d9c9b79SDave Ertman 		if (link_up)
10987d9c9b79SDave Ertman 			ice_set_dflt_mib(pf);
10997d9c9b79SDave Ertman 	}
1100c2a23e00SBrett Creeley 	ice_vsi_link_event(vsi, link_up);
1101c2a23e00SBrett Creeley 	ice_print_link_msg(vsi, link_up);
11020b28b702SAnirudh Venkataramanan 
110353b8decbSAnirudh Venkataramanan 	ice_vc_notify_link_state(pf);
110453b8decbSAnirudh Venkataramanan 
1105d348d517SAnirudh Venkataramanan 	return 0;
11060b28b702SAnirudh Venkataramanan }
11070b28b702SAnirudh Venkataramanan 
11080b28b702SAnirudh Venkataramanan /**
11094f4be03bSAnirudh Venkataramanan  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
11104f4be03bSAnirudh Venkataramanan  * @pf: board private structure
11110b28b702SAnirudh Venkataramanan  */
11124f4be03bSAnirudh Venkataramanan static void ice_watchdog_subtask(struct ice_pf *pf)
11130b28b702SAnirudh Venkataramanan {
11144f4be03bSAnirudh Venkataramanan 	int i;
11150b28b702SAnirudh Venkataramanan 
11164f4be03bSAnirudh Venkataramanan 	/* if interface is down do nothing */
11177e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_DOWN, pf->state) ||
11187e408e07SAnirudh Venkataramanan 	    test_bit(ICE_CFG_BUSY, pf->state))
11194f4be03bSAnirudh Venkataramanan 		return;
11200b28b702SAnirudh Venkataramanan 
11214f4be03bSAnirudh Venkataramanan 	/* make sure we don't do these things too often */
11224f4be03bSAnirudh Venkataramanan 	if (time_before(jiffies,
11234f4be03bSAnirudh Venkataramanan 			pf->serv_tmr_prev + pf->serv_tmr_period))
11244f4be03bSAnirudh Venkataramanan 		return;
11250b28b702SAnirudh Venkataramanan 
11264f4be03bSAnirudh Venkataramanan 	pf->serv_tmr_prev = jiffies;
11274f4be03bSAnirudh Venkataramanan 
11284f4be03bSAnirudh Venkataramanan 	/* Update the stats for active netdevs so the network stack
11294f4be03bSAnirudh Venkataramanan 	 * can look at updated numbers whenever it cares to
11304f4be03bSAnirudh Venkataramanan 	 */
11314f4be03bSAnirudh Venkataramanan 	ice_update_pf_stats(pf);
113280ed404aSBrett Creeley 	ice_for_each_vsi(pf, i)
11334f4be03bSAnirudh Venkataramanan 		if (pf->vsi[i] && pf->vsi[i]->netdev)
11344f4be03bSAnirudh Venkataramanan 			ice_update_vsi_stats(pf->vsi[i]);
11350b28b702SAnirudh Venkataramanan }
11360b28b702SAnirudh Venkataramanan 
11370b28b702SAnirudh Venkataramanan /**
1138250c3b3eSBrett Creeley  * ice_init_link_events - enable/initialize link events
1139250c3b3eSBrett Creeley  * @pi: pointer to the port_info instance
1140250c3b3eSBrett Creeley  *
1141250c3b3eSBrett Creeley  * Returns -EIO on failure, 0 on success
1142250c3b3eSBrett Creeley  */
1143250c3b3eSBrett Creeley static int ice_init_link_events(struct ice_port_info *pi)
1144250c3b3eSBrett Creeley {
1145250c3b3eSBrett Creeley 	u16 mask;
1146250c3b3eSBrett Creeley 
1147250c3b3eSBrett Creeley 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
114899d40752SBrett Creeley 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
114999d40752SBrett Creeley 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1150250c3b3eSBrett Creeley 
1151250c3b3eSBrett Creeley 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
115219cce2c6SAnirudh Venkataramanan 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1153250c3b3eSBrett Creeley 			pi->lport);
1154250c3b3eSBrett Creeley 		return -EIO;
1155250c3b3eSBrett Creeley 	}
1156250c3b3eSBrett Creeley 
1157250c3b3eSBrett Creeley 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
115819cce2c6SAnirudh Venkataramanan 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1159250c3b3eSBrett Creeley 			pi->lport);
1160250c3b3eSBrett Creeley 		return -EIO;
1161250c3b3eSBrett Creeley 	}
1162250c3b3eSBrett Creeley 
1163250c3b3eSBrett Creeley 	return 0;
1164250c3b3eSBrett Creeley }
1165250c3b3eSBrett Creeley 
1166250c3b3eSBrett Creeley /**
1167250c3b3eSBrett Creeley  * ice_handle_link_event - handle link event via ARQ
11682f2da36eSAnirudh Venkataramanan  * @pf: PF that the link event is associated with
1169c2a23e00SBrett Creeley  * @event: event structure containing link status info
1170250c3b3eSBrett Creeley  */
1171c2a23e00SBrett Creeley static int
1172c2a23e00SBrett Creeley ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1173250c3b3eSBrett Creeley {
1174c2a23e00SBrett Creeley 	struct ice_aqc_get_link_status_data *link_data;
1175250c3b3eSBrett Creeley 	struct ice_port_info *port_info;
1176250c3b3eSBrett Creeley 	int status;
1177250c3b3eSBrett Creeley 
1178c2a23e00SBrett Creeley 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1179250c3b3eSBrett Creeley 	port_info = pf->hw.port_info;
1180250c3b3eSBrett Creeley 	if (!port_info)
1181250c3b3eSBrett Creeley 		return -EINVAL;
1182250c3b3eSBrett Creeley 
1183c2a23e00SBrett Creeley 	status = ice_link_event(pf, port_info,
1184c2a23e00SBrett Creeley 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1185c2a23e00SBrett Creeley 				le16_to_cpu(link_data->link_speed));
1186250c3b3eSBrett Creeley 	if (status)
118719cce2c6SAnirudh Venkataramanan 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
118819cce2c6SAnirudh Venkataramanan 			status);
1189250c3b3eSBrett Creeley 
1190250c3b3eSBrett Creeley 	return status;
1191250c3b3eSBrett Creeley }
1192250c3b3eSBrett Creeley 
/* State of an entry on the AdminQ wait list (pf->aq_wait_list) */
enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,	/* no matching event received yet */
	ICE_AQ_TASK_COMPLETE,		/* event received and copied out */
	ICE_AQ_TASK_CANCELED,		/* canceled via ice_aq_cancel_waiting_tasks */
};

/* One thread waiting for a specific AdminQ event; linked on
 * pf->aq_wait_list and completed by ice_aq_check_events().
 */
struct ice_aq_task {
	struct hlist_node entry;		/* linkage on pf->aq_wait_list */

	u16 opcode;				/* AdminQ opcode being waited for */
	struct ice_rq_event_info *event;	/* caller-provided event storage */
	enum ice_aq_task_state state;		/* see enum ice_aq_task_state */
};
1206d69ea414SJacob Keller 
/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	/* publish the task so ice_aq_check_events() can find and complete it;
	 * must happen before sleeping so no event can be missed
	 */
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	/* any non-WAITING state (COMPLETE or CANCELED) ends the wait */
	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		/* woken by timeout or signal, never saw the event */
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	/* unpublish the task before freeing so the event-check path cannot
	 * touch a stale entry
	 */
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
1278d69ea414SJacob Keller 
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		/* skip tasks already completed/canceled or waiting on a
		 * different opcode
		 */
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	/* wake waiters outside the lock; they re-check task->state */
	if (found)
		wake_up(&pf->aq_wait_queue);
}
1327d69ea414SJacob Keller 
1328d69ea414SJacob Keller /**
1329d69ea414SJacob Keller  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1330d69ea414SJacob Keller  * @pf: the PF private structure
1331d69ea414SJacob Keller  *
1332d69ea414SJacob Keller  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1333d69ea414SJacob Keller  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1334d69ea414SJacob Keller  */
1335d69ea414SJacob Keller static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1336d69ea414SJacob Keller {
1337d69ea414SJacob Keller 	struct ice_aq_task *task;
1338d69ea414SJacob Keller 
1339d69ea414SJacob Keller 	spin_lock_bh(&pf->aq_wait_lock);
1340d69ea414SJacob Keller 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1341d69ea414SJacob Keller 		task->state = ICE_AQ_TASK_CANCELED;
1342d69ea414SJacob Keller 	spin_unlock_bh(&pf->aq_wait_lock);
1343d69ea414SJacob Keller 
1344d69ea414SJacob Keller 	wake_up(&pf->aq_wait_queue);
1345d69ea414SJacob Keller }
1346d69ea414SJacob Keller 
/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 *
 * Drains up to ICE_DFLT_IRQ_WORK events from the given control queue's
 * receive ring, dispatching each to the appropriate handler by opcode.
 *
 * Returns non-zero when events are still pending after the work budget was
 * exhausted (so the caller knows to keep its EVENT_PENDING bit set), and 0
 * otherwise (including on early-exit error paths).
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	/* select the ring and a label for log messages */
	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		/* acknowledge the error bits by writing them back cleared */
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	/* same treatment for the send queue's error indications */
	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			/* ring is empty; nothing left to process */
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			/* only forward the VF message if it passes the
			 * malicious-VF screen
			 */
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	/* non-zero iff we stopped because the work budget ran out */
	return pending && (i == ICE_DFLT_IRQ_WORK);
}
1483940b61afSAnirudh Venkataramanan 
1484940b61afSAnirudh Venkataramanan /**
14853d6b640eSAnirudh Venkataramanan  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
14863d6b640eSAnirudh Venkataramanan  * @hw: pointer to hardware info
14873d6b640eSAnirudh Venkataramanan  * @cq: control queue information
14883d6b640eSAnirudh Venkataramanan  *
14893d6b640eSAnirudh Venkataramanan  * returns true if there are pending messages in a queue, false if there aren't
14903d6b640eSAnirudh Venkataramanan  */
14913d6b640eSAnirudh Venkataramanan static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
14923d6b640eSAnirudh Venkataramanan {
14933d6b640eSAnirudh Venkataramanan 	u16 ntu;
14943d6b640eSAnirudh Venkataramanan 
14953d6b640eSAnirudh Venkataramanan 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
14963d6b640eSAnirudh Venkataramanan 	return cq->rq.next_to_clean != ntu;
14973d6b640eSAnirudh Venkataramanan }
14983d6b640eSAnirudh Venkataramanan 
14993d6b640eSAnirudh Venkataramanan /**
1500940b61afSAnirudh Venkataramanan  * ice_clean_adminq_subtask - clean the AdminQ rings
1501940b61afSAnirudh Venkataramanan  * @pf: board private structure
1502940b61afSAnirudh Venkataramanan  */
1503940b61afSAnirudh Venkataramanan static void ice_clean_adminq_subtask(struct ice_pf *pf)
1504940b61afSAnirudh Venkataramanan {
1505940b61afSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
1506940b61afSAnirudh Venkataramanan 
15077e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1508940b61afSAnirudh Venkataramanan 		return;
1509940b61afSAnirudh Venkataramanan 
1510940b61afSAnirudh Venkataramanan 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1511940b61afSAnirudh Venkataramanan 		return;
1512940b61afSAnirudh Venkataramanan 
15137e408e07SAnirudh Venkataramanan 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1514940b61afSAnirudh Venkataramanan 
15153d6b640eSAnirudh Venkataramanan 	/* There might be a situation where new messages arrive to a control
15163d6b640eSAnirudh Venkataramanan 	 * queue between processing the last message and clearing the
15173d6b640eSAnirudh Venkataramanan 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
15183d6b640eSAnirudh Venkataramanan 	 * ice_ctrlq_pending) and process new messages if any.
15193d6b640eSAnirudh Venkataramanan 	 */
15203d6b640eSAnirudh Venkataramanan 	if (ice_ctrlq_pending(hw, &hw->adminq))
15213d6b640eSAnirudh Venkataramanan 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1522940b61afSAnirudh Venkataramanan 
1523940b61afSAnirudh Venkataramanan 	ice_flush(hw);
1524940b61afSAnirudh Venkataramanan }
1525940b61afSAnirudh Venkataramanan 
1526940b61afSAnirudh Venkataramanan /**
152775d2b253SAnirudh Venkataramanan  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
152875d2b253SAnirudh Venkataramanan  * @pf: board private structure
152975d2b253SAnirudh Venkataramanan  */
153075d2b253SAnirudh Venkataramanan static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
153175d2b253SAnirudh Venkataramanan {
153275d2b253SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
153375d2b253SAnirudh Venkataramanan 
15347e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
153575d2b253SAnirudh Venkataramanan 		return;
153675d2b253SAnirudh Venkataramanan 
153775d2b253SAnirudh Venkataramanan 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
153875d2b253SAnirudh Venkataramanan 		return;
153975d2b253SAnirudh Venkataramanan 
15407e408e07SAnirudh Venkataramanan 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
154175d2b253SAnirudh Venkataramanan 
154275d2b253SAnirudh Venkataramanan 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
154375d2b253SAnirudh Venkataramanan 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
154475d2b253SAnirudh Venkataramanan 
154575d2b253SAnirudh Venkataramanan 	ice_flush(hw);
154675d2b253SAnirudh Venkataramanan }
154775d2b253SAnirudh Venkataramanan 
154875d2b253SAnirudh Venkataramanan /**
15498f5ee3c4SJacob Keller  * ice_clean_sbq_subtask - clean the Sideband Queue rings
15508f5ee3c4SJacob Keller  * @pf: board private structure
15518f5ee3c4SJacob Keller  */
15528f5ee3c4SJacob Keller static void ice_clean_sbq_subtask(struct ice_pf *pf)
15538f5ee3c4SJacob Keller {
15548f5ee3c4SJacob Keller 	struct ice_hw *hw = &pf->hw;
15558f5ee3c4SJacob Keller 
15568f5ee3c4SJacob Keller 	/* Nothing to do here if sideband queue is not supported */
15578f5ee3c4SJacob Keller 	if (!ice_is_sbq_supported(hw)) {
15588f5ee3c4SJacob Keller 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
15598f5ee3c4SJacob Keller 		return;
15608f5ee3c4SJacob Keller 	}
15618f5ee3c4SJacob Keller 
15628f5ee3c4SJacob Keller 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
15638f5ee3c4SJacob Keller 		return;
15648f5ee3c4SJacob Keller 
15658f5ee3c4SJacob Keller 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
15668f5ee3c4SJacob Keller 		return;
15678f5ee3c4SJacob Keller 
15688f5ee3c4SJacob Keller 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
15698f5ee3c4SJacob Keller 
15708f5ee3c4SJacob Keller 	if (ice_ctrlq_pending(hw, &hw->sbq))
15718f5ee3c4SJacob Keller 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
15728f5ee3c4SJacob Keller 
15738f5ee3c4SJacob Keller 	ice_flush(hw);
15748f5ee3c4SJacob Keller }
15758f5ee3c4SJacob Keller 
15768f5ee3c4SJacob Keller /**
1577940b61afSAnirudh Venkataramanan  * ice_service_task_schedule - schedule the service task to wake up
1578940b61afSAnirudh Venkataramanan  * @pf: board private structure
1579940b61afSAnirudh Venkataramanan  *
1580940b61afSAnirudh Venkataramanan  * If not already scheduled, this puts the task into the work queue.
1581940b61afSAnirudh Venkataramanan  */
158228bf2672SBrett Creeley void ice_service_task_schedule(struct ice_pf *pf)
1583940b61afSAnirudh Venkataramanan {
15847e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
15857e408e07SAnirudh Venkataramanan 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
15867e408e07SAnirudh Venkataramanan 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1587940b61afSAnirudh Venkataramanan 		queue_work(ice_wq, &pf->serv_task);
1588940b61afSAnirudh Venkataramanan }
1589940b61afSAnirudh Venkataramanan 
/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	/* completing a task that was never marked scheduled is a logic bug */
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}
1602940b61afSAnirudh Venkataramanan 
1603940b61afSAnirudh Venkataramanan /**
16048d81fa55SAkeem G Abodunrin  * ice_service_task_stop - stop service task and cancel works
16058d81fa55SAkeem G Abodunrin  * @pf: board private structure
1606769c500dSAkeem G Abodunrin  *
16077e408e07SAnirudh Venkataramanan  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1608769c500dSAkeem G Abodunrin  * 1 otherwise.
16098d81fa55SAkeem G Abodunrin  */
1610769c500dSAkeem G Abodunrin static int ice_service_task_stop(struct ice_pf *pf)
16118d81fa55SAkeem G Abodunrin {
1612769c500dSAkeem G Abodunrin 	int ret;
1613769c500dSAkeem G Abodunrin 
16147e408e07SAnirudh Venkataramanan 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
16158d81fa55SAkeem G Abodunrin 
16168d81fa55SAkeem G Abodunrin 	if (pf->serv_tmr.function)
16178d81fa55SAkeem G Abodunrin 		del_timer_sync(&pf->serv_tmr);
16188d81fa55SAkeem G Abodunrin 	if (pf->serv_task.func)
16198d81fa55SAkeem G Abodunrin 		cancel_work_sync(&pf->serv_task);
16208d81fa55SAkeem G Abodunrin 
16217e408e07SAnirudh Venkataramanan 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1622769c500dSAkeem G Abodunrin 	return ret;
16238d81fa55SAkeem G Abodunrin }
16248d81fa55SAkeem G Abodunrin 
16258d81fa55SAkeem G Abodunrin /**
16265995b6d0SBrett Creeley  * ice_service_task_restart - restart service task and schedule works
16275995b6d0SBrett Creeley  * @pf: board private structure
16285995b6d0SBrett Creeley  *
16295995b6d0SBrett Creeley  * This function is needed for suspend and resume works (e.g WoL scenario)
16305995b6d0SBrett Creeley  */
16315995b6d0SBrett Creeley static void ice_service_task_restart(struct ice_pf *pf)
16325995b6d0SBrett Creeley {
16337e408e07SAnirudh Venkataramanan 	clear_bit(ICE_SERVICE_DIS, pf->state);
16345995b6d0SBrett Creeley 	ice_service_task_schedule(pf);
16355995b6d0SBrett Creeley }
16365995b6d0SBrett Creeley 
16375995b6d0SBrett Creeley /**
1638940b61afSAnirudh Venkataramanan  * ice_service_timer - timer callback to schedule service task
1639940b61afSAnirudh Venkataramanan  * @t: pointer to timer_list
1640940b61afSAnirudh Venkataramanan  */
1641940b61afSAnirudh Venkataramanan static void ice_service_timer(struct timer_list *t)
1642940b61afSAnirudh Venkataramanan {
1643940b61afSAnirudh Venkataramanan 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1644940b61afSAnirudh Venkataramanan 
1645940b61afSAnirudh Venkataramanan 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1646940b61afSAnirudh Venkataramanan 	ice_service_task_schedule(pf);
1647940b61afSAnirudh Venkataramanan }
1648940b61afSAnirudh Venkataramanan 
1649940b61afSAnirudh Venkataramanan /**
1650b3969fd7SSudheer Mogilappagari  * ice_handle_mdd_event - handle malicious driver detect event
1651b3969fd7SSudheer Mogilappagari  * @pf: pointer to the PF structure
1652b3969fd7SSudheer Mogilappagari  *
16539d5c5a52SPaul Greenwalt  * Called from service task. OICR interrupt handler indicates MDD event.
16549d5c5a52SPaul Greenwalt  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
16559d5c5a52SPaul Greenwalt  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
16569d5c5a52SPaul Greenwalt  * disable the queue, the PF can be configured to reset the VF using ethtool
16579d5c5a52SPaul Greenwalt  * private flag mdd-auto-reset-vf.
1658b3969fd7SSudheer Mogilappagari  */
1659b3969fd7SSudheer Mogilappagari static void ice_handle_mdd_event(struct ice_pf *pf)
1660b3969fd7SSudheer Mogilappagari {
16614015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
1662b3969fd7SSudheer Mogilappagari 	struct ice_hw *hw = &pf->hw;
1663c1e08830SJesse Brandeburg 	unsigned int i;
1664b3969fd7SSudheer Mogilappagari 	u32 reg;
1665b3969fd7SSudheer Mogilappagari 
16667e408e07SAnirudh Venkataramanan 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
16679d5c5a52SPaul Greenwalt 		/* Since the VF MDD event logging is rate limited, check if
16689d5c5a52SPaul Greenwalt 		 * there are pending MDD events.
16699d5c5a52SPaul Greenwalt 		 */
16709d5c5a52SPaul Greenwalt 		ice_print_vfs_mdd_events(pf);
1671b3969fd7SSudheer Mogilappagari 		return;
16729d5c5a52SPaul Greenwalt 	}
1673b3969fd7SSudheer Mogilappagari 
16749d5c5a52SPaul Greenwalt 	/* find what triggered an MDD event */
1675b3969fd7SSudheer Mogilappagari 	reg = rd32(hw, GL_MDET_TX_PQM);
1676b3969fd7SSudheer Mogilappagari 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1677b3969fd7SSudheer Mogilappagari 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1678b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_PQM_PF_NUM_S;
1679b3969fd7SSudheer Mogilappagari 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1680b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_PQM_VF_NUM_S;
1681b3969fd7SSudheer Mogilappagari 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1682b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_PQM_MAL_TYPE_S;
1683b3969fd7SSudheer Mogilappagari 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1684b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_PQM_QNUM_S);
1685b3969fd7SSudheer Mogilappagari 
1686b3969fd7SSudheer Mogilappagari 		if (netif_msg_tx_err(pf))
16874015d11eSBrett Creeley 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1688b3969fd7SSudheer Mogilappagari 				 event, queue, pf_num, vf_num);
1689b3969fd7SSudheer Mogilappagari 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1690b3969fd7SSudheer Mogilappagari 	}
1691b3969fd7SSudheer Mogilappagari 
1692b3969fd7SSudheer Mogilappagari 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1693b3969fd7SSudheer Mogilappagari 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1694b3969fd7SSudheer Mogilappagari 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1695b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_TCLAN_PF_NUM_S;
1696b3969fd7SSudheer Mogilappagari 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1697b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_TCLAN_VF_NUM_S;
1698b3969fd7SSudheer Mogilappagari 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1699b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1700b3969fd7SSudheer Mogilappagari 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1701b3969fd7SSudheer Mogilappagari 				GL_MDET_TX_TCLAN_QNUM_S);
1702b3969fd7SSudheer Mogilappagari 
17031d8bd992SBen Shelton 		if (netif_msg_tx_err(pf))
17044015d11eSBrett Creeley 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1705b3969fd7SSudheer Mogilappagari 				 event, queue, pf_num, vf_num);
1706b3969fd7SSudheer Mogilappagari 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1707b3969fd7SSudheer Mogilappagari 	}
1708b3969fd7SSudheer Mogilappagari 
1709b3969fd7SSudheer Mogilappagari 	reg = rd32(hw, GL_MDET_RX);
1710b3969fd7SSudheer Mogilappagari 	if (reg & GL_MDET_RX_VALID_M) {
1711b3969fd7SSudheer Mogilappagari 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1712b3969fd7SSudheer Mogilappagari 				GL_MDET_RX_PF_NUM_S;
1713b3969fd7SSudheer Mogilappagari 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1714b3969fd7SSudheer Mogilappagari 				GL_MDET_RX_VF_NUM_S;
1715b3969fd7SSudheer Mogilappagari 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1716b3969fd7SSudheer Mogilappagari 				GL_MDET_RX_MAL_TYPE_S;
1717b3969fd7SSudheer Mogilappagari 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1718b3969fd7SSudheer Mogilappagari 				GL_MDET_RX_QNUM_S);
1719b3969fd7SSudheer Mogilappagari 
1720b3969fd7SSudheer Mogilappagari 		if (netif_msg_rx_err(pf))
17214015d11eSBrett Creeley 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1722b3969fd7SSudheer Mogilappagari 				 event, queue, pf_num, vf_num);
1723b3969fd7SSudheer Mogilappagari 		wr32(hw, GL_MDET_RX, 0xffffffff);
1724b3969fd7SSudheer Mogilappagari 	}
1725b3969fd7SSudheer Mogilappagari 
17269d5c5a52SPaul Greenwalt 	/* check to see if this PF caused an MDD event */
1727b3969fd7SSudheer Mogilappagari 	reg = rd32(hw, PF_MDET_TX_PQM);
1728b3969fd7SSudheer Mogilappagari 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1729b3969fd7SSudheer Mogilappagari 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
17309d5c5a52SPaul Greenwalt 		if (netif_msg_tx_err(pf))
17319d5c5a52SPaul Greenwalt 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1732b3969fd7SSudheer Mogilappagari 	}
1733b3969fd7SSudheer Mogilappagari 
1734b3969fd7SSudheer Mogilappagari 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1735b3969fd7SSudheer Mogilappagari 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1736b3969fd7SSudheer Mogilappagari 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
17379d5c5a52SPaul Greenwalt 		if (netif_msg_tx_err(pf))
17389d5c5a52SPaul Greenwalt 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1739b3969fd7SSudheer Mogilappagari 	}
1740b3969fd7SSudheer Mogilappagari 
1741b3969fd7SSudheer Mogilappagari 	reg = rd32(hw, PF_MDET_RX);
1742b3969fd7SSudheer Mogilappagari 	if (reg & PF_MDET_RX_VALID_M) {
1743b3969fd7SSudheer Mogilappagari 		wr32(hw, PF_MDET_RX, 0xFFFF);
17449d5c5a52SPaul Greenwalt 		if (netif_msg_rx_err(pf))
17459d5c5a52SPaul Greenwalt 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1746b3969fd7SSudheer Mogilappagari 	}
1747b3969fd7SSudheer Mogilappagari 
17489d5c5a52SPaul Greenwalt 	/* Check to see if one of the VFs caused an MDD event, and then
17499d5c5a52SPaul Greenwalt 	 * increment counters and set print pending
17509d5c5a52SPaul Greenwalt 	 */
1751005881bcSBrett Creeley 	ice_for_each_vf(pf, i) {
17527c4bc1f5SAnirudh Venkataramanan 		struct ice_vf *vf = &pf->vf[i];
17537c4bc1f5SAnirudh Venkataramanan 
17547c4bc1f5SAnirudh Venkataramanan 		reg = rd32(hw, VP_MDET_TX_PQM(i));
17557c4bc1f5SAnirudh Venkataramanan 		if (reg & VP_MDET_TX_PQM_VALID_M) {
17567c4bc1f5SAnirudh Venkataramanan 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
17579d5c5a52SPaul Greenwalt 			vf->mdd_tx_events.count++;
17587e408e07SAnirudh Venkataramanan 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
17599d5c5a52SPaul Greenwalt 			if (netif_msg_tx_err(pf))
17609d5c5a52SPaul Greenwalt 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
17617c4bc1f5SAnirudh Venkataramanan 					 i);
17627c4bc1f5SAnirudh Venkataramanan 		}
17637c4bc1f5SAnirudh Venkataramanan 
17647c4bc1f5SAnirudh Venkataramanan 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
17657c4bc1f5SAnirudh Venkataramanan 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
17667c4bc1f5SAnirudh Venkataramanan 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
17679d5c5a52SPaul Greenwalt 			vf->mdd_tx_events.count++;
17687e408e07SAnirudh Venkataramanan 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
17699d5c5a52SPaul Greenwalt 			if (netif_msg_tx_err(pf))
17709d5c5a52SPaul Greenwalt 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
17717c4bc1f5SAnirudh Venkataramanan 					 i);
17727c4bc1f5SAnirudh Venkataramanan 		}
17737c4bc1f5SAnirudh Venkataramanan 
17747c4bc1f5SAnirudh Venkataramanan 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
17757c4bc1f5SAnirudh Venkataramanan 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
17767c4bc1f5SAnirudh Venkataramanan 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
17779d5c5a52SPaul Greenwalt 			vf->mdd_tx_events.count++;
17787e408e07SAnirudh Venkataramanan 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
17799d5c5a52SPaul Greenwalt 			if (netif_msg_tx_err(pf))
17809d5c5a52SPaul Greenwalt 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
17817c4bc1f5SAnirudh Venkataramanan 					 i);
17827c4bc1f5SAnirudh Venkataramanan 		}
17837c4bc1f5SAnirudh Venkataramanan 
17847c4bc1f5SAnirudh Venkataramanan 		reg = rd32(hw, VP_MDET_RX(i));
17857c4bc1f5SAnirudh Venkataramanan 		if (reg & VP_MDET_RX_VALID_M) {
17867c4bc1f5SAnirudh Venkataramanan 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
17879d5c5a52SPaul Greenwalt 			vf->mdd_rx_events.count++;
17887e408e07SAnirudh Venkataramanan 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
17899d5c5a52SPaul Greenwalt 			if (netif_msg_rx_err(pf))
17909d5c5a52SPaul Greenwalt 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
17917c4bc1f5SAnirudh Venkataramanan 					 i);
17929d5c5a52SPaul Greenwalt 
17939d5c5a52SPaul Greenwalt 			/* Since the queue is disabled on VF Rx MDD events, the
17949d5c5a52SPaul Greenwalt 			 * PF can be configured to reset the VF through ethtool
17959d5c5a52SPaul Greenwalt 			 * private flag mdd-auto-reset-vf.
17969d5c5a52SPaul Greenwalt 			 */
17977438a3b0SPaul Greenwalt 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
17987438a3b0SPaul Greenwalt 				/* VF MDD event counters will be cleared by
17997438a3b0SPaul Greenwalt 				 * reset, so print the event prior to reset.
18007438a3b0SPaul Greenwalt 				 */
18017438a3b0SPaul Greenwalt 				ice_print_vf_rx_mdd_event(vf);
18029d5c5a52SPaul Greenwalt 				ice_reset_vf(&pf->vf[i], false);
18039d5c5a52SPaul Greenwalt 			}
18047c4bc1f5SAnirudh Venkataramanan 		}
18057438a3b0SPaul Greenwalt 	}
18067c4bc1f5SAnirudh Venkataramanan 
18079d5c5a52SPaul Greenwalt 	ice_print_vfs_mdd_events(pf);
1808b3969fd7SSudheer Mogilappagari }
1809b3969fd7SSudheer Mogilappagari 
1810b3969fd7SSudheer Mogilappagari /**
18116d599946STony Nguyen  * ice_force_phys_link_state - Force the physical link state
18126d599946STony Nguyen  * @vsi: VSI to force the physical link state to up/down
18136d599946STony Nguyen  * @link_up: true/false indicates to set the physical link to up/down
18146d599946STony Nguyen  *
18156d599946STony Nguyen  * Force the physical link state by getting the current PHY capabilities from
18166d599946STony Nguyen  * hardware and setting the PHY config based on the determined capabilities. If
18176d599946STony Nguyen  * link changes a link event will be triggered because both the Enable Automatic
18186d599946STony Nguyen  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
18196d599946STony Nguyen  *
18206d599946STony Nguyen  * Returns 0 on success, negative on failure
18216d599946STony Nguyen  */
18226d599946STony Nguyen static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
18236d599946STony Nguyen {
18246d599946STony Nguyen 	struct ice_aqc_get_phy_caps_data *pcaps;
18256d599946STony Nguyen 	struct ice_aqc_set_phy_cfg_data *cfg;
18266d599946STony Nguyen 	struct ice_port_info *pi;
18276d599946STony Nguyen 	struct device *dev;
18286d599946STony Nguyen 	int retcode;
18296d599946STony Nguyen 
18306d599946STony Nguyen 	if (!vsi || !vsi->port_info || !vsi->back)
18316d599946STony Nguyen 		return -EINVAL;
18326d599946STony Nguyen 	if (vsi->type != ICE_VSI_PF)
18336d599946STony Nguyen 		return 0;
18346d599946STony Nguyen 
18359a946843SAnirudh Venkataramanan 	dev = ice_pf_to_dev(vsi->back);
18366d599946STony Nguyen 
18376d599946STony Nguyen 	pi = vsi->port_info;
18386d599946STony Nguyen 
18399efe35d0STony Nguyen 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
18406d599946STony Nguyen 	if (!pcaps)
18416d599946STony Nguyen 		return -ENOMEM;
18426d599946STony Nguyen 
1843d6730a87SAnirudh Venkataramanan 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
18446d599946STony Nguyen 				      NULL);
18456d599946STony Nguyen 	if (retcode) {
184619cce2c6SAnirudh Venkataramanan 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
18476d599946STony Nguyen 			vsi->vsi_num, retcode);
18486d599946STony Nguyen 		retcode = -EIO;
18496d599946STony Nguyen 		goto out;
18506d599946STony Nguyen 	}
18516d599946STony Nguyen 
18526d599946STony Nguyen 	/* No change in link */
18536d599946STony Nguyen 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
18546d599946STony Nguyen 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
18556d599946STony Nguyen 		goto out;
18566d599946STony Nguyen 
18571a3571b5SPaul Greenwalt 	/* Use the current user PHY configuration. The current user PHY
18581a3571b5SPaul Greenwalt 	 * configuration is initialized during probe from PHY capabilities
18591a3571b5SPaul Greenwalt 	 * software mode, and updated on set PHY configuration.
18601a3571b5SPaul Greenwalt 	 */
18611a3571b5SPaul Greenwalt 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
18626d599946STony Nguyen 	if (!cfg) {
18636d599946STony Nguyen 		retcode = -ENOMEM;
18646d599946STony Nguyen 		goto out;
18656d599946STony Nguyen 	}
18666d599946STony Nguyen 
18671a3571b5SPaul Greenwalt 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
18686d599946STony Nguyen 	if (link_up)
18696d599946STony Nguyen 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
18706d599946STony Nguyen 	else
18716d599946STony Nguyen 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
18726d599946STony Nguyen 
18731a3571b5SPaul Greenwalt 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
18746d599946STony Nguyen 	if (retcode) {
18756d599946STony Nguyen 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
18766d599946STony Nguyen 			vsi->vsi_num, retcode);
18776d599946STony Nguyen 		retcode = -EIO;
18786d599946STony Nguyen 	}
18796d599946STony Nguyen 
18809efe35d0STony Nguyen 	kfree(cfg);
18816d599946STony Nguyen out:
18829efe35d0STony Nguyen 	kfree(pcaps);
18836d599946STony Nguyen 	return retcode;
18846d599946STony Nguyen }
18856d599946STony Nguyen 
18866d599946STony Nguyen /**
18871a3571b5SPaul Greenwalt  * ice_init_nvm_phy_type - Initialize the NVM PHY type
18881a3571b5SPaul Greenwalt  * @pi: port info structure
18891a3571b5SPaul Greenwalt  *
1890ea78ce4dSPaul Greenwalt  * Initialize nvm_phy_type_[low|high] for link lenient mode support
18911a3571b5SPaul Greenwalt  */
18921a3571b5SPaul Greenwalt static int ice_init_nvm_phy_type(struct ice_port_info *pi)
18931a3571b5SPaul Greenwalt {
18941a3571b5SPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *pcaps;
18951a3571b5SPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
18962ccc1c1cSTony Nguyen 	int err;
18971a3571b5SPaul Greenwalt 
18981a3571b5SPaul Greenwalt 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
18991a3571b5SPaul Greenwalt 	if (!pcaps)
19001a3571b5SPaul Greenwalt 		return -ENOMEM;
19011a3571b5SPaul Greenwalt 
19022ccc1c1cSTony Nguyen 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
19032ccc1c1cSTony Nguyen 				  pcaps, NULL);
19041a3571b5SPaul Greenwalt 
19052ccc1c1cSTony Nguyen 	if (err) {
19061a3571b5SPaul Greenwalt 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
19071a3571b5SPaul Greenwalt 		goto out;
19081a3571b5SPaul Greenwalt 	}
19091a3571b5SPaul Greenwalt 
19101a3571b5SPaul Greenwalt 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
19111a3571b5SPaul Greenwalt 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
19121a3571b5SPaul Greenwalt 
19131a3571b5SPaul Greenwalt out:
19141a3571b5SPaul Greenwalt 	kfree(pcaps);
19151a3571b5SPaul Greenwalt 	return err;
19161a3571b5SPaul Greenwalt }
19171a3571b5SPaul Greenwalt 
19181a3571b5SPaul Greenwalt /**
1919ea78ce4dSPaul Greenwalt  * ice_init_link_dflt_override - Initialize link default override
1920ea78ce4dSPaul Greenwalt  * @pi: port info structure
1921b4e813ddSBruce Allan  *
1922b4e813ddSBruce Allan  * Initialize link default override and PHY total port shutdown during probe
1923ea78ce4dSPaul Greenwalt  */
1924ea78ce4dSPaul Greenwalt static void ice_init_link_dflt_override(struct ice_port_info *pi)
1925ea78ce4dSPaul Greenwalt {
1926ea78ce4dSPaul Greenwalt 	struct ice_link_default_override_tlv *ldo;
1927ea78ce4dSPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
1928ea78ce4dSPaul Greenwalt 
1929ea78ce4dSPaul Greenwalt 	ldo = &pf->link_dflt_override;
1930b4e813ddSBruce Allan 	if (ice_get_link_default_override(ldo, pi))
1931b4e813ddSBruce Allan 		return;
1932b4e813ddSBruce Allan 
1933b4e813ddSBruce Allan 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1934b4e813ddSBruce Allan 		return;
1935b4e813ddSBruce Allan 
1936b4e813ddSBruce Allan 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1937b4e813ddSBruce Allan 	 * ethtool private flag) for ports with Port Disable bit set.
1938b4e813ddSBruce Allan 	 */
1939b4e813ddSBruce Allan 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1940b4e813ddSBruce Allan 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1941ea78ce4dSPaul Greenwalt }
1942ea78ce4dSPaul Greenwalt 
1943ea78ce4dSPaul Greenwalt /**
1944ea78ce4dSPaul Greenwalt  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1945ea78ce4dSPaul Greenwalt  * @pi: port info structure
1946ea78ce4dSPaul Greenwalt  *
19470a02944fSAnirudh Venkataramanan  * If default override is enabled, initialize the user PHY cfg speed and FEC
1948ea78ce4dSPaul Greenwalt  * settings using the default override mask from the NVM.
1949ea78ce4dSPaul Greenwalt  *
1950ea78ce4dSPaul Greenwalt  * The PHY should only be configured with the default override settings the
19517e408e07SAnirudh Venkataramanan  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1952ea78ce4dSPaul Greenwalt  * is used to indicate that the user PHY cfg default override is initialized
1953ea78ce4dSPaul Greenwalt  * and the PHY has not been configured with the default override settings. The
1954ea78ce4dSPaul Greenwalt  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1955ea78ce4dSPaul Greenwalt  * configured.
19560a02944fSAnirudh Venkataramanan  *
19570a02944fSAnirudh Venkataramanan  * This function should be called only if the FW doesn't support default
19580a02944fSAnirudh Venkataramanan  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1959ea78ce4dSPaul Greenwalt  */
1960ea78ce4dSPaul Greenwalt static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1961ea78ce4dSPaul Greenwalt {
1962ea78ce4dSPaul Greenwalt 	struct ice_link_default_override_tlv *ldo;
1963ea78ce4dSPaul Greenwalt 	struct ice_aqc_set_phy_cfg_data *cfg;
1964ea78ce4dSPaul Greenwalt 	struct ice_phy_info *phy = &pi->phy;
1965ea78ce4dSPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
1966ea78ce4dSPaul Greenwalt 
1967ea78ce4dSPaul Greenwalt 	ldo = &pf->link_dflt_override;
1968ea78ce4dSPaul Greenwalt 
1969ea78ce4dSPaul Greenwalt 	/* If link default override is enabled, use to mask NVM PHY capabilities
1970ea78ce4dSPaul Greenwalt 	 * for speed and FEC default configuration.
1971ea78ce4dSPaul Greenwalt 	 */
1972ea78ce4dSPaul Greenwalt 	cfg = &phy->curr_user_phy_cfg;
1973ea78ce4dSPaul Greenwalt 
1974ea78ce4dSPaul Greenwalt 	if (ldo->phy_type_low || ldo->phy_type_high) {
1975ea78ce4dSPaul Greenwalt 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1976ea78ce4dSPaul Greenwalt 				    cpu_to_le64(ldo->phy_type_low);
1977ea78ce4dSPaul Greenwalt 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1978ea78ce4dSPaul Greenwalt 				     cpu_to_le64(ldo->phy_type_high);
1979ea78ce4dSPaul Greenwalt 	}
1980ea78ce4dSPaul Greenwalt 	cfg->link_fec_opt = ldo->fec_options;
1981ea78ce4dSPaul Greenwalt 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1982ea78ce4dSPaul Greenwalt 
19837e408e07SAnirudh Venkataramanan 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1984ea78ce4dSPaul Greenwalt }
1985ea78ce4dSPaul Greenwalt 
1986ea78ce4dSPaul Greenwalt /**
19871a3571b5SPaul Greenwalt  * ice_init_phy_user_cfg - Initialize the PHY user configuration
19881a3571b5SPaul Greenwalt  * @pi: port info structure
19891a3571b5SPaul Greenwalt  *
19901a3571b5SPaul Greenwalt  * Initialize the current user PHY configuration, speed, FEC, and FC requested
19911a3571b5SPaul Greenwalt  * mode to default. The PHY defaults are from get PHY capabilities topology
19921a3571b5SPaul Greenwalt  * with media so call when media is first available. An error is returned if
19931a3571b5SPaul Greenwalt  * called when media is not available. The PHY initialization completed state is
19941a3571b5SPaul Greenwalt  * set here.
19951a3571b5SPaul Greenwalt  *
19961a3571b5SPaul Greenwalt  * These configurations are used when setting PHY
19971a3571b5SPaul Greenwalt  * configuration. The user PHY configuration is updated on set PHY
19981a3571b5SPaul Greenwalt  * configuration. Returns 0 on success, negative on failure
19991a3571b5SPaul Greenwalt  */
20001a3571b5SPaul Greenwalt static int ice_init_phy_user_cfg(struct ice_port_info *pi)
20011a3571b5SPaul Greenwalt {
20021a3571b5SPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *pcaps;
20031a3571b5SPaul Greenwalt 	struct ice_phy_info *phy = &pi->phy;
20041a3571b5SPaul Greenwalt 	struct ice_pf *pf = pi->hw->back;
20052ccc1c1cSTony Nguyen 	int err;
20061a3571b5SPaul Greenwalt 
20071a3571b5SPaul Greenwalt 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
20081a3571b5SPaul Greenwalt 		return -EIO;
20091a3571b5SPaul Greenwalt 
20101a3571b5SPaul Greenwalt 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
20111a3571b5SPaul Greenwalt 	if (!pcaps)
20121a3571b5SPaul Greenwalt 		return -ENOMEM;
20131a3571b5SPaul Greenwalt 
20140a02944fSAnirudh Venkataramanan 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
20152ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
20160a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
20170a02944fSAnirudh Venkataramanan 	else
20182ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
20190a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
20202ccc1c1cSTony Nguyen 	if (err) {
20211a3571b5SPaul Greenwalt 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
20221a3571b5SPaul Greenwalt 		goto err_out;
20231a3571b5SPaul Greenwalt 	}
20241a3571b5SPaul Greenwalt 
2025ea78ce4dSPaul Greenwalt 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2026ea78ce4dSPaul Greenwalt 
2027ea78ce4dSPaul Greenwalt 	/* check if lenient mode is supported and enabled */
2028dc6aaa13SAnirudh Venkataramanan 	if (ice_fw_supports_link_override(pi->hw) &&
2029ea78ce4dSPaul Greenwalt 	    !(pcaps->module_compliance_enforcement &
2030ea78ce4dSPaul Greenwalt 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2031ea78ce4dSPaul Greenwalt 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2032ea78ce4dSPaul Greenwalt 
20330a02944fSAnirudh Venkataramanan 		/* if the FW supports default PHY configuration mode, then the driver
20340a02944fSAnirudh Venkataramanan 		 * does not have to apply link override settings. If not,
20350a02944fSAnirudh Venkataramanan 		 * initialize user PHY configuration with link override values
2036ea78ce4dSPaul Greenwalt 		 */
20370a02944fSAnirudh Venkataramanan 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
20380a02944fSAnirudh Venkataramanan 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2039ea78ce4dSPaul Greenwalt 			ice_init_phy_cfg_dflt_override(pi);
2040ea78ce4dSPaul Greenwalt 			goto out;
2041ea78ce4dSPaul Greenwalt 		}
2042ea78ce4dSPaul Greenwalt 	}
2043ea78ce4dSPaul Greenwalt 
20440a02944fSAnirudh Venkataramanan 	/* if link default override is not enabled, set user flow control and
20450a02944fSAnirudh Venkataramanan 	 * FEC settings based on what get_phy_caps returned
2046ea78ce4dSPaul Greenwalt 	 */
20471a3571b5SPaul Greenwalt 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
20481a3571b5SPaul Greenwalt 						      pcaps->link_fec_options);
20491a3571b5SPaul Greenwalt 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
20501a3571b5SPaul Greenwalt 
2051ea78ce4dSPaul Greenwalt out:
20521a3571b5SPaul Greenwalt 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
20537e408e07SAnirudh Venkataramanan 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
20541a3571b5SPaul Greenwalt err_out:
20551a3571b5SPaul Greenwalt 	kfree(pcaps);
20561a3571b5SPaul Greenwalt 	return err;
20571a3571b5SPaul Greenwalt }
20581a3571b5SPaul Greenwalt 
20591a3571b5SPaul Greenwalt /**
20601a3571b5SPaul Greenwalt  * ice_configure_phy - configure PHY
20611a3571b5SPaul Greenwalt  * @vsi: VSI of PHY
20621a3571b5SPaul Greenwalt  *
20631a3571b5SPaul Greenwalt  * Set the PHY configuration. If the current PHY configuration is the same as
20641a3571b5SPaul Greenwalt  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
20651a3571b5SPaul Greenwalt  * configure the based get PHY capabilities for topology with media.
20661a3571b5SPaul Greenwalt  */
20671a3571b5SPaul Greenwalt static int ice_configure_phy(struct ice_vsi *vsi)
20681a3571b5SPaul Greenwalt {
20691a3571b5SPaul Greenwalt 	struct device *dev = ice_pf_to_dev(vsi->back);
2070efc1eddbSAnirudh Venkataramanan 	struct ice_port_info *pi = vsi->port_info;
20711a3571b5SPaul Greenwalt 	struct ice_aqc_get_phy_caps_data *pcaps;
20721a3571b5SPaul Greenwalt 	struct ice_aqc_set_phy_cfg_data *cfg;
2073efc1eddbSAnirudh Venkataramanan 	struct ice_phy_info *phy = &pi->phy;
2074efc1eddbSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
20752ccc1c1cSTony Nguyen 	int err;
20761a3571b5SPaul Greenwalt 
20771a3571b5SPaul Greenwalt 	/* Ensure we have media as we cannot configure a medialess port */
2078efc1eddbSAnirudh Venkataramanan 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
20791a3571b5SPaul Greenwalt 		return -EPERM;
20801a3571b5SPaul Greenwalt 
20811a3571b5SPaul Greenwalt 	ice_print_topo_conflict(vsi);
20821a3571b5SPaul Greenwalt 
20834fc5fbeeSAnirudh Venkataramanan 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
20844fc5fbeeSAnirudh Venkataramanan 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
20851a3571b5SPaul Greenwalt 		return -EPERM;
20861a3571b5SPaul Greenwalt 
2087efc1eddbSAnirudh Venkataramanan 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
20881a3571b5SPaul Greenwalt 		return ice_force_phys_link_state(vsi, true);
20891a3571b5SPaul Greenwalt 
20901a3571b5SPaul Greenwalt 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
20911a3571b5SPaul Greenwalt 	if (!pcaps)
20921a3571b5SPaul Greenwalt 		return -ENOMEM;
20931a3571b5SPaul Greenwalt 
20941a3571b5SPaul Greenwalt 	/* Get current PHY config */
20952ccc1c1cSTony Nguyen 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
20961a3571b5SPaul Greenwalt 				  NULL);
20972ccc1c1cSTony Nguyen 	if (err) {
20985f87ec48STony Nguyen 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
20992ccc1c1cSTony Nguyen 			vsi->vsi_num, err);
21001a3571b5SPaul Greenwalt 		goto done;
21011a3571b5SPaul Greenwalt 	}
21021a3571b5SPaul Greenwalt 
21031a3571b5SPaul Greenwalt 	/* If PHY enable link is configured and configuration has not changed,
21041a3571b5SPaul Greenwalt 	 * there's nothing to do
21051a3571b5SPaul Greenwalt 	 */
21061a3571b5SPaul Greenwalt 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2107efc1eddbSAnirudh Venkataramanan 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
21081a3571b5SPaul Greenwalt 		goto done;
21091a3571b5SPaul Greenwalt 
21101a3571b5SPaul Greenwalt 	/* Use PHY topology as baseline for configuration */
21111a3571b5SPaul Greenwalt 	memset(pcaps, 0, sizeof(*pcaps));
21120a02944fSAnirudh Venkataramanan 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
21132ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
21140a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
21150a02944fSAnirudh Venkataramanan 	else
21162ccc1c1cSTony Nguyen 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
21170a02944fSAnirudh Venkataramanan 					  pcaps, NULL);
21182ccc1c1cSTony Nguyen 	if (err) {
21195f87ec48STony Nguyen 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
21202ccc1c1cSTony Nguyen 			vsi->vsi_num, err);
21211a3571b5SPaul Greenwalt 		goto done;
21221a3571b5SPaul Greenwalt 	}
21231a3571b5SPaul Greenwalt 
21241a3571b5SPaul Greenwalt 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
21251a3571b5SPaul Greenwalt 	if (!cfg) {
21261a3571b5SPaul Greenwalt 		err = -ENOMEM;
21271a3571b5SPaul Greenwalt 		goto done;
21281a3571b5SPaul Greenwalt 	}
21291a3571b5SPaul Greenwalt 
2130ea78ce4dSPaul Greenwalt 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
21311a3571b5SPaul Greenwalt 
21321a3571b5SPaul Greenwalt 	/* Speed - If default override pending, use curr_user_phy_cfg set in
21331a3571b5SPaul Greenwalt 	 * ice_init_phy_user_cfg_ldo.
21341a3571b5SPaul Greenwalt 	 */
21357e408e07SAnirudh Venkataramanan 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2136ea78ce4dSPaul Greenwalt 			       vsi->back->state)) {
2137efc1eddbSAnirudh Venkataramanan 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2138efc1eddbSAnirudh Venkataramanan 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2139ea78ce4dSPaul Greenwalt 	} else {
2140ea78ce4dSPaul Greenwalt 		u64 phy_low = 0, phy_high = 0;
2141ea78ce4dSPaul Greenwalt 
2142ea78ce4dSPaul Greenwalt 		ice_update_phy_type(&phy_low, &phy_high,
2143ea78ce4dSPaul Greenwalt 				    pi->phy.curr_user_speed_req);
21441a3571b5SPaul Greenwalt 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2145ea78ce4dSPaul Greenwalt 		cfg->phy_type_high = pcaps->phy_type_high &
2146ea78ce4dSPaul Greenwalt 				     cpu_to_le64(phy_high);
2147ea78ce4dSPaul Greenwalt 	}
21481a3571b5SPaul Greenwalt 
21491a3571b5SPaul Greenwalt 	/* Can't provide what was requested; use PHY capabilities */
21501a3571b5SPaul Greenwalt 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
21511a3571b5SPaul Greenwalt 		cfg->phy_type_low = pcaps->phy_type_low;
21521a3571b5SPaul Greenwalt 		cfg->phy_type_high = pcaps->phy_type_high;
21531a3571b5SPaul Greenwalt 	}
21541a3571b5SPaul Greenwalt 
21551a3571b5SPaul Greenwalt 	/* FEC */
2156efc1eddbSAnirudh Venkataramanan 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
21571a3571b5SPaul Greenwalt 
21581a3571b5SPaul Greenwalt 	/* Can't provide what was requested; use PHY capabilities */
21591a3571b5SPaul Greenwalt 	if (cfg->link_fec_opt !=
21601a3571b5SPaul Greenwalt 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
21611a3571b5SPaul Greenwalt 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
21621a3571b5SPaul Greenwalt 		cfg->link_fec_opt = pcaps->link_fec_options;
21631a3571b5SPaul Greenwalt 	}
21641a3571b5SPaul Greenwalt 
21651a3571b5SPaul Greenwalt 	/* Flow Control - always supported; no need to check against
21661a3571b5SPaul Greenwalt 	 * capabilities
21671a3571b5SPaul Greenwalt 	 */
2168efc1eddbSAnirudh Venkataramanan 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
21691a3571b5SPaul Greenwalt 
21701a3571b5SPaul Greenwalt 	/* Enable link and link update */
21711a3571b5SPaul Greenwalt 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
21721a3571b5SPaul Greenwalt 
21732ccc1c1cSTony Nguyen 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2174c1484691STony Nguyen 	if (err)
21755f87ec48STony Nguyen 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
21762ccc1c1cSTony Nguyen 			vsi->vsi_num, err);
21771a3571b5SPaul Greenwalt 
21781a3571b5SPaul Greenwalt 	kfree(cfg);
21791a3571b5SPaul Greenwalt done:
21801a3571b5SPaul Greenwalt 	kfree(pcaps);
21811a3571b5SPaul Greenwalt 	return err;
21821a3571b5SPaul Greenwalt }
21831a3571b5SPaul Greenwalt 
21841a3571b5SPaul Greenwalt /**
21851a3571b5SPaul Greenwalt  * ice_check_media_subtask - Check for media
21866d599946STony Nguyen  * @pf: pointer to PF struct
21871a3571b5SPaul Greenwalt  *
21881a3571b5SPaul Greenwalt  * If media is available, then initialize PHY user configuration if it is not
21891a3571b5SPaul Greenwalt  * been, and configure the PHY if the interface is up.
21906d599946STony Nguyen  */
21916d599946STony Nguyen static void ice_check_media_subtask(struct ice_pf *pf)
21926d599946STony Nguyen {
21936d599946STony Nguyen 	struct ice_port_info *pi;
21946d599946STony Nguyen 	struct ice_vsi *vsi;
21956d599946STony Nguyen 	int err;
21966d599946STony Nguyen 
21971a3571b5SPaul Greenwalt 	/* No need to check for media if it's already present */
21981a3571b5SPaul Greenwalt 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
21996d599946STony Nguyen 		return;
22006d599946STony Nguyen 
22011a3571b5SPaul Greenwalt 	vsi = ice_get_main_vsi(pf);
22021a3571b5SPaul Greenwalt 	if (!vsi)
22036d599946STony Nguyen 		return;
22046d599946STony Nguyen 
22056d599946STony Nguyen 	/* Refresh link info and check if media is present */
22066d599946STony Nguyen 	pi = vsi->port_info;
22076d599946STony Nguyen 	err = ice_update_link_info(pi);
22086d599946STony Nguyen 	if (err)
22096d599946STony Nguyen 		return;
22106d599946STony Nguyen 
221199d40752SBrett Creeley 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2212c77849f5SAnirudh Venkataramanan 
22136d599946STony Nguyen 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
22147e408e07SAnirudh Venkataramanan 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
22151a3571b5SPaul Greenwalt 			ice_init_phy_user_cfg(pi);
22161a3571b5SPaul Greenwalt 
22171a3571b5SPaul Greenwalt 		/* PHY settings are reset on media insertion, reconfigure
22181a3571b5SPaul Greenwalt 		 * PHY to preserve settings.
22191a3571b5SPaul Greenwalt 		 */
2220e97fb1aeSAnirudh Venkataramanan 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
22211a3571b5SPaul Greenwalt 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
22226d599946STony Nguyen 			return;
22231a3571b5SPaul Greenwalt 
22241a3571b5SPaul Greenwalt 		err = ice_configure_phy(vsi);
22251a3571b5SPaul Greenwalt 		if (!err)
22266d599946STony Nguyen 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
22276d599946STony Nguyen 
22286d599946STony Nguyen 		/* A Link Status Event will be generated; the event handler
22296d599946STony Nguyen 		 * will complete bringing the interface up
22306d599946STony Nguyen 		 */
22316d599946STony Nguyen 	}
22326d599946STony Nguyen }
22336d599946STony Nguyen 
/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 *
 * Main periodic housekeeping entry point for the driver, run from the
 * service workqueue. The subtask call order below is deliberate: resets
 * are processed first, and feature subtasks are skipped entirely while a
 * reset/recovery cycle is pending or the device is in safe mode.
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state) ||
	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_clean_adminq_subtask(pf);
	ice_check_media_subtask(pf);
	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);

	/* in safe mode only the minimal subtasks above are serviced;
	 * SR-IOV, mailbox, sideband, aRFS and Flow Director work is skipped
	 */
	if (ice_is_safe_mode(pf)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_process_vflr_event(pf);
	ice_clean_mailboxq_subtask(pf);
	ice_clean_sbq_subtask(pf);
	ice_sync_arfs_fltrs(pf);
	ice_flush_fdir_ctx(pf);

	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}
2290940b61afSAnirudh Venkataramanan 
2291837f08fdSAnirudh Venkataramanan /**
2292f31e4b6fSAnirudh Venkataramanan  * ice_set_ctrlq_len - helper function to set controlq length
2293f9867df6SAnirudh Venkataramanan  * @hw: pointer to the HW instance
2294f31e4b6fSAnirudh Venkataramanan  */
2295f31e4b6fSAnirudh Venkataramanan static void ice_set_ctrlq_len(struct ice_hw *hw)
2296f31e4b6fSAnirudh Venkataramanan {
2297f31e4b6fSAnirudh Venkataramanan 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2298f31e4b6fSAnirudh Venkataramanan 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2299f31e4b6fSAnirudh Venkataramanan 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2300f31e4b6fSAnirudh Venkataramanan 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2301c8a1071dSLukasz Czapnik 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
230211836214SBrett Creeley 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
230375d2b253SAnirudh Venkataramanan 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
230475d2b253SAnirudh Venkataramanan 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
23058f5ee3c4SJacob Keller 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
23068f5ee3c4SJacob Keller 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
23078f5ee3c4SJacob Keller 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
23088f5ee3c4SJacob Keller 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2309f31e4b6fSAnirudh Venkataramanan }
2310f31e4b6fSAnirudh Venkataramanan 
2311f31e4b6fSAnirudh Venkataramanan /**
231287324e74SHenry Tieman  * ice_schedule_reset - schedule a reset
231387324e74SHenry Tieman  * @pf: board private structure
231487324e74SHenry Tieman  * @reset: reset being requested
231587324e74SHenry Tieman  */
231687324e74SHenry Tieman int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
231787324e74SHenry Tieman {
231887324e74SHenry Tieman 	struct device *dev = ice_pf_to_dev(pf);
231987324e74SHenry Tieman 
232087324e74SHenry Tieman 	/* bail out if earlier reset has failed */
23217e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
232287324e74SHenry Tieman 		dev_dbg(dev, "earlier reset has failed\n");
232387324e74SHenry Tieman 		return -EIO;
232487324e74SHenry Tieman 	}
232587324e74SHenry Tieman 	/* bail if reset/recovery already in progress */
232687324e74SHenry Tieman 	if (ice_is_reset_in_progress(pf->state)) {
232787324e74SHenry Tieman 		dev_dbg(dev, "Reset already in progress\n");
232887324e74SHenry Tieman 		return -EBUSY;
232987324e74SHenry Tieman 	}
233087324e74SHenry Tieman 
2331f9f5301eSDave Ertman 	ice_unplug_aux_dev(pf);
2332f9f5301eSDave Ertman 
233387324e74SHenry Tieman 	switch (reset) {
233487324e74SHenry Tieman 	case ICE_RESET_PFR:
23357e408e07SAnirudh Venkataramanan 		set_bit(ICE_PFR_REQ, pf->state);
233687324e74SHenry Tieman 		break;
233787324e74SHenry Tieman 	case ICE_RESET_CORER:
23387e408e07SAnirudh Venkataramanan 		set_bit(ICE_CORER_REQ, pf->state);
233987324e74SHenry Tieman 		break;
234087324e74SHenry Tieman 	case ICE_RESET_GLOBR:
23417e408e07SAnirudh Venkataramanan 		set_bit(ICE_GLOBR_REQ, pf->state);
234287324e74SHenry Tieman 		break;
234387324e74SHenry Tieman 	default:
234487324e74SHenry Tieman 		return -EINVAL;
234587324e74SHenry Tieman 	}
234687324e74SHenry Tieman 
234787324e74SHenry Tieman 	ice_service_task_schedule(pf);
234887324e74SHenry Tieman 	return 0;
234987324e74SHenry Tieman }
235087324e74SHenry Tieman 
235187324e74SHenry Tieman /**
2352cdedef59SAnirudh Venkataramanan  * ice_irq_affinity_notify - Callback for affinity changes
2353cdedef59SAnirudh Venkataramanan  * @notify: context as to what irq was changed
2354cdedef59SAnirudh Venkataramanan  * @mask: the new affinity mask
2355cdedef59SAnirudh Venkataramanan  *
2356cdedef59SAnirudh Venkataramanan  * This is a callback function used by the irq_set_affinity_notifier function
2357cdedef59SAnirudh Venkataramanan  * so that we may register to receive changes to the irq affinity masks.
2358cdedef59SAnirudh Venkataramanan  */
2359c8b7abddSBruce Allan static void
2360c8b7abddSBruce Allan ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2361cdedef59SAnirudh Venkataramanan 			const cpumask_t *mask)
2362cdedef59SAnirudh Venkataramanan {
2363cdedef59SAnirudh Venkataramanan 	struct ice_q_vector *q_vector =
2364cdedef59SAnirudh Venkataramanan 		container_of(notify, struct ice_q_vector, affinity_notify);
2365cdedef59SAnirudh Venkataramanan 
2366cdedef59SAnirudh Venkataramanan 	cpumask_copy(&q_vector->affinity_mask, mask);
2367cdedef59SAnirudh Venkataramanan }
2368cdedef59SAnirudh Venkataramanan 
/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 *
 * The body is intentionally empty; no driver-side teardown is performed
 * when the notifier reference is dropped.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2378cdedef59SAnirudh Venkataramanan 
2379cdedef59SAnirudh Venkataramanan /**
2380cdedef59SAnirudh Venkataramanan  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2381cdedef59SAnirudh Venkataramanan  * @vsi: the VSI being configured
2382cdedef59SAnirudh Venkataramanan  */
2383cdedef59SAnirudh Venkataramanan static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2384cdedef59SAnirudh Venkataramanan {
2385ba880734SBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
2386cdedef59SAnirudh Venkataramanan 	int i;
2387cdedef59SAnirudh Venkataramanan 
23880c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, i)
2389cdedef59SAnirudh Venkataramanan 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2390cdedef59SAnirudh Venkataramanan 
2391cdedef59SAnirudh Venkataramanan 	ice_flush(hw);
2392cdedef59SAnirudh Venkataramanan 	return 0;
2393cdedef59SAnirudh Venkataramanan }
2394cdedef59SAnirudh Venkataramanan 
2395cdedef59SAnirudh Venkataramanan /**
2396cdedef59SAnirudh Venkataramanan  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2397cdedef59SAnirudh Venkataramanan  * @vsi: the VSI being configured
2398cdedef59SAnirudh Venkataramanan  * @basename: name for the vector
2399cdedef59SAnirudh Venkataramanan  */
2400cdedef59SAnirudh Venkataramanan static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2401cdedef59SAnirudh Venkataramanan {
2402cdedef59SAnirudh Venkataramanan 	int q_vectors = vsi->num_q_vectors;
2403cdedef59SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
2404cbe66bfeSBrett Creeley 	int base = vsi->base_vector;
24054015d11eSBrett Creeley 	struct device *dev;
2406cdedef59SAnirudh Venkataramanan 	int rx_int_idx = 0;
2407cdedef59SAnirudh Venkataramanan 	int tx_int_idx = 0;
2408cdedef59SAnirudh Venkataramanan 	int vector, err;
2409cdedef59SAnirudh Venkataramanan 	int irq_num;
2410cdedef59SAnirudh Venkataramanan 
24114015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
2412cdedef59SAnirudh Venkataramanan 	for (vector = 0; vector < q_vectors; vector++) {
2413cdedef59SAnirudh Venkataramanan 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2414cdedef59SAnirudh Venkataramanan 
2415cdedef59SAnirudh Venkataramanan 		irq_num = pf->msix_entries[base + vector].vector;
2416cdedef59SAnirudh Venkataramanan 
2417e72bba21SMaciej Fijalkowski 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2418cdedef59SAnirudh Venkataramanan 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2419cdedef59SAnirudh Venkataramanan 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2420cdedef59SAnirudh Venkataramanan 			tx_int_idx++;
2421e72bba21SMaciej Fijalkowski 		} else if (q_vector->rx.rx_ring) {
2422cdedef59SAnirudh Venkataramanan 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2423cdedef59SAnirudh Venkataramanan 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2424e72bba21SMaciej Fijalkowski 		} else if (q_vector->tx.tx_ring) {
2425cdedef59SAnirudh Venkataramanan 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2426cdedef59SAnirudh Venkataramanan 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2427cdedef59SAnirudh Venkataramanan 		} else {
2428cdedef59SAnirudh Venkataramanan 			/* skip this unused q_vector */
2429cdedef59SAnirudh Venkataramanan 			continue;
2430cdedef59SAnirudh Venkataramanan 		}
2431da62c5ffSQi Zhang 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2432da62c5ffSQi Zhang 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2433da62c5ffSQi Zhang 					       IRQF_SHARED, q_vector->name,
2434da62c5ffSQi Zhang 					       q_vector);
2435da62c5ffSQi Zhang 		else
2436da62c5ffSQi Zhang 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2437da62c5ffSQi Zhang 					       0, q_vector->name, q_vector);
2438cdedef59SAnirudh Venkataramanan 		if (err) {
243919cce2c6SAnirudh Venkataramanan 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
244019cce2c6SAnirudh Venkataramanan 				   err);
2441cdedef59SAnirudh Venkataramanan 			goto free_q_irqs;
2442cdedef59SAnirudh Venkataramanan 		}
2443cdedef59SAnirudh Venkataramanan 
2444cdedef59SAnirudh Venkataramanan 		/* register for affinity change notifications */
244528bf2672SBrett Creeley 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
244628bf2672SBrett Creeley 			struct irq_affinity_notify *affinity_notify;
244728bf2672SBrett Creeley 
244828bf2672SBrett Creeley 			affinity_notify = &q_vector->affinity_notify;
244928bf2672SBrett Creeley 			affinity_notify->notify = ice_irq_affinity_notify;
245028bf2672SBrett Creeley 			affinity_notify->release = ice_irq_affinity_release;
245128bf2672SBrett Creeley 			irq_set_affinity_notifier(irq_num, affinity_notify);
245228bf2672SBrett Creeley 		}
2453cdedef59SAnirudh Venkataramanan 
2454cdedef59SAnirudh Venkataramanan 		/* assign the mask for this irq */
2455cdedef59SAnirudh Venkataramanan 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2456cdedef59SAnirudh Venkataramanan 	}
2457cdedef59SAnirudh Venkataramanan 
2458cdedef59SAnirudh Venkataramanan 	vsi->irqs_ready = true;
2459cdedef59SAnirudh Venkataramanan 	return 0;
2460cdedef59SAnirudh Venkataramanan 
2461cdedef59SAnirudh Venkataramanan free_q_irqs:
2462cdedef59SAnirudh Venkataramanan 	while (vector) {
2463cdedef59SAnirudh Venkataramanan 		vector--;
246428bf2672SBrett Creeley 		irq_num = pf->msix_entries[base + vector].vector;
246528bf2672SBrett Creeley 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2466cdedef59SAnirudh Venkataramanan 			irq_set_affinity_notifier(irq_num, NULL);
2467cdedef59SAnirudh Venkataramanan 		irq_set_affinity_hint(irq_num, NULL);
24684015d11eSBrett Creeley 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2469cdedef59SAnirudh Venkataramanan 	}
2470cdedef59SAnirudh Venkataramanan 	return err;
2471cdedef59SAnirudh Venkataramanan }
2472cdedef59SAnirudh Venkataramanan 
/**
 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
 * @vsi: VSI to setup Tx rings used by XDP
 *
 * Allocate one Tx ring per XDP queue, initialize its fields, publish it in
 * vsi->xdp_rings, allocate its descriptor memory, and finally map each Rx
 * ring to an XDP Tx ring.
 *
 * Return 0 on success and negative value on error
 */
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_tx_desc *tx_desc;
	int i, j;

	ice_for_each_xdp_txq(vsi, i) {
		/* XDP Tx queues are placed after the regular Tx queues */
		u16 xdp_q_idx = vsi->alloc_txq + i;
		struct ice_tx_ring *xdp_ring;

		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);

		if (!xdp_ring)
			goto free_xdp_rings;

		xdp_ring->q_index = xdp_q_idx;
		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
		xdp_ring->vsi = vsi;
		xdp_ring->netdev = NULL;
		xdp_ring->next_dd = ICE_TX_THRESH - 1;
		xdp_ring->next_rs = ICE_TX_THRESH - 1;
		xdp_ring->dev = dev;
		xdp_ring->count = vsi->num_tx_desc;
		/* publish the ring pointer before descriptor setup; readers
		 * access vsi->xdp_rings[i] locklessly
		 */
		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
		if (ice_setup_tx_ring(xdp_ring))
			goto free_xdp_rings;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
		spin_lock_init(&xdp_ring->tx_lock);
		/* pre-mark every descriptor with the DESC_DONE dtype --
		 * presumably so the clean path treats untouched descriptors
		 * as already completed (verify against the Tx clean logic)
		 */
		for (j = 0; j < xdp_ring->count; j++) {
			tx_desc = ICE_TX_DESC(xdp_ring, j);
			tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
		}
	}

	/* with fewer XDP rings than Rx rings (locking key enabled), share
	 * XDP rings across Rx rings round-robin; otherwise map 1:1
	 */
	ice_for_each_rxq(vsi, i) {
		if (static_key_enabled(&ice_xdp_locking_key))
			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
		else
			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
	}

	return 0;

free_xdp_rings:
	/* free descriptor memory for rings set up so far; the ring structs
	 * themselves are released by the caller's clear_xdp_rings path
	 */
	for (; i >= 0; i--)
		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
			ice_free_tx_ring(vsi->xdp_rings[i]);
	return -ENOMEM;
}
2529efc2214bSMaciej Fijalkowski 
2530efc2214bSMaciej Fijalkowski /**
2531efc2214bSMaciej Fijalkowski  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2532efc2214bSMaciej Fijalkowski  * @vsi: VSI to set the bpf prog on
2533efc2214bSMaciej Fijalkowski  * @prog: the bpf prog pointer
2534efc2214bSMaciej Fijalkowski  */
2535efc2214bSMaciej Fijalkowski static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2536efc2214bSMaciej Fijalkowski {
2537efc2214bSMaciej Fijalkowski 	struct bpf_prog *old_prog;
2538efc2214bSMaciej Fijalkowski 	int i;
2539efc2214bSMaciej Fijalkowski 
2540efc2214bSMaciej Fijalkowski 	old_prog = xchg(&vsi->xdp_prog, prog);
2541efc2214bSMaciej Fijalkowski 	if (old_prog)
2542efc2214bSMaciej Fijalkowski 		bpf_prog_put(old_prog);
2543efc2214bSMaciej Fijalkowski 
2544efc2214bSMaciej Fijalkowski 	ice_for_each_rxq(vsi, i)
2545efc2214bSMaciej Fijalkowski 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2546efc2214bSMaciej Fijalkowski }
2547efc2214bSMaciej Fijalkowski 
/**
 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
 * @vsi: VSI to bring up Tx rings used by XDP
 * @prog: bpf program that will be assigned to VSI
 *
 * Reserve queue IDs for the XDP Tx rings, allocate and set the rings up,
 * distribute them across the VSI's queue vectors, update the Tx scheduler,
 * and finally attach @prog.
 *
 * Return 0 on success and negative value on error
 */
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	int xdp_rings_rem = vsi->num_xdp_txq;
	struct ice_pf *pf = vsi->back;
	/* queue-set request: carve num_xdp_txq queues out of the PF-wide
	 * Tx queue bitmap, placed after the VSI's regular Tx queues
	 */
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->num_xdp_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = vsi->alloc_txq,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct device *dev;
	int i, v_idx;
	int status;

	dev = ice_pf_to_dev(pf);
	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
	if (!vsi->xdp_rings)
		return -ENOMEM;

	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
	if (__ice_vsi_get_qs(&xdp_qs_cfg))
		goto err_map_xdp;

	if (static_key_enabled(&ice_xdp_locking_key))
		netdev_warn(vsi->netdev,
			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");

	if (ice_xdp_alloc_setup_rings(vsi))
		goto clear_xdp_rings;

	/* follow the logic from ice_vsi_map_rings_to_vectors */
	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		int xdp_rings_per_v, q_id, q_base;

		/* spread the remaining XDP rings evenly over the remaining
		 * vectors, then push each ring onto the vector's Tx list
		 */
		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
					       vsi->num_q_vectors - v_idx);
		q_base = vsi->num_xdp_txq - xdp_rings_rem;

		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];

			xdp_ring->q_vector = q_vector;
			xdp_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = xdp_ring;
		}
		xdp_rings_rem -= xdp_rings_per_v;
	}

	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (ice_is_reset_in_progress(pf->state))
		return 0;

	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
			status);
		goto clear_xdp_rings;
	}

	/* assign the prog only when it's not already present on VSI;
	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
	 * VSI rebuild that happens under ethtool -L can expose us to
	 * the bpf_prog refcount issues as we would be swapping same
	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
	 * this is not harmful as dev_xdp_install bumps the refcount
	 * before calling the op exposed by the driver;
	 */
	if (!ice_is_xdp_ena_vsi(vsi))
		ice_vsi_assign_bpf_prog(vsi, prog);

	return 0;
clear_xdp_rings:
	/* free the ring structures; descriptor memory (if any) was already
	 * released by ice_xdp_alloc_setup_rings' own error path
	 */
	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

err_map_xdp:
	/* return the reserved queue IDs to the PF-wide pool */
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	devm_kfree(dev, vsi->xdp_rings);
	return -ENOMEM;
}
2662efc2214bSMaciej Fijalkowski 
/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings; in case of rebuild being triggered not from reset bits
	 * in pf->state won't be set, so additionally check first q_vector
	 * against NULL
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_tx_ring *ring;

		/* XDP rings were pushed onto the head of the vector's Tx
		 * list; walk until the first non-XDP ring (or list end)
		 */
		ice_for_each_tx_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.tx_ring = ring;
	}

free_qmap:
	/* return the XDP queue IDs to the PF-wide pool */
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	/* free descriptor memory and the ring structures themselves */
	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			if (vsi->xdp_rings[i]->desc)
				ice_free_tx_ring(vsi->xdp_rings[i]);
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	if (static_key_enabled(&ice_xdp_locking_key))
		static_branch_dec(&ice_xdp_locking_key);

	/* in reset/rebuild the scheduler and prog state are handled by the
	 * rebuild flow; nothing more to do here
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	/* change number of XDP Tx queues to 0 */
	vsi->num_xdp_txq = 0;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}
2735efc2214bSMaciej Fijalkowski 
2736efc2214bSMaciej Fijalkowski /**
2737c7a21904SMichal Swiatkowski  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2738c7a21904SMichal Swiatkowski  * @vsi: VSI to schedule napi on
2739c7a21904SMichal Swiatkowski  */
2740c7a21904SMichal Swiatkowski static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2741c7a21904SMichal Swiatkowski {
2742c7a21904SMichal Swiatkowski 	int i;
2743c7a21904SMichal Swiatkowski 
2744c7a21904SMichal Swiatkowski 	ice_for_each_rxq(vsi, i) {
2745e72bba21SMaciej Fijalkowski 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2746c7a21904SMichal Swiatkowski 
2747c7a21904SMichal Swiatkowski 		if (rx_ring->xsk_pool)
2748c7a21904SMichal Swiatkowski 			napi_schedule(&rx_ring->q_vector->napi);
2749c7a21904SMichal Swiatkowski 	}
2750c7a21904SMichal Swiatkowski }
2751c7a21904SMichal Swiatkowski 
2752c7a21904SMichal Swiatkowski /**
275322bf877eSMaciej Fijalkowski  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
275422bf877eSMaciej Fijalkowski  * @vsi: VSI to determine the count of XDP Tx qs
275522bf877eSMaciej Fijalkowski  *
275622bf877eSMaciej Fijalkowski  * returns 0 if Tx qs count is higher than at least half of CPU count,
275722bf877eSMaciej Fijalkowski  * -ENOMEM otherwise
275822bf877eSMaciej Fijalkowski  */
275922bf877eSMaciej Fijalkowski int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
276022bf877eSMaciej Fijalkowski {
276122bf877eSMaciej Fijalkowski 	u16 avail = ice_get_avail_txq_count(vsi->back);
276222bf877eSMaciej Fijalkowski 	u16 cpus = num_possible_cpus();
276322bf877eSMaciej Fijalkowski 
276422bf877eSMaciej Fijalkowski 	if (avail < cpus / 2)
276522bf877eSMaciej Fijalkowski 		return -ENOMEM;
276622bf877eSMaciej Fijalkowski 
276722bf877eSMaciej Fijalkowski 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
276822bf877eSMaciej Fijalkowski 
276922bf877eSMaciej Fijalkowski 	if (vsi->num_xdp_txq < cpus)
277022bf877eSMaciej Fijalkowski 		static_branch_inc(&ice_xdp_locking_key);
277122bf877eSMaciej Fijalkowski 
277222bf877eSMaciej Fijalkowski 	return 0;
277322bf877eSMaciej Fijalkowski }
277422bf877eSMaciej Fijalkowski 
/**
 * ice_xdp_setup_prog - Add or remove XDP eBPF program
 * @vsi: VSI to setup XDP for
 * @prog: XDP program
 * @extack: netlink extended ack
 *
 * Return: 0 on success, -EOPNOTSUPP when the MTU does not fit the Rx
 * buffer, or -ENOMEM when bringing the VSI down/up or (de)allocating
 * XDP rings fails (individual error codes are collapsed to -ENOMEM in
 * the final return statement).
 */
static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack)
{
	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
	bool if_running = netif_running(vsi->netdev);
	int ret = 0, xdp_ring_err = 0;

	/* frames larger than the Rx buffer cannot be handled by XDP */
	if (frame_size > vsi->rx_buf_len) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
		return -EOPNOTSUPP;
	}

	/* need to stop netdev while setting up the program for Rx rings;
	 * test_and_set_bit() skips ice_down() if the VSI is already down
	 */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		ret = ice_down(vsi);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
			return ret;
		}
	}

	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
		/* first program attach: reserve Tx queues, then build rings */
		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
		if (xdp_ring_err) {
			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
		} else {
			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
			if (xdp_ring_err)
				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
		}
	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
		/* program detach: tear the XDP rings back down */
		xdp_ring_err = ice_destroy_xdp_rings(vsi);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
	} else {
		/* safe to call even when prog == vsi->xdp_prog as
		 * dev_xdp_install in net/core/dev.c incremented prog's
		 * refcount so corresponding bpf_prog_put won't cause
		 * underflow
		 */
		ice_vsi_assign_bpf_prog(vsi, prog);
	}

	/* restore the interface if we took it down above */
	if (if_running)
		ret = ice_up(vsi);

	/* poke NAPI on zero-copy Rx rings so a freshly attached program
	 * starts processing without waiting for traffic
	 */
	if (!ret && prog)
		ice_vsi_rx_napi_schedule(vsi);

	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}
2833efc2214bSMaciej Fijalkowski 
/**
 * ice_xdp_safe_mode - XDP handler for safe mode
 * @dev: netdevice
 * @xdp: XDP command
 *
 * Installed as .ndo_bpf when the driver runs without a DDP package
 * (safe mode); rejects every XDP command with -EOPNOTSUPP.
 */
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
			     struct netdev_bpf *xdp)
{
	NL_SET_ERR_MSG_MOD(xdp->extack,
			   "Please provide working DDP firmware package in order to use XDP\n"
			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
	return -EOPNOTSUPP;
}
2847ebc5399eSMaciej Fijalkowski 
2848ebc5399eSMaciej Fijalkowski /**
2849efc2214bSMaciej Fijalkowski  * ice_xdp - implements XDP handler
2850efc2214bSMaciej Fijalkowski  * @dev: netdevice
2851efc2214bSMaciej Fijalkowski  * @xdp: XDP command
2852efc2214bSMaciej Fijalkowski  */
2853efc2214bSMaciej Fijalkowski static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2854efc2214bSMaciej Fijalkowski {
2855efc2214bSMaciej Fijalkowski 	struct ice_netdev_priv *np = netdev_priv(dev);
2856efc2214bSMaciej Fijalkowski 	struct ice_vsi *vsi = np->vsi;
2857efc2214bSMaciej Fijalkowski 
2858efc2214bSMaciej Fijalkowski 	if (vsi->type != ICE_VSI_PF) {
2859af23635aSJesse Brandeburg 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2860efc2214bSMaciej Fijalkowski 		return -EINVAL;
2861efc2214bSMaciej Fijalkowski 	}
2862efc2214bSMaciej Fijalkowski 
2863efc2214bSMaciej Fijalkowski 	switch (xdp->command) {
2864efc2214bSMaciej Fijalkowski 	case XDP_SETUP_PROG:
2865efc2214bSMaciej Fijalkowski 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
28661742b3d5SMagnus Karlsson 	case XDP_SETUP_XSK_POOL:
28671742b3d5SMagnus Karlsson 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
28682d4238f5SKrzysztof Kazimierczak 					  xdp->xsk.queue_id);
2869efc2214bSMaciej Fijalkowski 	default:
2870efc2214bSMaciej Fijalkowski 		return -EINVAL;
2871efc2214bSMaciej Fijalkowski 	}
2872efc2214bSMaciej Fijalkowski }
2873efc2214bSMaciej Fijalkowski 
/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Programs PFINT_OICR_ENA with the "other interrupt" causes handled by
 * ice_misc_intr() and writes GLINT_DYN_CTL for the OICR vector.
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionally is
	 * still supported.
	 */
	val = rd32(hw, GL_MDCK_TX_TDPU);
	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
	wr32(hw, GL_MDCK_TX_TDPU, val);

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	/* enable the causes the misc interrupt handler reacts to:
	 * ECC/malicious-driver/reset/PCI/VF-reset/HMC and PE errors
	 */
	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_PUSH_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
2910940b61afSAnirudh Venkataramanan 
/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * Handles the "other interrupt cause" (OICR) vector: admin/mailbox/
 * sideband queue events, malicious-driver detection, VF resets, global
 * reset warnings, PTP timestamps/events and critical aux-driver errors.
 *
 * Return: always IRQ_HANDLED (ret is unconditionally set before return).
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	/* control queue events are always assumed pending; the service
	 * task drains the admin, mailbox and sideband queues
	 */
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	/* each handled cause below is cleared from ena_mask so any
	 * leftover bits can be reported as unexpected at the end
	 */
	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, PFINT_OICR_ENA);

			reg &= ~PFINT_OICR_VFLR_M;
			wr32(hw, PFINT_OICR_ENA, reg);
		} else {
			ena_mask &= ~PFINT_OICR_VFLR_M;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
		}
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(dev, "Invalid reset type %d\n", reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 */
		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(ICE_GLOBR_RECV, pf->state);
			else
				set_bit(ICE_EMPR_RECV, pf->state);

			/* There are couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	/* Tx timestamp ready - hand off to PTP */
	if (oicr & PFINT_OICR_TSYN_TX_M) {
		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
		ice_ptp_process_ts(pf);
	}

	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));

		/* Save EVENTs from GTSYN register */
		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
						     GLTSYN_STAT_EVENT1_M |
						     GLTSYN_STAT_EVENT2_M);
		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
	}

#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
	if (oicr & ICE_AUX_CRIT_ERR) {
		struct iidc_event *event;

		ena_mask &= ~ICE_AUX_CRIT_ERR;
		/* NOTE(review): GFP_KERNEL allocation in a hard-IRQ handler
		 * can sleep - verify this handler runs in a context where
		 * that is permitted, or this should be GFP_ATOMIC.
		 */
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (event) {
			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
			/* report the entire OICR value to AUX driver */
			event->reg = oicr;
			ice_send_event_to_aux(pf, event);
			kfree(event);
		}
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
	}
	ret = IRQ_HANDLED;

	ice_service_task_schedule(pf);
	/* re-enable the interrupt for further events */
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return ret;
}
3051940b61afSAnirudh Venkataramanan 
/**
 * ice_dis_ctrlq_interrupts - disable control queue interrupts
 * @hw: pointer to HW structure
 *
 * Clears the cause-enable bit in each control queue's interrupt control
 * register and flushes the writes.
 */
static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
{
	/* disable Admin queue Interrupt causes */
	wr32(hw, PFINT_FW_CTL,
	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);

	/* disable Mailbox queue Interrupt causes */
	wr32(hw, PFINT_MBX_CTL,
	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);

	/* disable Sideband queue Interrupt causes */
	wr32(hw, PFINT_SB_CTL,
	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);

	/* disable Control queue Interrupt causes */
	wr32(hw, PFINT_OICR_CTL,
	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);

	ice_flush(hw);
}
30750e04e8e1SBrett Creeley 
/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 *
 * Disables the control queue causes and OICR enables, frees the misc
 * IRQ (if MSI-X entries exist) and returns the vector to the tracker.
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* quiesce causes before tearing the IRQ down */
	ice_dis_ctrlq_interrupts(hw);

	/* disable OICR interrupt */
	wr32(hw, PFINT_OICR_ENA, 0);
	ice_flush(hw);

	if (pf->msix_entries) {
		/* wait out any in-flight handler before freeing */
		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
		devm_free_irq(ice_pf_to_dev(pf),
			      pf->msix_entries[pf->oicr_idx].vector, pf);
	}

	/* give the vector back to the software IRQ tracker */
	pf->num_avail_sw_msix += 1;
	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}
3099940b61afSAnirudh Venkataramanan 
/**
 * ice_ena_ctrlq_interrupts - enable control queue interrupts
 * @hw: pointer to HW structure
 * @reg_idx: HW vector index to associate the control queue interrupts with
 *
 * Points the OICR, admin, mailbox and sideband queue interrupt causes
 * at the MSI-X vector @reg_idx and enables each cause.
 */
static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
{
	u32 val;

	/* enable Other Interrupt Cause (OICR) on this vector */
	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
	       PFINT_OICR_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_OICR_CTL, val);

	/* enable Admin queue Interrupt causes */
	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
	       PFINT_FW_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_FW_CTL, val);

	/* enable Mailbox queue Interrupt causes */
	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
	       PFINT_MBX_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_MBX_CTL, val);

	/* This enables Sideband queue Interrupt causes */
	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
	       PFINT_SB_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_SB_CTL, val);

	ice_flush(hw);
}
31300e04e8e1SBrett Creeley 
/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Return: 0 on success, the negative tracker/request_irq error otherwise.
 */
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int oicr_idx, err = 0;

	/* build the IRQ name once; reused across rebuilds */
	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(dev), dev_name(dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0)
		return oicr_idx;

	pf->num_avail_sw_msix -= 1;
	pf->oicr_idx = (u16)oicr_idx;

	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
			       ice_misc_intr, 0, pf->int_name, pf);
	if (err) {
		dev_err(dev, "devm_request_irq for %s failed: %d\n",
			pf->int_name, err);
		/* undo the reservation made above */
		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	/* associate control queue causes with the vector and set its ITR */
	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}
3186940b61afSAnirudh Venkataramanan 
3187940b61afSAnirudh Venkataramanan /**
3188df0f8479SAnirudh Venkataramanan  * ice_napi_add - register NAPI handler for the VSI
3189df0f8479SAnirudh Venkataramanan  * @vsi: VSI for which NAPI handler is to be registered
3190df0f8479SAnirudh Venkataramanan  *
3191df0f8479SAnirudh Venkataramanan  * This function is only called in the driver's load path. Registering the NAPI
3192df0f8479SAnirudh Venkataramanan  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3193df0f8479SAnirudh Venkataramanan  * reset/rebuild, etc.)
3194df0f8479SAnirudh Venkataramanan  */
3195df0f8479SAnirudh Venkataramanan static void ice_napi_add(struct ice_vsi *vsi)
3196df0f8479SAnirudh Venkataramanan {
3197df0f8479SAnirudh Venkataramanan 	int v_idx;
3198df0f8479SAnirudh Venkataramanan 
3199df0f8479SAnirudh Venkataramanan 	if (!vsi->netdev)
3200df0f8479SAnirudh Venkataramanan 		return;
3201df0f8479SAnirudh Venkataramanan 
32020c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, v_idx)
3203df0f8479SAnirudh Venkataramanan 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3204df0f8479SAnirudh Venkataramanan 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3205df0f8479SAnirudh Venkataramanan }
3206df0f8479SAnirudh Venkataramanan 
3207df0f8479SAnirudh Venkataramanan /**
3208462acf6aSTony Nguyen  * ice_set_ops - set netdev and ethtools ops for the given netdev
3209462acf6aSTony Nguyen  * @netdev: netdev instance
32103a858ba3SAnirudh Venkataramanan  */
3211462acf6aSTony Nguyen static void ice_set_ops(struct net_device *netdev)
32123a858ba3SAnirudh Venkataramanan {
3213462acf6aSTony Nguyen 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3214462acf6aSTony Nguyen 
3215462acf6aSTony Nguyen 	if (ice_is_safe_mode(pf)) {
3216462acf6aSTony Nguyen 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3217462acf6aSTony Nguyen 		ice_set_ethtool_safe_mode_ops(netdev);
3218462acf6aSTony Nguyen 		return;
3219462acf6aSTony Nguyen 	}
3220462acf6aSTony Nguyen 
3221462acf6aSTony Nguyen 	netdev->netdev_ops = &ice_netdev_ops;
3222b20e6c17SJakub Kicinski 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3223462acf6aSTony Nguyen 	ice_set_ethtool_ops(netdev);
3224462acf6aSTony Nguyen }
3225462acf6aSTony Nguyen 
3226462acf6aSTony Nguyen /**
3227462acf6aSTony Nguyen  * ice_set_netdev_features - set features for the given netdev
3228462acf6aSTony Nguyen  * @netdev: netdev instance
3229462acf6aSTony Nguyen  */
3230462acf6aSTony Nguyen static void ice_set_netdev_features(struct net_device *netdev)
3231462acf6aSTony Nguyen {
3232462acf6aSTony Nguyen 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3233d76a60baSAnirudh Venkataramanan 	netdev_features_t csumo_features;
3234d76a60baSAnirudh Venkataramanan 	netdev_features_t vlano_features;
3235d76a60baSAnirudh Venkataramanan 	netdev_features_t dflt_features;
3236d76a60baSAnirudh Venkataramanan 	netdev_features_t tso_features;
32373a858ba3SAnirudh Venkataramanan 
3238462acf6aSTony Nguyen 	if (ice_is_safe_mode(pf)) {
3239462acf6aSTony Nguyen 		/* safe mode */
3240462acf6aSTony Nguyen 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3241462acf6aSTony Nguyen 		netdev->hw_features = netdev->features;
3242462acf6aSTony Nguyen 		return;
3243462acf6aSTony Nguyen 	}
32443a858ba3SAnirudh Venkataramanan 
3245d76a60baSAnirudh Venkataramanan 	dflt_features = NETIF_F_SG	|
32463a858ba3SAnirudh Venkataramanan 			NETIF_F_HIGHDMA	|
3247148beb61SHenry Tieman 			NETIF_F_NTUPLE	|
32483a858ba3SAnirudh Venkataramanan 			NETIF_F_RXHASH;
32493a858ba3SAnirudh Venkataramanan 
3250d76a60baSAnirudh Venkataramanan 	csumo_features = NETIF_F_RXCSUM	  |
3251d76a60baSAnirudh Venkataramanan 			 NETIF_F_IP_CSUM  |
3252cf909e19SAnirudh Venkataramanan 			 NETIF_F_SCTP_CRC |
3253d76a60baSAnirudh Venkataramanan 			 NETIF_F_IPV6_CSUM;
3254d76a60baSAnirudh Venkataramanan 
3255d76a60baSAnirudh Venkataramanan 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3256d76a60baSAnirudh Venkataramanan 			 NETIF_F_HW_VLAN_CTAG_TX     |
3257d76a60baSAnirudh Venkataramanan 			 NETIF_F_HW_VLAN_CTAG_RX;
3258d76a60baSAnirudh Venkataramanan 
3259a54e3b8cSBrett Creeley 	tso_features = NETIF_F_TSO			|
3260a4e82a81STony Nguyen 		       NETIF_F_TSO_ECN			|
3261a4e82a81STony Nguyen 		       NETIF_F_TSO6			|
3262a4e82a81STony Nguyen 		       NETIF_F_GSO_GRE			|
3263a4e82a81STony Nguyen 		       NETIF_F_GSO_UDP_TUNNEL		|
3264a4e82a81STony Nguyen 		       NETIF_F_GSO_GRE_CSUM		|
3265a4e82a81STony Nguyen 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3266a4e82a81STony Nguyen 		       NETIF_F_GSO_PARTIAL		|
3267a4e82a81STony Nguyen 		       NETIF_F_GSO_IPXIP4		|
3268a4e82a81STony Nguyen 		       NETIF_F_GSO_IPXIP6		|
3269a54e3b8cSBrett Creeley 		       NETIF_F_GSO_UDP_L4;
3270d76a60baSAnirudh Venkataramanan 
3271a4e82a81STony Nguyen 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3272a4e82a81STony Nguyen 					NETIF_F_GSO_GRE_CSUM;
3273d76a60baSAnirudh Venkataramanan 	/* set features that user can change */
3274d76a60baSAnirudh Venkataramanan 	netdev->hw_features = dflt_features | csumo_features |
3275d76a60baSAnirudh Venkataramanan 			      vlano_features | tso_features;
3276d76a60baSAnirudh Venkataramanan 
3277a4e82a81STony Nguyen 	/* add support for HW_CSUM on packets with MPLS header */
3278a4e82a81STony Nguyen 	netdev->mpls_features =  NETIF_F_HW_CSUM;
3279a4e82a81STony Nguyen 
32803a858ba3SAnirudh Venkataramanan 	/* enable features */
32813a858ba3SAnirudh Venkataramanan 	netdev->features |= netdev->hw_features;
32820d08a441SKiran Patil 
32830d08a441SKiran Patil 	netdev->hw_features |= NETIF_F_HW_TC;
32840d08a441SKiran Patil 
3285d76a60baSAnirudh Venkataramanan 	/* encap and VLAN devices inherit default, csumo and tso features */
3286d76a60baSAnirudh Venkataramanan 	netdev->hw_enc_features |= dflt_features | csumo_features |
3287d76a60baSAnirudh Venkataramanan 				   tso_features;
3288d76a60baSAnirudh Venkataramanan 	netdev->vlan_features |= dflt_features | csumo_features |
3289d76a60baSAnirudh Venkataramanan 				 tso_features;
3290462acf6aSTony Nguyen }
3291462acf6aSTony Nguyen 
/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Allocates a multi-queue etherdev sized to the VSI's Tx/Rx queue counts,
 * links it to the VSI, and applies features, ops, MAC address and MTU
 * bounds. NOTE(review): despite the summary line, register_netdev() itself
 * appears to happen elsewhere — confirm against the caller.
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];

	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
				    vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	/* link netdev and VSI both ways before calling any helper that
	 * resolves the PF through the netdev's private data
	 */
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	ice_set_netdev_features(netdev);

	ice_set_ops(netdev);

	if (vsi->type == ICE_VSI_PF) {
		/* PF netdev takes the port's permanent MAC address */
		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
		eth_hw_addr_set(netdev, mac_addr);
		ether_addr_copy(netdev->perm_addr, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Setup netdev TC information */
	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);

	/* setup watchdog timeout value to be 5 second */
	netdev->watchdog_timeo = 5 * HZ;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = ICE_MAX_MTU;

	return 0;
}
33383a858ba3SAnirudh Venkataramanan 
/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 *
 * Assigns queue indices 0..rss_size-1 to the table entries in a
 * round-robin fashion.
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 entry = 0;
	u16 q = 0;

	while (entry < rss_table_size) {
		lut[entry++] = (u8)q;
		/* wrap back to queue 0 after the last hashing queue */
		if (++q == rss_size)
			q = 0;
	}
}
3352d76a60baSAnirudh Venkataramanan 
/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	/* ICE_INVAL_VFID: not associated with a VF; NULL: no channel */
	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
}
33660f9d5027SAnirudh Venkataramanan 
/**
 * ice_chnl_vsi_setup - Set up a channel VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @ch: channel structure to associate with the new VSI
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
		   struct ice_channel *ch)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
}
3373fbc7b27aSKiran Patil 
/**
 * ice_ctrl_vsi_setup - Set up a control VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	/* presumably used to back flow director (see pf->ctrl_vsi_idx
	 * handling in ice_set_pf_caps()) — confirm against ice_init_fdir
	 */
	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
}
3387148beb61SHenry Tieman 
/**
 * ice_lb_vsi_setup - Set up a loopback VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	/* ICE_INVAL_VFID: not associated with a VF; NULL: no channel */
	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
}
34010e674aebSAnirudh Venkataramanan 
34020e674aebSAnirudh Venkataramanan /**
3403f9867df6SAnirudh Venkataramanan  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3404d76a60baSAnirudh Venkataramanan  * @netdev: network interface to be adjusted
34052bfefa2dSBrett Creeley  * @proto: VLAN TPID
3406f9867df6SAnirudh Venkataramanan  * @vid: VLAN ID to be added
3407d76a60baSAnirudh Venkataramanan  *
3408f9867df6SAnirudh Venkataramanan  * net_device_ops implementation for adding VLAN IDs
3409d76a60baSAnirudh Venkataramanan  */
3410c8b7abddSBruce Allan static int
34112bfefa2dSBrett Creeley ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3412d76a60baSAnirudh Venkataramanan {
3413d76a60baSAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
3414d76a60baSAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
3415fb05ba12SBrett Creeley 	struct ice_vlan vlan;
34165eda8afdSAkeem G Abodunrin 	int ret;
3417d76a60baSAnirudh Venkataramanan 
341842f3efefSBrett Creeley 	/* VLAN 0 is added by default during load/reset */
341942f3efefSBrett Creeley 	if (!vid)
342042f3efefSBrett Creeley 		return 0;
342142f3efefSBrett Creeley 
342242f3efefSBrett Creeley 	/* Enable VLAN pruning when a VLAN other than 0 is added */
342342f3efefSBrett Creeley 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3424bc42afa9SBrett Creeley 		ret = vsi->vlan_ops.ena_rx_filtering(vsi);
34254f74dcc1SBrett Creeley 		if (ret)
34264f74dcc1SBrett Creeley 			return ret;
34274f74dcc1SBrett Creeley 	}
34284f74dcc1SBrett Creeley 
342942f3efefSBrett Creeley 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
343042f3efefSBrett Creeley 	 * packets aren't pruned by the device's internal switch on Rx
3431d76a60baSAnirudh Venkataramanan 	 */
34322bfefa2dSBrett Creeley 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3433fb05ba12SBrett Creeley 	ret = vsi->vlan_ops.add_vlan(vsi, &vlan);
3434bcf68ea1SNick Nunley 	if (!ret)
3435e97fb1aeSAnirudh Venkataramanan 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
34365eda8afdSAkeem G Abodunrin 
34375eda8afdSAkeem G Abodunrin 	return ret;
3438d76a60baSAnirudh Venkataramanan }
3439d76a60baSAnirudh Venkataramanan 
3440d76a60baSAnirudh Venkataramanan /**
3441f9867df6SAnirudh Venkataramanan  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3442d76a60baSAnirudh Venkataramanan  * @netdev: network interface to be adjusted
34432bfefa2dSBrett Creeley  * @proto: VLAN TPID
3444f9867df6SAnirudh Venkataramanan  * @vid: VLAN ID to be removed
3445d76a60baSAnirudh Venkataramanan  *
3446f9867df6SAnirudh Venkataramanan  * net_device_ops implementation for removing VLAN IDs
3447d76a60baSAnirudh Venkataramanan  */
3448c8b7abddSBruce Allan static int
34492bfefa2dSBrett Creeley ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3450d76a60baSAnirudh Venkataramanan {
3451d76a60baSAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
3452d76a60baSAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
3453fb05ba12SBrett Creeley 	struct ice_vlan vlan;
34545eda8afdSAkeem G Abodunrin 	int ret;
3455d76a60baSAnirudh Venkataramanan 
345642f3efefSBrett Creeley 	/* don't allow removal of VLAN 0 */
345742f3efefSBrett Creeley 	if (!vid)
345842f3efefSBrett Creeley 		return 0;
345942f3efefSBrett Creeley 
3460bc42afa9SBrett Creeley 	/* Make sure VLAN delete is successful before updating VLAN
34614f74dcc1SBrett Creeley 	 * information
3462d76a60baSAnirudh Venkataramanan 	 */
34632bfefa2dSBrett Creeley 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3464fb05ba12SBrett Creeley 	ret = vsi->vlan_ops.del_vlan(vsi, &vlan);
34655eda8afdSAkeem G Abodunrin 	if (ret)
34665eda8afdSAkeem G Abodunrin 		return ret;
3467d76a60baSAnirudh Venkataramanan 
346842f3efefSBrett Creeley 	/* Disable pruning when VLAN 0 is the only VLAN rule */
346942f3efefSBrett Creeley 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3470bc42afa9SBrett Creeley 		vsi->vlan_ops.dis_rx_filtering(vsi);
34714f74dcc1SBrett Creeley 
3472e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
34735eda8afdSAkeem G Abodunrin 	return ret;
3474d76a60baSAnirudh Venkataramanan }
3475d76a60baSAnirudh Venkataramanan 
3476d76a60baSAnirudh Venkataramanan /**
3477195bb48fSMichal Swiatkowski  * ice_rep_indr_tc_block_unbind
3478195bb48fSMichal Swiatkowski  * @cb_priv: indirection block private data
3479195bb48fSMichal Swiatkowski  */
3480195bb48fSMichal Swiatkowski static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3481195bb48fSMichal Swiatkowski {
3482195bb48fSMichal Swiatkowski 	struct ice_indr_block_priv *indr_priv = cb_priv;
3483195bb48fSMichal Swiatkowski 
3484195bb48fSMichal Swiatkowski 	list_del(&indr_priv->list);
3485195bb48fSMichal Swiatkowski 	kfree(indr_priv);
3486195bb48fSMichal Swiatkowski }
3487195bb48fSMichal Swiatkowski 
3488195bb48fSMichal Swiatkowski /**
3489195bb48fSMichal Swiatkowski  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3490195bb48fSMichal Swiatkowski  * @vsi: VSI struct which has the netdev
3491195bb48fSMichal Swiatkowski  */
3492195bb48fSMichal Swiatkowski static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3493195bb48fSMichal Swiatkowski {
3494195bb48fSMichal Swiatkowski 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3495195bb48fSMichal Swiatkowski 
3496195bb48fSMichal Swiatkowski 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3497195bb48fSMichal Swiatkowski 				 ice_rep_indr_tc_block_unbind);
3498195bb48fSMichal Swiatkowski }
3499195bb48fSMichal Swiatkowski 
/**
 * ice_tc_indir_block_remove - clean indirect TC block notifications
 * @pf: PF structure
 *
 * No-op when the PF has no main VSI.
 */
static void ice_tc_indir_block_remove(struct ice_pf *pf)
{
	struct ice_vsi *main_vsi = ice_get_main_vsi(pf);

	if (main_vsi)
		ice_tc_indir_block_unregister(main_vsi);
}
3513195bb48fSMichal Swiatkowski 
3514195bb48fSMichal Swiatkowski /**
3515195bb48fSMichal Swiatkowski  * ice_tc_indir_block_register - Register TC indirect block notifications
3516195bb48fSMichal Swiatkowski  * @vsi: VSI struct which has the netdev
3517195bb48fSMichal Swiatkowski  *
3518195bb48fSMichal Swiatkowski  * Returns 0 on success, negative value on failure
3519195bb48fSMichal Swiatkowski  */
3520195bb48fSMichal Swiatkowski static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3521195bb48fSMichal Swiatkowski {
3522195bb48fSMichal Swiatkowski 	struct ice_netdev_priv *np;
3523195bb48fSMichal Swiatkowski 
3524195bb48fSMichal Swiatkowski 	if (!vsi || !vsi->netdev)
3525195bb48fSMichal Swiatkowski 		return -EINVAL;
3526195bb48fSMichal Swiatkowski 
3527195bb48fSMichal Swiatkowski 	np = netdev_priv(vsi->netdev);
3528195bb48fSMichal Swiatkowski 
3529195bb48fSMichal Swiatkowski 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3530195bb48fSMichal Swiatkowski 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3531195bb48fSMichal Swiatkowski }
3532195bb48fSMichal Swiatkowski 
/**
 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Creates the PF VSI, its netdev, indirect TC block notifications, DCB
 * netlink interface, NAPI contexts, CPU Rx affinity map and default MAC
 * filters. On any failure the already-acquired resources are unwound in
 * reverse order via the goto chain at the bottom.
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_setup_pf_sw(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi;
	int status;

	/* don't race with a reset that is tearing down/rebuilding VSIs */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi)
		return -ENOMEM;

	/* init channel list */
	INIT_LIST_HEAD(&vsi->ch_list);

	status = ice_cfg_netdev(vsi);
	if (status)
		goto unroll_vsi_setup;
	/* netdev has to be configured before setting frame size */
	ice_vsi_cfg_frame_size(vsi);

	/* init indirect block notifications */
	status = ice_tc_indir_block_register(vsi);
	if (status) {
		dev_err(dev, "Failed to register netdev notifier\n");
		goto unroll_cfg_netdev;
	}

	/* Setup DCB netlink interface */
	ice_dcbnl_setup(vsi);

	/* registering the NAPI handler requires both the queues and
	 * netdev to be created, which are done in ice_pf_vsi_setup()
	 * and ice_cfg_netdev() respectively
	 */
	ice_napi_add(vsi);

	status = ice_set_cpu_rx_rmap(vsi);
	if (status) {
		dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
			vsi->vsi_num, status);
		goto unroll_napi_add;
	}
	status = ice_init_mac_fltr(pf);
	if (status)
		goto free_cpu_rx_map;

	return 0;

	/* error unwind: labels run in reverse order of acquisition and
	 * fall through into one another
	 */
free_cpu_rx_map:
	ice_free_cpu_rx_rmap(vsi);
unroll_napi_add:
	ice_tc_indir_block_unregister(vsi);
unroll_cfg_netdev:
	/* NOTE(review): vsi is always non-NULL on every path reaching this
	 * label, so this guard looks like dead code — candidate for cleanup
	 */
	if (vsi) {
		ice_napi_del(vsi);
		if (vsi->netdev) {
			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
			free_netdev(vsi->netdev);
			vsi->netdev = NULL;
		}
	}

unroll_vsi_setup:
	ice_vsi_release(vsi);
	return status;
}
36073a858ba3SAnirudh Venkataramanan 
36083a858ba3SAnirudh Venkataramanan /**
36098c243700SAnirudh Venkataramanan  * ice_get_avail_q_count - Get count of queues in use
36108c243700SAnirudh Venkataramanan  * @pf_qmap: bitmap to get queue use count from
36118c243700SAnirudh Venkataramanan  * @lock: pointer to a mutex that protects access to pf_qmap
36128c243700SAnirudh Venkataramanan  * @size: size of the bitmap
3613940b61afSAnirudh Venkataramanan  */
36148c243700SAnirudh Venkataramanan static u16
36158c243700SAnirudh Venkataramanan ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3616940b61afSAnirudh Venkataramanan {
361788865fc4SKarol Kolacinski 	unsigned long bit;
361888865fc4SKarol Kolacinski 	u16 count = 0;
3619940b61afSAnirudh Venkataramanan 
36208c243700SAnirudh Venkataramanan 	mutex_lock(lock);
36218c243700SAnirudh Venkataramanan 	for_each_clear_bit(bit, pf_qmap, size)
36228c243700SAnirudh Venkataramanan 		count++;
36238c243700SAnirudh Venkataramanan 	mutex_unlock(lock);
3624940b61afSAnirudh Venkataramanan 
36258c243700SAnirudh Venkataramanan 	return count;
36268c243700SAnirudh Venkataramanan }
3627d76a60baSAnirudh Venkataramanan 
/**
 * ice_get_avail_txq_count - Get count of Tx queues in use
 * @pf: pointer to an ice_pf instance
 *
 * Returns the number of available (unallocated) PF Tx queues.
 */
u16 ice_get_avail_txq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
				     pf->max_pf_txqs);
}
3637940b61afSAnirudh Venkataramanan 
/**
 * ice_get_avail_rxq_count - Get count of Rx queues in use
 * @pf: pointer to an ice_pf instance
 *
 * Returns the number of available (unallocated) PF Rx queues.
 */
u16 ice_get_avail_rxq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
				     pf->max_pf_rxqs);
}
3647940b61afSAnirudh Venkataramanan 
3648940b61afSAnirudh Venkataramanan /**
3649940b61afSAnirudh Venkataramanan  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3650940b61afSAnirudh Venkataramanan  * @pf: board private structure to initialize
3651940b61afSAnirudh Venkataramanan  */
3652940b61afSAnirudh Venkataramanan static void ice_deinit_pf(struct ice_pf *pf)
3653940b61afSAnirudh Venkataramanan {
36548d81fa55SAkeem G Abodunrin 	ice_service_task_stop(pf);
3655940b61afSAnirudh Venkataramanan 	mutex_destroy(&pf->sw_mutex);
3656b94b013eSDave Ertman 	mutex_destroy(&pf->tc_mutex);
3657940b61afSAnirudh Venkataramanan 	mutex_destroy(&pf->avail_q_mutex);
365878b5713aSAnirudh Venkataramanan 
365978b5713aSAnirudh Venkataramanan 	if (pf->avail_txqs) {
366078b5713aSAnirudh Venkataramanan 		bitmap_free(pf->avail_txqs);
366178b5713aSAnirudh Venkataramanan 		pf->avail_txqs = NULL;
366278b5713aSAnirudh Venkataramanan 	}
366378b5713aSAnirudh Venkataramanan 
366478b5713aSAnirudh Venkataramanan 	if (pf->avail_rxqs) {
366578b5713aSAnirudh Venkataramanan 		bitmap_free(pf->avail_rxqs);
366678b5713aSAnirudh Venkataramanan 		pf->avail_rxqs = NULL;
366778b5713aSAnirudh Venkataramanan 	}
366806c16d89SJacob Keller 
366906c16d89SJacob Keller 	if (pf->ptp.clock)
367006c16d89SJacob Keller 		ptp_clock_unregister(pf->ptp.clock);
3671940b61afSAnirudh Venkataramanan }
3672940b61afSAnirudh Venkataramanan 
/**
 * ice_set_pf_caps - set PFs capability flags
 * @pf: pointer to the PF instance
 *
 * Translates the function capabilities reported by the device
 * (pf->hw.func_caps) into ICE_FLAG_* driver flags and derived limits.
 */
static void ice_set_pf_caps(struct ice_pf *pf)
{
	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;

	/* RDMA: both flags gate the RDMA auxiliary driver support */
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
	if (func_caps->common_cap.rdma) {
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
	}
	/* DCB */
	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
	if (func_caps->common_cap.dcb)
		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
	/* SR-IOV: cap the supported VF count at the driver maximum */
	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
	if (func_caps->common_cap.sr_iov_1_1) {
		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
					      ICE_MAX_VF_COUNT);
	}
	/* RSS: a nonzero lookup table size means RSS is usable */
	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
	if (func_caps->common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	/* Flow director: needs guaranteed or best-effort filter space */
	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
		u16 unused;

		/* ctrl_vsi_idx will be set to a valid value when flow director
		 * is setup by ice_init_fdir
		 */
		pf->ctrl_vsi_idx = ICE_NO_VSI;
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		/* force guaranteed filter pool for PF */
		ice_alloc_fd_guar_item(&pf->hw, &unused,
				       func_caps->fd_fltr_guar);
		/* force shared filter pool for PF */
		ice_alloc_fd_shrd_item(&pf->hw, &unused,
				       func_caps->fd_fltr_best_effort);
	}

	/* PTP (IEEE 1588) */
	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
	if (func_caps->common_cap.ieee_1588)
		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);

	pf->max_pf_txqs = func_caps->common_cap.num_txq;
	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
3724462acf6aSTony Nguyen 
3725462acf6aSTony Nguyen /**
3726940b61afSAnirudh Venkataramanan  * ice_init_pf - Initialize general software structures (struct ice_pf)
3727940b61afSAnirudh Venkataramanan  * @pf: board private structure to initialize
3728940b61afSAnirudh Venkataramanan  */
372978b5713aSAnirudh Venkataramanan static int ice_init_pf(struct ice_pf *pf)
3730940b61afSAnirudh Venkataramanan {
3731462acf6aSTony Nguyen 	ice_set_pf_caps(pf);
3732940b61afSAnirudh Venkataramanan 
3733940b61afSAnirudh Venkataramanan 	mutex_init(&pf->sw_mutex);
3734b94b013eSDave Ertman 	mutex_init(&pf->tc_mutex);
3735d76a60baSAnirudh Venkataramanan 
3736d69ea414SJacob Keller 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3737d69ea414SJacob Keller 	spin_lock_init(&pf->aq_wait_lock);
3738d69ea414SJacob Keller 	init_waitqueue_head(&pf->aq_wait_queue);
3739d69ea414SJacob Keller 
37401c08052eSJacob Keller 	init_waitqueue_head(&pf->reset_wait_queue);
37411c08052eSJacob Keller 
3742940b61afSAnirudh Venkataramanan 	/* setup service timer and periodic service task */
3743940b61afSAnirudh Venkataramanan 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3744940b61afSAnirudh Venkataramanan 	pf->serv_tmr_period = HZ;
3745940b61afSAnirudh Venkataramanan 	INIT_WORK(&pf->serv_task, ice_service_task);
37467e408e07SAnirudh Venkataramanan 	clear_bit(ICE_SERVICE_SCHED, pf->state);
374778b5713aSAnirudh Venkataramanan 
3748462acf6aSTony Nguyen 	mutex_init(&pf->avail_q_mutex);
374978b5713aSAnirudh Venkataramanan 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
375078b5713aSAnirudh Venkataramanan 	if (!pf->avail_txqs)
375178b5713aSAnirudh Venkataramanan 		return -ENOMEM;
375278b5713aSAnirudh Venkataramanan 
375378b5713aSAnirudh Venkataramanan 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
375478b5713aSAnirudh Venkataramanan 	if (!pf->avail_rxqs) {
37554015d11eSBrett Creeley 		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
375678b5713aSAnirudh Venkataramanan 		pf->avail_txqs = NULL;
375778b5713aSAnirudh Venkataramanan 		return -ENOMEM;
375878b5713aSAnirudh Venkataramanan 	}
375978b5713aSAnirudh Venkataramanan 
376078b5713aSAnirudh Venkataramanan 	return 0;
3761940b61afSAnirudh Venkataramanan }
3762940b61afSAnirudh Venkataramanan 
3763940b61afSAnirudh Venkataramanan /**
3764940b61afSAnirudh Venkataramanan  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3765940b61afSAnirudh Venkataramanan  * @pf: board private structure
3766940b61afSAnirudh Venkataramanan  *
3767940b61afSAnirudh Venkataramanan  * compute the number of MSIX vectors required (v_budget) and request from
3768940b61afSAnirudh Venkataramanan  * the OS. Return the number of vectors reserved or negative on failure
3769940b61afSAnirudh Venkataramanan  */
3770940b61afSAnirudh Venkataramanan static int ice_ena_msix_range(struct ice_pf *pf)
3771940b61afSAnirudh Venkataramanan {
3772d25a0fc4SDave Ertman 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
37734015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
3774940b61afSAnirudh Venkataramanan 	int needed, err, i;
3775940b61afSAnirudh Venkataramanan 
3776940b61afSAnirudh Venkataramanan 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3777d25a0fc4SDave Ertman 	num_cpus = num_online_cpus();
3778940b61afSAnirudh Venkataramanan 
3779741106f7STony Nguyen 	/* reserve for LAN miscellaneous handler */
3780741106f7STony Nguyen 	needed = ICE_MIN_LAN_OICR_MSIX;
3781152b978aSAnirudh Venkataramanan 	if (v_left < needed)
3782152b978aSAnirudh Venkataramanan 		goto no_hw_vecs_left_err;
3783940b61afSAnirudh Venkataramanan 	v_budget += needed;
3784940b61afSAnirudh Venkataramanan 	v_left -= needed;
3785940b61afSAnirudh Venkataramanan 
3786741106f7STony Nguyen 	/* reserve for flow director */
3787741106f7STony Nguyen 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3788741106f7STony Nguyen 		needed = ICE_FDIR_MSIX;
3789741106f7STony Nguyen 		if (v_left < needed)
3790741106f7STony Nguyen 			goto no_hw_vecs_left_err;
3791741106f7STony Nguyen 		v_budget += needed;
3792741106f7STony Nguyen 		v_left -= needed;
3793741106f7STony Nguyen 	}
3794741106f7STony Nguyen 
3795f66756e0SGrzegorz Nitka 	/* reserve for switchdev */
3796f66756e0SGrzegorz Nitka 	needed = ICE_ESWITCH_MSIX;
3797f66756e0SGrzegorz Nitka 	if (v_left < needed)
3798f66756e0SGrzegorz Nitka 		goto no_hw_vecs_left_err;
3799f66756e0SGrzegorz Nitka 	v_budget += needed;
3800f66756e0SGrzegorz Nitka 	v_left -= needed;
3801f66756e0SGrzegorz Nitka 
3802741106f7STony Nguyen 	/* total used for non-traffic vectors */
3803741106f7STony Nguyen 	v_other = v_budget;
3804741106f7STony Nguyen 
3805940b61afSAnirudh Venkataramanan 	/* reserve vectors for LAN traffic */
3806d25a0fc4SDave Ertman 	needed = num_cpus;
3807152b978aSAnirudh Venkataramanan 	if (v_left < needed)
3808152b978aSAnirudh Venkataramanan 		goto no_hw_vecs_left_err;
3809152b978aSAnirudh Venkataramanan 	pf->num_lan_msix = needed;
3810152b978aSAnirudh Venkataramanan 	v_budget += needed;
3811152b978aSAnirudh Venkataramanan 	v_left -= needed;
3812940b61afSAnirudh Venkataramanan 
3813d25a0fc4SDave Ertman 	/* reserve vectors for RDMA auxiliary driver */
3814d25a0fc4SDave Ertman 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3815d25a0fc4SDave Ertman 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3816d25a0fc4SDave Ertman 		if (v_left < needed)
3817d25a0fc4SDave Ertman 			goto no_hw_vecs_left_err;
3818d25a0fc4SDave Ertman 		pf->num_rdma_msix = needed;
3819d25a0fc4SDave Ertman 		v_budget += needed;
3820d25a0fc4SDave Ertman 		v_left -= needed;
3821d25a0fc4SDave Ertman 	}
3822d25a0fc4SDave Ertman 
38234015d11eSBrett Creeley 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3824c6dfd690SBruce Allan 					sizeof(*pf->msix_entries), GFP_KERNEL);
3825940b61afSAnirudh Venkataramanan 	if (!pf->msix_entries) {
3826940b61afSAnirudh Venkataramanan 		err = -ENOMEM;
3827940b61afSAnirudh Venkataramanan 		goto exit_err;
3828940b61afSAnirudh Venkataramanan 	}
3829940b61afSAnirudh Venkataramanan 
3830940b61afSAnirudh Venkataramanan 	for (i = 0; i < v_budget; i++)
3831940b61afSAnirudh Venkataramanan 		pf->msix_entries[i].entry = i;
3832940b61afSAnirudh Venkataramanan 
3833940b61afSAnirudh Venkataramanan 	/* actually reserve the vectors */
3834940b61afSAnirudh Venkataramanan 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3835940b61afSAnirudh Venkataramanan 					 ICE_MIN_MSIX, v_budget);
3836940b61afSAnirudh Venkataramanan 	if (v_actual < 0) {
38374015d11eSBrett Creeley 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3838940b61afSAnirudh Venkataramanan 		err = v_actual;
3839940b61afSAnirudh Venkataramanan 		goto msix_err;
3840940b61afSAnirudh Venkataramanan 	}
3841940b61afSAnirudh Venkataramanan 
3842940b61afSAnirudh Venkataramanan 	if (v_actual < v_budget) {
384319cce2c6SAnirudh Venkataramanan 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3844940b61afSAnirudh Venkataramanan 			 v_budget, v_actual);
3845152b978aSAnirudh Venkataramanan 
3846f3fe97f6SBrett Creeley 		if (v_actual < ICE_MIN_MSIX) {
3847152b978aSAnirudh Venkataramanan 			/* error if we can't get minimum vectors */
3848940b61afSAnirudh Venkataramanan 			pci_disable_msix(pf->pdev);
3849940b61afSAnirudh Venkataramanan 			err = -ERANGE;
3850940b61afSAnirudh Venkataramanan 			goto msix_err;
3851152b978aSAnirudh Venkataramanan 		} else {
3852d25a0fc4SDave Ertman 			int v_remain = v_actual - v_other;
3853d25a0fc4SDave Ertman 			int v_rdma = 0, v_min_rdma = 0;
3854d25a0fc4SDave Ertman 
3855d25a0fc4SDave Ertman 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3856d25a0fc4SDave Ertman 				/* Need at least 1 interrupt in addition to
3857d25a0fc4SDave Ertman 				 * AEQ MSIX
3858d25a0fc4SDave Ertman 				 */
3859d25a0fc4SDave Ertman 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3860d25a0fc4SDave Ertman 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3861d25a0fc4SDave Ertman 			}
3862741106f7STony Nguyen 
3863741106f7STony Nguyen 			if (v_actual == ICE_MIN_MSIX ||
3864d25a0fc4SDave Ertman 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3865d25a0fc4SDave Ertman 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3866d25a0fc4SDave Ertman 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3867d25a0fc4SDave Ertman 
3868d25a0fc4SDave Ertman 				pf->num_rdma_msix = 0;
3869f3fe97f6SBrett Creeley 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3870d25a0fc4SDave Ertman 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3871d25a0fc4SDave Ertman 				   (v_remain - v_rdma < v_rdma)) {
3872d25a0fc4SDave Ertman 				/* Support minimum RDMA and give remaining
3873d25a0fc4SDave Ertman 				 * vectors to LAN MSIX
3874d25a0fc4SDave Ertman 				 */
3875d25a0fc4SDave Ertman 				pf->num_rdma_msix = v_min_rdma;
3876d25a0fc4SDave Ertman 				pf->num_lan_msix = v_remain - v_min_rdma;
3877d25a0fc4SDave Ertman 			} else {
3878d25a0fc4SDave Ertman 				/* Split remaining MSIX with RDMA after
3879d25a0fc4SDave Ertman 				 * accounting for AEQ MSIX
3880d25a0fc4SDave Ertman 				 */
3881d25a0fc4SDave Ertman 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3882d25a0fc4SDave Ertman 						    ICE_RDMA_NUM_AEQ_MSIX;
3883d25a0fc4SDave Ertman 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3884d25a0fc4SDave Ertman 			}
3885741106f7STony Nguyen 
3886741106f7STony Nguyen 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3887741106f7STony Nguyen 				   pf->num_lan_msix);
3888d25a0fc4SDave Ertman 
3889d25a0fc4SDave Ertman 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3890d25a0fc4SDave Ertman 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3891d25a0fc4SDave Ertman 					   pf->num_rdma_msix);
3892940b61afSAnirudh Venkataramanan 		}
3893940b61afSAnirudh Venkataramanan 	}
3894940b61afSAnirudh Venkataramanan 
3895940b61afSAnirudh Venkataramanan 	return v_actual;
3896940b61afSAnirudh Venkataramanan 
3897940b61afSAnirudh Venkataramanan msix_err:
38984015d11eSBrett Creeley 	devm_kfree(dev, pf->msix_entries);
3899940b61afSAnirudh Venkataramanan 	goto exit_err;
3900940b61afSAnirudh Venkataramanan 
3901152b978aSAnirudh Venkataramanan no_hw_vecs_left_err:
390219cce2c6SAnirudh Venkataramanan 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3903152b978aSAnirudh Venkataramanan 		needed, v_left);
3904152b978aSAnirudh Venkataramanan 	err = -ERANGE;
3905940b61afSAnirudh Venkataramanan exit_err:
3906d25a0fc4SDave Ertman 	pf->num_rdma_msix = 0;
3907940b61afSAnirudh Venkataramanan 	pf->num_lan_msix = 0;
3908940b61afSAnirudh Venkataramanan 	return err;
3909940b61afSAnirudh Venkataramanan }
3910940b61afSAnirudh Venkataramanan 
3911940b61afSAnirudh Venkataramanan /**
3912940b61afSAnirudh Venkataramanan  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3913940b61afSAnirudh Venkataramanan  * @pf: board private structure
3914940b61afSAnirudh Venkataramanan  */
3915940b61afSAnirudh Venkataramanan static void ice_dis_msix(struct ice_pf *pf)
3916940b61afSAnirudh Venkataramanan {
3917940b61afSAnirudh Venkataramanan 	pci_disable_msix(pf->pdev);
39184015d11eSBrett Creeley 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3919940b61afSAnirudh Venkataramanan 	pf->msix_entries = NULL;
3920940b61afSAnirudh Venkataramanan }
3921940b61afSAnirudh Venkataramanan 
3922940b61afSAnirudh Venkataramanan /**
3923eb0208ecSPreethi Banala  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3924eb0208ecSPreethi Banala  * @pf: board private structure
3925eb0208ecSPreethi Banala  */
3926eb0208ecSPreethi Banala static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3927eb0208ecSPreethi Banala {
3928eb0208ecSPreethi Banala 	ice_dis_msix(pf);
3929eb0208ecSPreethi Banala 
3930cbe66bfeSBrett Creeley 	if (pf->irq_tracker) {
39314015d11eSBrett Creeley 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3932cbe66bfeSBrett Creeley 		pf->irq_tracker = NULL;
3933eb0208ecSPreethi Banala 	}
3934eb0208ecSPreethi Banala }
3935eb0208ecSPreethi Banala 
3936eb0208ecSPreethi Banala /**
3937940b61afSAnirudh Venkataramanan  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3938940b61afSAnirudh Venkataramanan  * @pf: board private structure to initialize
3939940b61afSAnirudh Venkataramanan  */
3940940b61afSAnirudh Venkataramanan static int ice_init_interrupt_scheme(struct ice_pf *pf)
3941940b61afSAnirudh Venkataramanan {
3942cbe66bfeSBrett Creeley 	int vectors;
3943940b61afSAnirudh Venkataramanan 
3944940b61afSAnirudh Venkataramanan 	vectors = ice_ena_msix_range(pf);
3945940b61afSAnirudh Venkataramanan 
3946940b61afSAnirudh Venkataramanan 	if (vectors < 0)
3947940b61afSAnirudh Venkataramanan 		return vectors;
3948940b61afSAnirudh Venkataramanan 
3949940b61afSAnirudh Venkataramanan 	/* set up vector assignment tracking */
3950e94c0df9SGustavo A. R. Silva 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3951e94c0df9SGustavo A. R. Silva 				       struct_size(pf->irq_tracker, list, vectors),
3952e94c0df9SGustavo A. R. Silva 				       GFP_KERNEL);
3953cbe66bfeSBrett Creeley 	if (!pf->irq_tracker) {
3954940b61afSAnirudh Venkataramanan 		ice_dis_msix(pf);
3955940b61afSAnirudh Venkataramanan 		return -ENOMEM;
3956940b61afSAnirudh Venkataramanan 	}
3957940b61afSAnirudh Venkataramanan 
3958eb0208ecSPreethi Banala 	/* populate SW interrupts pool with number of OS granted IRQs. */
395988865fc4SKarol Kolacinski 	pf->num_avail_sw_msix = (u16)vectors;
396088865fc4SKarol Kolacinski 	pf->irq_tracker->num_entries = (u16)vectors;
3961cbe66bfeSBrett Creeley 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3962940b61afSAnirudh Venkataramanan 
3963940b61afSAnirudh Venkataramanan 	return 0;
3964940b61afSAnirudh Venkataramanan }
3965940b61afSAnirudh Venkataramanan 
3966940b61afSAnirudh Venkataramanan /**
396731765519SAnirudh Venkataramanan  * ice_is_wol_supported - check if WoL is supported
396831765519SAnirudh Venkataramanan  * @hw: pointer to hardware info
3969769c500dSAkeem G Abodunrin  *
3970769c500dSAkeem G Abodunrin  * Check if WoL is supported based on the HW configuration.
3971769c500dSAkeem G Abodunrin  * Returns true if NVM supports and enables WoL for this port, false otherwise
3972769c500dSAkeem G Abodunrin  */
397331765519SAnirudh Venkataramanan bool ice_is_wol_supported(struct ice_hw *hw)
3974769c500dSAkeem G Abodunrin {
3975769c500dSAkeem G Abodunrin 	u16 wol_ctrl;
3976769c500dSAkeem G Abodunrin 
3977769c500dSAkeem G Abodunrin 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3978769c500dSAkeem G Abodunrin 	 * word) indicates WoL is not supported on the corresponding PF ID.
3979769c500dSAkeem G Abodunrin 	 */
3980769c500dSAkeem G Abodunrin 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3981769c500dSAkeem G Abodunrin 		return false;
3982769c500dSAkeem G Abodunrin 
398331765519SAnirudh Venkataramanan 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3984769c500dSAkeem G Abodunrin }
3985769c500dSAkeem G Abodunrin 
3986769c500dSAkeem G Abodunrin /**
398787324e74SHenry Tieman  * ice_vsi_recfg_qs - Change the number of queues on a VSI
398887324e74SHenry Tieman  * @vsi: VSI being changed
398987324e74SHenry Tieman  * @new_rx: new number of Rx queues
399087324e74SHenry Tieman  * @new_tx: new number of Tx queues
399187324e74SHenry Tieman  *
399287324e74SHenry Tieman  * Only change the number of queues if new_tx, or new_rx is non-0.
399387324e74SHenry Tieman  *
399487324e74SHenry Tieman  * Returns 0 on success.
399587324e74SHenry Tieman  */
399687324e74SHenry Tieman int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
399787324e74SHenry Tieman {
399887324e74SHenry Tieman 	struct ice_pf *pf = vsi->back;
399987324e74SHenry Tieman 	int err = 0, timeout = 50;
400087324e74SHenry Tieman 
400187324e74SHenry Tieman 	if (!new_rx && !new_tx)
400287324e74SHenry Tieman 		return -EINVAL;
400387324e74SHenry Tieman 
40047e408e07SAnirudh Venkataramanan 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
400587324e74SHenry Tieman 		timeout--;
400687324e74SHenry Tieman 		if (!timeout)
400787324e74SHenry Tieman 			return -EBUSY;
400887324e74SHenry Tieman 		usleep_range(1000, 2000);
400987324e74SHenry Tieman 	}
401087324e74SHenry Tieman 
401187324e74SHenry Tieman 	if (new_tx)
401288865fc4SKarol Kolacinski 		vsi->req_txq = (u16)new_tx;
401387324e74SHenry Tieman 	if (new_rx)
401488865fc4SKarol Kolacinski 		vsi->req_rxq = (u16)new_rx;
401587324e74SHenry Tieman 
401687324e74SHenry Tieman 	/* set for the next time the netdev is started */
401787324e74SHenry Tieman 	if (!netif_running(vsi->netdev)) {
401887324e74SHenry Tieman 		ice_vsi_rebuild(vsi, false);
401987324e74SHenry Tieman 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
402087324e74SHenry Tieman 		goto done;
402187324e74SHenry Tieman 	}
402287324e74SHenry Tieman 
402387324e74SHenry Tieman 	ice_vsi_close(vsi);
402487324e74SHenry Tieman 	ice_vsi_rebuild(vsi, false);
402587324e74SHenry Tieman 	ice_pf_dcb_recfg(pf);
402687324e74SHenry Tieman 	ice_vsi_open(vsi);
402787324e74SHenry Tieman done:
40287e408e07SAnirudh Venkataramanan 	clear_bit(ICE_CFG_BUSY, pf->state);
402987324e74SHenry Tieman 	return err;
403087324e74SHenry Tieman }
403187324e74SHenry Tieman 
403287324e74SHenry Tieman /**
4033cd1f56f4SBrett Creeley  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4034cd1f56f4SBrett Creeley  * @pf: PF to configure
4035cd1f56f4SBrett Creeley  *
4036cd1f56f4SBrett Creeley  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4037cd1f56f4SBrett Creeley  * VSI can still Tx/Rx VLAN tagged packets.
4038cd1f56f4SBrett Creeley  */
4039cd1f56f4SBrett Creeley static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4040cd1f56f4SBrett Creeley {
4041cd1f56f4SBrett Creeley 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4042cd1f56f4SBrett Creeley 	struct ice_vsi_ctx *ctxt;
4043cd1f56f4SBrett Creeley 	struct ice_hw *hw;
40445518ac2aSTony Nguyen 	int status;
4045cd1f56f4SBrett Creeley 
4046cd1f56f4SBrett Creeley 	if (!vsi)
4047cd1f56f4SBrett Creeley 		return;
4048cd1f56f4SBrett Creeley 
4049cd1f56f4SBrett Creeley 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4050cd1f56f4SBrett Creeley 	if (!ctxt)
4051cd1f56f4SBrett Creeley 		return;
4052cd1f56f4SBrett Creeley 
4053cd1f56f4SBrett Creeley 	hw = &pf->hw;
4054cd1f56f4SBrett Creeley 	ctxt->info = vsi->info;
4055cd1f56f4SBrett Creeley 
4056cd1f56f4SBrett Creeley 	ctxt->info.valid_sections =
4057cd1f56f4SBrett Creeley 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4058cd1f56f4SBrett Creeley 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4059cd1f56f4SBrett Creeley 			    ICE_AQ_VSI_PROP_SW_VALID);
4060cd1f56f4SBrett Creeley 
4061cd1f56f4SBrett Creeley 	/* disable VLAN anti-spoof */
4062cd1f56f4SBrett Creeley 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4063cd1f56f4SBrett Creeley 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4064cd1f56f4SBrett Creeley 
4065cd1f56f4SBrett Creeley 	/* disable VLAN pruning and keep all other settings */
4066cd1f56f4SBrett Creeley 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4067cd1f56f4SBrett Creeley 
4068cd1f56f4SBrett Creeley 	/* allow all VLANs on Tx and don't strip on Rx */
4069*7bd527aaSBrett Creeley 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4070*7bd527aaSBrett Creeley 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4071cd1f56f4SBrett Creeley 
4072cd1f56f4SBrett Creeley 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4073cd1f56f4SBrett Creeley 	if (status) {
40745f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
40755518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
4076cd1f56f4SBrett Creeley 	} else {
4077cd1f56f4SBrett Creeley 		vsi->info.sec_flags = ctxt->info.sec_flags;
4078cd1f56f4SBrett Creeley 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4079*7bd527aaSBrett Creeley 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4080cd1f56f4SBrett Creeley 	}
4081cd1f56f4SBrett Creeley 
4082cd1f56f4SBrett Creeley 	kfree(ctxt);
4083cd1f56f4SBrett Creeley }
4084cd1f56f4SBrett Creeley 
/**
 * ice_log_pkg_init - log result of DDP package load
 * @hw: pointer to hardware info
 * @state: state of package load
 *
 * Translates the DDP load result into a user-facing kernel log message.
 * Informational states (success / already-loaded) use dev_info; all
 * failure states use dev_err and most of them announce entry into
 * Safe Mode (the caller decides that via ice_is_init_pkg_successful()).
 */
static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
	struct ice_pf *pf = hw->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	switch (state) {
	/* informational outcomes: the device has a usable package */
	case ICE_DDP_PKG_SUCCESS:
		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	/* device already carries a package, but not one this driver can use */
	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
			hw->active_pkg_name,
			hw->active_pkg_ver.major,
			hw->active_pkg_ver.minor,
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft,
			 hw->pkg_name,
			 hw->pkg_ver.major,
			 hw->pkg_ver.minor,
			 hw->pkg_ver.update,
			 hw->pkg_ver.draft);
		break;
	case ICE_DDP_PKG_FW_MISMATCH:
		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
		break;
	/* problems with the package file itself */
	case ICE_DDP_PKG_INVALID_FILE:
		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
		break;
	/* the device-side load failed and triggered a device reset */
	case ICE_DDP_PKG_LOAD_ERROR:
		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
		/* poll for reset to complete */
		if (ice_check_reset(hw))
			dev_err(dev, "Error resetting device. Please reload the driver\n");
		break;
	case ICE_DDP_PKG_ERR:
	default:
		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
		break;
	}
}
4165462acf6aSTony Nguyen 
4166462acf6aSTony Nguyen /**
4167462acf6aSTony Nguyen  * ice_load_pkg - load/reload the DDP Package file
4168462acf6aSTony Nguyen  * @firmware: firmware structure when firmware requested or NULL for reload
4169462acf6aSTony Nguyen  * @pf: pointer to the PF instance
4170462acf6aSTony Nguyen  *
4171462acf6aSTony Nguyen  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4172462acf6aSTony Nguyen  * initialize HW tables.
4173462acf6aSTony Nguyen  */
4174462acf6aSTony Nguyen static void
4175462acf6aSTony Nguyen ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4176462acf6aSTony Nguyen {
4177247dd97dSWojciech Drewek 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
41784015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
4179462acf6aSTony Nguyen 	struct ice_hw *hw = &pf->hw;
4180462acf6aSTony Nguyen 
4181462acf6aSTony Nguyen 	/* Load DDP Package */
4182462acf6aSTony Nguyen 	if (firmware && !hw->pkg_copy) {
4183247dd97dSWojciech Drewek 		state = ice_copy_and_init_pkg(hw, firmware->data,
4184462acf6aSTony Nguyen 					      firmware->size);
4185247dd97dSWojciech Drewek 		ice_log_pkg_init(hw, state);
4186462acf6aSTony Nguyen 	} else if (!firmware && hw->pkg_copy) {
4187462acf6aSTony Nguyen 		/* Reload package during rebuild after CORER/GLOBR reset */
4188247dd97dSWojciech Drewek 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4189247dd97dSWojciech Drewek 		ice_log_pkg_init(hw, state);
4190462acf6aSTony Nguyen 	} else {
419119cce2c6SAnirudh Venkataramanan 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4192462acf6aSTony Nguyen 	}
4193462acf6aSTony Nguyen 
4194247dd97dSWojciech Drewek 	if (!ice_is_init_pkg_successful(state)) {
4195462acf6aSTony Nguyen 		/* Safe Mode */
4196462acf6aSTony Nguyen 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4197462acf6aSTony Nguyen 		return;
4198462acf6aSTony Nguyen 	}
4199462acf6aSTony Nguyen 
4200462acf6aSTony Nguyen 	/* Successful download package is the precondition for advanced
4201462acf6aSTony Nguyen 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4202462acf6aSTony Nguyen 	 */
4203462acf6aSTony Nguyen 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4204462acf6aSTony Nguyen }
4205462acf6aSTony Nguyen 
4206462acf6aSTony Nguyen /**
4207c585ea42SBrett Creeley  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4208c585ea42SBrett Creeley  * @pf: pointer to the PF structure
4209c585ea42SBrett Creeley  *
4210c585ea42SBrett Creeley  * There is no error returned here because the driver should be able to handle
4211c585ea42SBrett Creeley  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4212c585ea42SBrett Creeley  * specifically with Tx.
4213c585ea42SBrett Creeley  */
4214c585ea42SBrett Creeley static void ice_verify_cacheline_size(struct ice_pf *pf)
4215c585ea42SBrett Creeley {
4216c585ea42SBrett Creeley 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
421719cce2c6SAnirudh Venkataramanan 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4218c585ea42SBrett Creeley 			 ICE_CACHE_LINE_BYTES);
4219c585ea42SBrett Creeley }
4220c585ea42SBrett Creeley 
4221c585ea42SBrett Creeley /**
4222e3710a01SPaul M Stillwell Jr  * ice_send_version - update firmware with driver version
4223e3710a01SPaul M Stillwell Jr  * @pf: PF struct
4224e3710a01SPaul M Stillwell Jr  *
4225d54699e2STony Nguyen  * Returns 0 on success, else error code
4226e3710a01SPaul M Stillwell Jr  */
42275e24d598STony Nguyen static int ice_send_version(struct ice_pf *pf)
4228e3710a01SPaul M Stillwell Jr {
4229e3710a01SPaul M Stillwell Jr 	struct ice_driver_ver dv;
4230e3710a01SPaul M Stillwell Jr 
423134a2a3b8SJeff Kirsher 	dv.major_ver = 0xff;
423234a2a3b8SJeff Kirsher 	dv.minor_ver = 0xff;
423334a2a3b8SJeff Kirsher 	dv.build_ver = 0xff;
4234e3710a01SPaul M Stillwell Jr 	dv.subbuild_ver = 0;
423534a2a3b8SJeff Kirsher 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4236e3710a01SPaul M Stillwell Jr 		sizeof(dv.driver_string));
4237e3710a01SPaul M Stillwell Jr 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4238e3710a01SPaul M Stillwell Jr }
4239e3710a01SPaul M Stillwell Jr 
4240e3710a01SPaul M Stillwell Jr /**
4241148beb61SHenry Tieman  * ice_init_fdir - Initialize flow director VSI and configuration
4242148beb61SHenry Tieman  * @pf: pointer to the PF instance
4243148beb61SHenry Tieman  *
4244148beb61SHenry Tieman  * returns 0 on success, negative on error
4245148beb61SHenry Tieman  */
4246148beb61SHenry Tieman static int ice_init_fdir(struct ice_pf *pf)
4247148beb61SHenry Tieman {
4248148beb61SHenry Tieman 	struct device *dev = ice_pf_to_dev(pf);
4249148beb61SHenry Tieman 	struct ice_vsi *ctrl_vsi;
4250148beb61SHenry Tieman 	int err;
4251148beb61SHenry Tieman 
4252148beb61SHenry Tieman 	/* Side Band Flow Director needs to have a control VSI.
4253148beb61SHenry Tieman 	 * Allocate it and store it in the PF.
4254148beb61SHenry Tieman 	 */
4255148beb61SHenry Tieman 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4256148beb61SHenry Tieman 	if (!ctrl_vsi) {
4257148beb61SHenry Tieman 		dev_dbg(dev, "could not create control VSI\n");
4258148beb61SHenry Tieman 		return -ENOMEM;
4259148beb61SHenry Tieman 	}
4260148beb61SHenry Tieman 
4261148beb61SHenry Tieman 	err = ice_vsi_open_ctrl(ctrl_vsi);
4262148beb61SHenry Tieman 	if (err) {
4263148beb61SHenry Tieman 		dev_dbg(dev, "could not open control VSI\n");
4264148beb61SHenry Tieman 		goto err_vsi_open;
4265148beb61SHenry Tieman 	}
4266148beb61SHenry Tieman 
4267148beb61SHenry Tieman 	mutex_init(&pf->hw.fdir_fltr_lock);
4268148beb61SHenry Tieman 
4269148beb61SHenry Tieman 	err = ice_fdir_create_dflt_rules(pf);
4270148beb61SHenry Tieman 	if (err)
4271148beb61SHenry Tieman 		goto err_fdir_rule;
4272148beb61SHenry Tieman 
4273148beb61SHenry Tieman 	return 0;
4274148beb61SHenry Tieman 
4275148beb61SHenry Tieman err_fdir_rule:
4276148beb61SHenry Tieman 	ice_fdir_release_flows(&pf->hw);
4277148beb61SHenry Tieman 	ice_vsi_close(ctrl_vsi);
4278148beb61SHenry Tieman err_vsi_open:
4279148beb61SHenry Tieman 	ice_vsi_release(ctrl_vsi);
4280148beb61SHenry Tieman 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4281148beb61SHenry Tieman 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4282148beb61SHenry Tieman 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4283148beb61SHenry Tieman 	}
4284148beb61SHenry Tieman 	return err;
4285148beb61SHenry Tieman }
4286148beb61SHenry Tieman 
4287148beb61SHenry Tieman /**
4288462acf6aSTony Nguyen  * ice_get_opt_fw_name - return optional firmware file name or NULL
4289462acf6aSTony Nguyen  * @pf: pointer to the PF instance
4290462acf6aSTony Nguyen  */
4291462acf6aSTony Nguyen static char *ice_get_opt_fw_name(struct ice_pf *pf)
4292462acf6aSTony Nguyen {
4293462acf6aSTony Nguyen 	/* Optional firmware name same as default with additional dash
4294462acf6aSTony Nguyen 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4295462acf6aSTony Nguyen 	 */
4296462acf6aSTony Nguyen 	struct pci_dev *pdev = pf->pdev;
4297ceb2f007SJacob Keller 	char *opt_fw_filename;
4298ceb2f007SJacob Keller 	u64 dsn;
4299462acf6aSTony Nguyen 
4300462acf6aSTony Nguyen 	/* Determine the name of the optional file using the DSN (two
4301462acf6aSTony Nguyen 	 * dwords following the start of the DSN Capability).
4302462acf6aSTony Nguyen 	 */
4303ceb2f007SJacob Keller 	dsn = pci_get_dsn(pdev);
4304ceb2f007SJacob Keller 	if (!dsn)
4305ceb2f007SJacob Keller 		return NULL;
4306ceb2f007SJacob Keller 
4307462acf6aSTony Nguyen 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4308462acf6aSTony Nguyen 	if (!opt_fw_filename)
4309462acf6aSTony Nguyen 		return NULL;
4310462acf6aSTony Nguyen 
43111a9c561aSPaul M Stillwell Jr 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4312ceb2f007SJacob Keller 		 ICE_DDP_PKG_PATH, dsn);
4313462acf6aSTony Nguyen 
4314462acf6aSTony Nguyen 	return opt_fw_filename;
4315462acf6aSTony Nguyen }
4316462acf6aSTony Nguyen 
4317462acf6aSTony Nguyen /**
4318462acf6aSTony Nguyen  * ice_request_fw - Device initialization routine
4319462acf6aSTony Nguyen  * @pf: pointer to the PF instance
4320462acf6aSTony Nguyen  */
4321462acf6aSTony Nguyen static void ice_request_fw(struct ice_pf *pf)
4322462acf6aSTony Nguyen {
4323462acf6aSTony Nguyen 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4324462acf6aSTony Nguyen 	const struct firmware *firmware = NULL;
43254015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
4326462acf6aSTony Nguyen 	int err = 0;
4327462acf6aSTony Nguyen 
4328462acf6aSTony Nguyen 	/* optional device-specific DDP (if present) overrides the default DDP
4329462acf6aSTony Nguyen 	 * package file. kernel logs a debug message if the file doesn't exist,
4330462acf6aSTony Nguyen 	 * and warning messages for other errors.
4331462acf6aSTony Nguyen 	 */
4332462acf6aSTony Nguyen 	if (opt_fw_filename) {
4333462acf6aSTony Nguyen 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4334462acf6aSTony Nguyen 		if (err) {
4335462acf6aSTony Nguyen 			kfree(opt_fw_filename);
4336462acf6aSTony Nguyen 			goto dflt_pkg_load;
4337462acf6aSTony Nguyen 		}
4338462acf6aSTony Nguyen 
4339462acf6aSTony Nguyen 		/* request for firmware was successful. Download to device */
4340462acf6aSTony Nguyen 		ice_load_pkg(firmware, pf);
4341462acf6aSTony Nguyen 		kfree(opt_fw_filename);
4342462acf6aSTony Nguyen 		release_firmware(firmware);
4343462acf6aSTony Nguyen 		return;
4344462acf6aSTony Nguyen 	}
4345462acf6aSTony Nguyen 
4346462acf6aSTony Nguyen dflt_pkg_load:
4347462acf6aSTony Nguyen 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4348462acf6aSTony Nguyen 	if (err) {
434919cce2c6SAnirudh Venkataramanan 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4350462acf6aSTony Nguyen 		return;
4351462acf6aSTony Nguyen 	}
4352462acf6aSTony Nguyen 
4353462acf6aSTony Nguyen 	/* request for firmware was successful. Download to device */
4354462acf6aSTony Nguyen 	ice_load_pkg(firmware, pf);
4355462acf6aSTony Nguyen 	release_firmware(firmware);
4356462acf6aSTony Nguyen }
4357462acf6aSTony Nguyen 
4358462acf6aSTony Nguyen /**
4359769c500dSAkeem G Abodunrin  * ice_print_wake_reason - show the wake up cause in the log
4360769c500dSAkeem G Abodunrin  * @pf: pointer to the PF struct
4361769c500dSAkeem G Abodunrin  */
4362769c500dSAkeem G Abodunrin static void ice_print_wake_reason(struct ice_pf *pf)
4363769c500dSAkeem G Abodunrin {
4364769c500dSAkeem G Abodunrin 	u32 wus = pf->wakeup_reason;
4365769c500dSAkeem G Abodunrin 	const char *wake_str;
4366769c500dSAkeem G Abodunrin 
4367769c500dSAkeem G Abodunrin 	/* if no wake event, nothing to print */
4368769c500dSAkeem G Abodunrin 	if (!wus)
4369769c500dSAkeem G Abodunrin 		return;
4370769c500dSAkeem G Abodunrin 
4371769c500dSAkeem G Abodunrin 	if (wus & PFPM_WUS_LNKC_M)
4372769c500dSAkeem G Abodunrin 		wake_str = "Link\n";
4373769c500dSAkeem G Abodunrin 	else if (wus & PFPM_WUS_MAG_M)
4374769c500dSAkeem G Abodunrin 		wake_str = "Magic Packet\n";
4375769c500dSAkeem G Abodunrin 	else if (wus & PFPM_WUS_MNG_M)
4376769c500dSAkeem G Abodunrin 		wake_str = "Management\n";
4377769c500dSAkeem G Abodunrin 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4378769c500dSAkeem G Abodunrin 		wake_str = "Firmware Reset\n";
4379769c500dSAkeem G Abodunrin 	else
4380769c500dSAkeem G Abodunrin 		wake_str = "Unknown\n";
4381769c500dSAkeem G Abodunrin 
4382769c500dSAkeem G Abodunrin 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4383769c500dSAkeem G Abodunrin }
4384769c500dSAkeem G Abodunrin 
/**
 * ice_register_netdev - register netdev and devlink port
 * @pf: pointer to the PF struct
 *
 * Registers the main VSI's netdev with the network stack and creates the
 * matching devlink PF port. On any failure the netdev is freed,
 * vsi->netdev is cleared, and the VSI state bits are rolled back.
 *
 * Returns 0 on success, -EIO if the main VSI or its netdev is missing,
 * or the error from register_netdev()/ice_devlink_create_pf_port().
 */
static int ice_register_netdev(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	int err = 0;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->netdev)
		return -EIO;

	err = register_netdev(vsi->netdev);
	if (err)
		goto err_register_netdev;

	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
	/* keep the interface quiesced until it is explicitly opened */
	netif_carrier_off(vsi->netdev);
	netif_tx_stop_all_queues(vsi->netdev);
	err = ice_devlink_create_pf_port(pf);
	if (err)
		goto err_devlink_create;

	/* associate the devlink port with the netdev just registered */
	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);

	return 0;
err_devlink_create:
	unregister_netdev(vsi->netdev);
	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
err_register_netdev:
	/* drop the netdev and clear ALLOCD so callers don't reuse a
	 * stale pointer
	 */
	free_netdev(vsi->netdev);
	vsi->netdev = NULL;
	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	return err;
}
44211e23f076SAnirudh Venkataramanan 
/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Brings up one PF: maps BAR0, initializes the HW and PF structures,
 * loads the DDP package (falling back to Safe Mode on failure), sets up
 * interrupts, the default switch, link/PHY state, and optional features
 * (PTP, flow director, DCB, LAG, RDMA), then registers the netdev and
 * devlink objects. Errors unwind through the goto ladder at the bottom
 * in strict reverse order of initialization.
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int i, err;

	/* VF devices are driven by ice_vf/iavf, never by this PF driver */
	if (pdev->is_virtfn) {
		dev_err(dev, "can't probe a virtual function\n");
		return -EINVAL;
	}

	/* this driver uses devres, see
	 * Documentation/driver-api/driver-model/devres.rst
	 */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
	if (err) {
		dev_err(dev, "BAR0 I/O map error %d\n", err);
		return err;
	}

	pf = ice_allocate_pf(dev);
	if (!pf)
		return -ENOMEM;

	/* initialize Auxiliary index to invalid value */
	pf->aux_idx = -1;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	pf->pdev = pdev;
	pci_set_drvdata(pdev, pf);
	set_bit(ICE_DOWN, pf->state);
	/* Disable service task until DOWN bit is cleared */
	set_bit(ICE_SERVICE_DIS, pf->state);

	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	pci_save_state(pdev);

	/* mirror PCI identity into the HW struct for the shared code */
	hw->back = pf;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	err = ice_init_hw(hw);
	if (err) {
		dev_err(dev, "ice_init_hw failed: %d\n", err);
		err = -EIO;
		goto err_exit_unroll;
	}

	ice_init_feature_support(pf);

	ice_request_fw(pf);

	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
	 * set in pf->state, which will cause ice_is_safe_mode to return
	 * true
	 */
	if (ice_is_safe_mode(pf)) {
		/* we already got function/device capabilities but these don't
		 * reflect what the driver needs to do in safe mode. Instead of
		 * adding conditional logic everywhere to ignore these
		 * device/function capabilities, override them.
		 */
		ice_set_safe_mode_caps(hw);
	}

	err = ice_init_pf(pf);
	if (err) {
		dev_err(dev, "ice_init_pf failed: %d\n", err);
		goto err_init_pf_unroll;
	}

	ice_devlink_init_regions(pf);

	/* describe the VXLAN/GENEVE port tables to the udp_tunnel_nic
	 * core; only tables with a non-zero entry count are populated
	 */
	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
	i = 0;
	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
		pf->hw.udp_tunnel_nic.tables[i].n_entries =
			pf->hw.tnl.valid_count[TNL_VXLAN];
		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
			UDP_TUNNEL_TYPE_VXLAN;
		i++;
	}
	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
		pf->hw.udp_tunnel_nic.tables[i].n_entries =
			pf->hw.tnl.valid_count[TNL_GENEVE];
		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
			UDP_TUNNEL_TYPE_GENEVE;
		i++;
	}

	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
	if (!pf->num_alloc_vsi) {
		err = -EIO;
		goto err_init_pf_unroll;
	}
	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(&pf->pdev->dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
	}

	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
			       GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_init_pf_unroll;
	}

	err = ice_init_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
		err = -EIO;
		goto err_init_vsi_unroll;
	}

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "setup of misc vector failed: %d\n", err);
		goto err_init_interrupt_unroll;
	}

	/* create switch struct for the switch element created by FW on boot */
	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
	if (!pf->first_sw) {
		err = -ENOMEM;
		goto err_msix_misc_unroll;
	}

	if (hw->evb_veb)
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
	else
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;

	pf->first_sw->pf = pf;

	/* record the sw_id available for later use */
	pf->first_sw->sw_id = hw->port_info->sw_id;

	err = ice_setup_pf_sw(pf);
	if (err) {
		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
		goto err_alloc_sw_unroll;
	}

	clear_bit(ICE_SERVICE_DIS, pf->state);

	/* tell the firmware we are up */
	err = ice_send_version(pf);
	if (err) {
		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
			UTS_RELEASE, err);
		goto err_send_version_unroll;
	}

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	err = ice_init_link_events(pf->hw.port_info);
	if (err) {
		dev_err(dev, "ice_init_link_events failed: %d\n", err);
		goto err_send_version_unroll;
	}

	/* not a fatal error if this fails */
	err = ice_init_nvm_phy_type(pf->hw.port_info);
	if (err)
		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);

	/* not a fatal error if this fails */
	err = ice_update_link_info(pf->hw.port_info);
	if (err)
		dev_err(dev, "ice_update_link_info failed: %d\n", err);

	ice_init_link_dflt_override(pf->hw.port_info);

	ice_check_link_cfg_err(pf,
			       pf->hw.port_info->phy.link_info.link_cfg_err);

	/* if media available, initialize PHY settings */
	if (pf->hw.port_info->phy.link_info.link_info &
	    ICE_AQ_MEDIA_AVAILABLE) {
		/* not a fatal error if this fails */
		err = ice_init_phy_user_cfg(pf->hw.port_info);
		if (err)
			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);

		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
			struct ice_vsi *vsi = ice_get_main_vsi(pf);

			if (vsi)
				ice_configure_phy(vsi);
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
	}

	ice_verify_cacheline_size(pf);

	/* Save wakeup reason register for later use */
	pf->wakeup_reason = rd32(hw, PFPM_WUS);

	/* check for a power management event */
	ice_print_wake_reason(pf);

	/* clear wake status, all bits */
	wr32(hw, PFPM_WUS, U32_MAX);

	/* Disable WoL at init, wait for user to enable */
	device_set_wakeup_enable(dev, false);

	/* in Safe Mode skip all DDP-driven feature init below */
	if (ice_is_safe_mode(pf)) {
		ice_set_safe_mode_vlan_cfg(pf);
		goto probe_done;
	}

	/* initialize DDP driven features */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_init(pf);

	/* Note: Flow director init failure is non-fatal to load */
	if (ice_init_fdir(pf))
		dev_err(dev, "could not initialize flow director\n");

	/* Note: DCB init failure is non-fatal to load */
	if (ice_init_pf_dcb(pf, false)) {
		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		ice_cfg_lldp_mib_change(&pf->hw, true);
	}

	if (ice_init_lag(pf))
		dev_warn(dev, "Failed to init link aggregation support\n");

	/* print PCI link speed and width */
	pcie_print_link_status(pf->pdev);

probe_done:
	err = ice_register_netdev(pf);
	if (err)
		goto err_netdev_reg;

	err = ice_devlink_register_params(pf);
	if (err)
		goto err_netdev_reg;

	/* ready to go, so clear down state bit */
	clear_bit(ICE_DOWN, pf->state);
	if (ice_is_aux_ena(pf)) {
		/* reserve a device ID for the RDMA auxiliary device */
		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
		if (pf->aux_idx < 0) {
			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
			err = -ENOMEM;
			goto err_devlink_reg_param;
		}

		err = ice_init_rdma(pf);
		if (err) {
			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
			err = -EIO;
			goto err_init_aux_unroll;
		}
	} else {
		dev_warn(dev, "RDMA is not supported on this device\n");
	}

	ice_devlink_register(pf);
	return 0;

	/* unwind ladder: each label undoes the step that precedes the
	 * goto that targets it, in reverse order of initialization
	 */
err_init_aux_unroll:
	pf->adev = NULL;
	ida_free(&ice_aux_ida, pf->aux_idx);
err_devlink_reg_param:
	ice_devlink_unregister_params(pf);
err_netdev_reg:
err_send_version_unroll:
	ice_vsi_release_all(pf);
err_alloc_sw_unroll:
	set_bit(ICE_SERVICE_DIS, pf->state);
	set_bit(ICE_DOWN, pf->state);
	devm_kfree(dev, pf->first_sw);
err_msix_misc_unroll:
	ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
	ice_clear_interrupt_scheme(pf);
err_init_vsi_unroll:
	devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
	ice_deinit_pf(pf);
	ice_devlink_destroy_regions(pf);
	ice_deinit_hw(hw);
err_exit_unroll:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	return err;
}
4763837f08fdSAnirudh Venkataramanan 
4764837f08fdSAnirudh Venkataramanan /**
4765769c500dSAkeem G Abodunrin  * ice_set_wake - enable or disable Wake on LAN
4766769c500dSAkeem G Abodunrin  * @pf: pointer to the PF struct
4767769c500dSAkeem G Abodunrin  *
4768769c500dSAkeem G Abodunrin  * Simple helper for WoL control
4769769c500dSAkeem G Abodunrin  */
4770769c500dSAkeem G Abodunrin static void ice_set_wake(struct ice_pf *pf)
4771769c500dSAkeem G Abodunrin {
4772769c500dSAkeem G Abodunrin 	struct ice_hw *hw = &pf->hw;
4773769c500dSAkeem G Abodunrin 	bool wol = pf->wol_ena;
4774769c500dSAkeem G Abodunrin 
4775769c500dSAkeem G Abodunrin 	/* clear wake state, otherwise new wake events won't fire */
4776769c500dSAkeem G Abodunrin 	wr32(hw, PFPM_WUS, U32_MAX);
4777769c500dSAkeem G Abodunrin 
4778769c500dSAkeem G Abodunrin 	/* enable / disable APM wake up, no RMW needed */
4779769c500dSAkeem G Abodunrin 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4780769c500dSAkeem G Abodunrin 
4781769c500dSAkeem G Abodunrin 	/* set magic packet filter enabled */
4782769c500dSAkeem G Abodunrin 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4783769c500dSAkeem G Abodunrin }
4784769c500dSAkeem G Abodunrin 
4785769c500dSAkeem G Abodunrin /**
4786ef860480STony Nguyen  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4787769c500dSAkeem G Abodunrin  * @pf: pointer to the PF struct
4788769c500dSAkeem G Abodunrin  *
4789769c500dSAkeem G Abodunrin  * Issue firmware command to enable multicast magic wake, making
4790769c500dSAkeem G Abodunrin  * sure that any locally administered address (LAA) is used for
4791769c500dSAkeem G Abodunrin  * wake, and that PF reset doesn't undo the LAA.
4792769c500dSAkeem G Abodunrin  */
4793769c500dSAkeem G Abodunrin static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4794769c500dSAkeem G Abodunrin {
4795769c500dSAkeem G Abodunrin 	struct device *dev = ice_pf_to_dev(pf);
4796769c500dSAkeem G Abodunrin 	struct ice_hw *hw = &pf->hw;
4797769c500dSAkeem G Abodunrin 	u8 mac_addr[ETH_ALEN];
4798769c500dSAkeem G Abodunrin 	struct ice_vsi *vsi;
47995518ac2aSTony Nguyen 	int status;
4800769c500dSAkeem G Abodunrin 	u8 flags;
4801769c500dSAkeem G Abodunrin 
4802769c500dSAkeem G Abodunrin 	if (!pf->wol_ena)
4803769c500dSAkeem G Abodunrin 		return;
4804769c500dSAkeem G Abodunrin 
4805769c500dSAkeem G Abodunrin 	vsi = ice_get_main_vsi(pf);
4806769c500dSAkeem G Abodunrin 	if (!vsi)
4807769c500dSAkeem G Abodunrin 		return;
4808769c500dSAkeem G Abodunrin 
4809769c500dSAkeem G Abodunrin 	/* Get current MAC address in case it's an LAA */
4810769c500dSAkeem G Abodunrin 	if (vsi->netdev)
4811769c500dSAkeem G Abodunrin 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4812769c500dSAkeem G Abodunrin 	else
4813769c500dSAkeem G Abodunrin 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4814769c500dSAkeem G Abodunrin 
4815769c500dSAkeem G Abodunrin 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4816769c500dSAkeem G Abodunrin 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4817769c500dSAkeem G Abodunrin 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4818769c500dSAkeem G Abodunrin 
4819769c500dSAkeem G Abodunrin 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4820769c500dSAkeem G Abodunrin 	if (status)
48215f87ec48STony Nguyen 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
48225518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
4823769c500dSAkeem G Abodunrin }
4824769c500dSAkeem G Abodunrin 
/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * Tears down the PF in reverse order of ice_probe(): waits for any
 * pending reset, frees VFs, stops the service task, releases auxiliary
 * (RDMA) resources, optional features (LAG, PTP, aRFS), VSIs and
 * interrupts, then issues a final PFR and disables the PCI device.
 */
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i;

	ice_devlink_unregister(pf);
	/* bounded wait for an in-flight reset to finish before teardown */
	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
		if (!ice_is_reset_in_progress(pf->state))
			break;
		msleep(100);
	}

	ice_tc_indir_block_remove(pf);

	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
		/* block new VF resets while the VFs are being freed */
		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
		ice_free_vfs(pf);
	}

	ice_service_task_stop(pf);

	ice_aq_cancel_waiting_tasks(pf);
	ice_unplug_aux_dev(pf);
	/* aux_idx is -1 when no auxiliary device ID was allocated */
	if (pf->aux_idx >= 0)
		ida_free(&ice_aux_ida, pf->aux_idx);
	ice_devlink_unregister_params(pf);
	set_bit(ICE_DOWN, pf->state);

	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
	ice_deinit_lag(pf);
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);
	/* aRFS is only set up outside of Safe Mode */
	if (!ice_is_safe_mode(pf))
		ice_remove_arfs(pf);
	/* program multicast magic-packet wake before dropping the VSIs so
	 * the current (possibly locally administered) MAC is used
	 */
	ice_setup_mc_magic_wake(pf);
	ice_vsi_release_all(pf);
	ice_set_wake(pf);
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[i]);
	}
	ice_deinit_pf(pf);
	ice_devlink_destroy_regions(pf);
	ice_deinit_hw(&pf->hw);

	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
	 * do it via ice_schedule_reset() since there is no need to rebuild
	 * and the service task is already stopped.
	 */
	ice_reset(&pf->hw, ICE_RESET_PFR);
	pci_wait_for_pending_transaction(pdev);
	ice_clear_interrupt_scheme(pf);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
4886837f08fdSAnirudh Venkataramanan 
48875995b6d0SBrett Creeley /**
4888769c500dSAkeem G Abodunrin  * ice_shutdown - PCI callback for shutting down device
4889769c500dSAkeem G Abodunrin  * @pdev: PCI device information struct
4890769c500dSAkeem G Abodunrin  */
4891769c500dSAkeem G Abodunrin static void ice_shutdown(struct pci_dev *pdev)
4892769c500dSAkeem G Abodunrin {
4893769c500dSAkeem G Abodunrin 	struct ice_pf *pf = pci_get_drvdata(pdev);
4894769c500dSAkeem G Abodunrin 
4895769c500dSAkeem G Abodunrin 	ice_remove(pdev);
4896769c500dSAkeem G Abodunrin 
4897769c500dSAkeem G Abodunrin 	if (system_state == SYSTEM_POWER_OFF) {
4898769c500dSAkeem G Abodunrin 		pci_wake_from_d3(pdev, pf->wol_ena);
4899769c500dSAkeem G Abodunrin 		pci_set_power_state(pdev, PCI_D3hot);
4900769c500dSAkeem G Abodunrin 	}
4901769c500dSAkeem G Abodunrin }
4902769c500dSAkeem G Abodunrin 
4903769c500dSAkeem G Abodunrin #ifdef CONFIG_PM
4904769c500dSAkeem G Abodunrin /**
4905769c500dSAkeem G Abodunrin  * ice_prepare_for_shutdown - prep for PCI shutdown
4906769c500dSAkeem G Abodunrin  * @pf: board private structure
4907769c500dSAkeem G Abodunrin  *
4908769c500dSAkeem G Abodunrin  * Inform or close all dependent features in prep for PCI device shutdown
4909769c500dSAkeem G Abodunrin  */
4910769c500dSAkeem G Abodunrin static void ice_prepare_for_shutdown(struct ice_pf *pf)
4911769c500dSAkeem G Abodunrin {
4912769c500dSAkeem G Abodunrin 	struct ice_hw *hw = &pf->hw;
4913769c500dSAkeem G Abodunrin 	u32 v;
4914769c500dSAkeem G Abodunrin 
4915769c500dSAkeem G Abodunrin 	/* Notify VFs of impending reset */
4916769c500dSAkeem G Abodunrin 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4917769c500dSAkeem G Abodunrin 		ice_vc_notify_reset(pf);
4918769c500dSAkeem G Abodunrin 
4919769c500dSAkeem G Abodunrin 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4920769c500dSAkeem G Abodunrin 
4921769c500dSAkeem G Abodunrin 	/* disable the VSIs and their queues that are not already DOWN */
4922769c500dSAkeem G Abodunrin 	ice_pf_dis_all_vsi(pf, false);
4923769c500dSAkeem G Abodunrin 
4924769c500dSAkeem G Abodunrin 	ice_for_each_vsi(pf, v)
4925769c500dSAkeem G Abodunrin 		if (pf->vsi[v])
4926769c500dSAkeem G Abodunrin 			pf->vsi[v]->vsi_num = 0;
4927769c500dSAkeem G Abodunrin 
4928769c500dSAkeem G Abodunrin 	ice_shutdown_all_ctrlq(hw);
4929769c500dSAkeem G Abodunrin }
4930769c500dSAkeem G Abodunrin 
4931769c500dSAkeem G Abodunrin /**
4932769c500dSAkeem G Abodunrin  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4933769c500dSAkeem G Abodunrin  * @pf: board private structure to reinitialize
4934769c500dSAkeem G Abodunrin  *
4935769c500dSAkeem G Abodunrin  * This routine reinitialize interrupt scheme that was cleared during
4936769c500dSAkeem G Abodunrin  * power management suspend callback.
4937769c500dSAkeem G Abodunrin  *
4938769c500dSAkeem G Abodunrin  * This should be called during resume routine to re-allocate the q_vectors
4939769c500dSAkeem G Abodunrin  * and reacquire interrupts.
4940769c500dSAkeem G Abodunrin  */
4941769c500dSAkeem G Abodunrin static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4942769c500dSAkeem G Abodunrin {
4943769c500dSAkeem G Abodunrin 	struct device *dev = ice_pf_to_dev(pf);
4944769c500dSAkeem G Abodunrin 	int ret, v;
4945769c500dSAkeem G Abodunrin 
4946769c500dSAkeem G Abodunrin 	/* Since we clear MSIX flag during suspend, we need to
4947769c500dSAkeem G Abodunrin 	 * set it back during resume...
4948769c500dSAkeem G Abodunrin 	 */
4949769c500dSAkeem G Abodunrin 
4950769c500dSAkeem G Abodunrin 	ret = ice_init_interrupt_scheme(pf);
4951769c500dSAkeem G Abodunrin 	if (ret) {
4952769c500dSAkeem G Abodunrin 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4953769c500dSAkeem G Abodunrin 		return ret;
4954769c500dSAkeem G Abodunrin 	}
4955769c500dSAkeem G Abodunrin 
4956769c500dSAkeem G Abodunrin 	/* Remap vectors and rings, after successful re-init interrupts */
4957769c500dSAkeem G Abodunrin 	ice_for_each_vsi(pf, v) {
4958769c500dSAkeem G Abodunrin 		if (!pf->vsi[v])
4959769c500dSAkeem G Abodunrin 			continue;
4960769c500dSAkeem G Abodunrin 
4961769c500dSAkeem G Abodunrin 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4962769c500dSAkeem G Abodunrin 		if (ret)
4963769c500dSAkeem G Abodunrin 			goto err_reinit;
4964769c500dSAkeem G Abodunrin 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4965769c500dSAkeem G Abodunrin 	}
4966769c500dSAkeem G Abodunrin 
4967769c500dSAkeem G Abodunrin 	ret = ice_req_irq_msix_misc(pf);
4968769c500dSAkeem G Abodunrin 	if (ret) {
4969769c500dSAkeem G Abodunrin 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4970769c500dSAkeem G Abodunrin 			ret);
4971769c500dSAkeem G Abodunrin 		goto err_reinit;
4972769c500dSAkeem G Abodunrin 	}
4973769c500dSAkeem G Abodunrin 
4974769c500dSAkeem G Abodunrin 	return 0;
4975769c500dSAkeem G Abodunrin 
4976769c500dSAkeem G Abodunrin err_reinit:
4977769c500dSAkeem G Abodunrin 	while (v--)
4978769c500dSAkeem G Abodunrin 		if (pf->vsi[v])
4979769c500dSAkeem G Abodunrin 			ice_vsi_free_q_vectors(pf->vsi[v]);
4980769c500dSAkeem G Abodunrin 
4981769c500dSAkeem G Abodunrin 	return ret;
4982769c500dSAkeem G Abodunrin }
4983769c500dSAkeem G Abodunrin 
4984769c500dSAkeem G Abodunrin /**
4985769c500dSAkeem G Abodunrin  * ice_suspend
4986769c500dSAkeem G Abodunrin  * @dev: generic device information structure
4987769c500dSAkeem G Abodunrin  *
4988769c500dSAkeem G Abodunrin  * Power Management callback to quiesce the device and prepare
4989769c500dSAkeem G Abodunrin  * for D3 transition.
4990769c500dSAkeem G Abodunrin  */
499165c72291SWei Yongjun static int __maybe_unused ice_suspend(struct device *dev)
4992769c500dSAkeem G Abodunrin {
4993769c500dSAkeem G Abodunrin 	struct pci_dev *pdev = to_pci_dev(dev);
4994769c500dSAkeem G Abodunrin 	struct ice_pf *pf;
4995769c500dSAkeem G Abodunrin 	int disabled, v;
4996769c500dSAkeem G Abodunrin 
4997769c500dSAkeem G Abodunrin 	pf = pci_get_drvdata(pdev);
4998769c500dSAkeem G Abodunrin 
4999769c500dSAkeem G Abodunrin 	if (!ice_pf_state_is_nominal(pf)) {
5000769c500dSAkeem G Abodunrin 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5001769c500dSAkeem G Abodunrin 		return -EBUSY;
5002769c500dSAkeem G Abodunrin 	}
5003769c500dSAkeem G Abodunrin 
5004769c500dSAkeem G Abodunrin 	/* Stop watchdog tasks until resume completion.
5005769c500dSAkeem G Abodunrin 	 * Even though it is most likely that the service task is
5006769c500dSAkeem G Abodunrin 	 * disabled if the device is suspended or down, the service task's
5007769c500dSAkeem G Abodunrin 	 * state is controlled by a different state bit, and we should
5008769c500dSAkeem G Abodunrin 	 * store and honor whatever state that bit is in at this point.
5009769c500dSAkeem G Abodunrin 	 */
5010769c500dSAkeem G Abodunrin 	disabled = ice_service_task_stop(pf);
5011769c500dSAkeem G Abodunrin 
5012f9f5301eSDave Ertman 	ice_unplug_aux_dev(pf);
5013f9f5301eSDave Ertman 
5014769c500dSAkeem G Abodunrin 	/* Already suspended?, then there is nothing to do */
50157e408e07SAnirudh Venkataramanan 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5016769c500dSAkeem G Abodunrin 		if (!disabled)
5017769c500dSAkeem G Abodunrin 			ice_service_task_restart(pf);
5018769c500dSAkeem G Abodunrin 		return 0;
5019769c500dSAkeem G Abodunrin 	}
5020769c500dSAkeem G Abodunrin 
50217e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_DOWN, pf->state) ||
5022769c500dSAkeem G Abodunrin 	    ice_is_reset_in_progress(pf->state)) {
5023769c500dSAkeem G Abodunrin 		dev_err(dev, "can't suspend device in reset or already down\n");
5024769c500dSAkeem G Abodunrin 		if (!disabled)
5025769c500dSAkeem G Abodunrin 			ice_service_task_restart(pf);
5026769c500dSAkeem G Abodunrin 		return 0;
5027769c500dSAkeem G Abodunrin 	}
5028769c500dSAkeem G Abodunrin 
5029769c500dSAkeem G Abodunrin 	ice_setup_mc_magic_wake(pf);
5030769c500dSAkeem G Abodunrin 
5031769c500dSAkeem G Abodunrin 	ice_prepare_for_shutdown(pf);
5032769c500dSAkeem G Abodunrin 
5033769c500dSAkeem G Abodunrin 	ice_set_wake(pf);
5034769c500dSAkeem G Abodunrin 
5035769c500dSAkeem G Abodunrin 	/* Free vectors, clear the interrupt scheme and release IRQs
5036769c500dSAkeem G Abodunrin 	 * for proper hibernation, especially with large number of CPUs.
5037769c500dSAkeem G Abodunrin 	 * Otherwise hibernation might fail when mapping all the vectors back
5038769c500dSAkeem G Abodunrin 	 * to CPU0.
5039769c500dSAkeem G Abodunrin 	 */
5040769c500dSAkeem G Abodunrin 	ice_free_irq_msix_misc(pf);
5041769c500dSAkeem G Abodunrin 	ice_for_each_vsi(pf, v) {
5042769c500dSAkeem G Abodunrin 		if (!pf->vsi[v])
5043769c500dSAkeem G Abodunrin 			continue;
5044769c500dSAkeem G Abodunrin 		ice_vsi_free_q_vectors(pf->vsi[v]);
5045769c500dSAkeem G Abodunrin 	}
50461831da7eSYongxin Liu 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
5047769c500dSAkeem G Abodunrin 	ice_clear_interrupt_scheme(pf);
5048769c500dSAkeem G Abodunrin 
5049466e4392SAnirudh Venkataramanan 	pci_save_state(pdev);
5050769c500dSAkeem G Abodunrin 	pci_wake_from_d3(pdev, pf->wol_ena);
5051769c500dSAkeem G Abodunrin 	pci_set_power_state(pdev, PCI_D3hot);
5052769c500dSAkeem G Abodunrin 	return 0;
5053769c500dSAkeem G Abodunrin }
5054769c500dSAkeem G Abodunrin 
5055769c500dSAkeem G Abodunrin /**
5056769c500dSAkeem G Abodunrin  * ice_resume - PM callback for waking up from D3
5057769c500dSAkeem G Abodunrin  * @dev: generic device information structure
5058769c500dSAkeem G Abodunrin  */
505965c72291SWei Yongjun static int __maybe_unused ice_resume(struct device *dev)
5060769c500dSAkeem G Abodunrin {
5061769c500dSAkeem G Abodunrin 	struct pci_dev *pdev = to_pci_dev(dev);
5062769c500dSAkeem G Abodunrin 	enum ice_reset_req reset_type;
5063769c500dSAkeem G Abodunrin 	struct ice_pf *pf;
5064769c500dSAkeem G Abodunrin 	struct ice_hw *hw;
5065769c500dSAkeem G Abodunrin 	int ret;
5066769c500dSAkeem G Abodunrin 
5067769c500dSAkeem G Abodunrin 	pci_set_power_state(pdev, PCI_D0);
5068769c500dSAkeem G Abodunrin 	pci_restore_state(pdev);
5069769c500dSAkeem G Abodunrin 	pci_save_state(pdev);
5070769c500dSAkeem G Abodunrin 
5071769c500dSAkeem G Abodunrin 	if (!pci_device_is_present(pdev))
5072769c500dSAkeem G Abodunrin 		return -ENODEV;
5073769c500dSAkeem G Abodunrin 
5074769c500dSAkeem G Abodunrin 	ret = pci_enable_device_mem(pdev);
5075769c500dSAkeem G Abodunrin 	if (ret) {
5076769c500dSAkeem G Abodunrin 		dev_err(dev, "Cannot enable device after suspend\n");
5077769c500dSAkeem G Abodunrin 		return ret;
5078769c500dSAkeem G Abodunrin 	}
5079769c500dSAkeem G Abodunrin 
5080769c500dSAkeem G Abodunrin 	pf = pci_get_drvdata(pdev);
5081769c500dSAkeem G Abodunrin 	hw = &pf->hw;
5082769c500dSAkeem G Abodunrin 
5083769c500dSAkeem G Abodunrin 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5084769c500dSAkeem G Abodunrin 	ice_print_wake_reason(pf);
5085769c500dSAkeem G Abodunrin 
5086769c500dSAkeem G Abodunrin 	/* We cleared the interrupt scheme when we suspended, so we need to
5087769c500dSAkeem G Abodunrin 	 * restore it now to resume device functionality.
5088769c500dSAkeem G Abodunrin 	 */
5089769c500dSAkeem G Abodunrin 	ret = ice_reinit_interrupt_scheme(pf);
5090769c500dSAkeem G Abodunrin 	if (ret)
5091769c500dSAkeem G Abodunrin 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5092769c500dSAkeem G Abodunrin 
50937e408e07SAnirudh Venkataramanan 	clear_bit(ICE_DOWN, pf->state);
5094769c500dSAkeem G Abodunrin 	/* Now perform PF reset and rebuild */
5095769c500dSAkeem G Abodunrin 	reset_type = ICE_RESET_PFR;
5096769c500dSAkeem G Abodunrin 	/* re-enable service task for reset, but allow reset to schedule it */
50977e408e07SAnirudh Venkataramanan 	clear_bit(ICE_SERVICE_DIS, pf->state);
5098769c500dSAkeem G Abodunrin 
5099769c500dSAkeem G Abodunrin 	if (ice_schedule_reset(pf, reset_type))
5100769c500dSAkeem G Abodunrin 		dev_err(dev, "Reset during resume failed.\n");
5101769c500dSAkeem G Abodunrin 
51027e408e07SAnirudh Venkataramanan 	clear_bit(ICE_SUSPENDED, pf->state);
5103769c500dSAkeem G Abodunrin 	ice_service_task_restart(pf);
5104769c500dSAkeem G Abodunrin 
5105769c500dSAkeem G Abodunrin 	/* Restart the service task */
5106769c500dSAkeem G Abodunrin 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5107769c500dSAkeem G Abodunrin 
5108769c500dSAkeem G Abodunrin 	return 0;
5109769c500dSAkeem G Abodunrin }
5110769c500dSAkeem G Abodunrin #endif /* CONFIG_PM */
5111769c500dSAkeem G Abodunrin 
5112769c500dSAkeem G Abodunrin /**
51135995b6d0SBrett Creeley  * ice_pci_err_detected - warning that PCI error has been detected
51145995b6d0SBrett Creeley  * @pdev: PCI device information struct
51155995b6d0SBrett Creeley  * @err: the type of PCI error
51165995b6d0SBrett Creeley  *
51175995b6d0SBrett Creeley  * Called to warn that something happened on the PCI bus and the error handling
51185995b6d0SBrett Creeley  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
51195995b6d0SBrett Creeley  */
51205995b6d0SBrett Creeley static pci_ers_result_t
512116d79cd4SLuc Van Oostenryck ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
51225995b6d0SBrett Creeley {
51235995b6d0SBrett Creeley 	struct ice_pf *pf = pci_get_drvdata(pdev);
51245995b6d0SBrett Creeley 
51255995b6d0SBrett Creeley 	if (!pf) {
51265995b6d0SBrett Creeley 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
51275995b6d0SBrett Creeley 			__func__, err);
51285995b6d0SBrett Creeley 		return PCI_ERS_RESULT_DISCONNECT;
51295995b6d0SBrett Creeley 	}
51305995b6d0SBrett Creeley 
51317e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
51325995b6d0SBrett Creeley 		ice_service_task_stop(pf);
51335995b6d0SBrett Creeley 
51347e408e07SAnirudh Venkataramanan 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
51357e408e07SAnirudh Venkataramanan 			set_bit(ICE_PFR_REQ, pf->state);
5136fbc7b27aSKiran Patil 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
51375995b6d0SBrett Creeley 		}
51385995b6d0SBrett Creeley 	}
51395995b6d0SBrett Creeley 
51405995b6d0SBrett Creeley 	return PCI_ERS_RESULT_NEED_RESET;
51415995b6d0SBrett Creeley }
51425995b6d0SBrett Creeley 
51435995b6d0SBrett Creeley /**
51445995b6d0SBrett Creeley  * ice_pci_err_slot_reset - a PCI slot reset has just happened
51455995b6d0SBrett Creeley  * @pdev: PCI device information struct
51465995b6d0SBrett Creeley  *
51475995b6d0SBrett Creeley  * Called to determine if the driver can recover from the PCI slot reset by
51485995b6d0SBrett Creeley  * using a register read to determine if the device is recoverable.
51495995b6d0SBrett Creeley  */
51505995b6d0SBrett Creeley static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
51515995b6d0SBrett Creeley {
51525995b6d0SBrett Creeley 	struct ice_pf *pf = pci_get_drvdata(pdev);
51535995b6d0SBrett Creeley 	pci_ers_result_t result;
51545995b6d0SBrett Creeley 	int err;
51555995b6d0SBrett Creeley 	u32 reg;
51565995b6d0SBrett Creeley 
51575995b6d0SBrett Creeley 	err = pci_enable_device_mem(pdev);
51585995b6d0SBrett Creeley 	if (err) {
515919cce2c6SAnirudh Venkataramanan 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
51605995b6d0SBrett Creeley 			err);
51615995b6d0SBrett Creeley 		result = PCI_ERS_RESULT_DISCONNECT;
51625995b6d0SBrett Creeley 	} else {
51635995b6d0SBrett Creeley 		pci_set_master(pdev);
51645995b6d0SBrett Creeley 		pci_restore_state(pdev);
51655995b6d0SBrett Creeley 		pci_save_state(pdev);
51665995b6d0SBrett Creeley 		pci_wake_from_d3(pdev, false);
51675995b6d0SBrett Creeley 
51685995b6d0SBrett Creeley 		/* Check for life */
51695995b6d0SBrett Creeley 		reg = rd32(&pf->hw, GLGEN_RTRIG);
51705995b6d0SBrett Creeley 		if (!reg)
51715995b6d0SBrett Creeley 			result = PCI_ERS_RESULT_RECOVERED;
51725995b6d0SBrett Creeley 		else
51735995b6d0SBrett Creeley 			result = PCI_ERS_RESULT_DISCONNECT;
51745995b6d0SBrett Creeley 	}
51755995b6d0SBrett Creeley 
5176894020fdSKuppuswamy Sathyanarayanan 	err = pci_aer_clear_nonfatal_status(pdev);
51775995b6d0SBrett Creeley 	if (err)
517886f26a77SLinus Torvalds 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
51795995b6d0SBrett Creeley 			err);
51805995b6d0SBrett Creeley 		/* non-fatal, continue */
51815995b6d0SBrett Creeley 
51825995b6d0SBrett Creeley 	return result;
51835995b6d0SBrett Creeley }
51845995b6d0SBrett Creeley 
51855995b6d0SBrett Creeley /**
51865995b6d0SBrett Creeley  * ice_pci_err_resume - restart operations after PCI error recovery
51875995b6d0SBrett Creeley  * @pdev: PCI device information struct
51885995b6d0SBrett Creeley  *
51895995b6d0SBrett Creeley  * Called to allow the driver to bring things back up after PCI error and/or
51905995b6d0SBrett Creeley  * reset recovery have finished
51915995b6d0SBrett Creeley  */
51925995b6d0SBrett Creeley static void ice_pci_err_resume(struct pci_dev *pdev)
51935995b6d0SBrett Creeley {
51945995b6d0SBrett Creeley 	struct ice_pf *pf = pci_get_drvdata(pdev);
51955995b6d0SBrett Creeley 
51965995b6d0SBrett Creeley 	if (!pf) {
519719cce2c6SAnirudh Venkataramanan 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
519819cce2c6SAnirudh Venkataramanan 			__func__);
51995995b6d0SBrett Creeley 		return;
52005995b6d0SBrett Creeley 	}
52015995b6d0SBrett Creeley 
52027e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_SUSPENDED, pf->state)) {
52035995b6d0SBrett Creeley 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
52045995b6d0SBrett Creeley 			__func__);
52055995b6d0SBrett Creeley 		return;
52065995b6d0SBrett Creeley 	}
52075995b6d0SBrett Creeley 
5208a54a0b24SNick Nunley 	ice_restore_all_vfs_msi_state(pdev);
5209a54a0b24SNick Nunley 
52105995b6d0SBrett Creeley 	ice_do_reset(pf, ICE_RESET_PFR);
52115995b6d0SBrett Creeley 	ice_service_task_restart(pf);
52125995b6d0SBrett Creeley 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
52135995b6d0SBrett Creeley }
52145995b6d0SBrett Creeley 
52155995b6d0SBrett Creeley /**
52165995b6d0SBrett Creeley  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
52175995b6d0SBrett Creeley  * @pdev: PCI device information struct
52185995b6d0SBrett Creeley  */
52195995b6d0SBrett Creeley static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
52205995b6d0SBrett Creeley {
52215995b6d0SBrett Creeley 	struct ice_pf *pf = pci_get_drvdata(pdev);
52225995b6d0SBrett Creeley 
52237e408e07SAnirudh Venkataramanan 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
52245995b6d0SBrett Creeley 		ice_service_task_stop(pf);
52255995b6d0SBrett Creeley 
52267e408e07SAnirudh Venkataramanan 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
52277e408e07SAnirudh Venkataramanan 			set_bit(ICE_PFR_REQ, pf->state);
5228fbc7b27aSKiran Patil 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
52295995b6d0SBrett Creeley 		}
52305995b6d0SBrett Creeley 	}
52315995b6d0SBrett Creeley }
52325995b6d0SBrett Creeley 
52335995b6d0SBrett Creeley /**
52345995b6d0SBrett Creeley  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
52355995b6d0SBrett Creeley  * @pdev: PCI device information struct
52365995b6d0SBrett Creeley  */
52375995b6d0SBrett Creeley static void ice_pci_err_reset_done(struct pci_dev *pdev)
52385995b6d0SBrett Creeley {
52395995b6d0SBrett Creeley 	ice_pci_err_resume(pdev);
52405995b6d0SBrett Creeley }
52415995b6d0SBrett Creeley 
5242837f08fdSAnirudh Venkataramanan /* ice_pci_tbl - PCI Device ID Table
5243837f08fdSAnirudh Venkataramanan  *
5244837f08fdSAnirudh Venkataramanan  * Wildcard entries (PCI_ANY_ID) should come last
5245837f08fdSAnirudh Venkataramanan  * Last entry must be all 0s
5246837f08fdSAnirudh Venkataramanan  *
5247837f08fdSAnirudh Venkataramanan  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5248837f08fdSAnirudh Venkataramanan  *   Class, Class Mask, private data (not used) }
5249837f08fdSAnirudh Venkataramanan  */
5250837f08fdSAnirudh Venkataramanan static const struct pci_device_id ice_pci_tbl[] = {
5251633d7449SAnirudh Venkataramanan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5252633d7449SAnirudh Venkataramanan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5253633d7449SAnirudh Venkataramanan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
52547dcf78b8STony Nguyen 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
52557dcf78b8STony Nguyen 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5256195fb977SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5257e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5258e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5259e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5260e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5261e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
52625d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
52635d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
52645d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
52655d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
52665d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
52672fbfa966SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
52685d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
52695d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
52705d9e618cSJacob Keller 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5271e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5272e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5273e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5274e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5275e36aeec0SBruce Allan 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5276837f08fdSAnirudh Venkataramanan 	/* required last entry */
5277837f08fdSAnirudh Venkataramanan 	{ 0, }
5278837f08fdSAnirudh Venkataramanan };
5279837f08fdSAnirudh Venkataramanan MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5280837f08fdSAnirudh Venkataramanan 
5281769c500dSAkeem G Abodunrin static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5282769c500dSAkeem G Abodunrin 
52835995b6d0SBrett Creeley static const struct pci_error_handlers ice_pci_err_handler = {
52845995b6d0SBrett Creeley 	.error_detected = ice_pci_err_detected,
52855995b6d0SBrett Creeley 	.slot_reset = ice_pci_err_slot_reset,
52865995b6d0SBrett Creeley 	.reset_prepare = ice_pci_err_reset_prepare,
52875995b6d0SBrett Creeley 	.reset_done = ice_pci_err_reset_done,
52885995b6d0SBrett Creeley 	.resume = ice_pci_err_resume
52895995b6d0SBrett Creeley };
52905995b6d0SBrett Creeley 
5291837f08fdSAnirudh Venkataramanan static struct pci_driver ice_driver = {
5292837f08fdSAnirudh Venkataramanan 	.name = KBUILD_MODNAME,
5293837f08fdSAnirudh Venkataramanan 	.id_table = ice_pci_tbl,
5294837f08fdSAnirudh Venkataramanan 	.probe = ice_probe,
5295837f08fdSAnirudh Venkataramanan 	.remove = ice_remove,
5296769c500dSAkeem G Abodunrin #ifdef CONFIG_PM
5297769c500dSAkeem G Abodunrin 	.driver.pm = &ice_pm_ops,
5298769c500dSAkeem G Abodunrin #endif /* CONFIG_PM */
5299769c500dSAkeem G Abodunrin 	.shutdown = ice_shutdown,
5300ddf30f7fSAnirudh Venkataramanan 	.sriov_configure = ice_sriov_configure,
53015995b6d0SBrett Creeley 	.err_handler = &ice_pci_err_handler
5302837f08fdSAnirudh Venkataramanan };
5303837f08fdSAnirudh Venkataramanan 
5304837f08fdSAnirudh Venkataramanan /**
5305837f08fdSAnirudh Venkataramanan  * ice_module_init - Driver registration routine
5306837f08fdSAnirudh Venkataramanan  *
5307837f08fdSAnirudh Venkataramanan  * ice_module_init is the first routine called when the driver is
5308837f08fdSAnirudh Venkataramanan  * loaded. All it does is register with the PCI subsystem.
5309837f08fdSAnirudh Venkataramanan  */
5310837f08fdSAnirudh Venkataramanan static int __init ice_module_init(void)
5311837f08fdSAnirudh Venkataramanan {
5312837f08fdSAnirudh Venkataramanan 	int status;
5313837f08fdSAnirudh Venkataramanan 
531434a2a3b8SJeff Kirsher 	pr_info("%s\n", ice_driver_string);
5315837f08fdSAnirudh Venkataramanan 	pr_info("%s\n", ice_copyright);
5316837f08fdSAnirudh Venkataramanan 
53170f9d5027SAnirudh Venkataramanan 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5318940b61afSAnirudh Venkataramanan 	if (!ice_wq) {
5319940b61afSAnirudh Venkataramanan 		pr_err("Failed to create workqueue\n");
5320940b61afSAnirudh Venkataramanan 		return -ENOMEM;
5321940b61afSAnirudh Venkataramanan 	}
5322940b61afSAnirudh Venkataramanan 
5323837f08fdSAnirudh Venkataramanan 	status = pci_register_driver(&ice_driver);
5324940b61afSAnirudh Venkataramanan 	if (status) {
53252f2da36eSAnirudh Venkataramanan 		pr_err("failed to register PCI driver, err %d\n", status);
5326940b61afSAnirudh Venkataramanan 		destroy_workqueue(ice_wq);
5327940b61afSAnirudh Venkataramanan 	}
5328837f08fdSAnirudh Venkataramanan 
5329837f08fdSAnirudh Venkataramanan 	return status;
5330837f08fdSAnirudh Venkataramanan }
5331837f08fdSAnirudh Venkataramanan module_init(ice_module_init);
5332837f08fdSAnirudh Venkataramanan 
5333837f08fdSAnirudh Venkataramanan /**
5334837f08fdSAnirudh Venkataramanan  * ice_module_exit - Driver exit cleanup routine
5335837f08fdSAnirudh Venkataramanan  *
5336837f08fdSAnirudh Venkataramanan  * ice_module_exit is called just before the driver is removed
5337837f08fdSAnirudh Venkataramanan  * from memory.
5338837f08fdSAnirudh Venkataramanan  */
5339837f08fdSAnirudh Venkataramanan static void __exit ice_module_exit(void)
5340837f08fdSAnirudh Venkataramanan {
5341837f08fdSAnirudh Venkataramanan 	pci_unregister_driver(&ice_driver);
5342940b61afSAnirudh Venkataramanan 	destroy_workqueue(ice_wq);
5343837f08fdSAnirudh Venkataramanan 	pr_info("module unloaded\n");
5344837f08fdSAnirudh Venkataramanan }
5345837f08fdSAnirudh Venkataramanan module_exit(ice_module_exit);
53463a858ba3SAnirudh Venkataramanan 
53473a858ba3SAnirudh Venkataramanan /**
5348f9867df6SAnirudh Venkataramanan  * ice_set_mac_address - NDO callback to set MAC address
5349e94d4478SAnirudh Venkataramanan  * @netdev: network interface device structure
5350e94d4478SAnirudh Venkataramanan  * @pi: pointer to an address structure
5351e94d4478SAnirudh Venkataramanan  *
5352e94d4478SAnirudh Venkataramanan  * Returns 0 on success, negative on failure
5353e94d4478SAnirudh Venkataramanan  */
5354e94d4478SAnirudh Venkataramanan static int ice_set_mac_address(struct net_device *netdev, void *pi)
5355e94d4478SAnirudh Venkataramanan {
5356e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
5357e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
5358e94d4478SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
5359e94d4478SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
5360e94d4478SAnirudh Venkataramanan 	struct sockaddr *addr = pi;
5361b357d971SBrett Creeley 	u8 old_mac[ETH_ALEN];
5362e94d4478SAnirudh Venkataramanan 	u8 flags = 0;
5363e94d4478SAnirudh Venkataramanan 	u8 *mac;
53642ccc1c1cSTony Nguyen 	int err;
5365e94d4478SAnirudh Venkataramanan 
5366e94d4478SAnirudh Venkataramanan 	mac = (u8 *)addr->sa_data;
5367e94d4478SAnirudh Venkataramanan 
5368e94d4478SAnirudh Venkataramanan 	if (!is_valid_ether_addr(mac))
5369e94d4478SAnirudh Venkataramanan 		return -EADDRNOTAVAIL;
5370e94d4478SAnirudh Venkataramanan 
5371e94d4478SAnirudh Venkataramanan 	if (ether_addr_equal(netdev->dev_addr, mac)) {
53723ba7f53fSBrett Creeley 		netdev_dbg(netdev, "already using mac %pM\n", mac);
5373e94d4478SAnirudh Venkataramanan 		return 0;
5374e94d4478SAnirudh Venkataramanan 	}
5375e94d4478SAnirudh Venkataramanan 
53767e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_DOWN, pf->state) ||
53775df7e45dSDave Ertman 	    ice_is_reset_in_progress(pf->state)) {
5378e94d4478SAnirudh Venkataramanan 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5379e94d4478SAnirudh Venkataramanan 			   mac);
5380e94d4478SAnirudh Venkataramanan 		return -EBUSY;
5381e94d4478SAnirudh Venkataramanan 	}
5382e94d4478SAnirudh Venkataramanan 
53839fea7498SKiran Patil 	if (ice_chnl_dmac_fltr_cnt(pf)) {
53849fea7498SKiran Patil 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
53859fea7498SKiran Patil 			   mac);
53869fea7498SKiran Patil 		return -EAGAIN;
53879fea7498SKiran Patil 	}
53889fea7498SKiran Patil 
53893ba7f53fSBrett Creeley 	netif_addr_lock_bh(netdev);
5390b357d971SBrett Creeley 	ether_addr_copy(old_mac, netdev->dev_addr);
5391b357d971SBrett Creeley 	/* change the netdev's MAC address */
5392a05e4c0aSJakub Kicinski 	eth_hw_addr_set(netdev, mac);
5393b357d971SBrett Creeley 	netif_addr_unlock_bh(netdev);
5394b357d971SBrett Creeley 
5395757976abSLihong Yang 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
53962ccc1c1cSTony Nguyen 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
53972ccc1c1cSTony Nguyen 	if (err && err != -ENOENT) {
5398e94d4478SAnirudh Venkataramanan 		err = -EADDRNOTAVAIL;
5399bbb968e8SAkeem G Abodunrin 		goto err_update_filters;
5400e94d4478SAnirudh Venkataramanan 	}
5401e94d4478SAnirudh Venkataramanan 
540213ed5e8aSNick Nunley 	/* Add filter for new MAC. If filter exists, return success */
54032ccc1c1cSTony Nguyen 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
54042ccc1c1cSTony Nguyen 	if (err == -EEXIST)
540513ed5e8aSNick Nunley 		/* Although this MAC filter is already present in hardware it's
540613ed5e8aSNick Nunley 		 * possible in some cases (e.g. bonding) that dev_addr was
540713ed5e8aSNick Nunley 		 * modified outside of the driver and needs to be restored back
540813ed5e8aSNick Nunley 		 * to this value.
540913ed5e8aSNick Nunley 		 */
5410757976abSLihong Yang 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
54112ccc1c1cSTony Nguyen 	else if (err)
5412757976abSLihong Yang 		/* error if the new filter addition failed */
5413757976abSLihong Yang 		err = -EADDRNOTAVAIL;
5414757976abSLihong Yang 
5415bbb968e8SAkeem G Abodunrin err_update_filters:
5416e94d4478SAnirudh Venkataramanan 	if (err) {
54172f2da36eSAnirudh Venkataramanan 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5418e94d4478SAnirudh Venkataramanan 			   mac);
5419b357d971SBrett Creeley 		netif_addr_lock_bh(netdev);
5420f3956ebbSJakub Kicinski 		eth_hw_addr_set(netdev, old_mac);
54213ba7f53fSBrett Creeley 		netif_addr_unlock_bh(netdev);
5422e94d4478SAnirudh Venkataramanan 		return err;
5423e94d4478SAnirudh Venkataramanan 	}
5424e94d4478SAnirudh Venkataramanan 
54252f2da36eSAnirudh Venkataramanan 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5426e94d4478SAnirudh Venkataramanan 		   netdev->dev_addr);
5427e94d4478SAnirudh Venkataramanan 
5428f9867df6SAnirudh Venkataramanan 	/* write new MAC address to the firmware */
5429e94d4478SAnirudh Venkataramanan 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
54302ccc1c1cSTony Nguyen 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
54312ccc1c1cSTony Nguyen 	if (err) {
54325f87ec48STony Nguyen 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
54332ccc1c1cSTony Nguyen 			   mac, err);
5434e94d4478SAnirudh Venkataramanan 	}
5435e94d4478SAnirudh Venkataramanan 	return 0;
5436e94d4478SAnirudh Venkataramanan }
5437e94d4478SAnirudh Venkataramanan 
5438e94d4478SAnirudh Venkataramanan /**
5439e94d4478SAnirudh Venkataramanan  * ice_set_rx_mode - NDO callback to set the netdev filters
5440e94d4478SAnirudh Venkataramanan  * @netdev: network interface device structure
5441e94d4478SAnirudh Venkataramanan  */
5442e94d4478SAnirudh Venkataramanan static void ice_set_rx_mode(struct net_device *netdev)
5443e94d4478SAnirudh Venkataramanan {
5444e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
5445e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
5446e94d4478SAnirudh Venkataramanan 
5447e94d4478SAnirudh Venkataramanan 	if (!vsi)
5448e94d4478SAnirudh Venkataramanan 		return;
5449e94d4478SAnirudh Venkataramanan 
5450e94d4478SAnirudh Venkataramanan 	/* Set the flags to synchronize filters
5451e94d4478SAnirudh Venkataramanan 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5452e94d4478SAnirudh Venkataramanan 	 * flags
5453e94d4478SAnirudh Venkataramanan 	 */
5454e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5455e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5456e94d4478SAnirudh Venkataramanan 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5457e94d4478SAnirudh Venkataramanan 
5458e94d4478SAnirudh Venkataramanan 	/* schedule our worker thread which will take care of
5459e94d4478SAnirudh Venkataramanan 	 * applying the new filter changes
5460e94d4478SAnirudh Venkataramanan 	 */
5461e94d4478SAnirudh Venkataramanan 	ice_service_task_schedule(vsi->back);
5462e94d4478SAnirudh Venkataramanan }
5463e94d4478SAnirudh Venkataramanan 
5464e94d4478SAnirudh Venkataramanan /**
54651ddef455SUsha Ketineni  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
54661ddef455SUsha Ketineni  * @netdev: network interface device structure
54671ddef455SUsha Ketineni  * @queue_index: Queue ID
54681ddef455SUsha Ketineni  * @maxrate: maximum bandwidth in Mbps
54691ddef455SUsha Ketineni  */
54701ddef455SUsha Ketineni static int
54711ddef455SUsha Ketineni ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
54721ddef455SUsha Ketineni {
54731ddef455SUsha Ketineni 	struct ice_netdev_priv *np = netdev_priv(netdev);
54741ddef455SUsha Ketineni 	struct ice_vsi *vsi = np->vsi;
54751ddef455SUsha Ketineni 	u16 q_handle;
54765518ac2aSTony Nguyen 	int status;
54771ddef455SUsha Ketineni 	u8 tc;
54781ddef455SUsha Ketineni 
54791ddef455SUsha Ketineni 	/* Validate maxrate requested is within permitted range */
54801ddef455SUsha Ketineni 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
548119cce2c6SAnirudh Venkataramanan 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
54821ddef455SUsha Ketineni 			   maxrate, queue_index);
54831ddef455SUsha Ketineni 		return -EINVAL;
54841ddef455SUsha Ketineni 	}
54851ddef455SUsha Ketineni 
54861ddef455SUsha Ketineni 	q_handle = vsi->tx_rings[queue_index]->q_handle;
54871ddef455SUsha Ketineni 	tc = ice_dcb_get_tc(vsi, queue_index);
54881ddef455SUsha Ketineni 
54891ddef455SUsha Ketineni 	/* Set BW back to default, when user set maxrate to 0 */
54901ddef455SUsha Ketineni 	if (!maxrate)
54911ddef455SUsha Ketineni 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
54921ddef455SUsha Ketineni 					       q_handle, ICE_MAX_BW);
54931ddef455SUsha Ketineni 	else
54941ddef455SUsha Ketineni 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
54951ddef455SUsha Ketineni 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5496c1484691STony Nguyen 	if (status)
54975f87ec48STony Nguyen 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
54985f87ec48STony Nguyen 			   status);
54991ddef455SUsha Ketineni 
5500c1484691STony Nguyen 	return status;
55011ddef455SUsha Ketineni }
55021ddef455SUsha Ketineni 
55031ddef455SUsha Ketineni /**
5504e94d4478SAnirudh Venkataramanan  * ice_fdb_add - add an entry to the hardware database
5505e94d4478SAnirudh Venkataramanan  * @ndm: the input from the stack
5506e94d4478SAnirudh Venkataramanan  * @tb: pointer to array of nladdr (unused)
5507e94d4478SAnirudh Venkataramanan  * @dev: the net device pointer
5508e94d4478SAnirudh Venkataramanan  * @addr: the MAC address entry being added
5509f9867df6SAnirudh Venkataramanan  * @vid: VLAN ID
5510e94d4478SAnirudh Venkataramanan  * @flags: instructions from stack about fdb operation
551199be37edSBruce Allan  * @extack: netlink extended ack
5512e94d4478SAnirudh Venkataramanan  */
551399be37edSBruce Allan static int
551499be37edSBruce Allan ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
551599be37edSBruce Allan 	    struct net_device *dev, const unsigned char *addr, u16 vid,
551699be37edSBruce Allan 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5517e94d4478SAnirudh Venkataramanan {
5518e94d4478SAnirudh Venkataramanan 	int err;
5519e94d4478SAnirudh Venkataramanan 
5520e94d4478SAnirudh Venkataramanan 	if (vid) {
5521e94d4478SAnirudh Venkataramanan 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5522e94d4478SAnirudh Venkataramanan 		return -EINVAL;
5523e94d4478SAnirudh Venkataramanan 	}
5524e94d4478SAnirudh Venkataramanan 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5525e94d4478SAnirudh Venkataramanan 		netdev_err(dev, "FDB only supports static addresses\n");
5526e94d4478SAnirudh Venkataramanan 		return -EINVAL;
5527e94d4478SAnirudh Venkataramanan 	}
5528e94d4478SAnirudh Venkataramanan 
5529e94d4478SAnirudh Venkataramanan 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5530e94d4478SAnirudh Venkataramanan 		err = dev_uc_add_excl(dev, addr);
5531e94d4478SAnirudh Venkataramanan 	else if (is_multicast_ether_addr(addr))
5532e94d4478SAnirudh Venkataramanan 		err = dev_mc_add_excl(dev, addr);
5533e94d4478SAnirudh Venkataramanan 	else
5534e94d4478SAnirudh Venkataramanan 		err = -EINVAL;
5535e94d4478SAnirudh Venkataramanan 
5536e94d4478SAnirudh Venkataramanan 	/* Only return duplicate errors if NLM_F_EXCL is set */
5537e94d4478SAnirudh Venkataramanan 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5538e94d4478SAnirudh Venkataramanan 		err = 0;
5539e94d4478SAnirudh Venkataramanan 
5540e94d4478SAnirudh Venkataramanan 	return err;
5541e94d4478SAnirudh Venkataramanan }
5542e94d4478SAnirudh Venkataramanan 
5543e94d4478SAnirudh Venkataramanan /**
5544e94d4478SAnirudh Venkataramanan  * ice_fdb_del - delete an entry from the hardware database
5545e94d4478SAnirudh Venkataramanan  * @ndm: the input from the stack
5546e94d4478SAnirudh Venkataramanan  * @tb: pointer to array of nladdr (unused)
5547e94d4478SAnirudh Venkataramanan  * @dev: the net device pointer
5548e94d4478SAnirudh Venkataramanan  * @addr: the MAC address entry being added
5549f9867df6SAnirudh Venkataramanan  * @vid: VLAN ID
5550e94d4478SAnirudh Venkataramanan  */
5551c8b7abddSBruce Allan static int
5552c8b7abddSBruce Allan ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5553e94d4478SAnirudh Venkataramanan 	    struct net_device *dev, const unsigned char *addr,
5554e94d4478SAnirudh Venkataramanan 	    __always_unused u16 vid)
5555e94d4478SAnirudh Venkataramanan {
5556e94d4478SAnirudh Venkataramanan 	int err;
5557e94d4478SAnirudh Venkataramanan 
5558e94d4478SAnirudh Venkataramanan 	if (ndm->ndm_state & NUD_PERMANENT) {
5559e94d4478SAnirudh Venkataramanan 		netdev_err(dev, "FDB only supports static addresses\n");
5560e94d4478SAnirudh Venkataramanan 		return -EINVAL;
5561e94d4478SAnirudh Venkataramanan 	}
5562e94d4478SAnirudh Venkataramanan 
5563e94d4478SAnirudh Venkataramanan 	if (is_unicast_ether_addr(addr))
5564e94d4478SAnirudh Venkataramanan 		err = dev_uc_del(dev, addr);
5565e94d4478SAnirudh Venkataramanan 	else if (is_multicast_ether_addr(addr))
5566e94d4478SAnirudh Venkataramanan 		err = dev_mc_del(dev, addr);
5567e94d4478SAnirudh Venkataramanan 	else
5568e94d4478SAnirudh Venkataramanan 		err = -EINVAL;
5569e94d4478SAnirudh Venkataramanan 
5570e94d4478SAnirudh Venkataramanan 	return err;
5571e94d4478SAnirudh Venkataramanan }
5572e94d4478SAnirudh Venkataramanan 
/**
 * ice_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 *
 * Returns 0 on success, -EBUSY while a reset is in progress, -EACCES when
 * hw_tc_offload would be disabled while ADQ is active, or an error code
 * from the VLAN offload handlers.
 *
 * NOTE(review): each feature group below assigns into the same @ret, so an
 * error from an earlier group (e.g. VLAN stripping) can be masked by a later
 * successful one — confirm whether reporting only the last error is intended.
 */
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int ret = 0;

	/* Don't set any netdev advanced features with device in Safe Mode */
	if (ice_is_safe_mode(vsi->back)) {
		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
		return ret;
	}

	/* Do not change setting during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
		return -EBUSY;
	}

	/* Multiple features can be changed in one call so keep features in
	 * separate if/else statements to guarantee each feature is checked
	 */
	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		ice_vsi_manage_rss_lut(vsi, true);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		ice_vsi_manage_rss_lut(vsi, false);

	/* Rx VLAN stripping toggle */
	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
		ret = vsi->vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
		ret = vsi->vlan_ops.dis_stripping(vsi);

	/* Tx VLAN insertion toggle */
	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
		ret = vsi->vlan_ops.ena_insertion(vsi, ETH_P_8021Q);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
		ret = vsi->vlan_ops.dis_insertion(vsi);

	/* Rx VLAN filtering toggle */
	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		ret = vsi->vlan_ops.ena_rx_filtering(vsi);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		ret = vsi->vlan_ops.dis_rx_filtering(vsi);

	/* Flow Director toggle; aRFS context follows the same switch */
	if ((features & NETIF_F_NTUPLE) &&
	    !(netdev->features & NETIF_F_NTUPLE)) {
		ice_vsi_manage_fdir(vsi, true);
		ice_init_arfs(vsi);
	} else if (!(features & NETIF_F_NTUPLE) &&
		 (netdev->features & NETIF_F_NTUPLE)) {
		ice_vsi_manage_fdir(vsi, false);
		ice_clear_arfs(vsi);
	}

	/* don't turn off hw_tc_offload when ADQ is already enabled */
	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
		return -EACCES;
	}

	if ((features & NETIF_F_HW_TC) &&
	    !(netdev->features & NETIF_F_HW_TC))
		set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
	else
		clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);

	return ret;
}
5652d76a60baSAnirudh Venkataramanan 
5653d76a60baSAnirudh Venkataramanan /**
5654f9867df6SAnirudh Venkataramanan  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5655f9867df6SAnirudh Venkataramanan  * @vsi: VSI to setup VLAN properties for
5656d76a60baSAnirudh Venkataramanan  */
5657d76a60baSAnirudh Venkataramanan static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5658d76a60baSAnirudh Venkataramanan {
5659d76a60baSAnirudh Venkataramanan 	int ret = 0;
5660d76a60baSAnirudh Venkataramanan 
5661d76a60baSAnirudh Venkataramanan 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
56622bfefa2dSBrett Creeley 		ret = vsi->vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
5663d76a60baSAnirudh Venkataramanan 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
56642bfefa2dSBrett Creeley 		ret = vsi->vlan_ops.ena_insertion(vsi, ETH_P_8021Q);
5665d76a60baSAnirudh Venkataramanan 
5666d76a60baSAnirudh Venkataramanan 	return ret;
5667d76a60baSAnirudh Venkataramanan }
5668d76a60baSAnirudh Venkataramanan 
5669d76a60baSAnirudh Venkataramanan /**
5670cdedef59SAnirudh Venkataramanan  * ice_vsi_cfg - Setup the VSI
5671cdedef59SAnirudh Venkataramanan  * @vsi: the VSI being configured
5672cdedef59SAnirudh Venkataramanan  *
5673cdedef59SAnirudh Venkataramanan  * Return 0 on success and negative value on error
5674cdedef59SAnirudh Venkataramanan  */
56750e674aebSAnirudh Venkataramanan int ice_vsi_cfg(struct ice_vsi *vsi)
5676cdedef59SAnirudh Venkataramanan {
5677cdedef59SAnirudh Venkataramanan 	int err;
5678cdedef59SAnirudh Venkataramanan 
5679c7f2c42bSAnirudh Venkataramanan 	if (vsi->netdev) {
5680e94d4478SAnirudh Venkataramanan 		ice_set_rx_mode(vsi->netdev);
56819ecd25c2SAnirudh Venkataramanan 
56829ecd25c2SAnirudh Venkataramanan 		err = ice_vsi_vlan_setup(vsi);
56839ecd25c2SAnirudh Venkataramanan 
5684d76a60baSAnirudh Venkataramanan 		if (err)
5685d76a60baSAnirudh Venkataramanan 			return err;
5686c7f2c42bSAnirudh Venkataramanan 	}
5687a629cf0aSAnirudh Venkataramanan 	ice_vsi_cfg_dcb_rings(vsi);
568803f7a986SAnirudh Venkataramanan 
568903f7a986SAnirudh Venkataramanan 	err = ice_vsi_cfg_lan_txqs(vsi);
5690efc2214bSMaciej Fijalkowski 	if (!err && ice_is_xdp_ena_vsi(vsi))
5691efc2214bSMaciej Fijalkowski 		err = ice_vsi_cfg_xdp_txqs(vsi);
5692cdedef59SAnirudh Venkataramanan 	if (!err)
5693cdedef59SAnirudh Venkataramanan 		err = ice_vsi_cfg_rxqs(vsi);
5694cdedef59SAnirudh Venkataramanan 
5695cdedef59SAnirudh Venkataramanan 	return err;
5696cdedef59SAnirudh Venkataramanan }
5697cdedef59SAnirudh Venkataramanan 
5698cdf1f1f1SJacob Keller /* THEORY OF MODERATION:
5699d8eb7ad5SJesse Brandeburg  * The ice driver hardware works differently than the hardware that DIMLIB was
5700cdf1f1f1SJacob Keller  * originally made for. ice hardware doesn't have packet count limits that
5701cdf1f1f1SJacob Keller  * can trigger an interrupt, but it *does* have interrupt rate limit support,
5702d8eb7ad5SJesse Brandeburg  * which is hard-coded to a limit of 250,000 ints/second.
5703d8eb7ad5SJesse Brandeburg  * If not using dynamic moderation, the INTRL value can be modified
5704d8eb7ad5SJesse Brandeburg  * by ethtool rx-usecs-high.
5705cdf1f1f1SJacob Keller  */
/* one entry of an ITR moderation profile table (see rx_profile/tx_profile) */
struct ice_dim {
	/* the throttle rate for interrupts, basically worst case delay before
	 * an initial interrupt fires, value is stored in microseconds.
	 */
	u16 itr;
};
5712cdf1f1f1SJacob Keller 
/* Make a different profile for Rx that doesn't allow quite so aggressive
 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
 * second).
 */
static const struct ice_dim rx_profile[] = {
	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
	{8},    /* 125,000 ints/s */
	{16},   /*  62,500 ints/s */
	{62},   /*  16,129 ints/s */
	{126}   /*   7,936 ints/s */
};
5724cdf1f1f1SJacob Keller 
/* The transmit profile, which has the same sorts of values
 * as the previous struct
 */
static const struct ice_dim tx_profile[] = {
	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
	{8},    /* 125,000 ints/s */
	{40},   /*  25,000 ints/s */
	{128},  /*   7,812 ints/s */
	{256}   /*   3,906 ints/s */
};
5735cdf1f1f1SJacob Keller 
5736cdf1f1f1SJacob Keller static void ice_tx_dim_work(struct work_struct *work)
5737cdf1f1f1SJacob Keller {
5738cdf1f1f1SJacob Keller 	struct ice_ring_container *rc;
5739cdf1f1f1SJacob Keller 	struct dim *dim;
5740d8eb7ad5SJesse Brandeburg 	u16 itr;
5741cdf1f1f1SJacob Keller 
5742cdf1f1f1SJacob Keller 	dim = container_of(work, struct dim, work);
5743d8eb7ad5SJesse Brandeburg 	rc = (struct ice_ring_container *)dim->priv;
5744cdf1f1f1SJacob Keller 
5745d8eb7ad5SJesse Brandeburg 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
5746cdf1f1f1SJacob Keller 
5747cdf1f1f1SJacob Keller 	/* look up the values in our local table */
5748cdf1f1f1SJacob Keller 	itr = tx_profile[dim->profile_ix].itr;
5749cdf1f1f1SJacob Keller 
5750d8eb7ad5SJesse Brandeburg 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
5751cdf1f1f1SJacob Keller 	ice_write_itr(rc, itr);
5752cdf1f1f1SJacob Keller 
5753cdf1f1f1SJacob Keller 	dim->state = DIM_START_MEASURE;
5754cdf1f1f1SJacob Keller }
5755cdf1f1f1SJacob Keller 
5756cdf1f1f1SJacob Keller static void ice_rx_dim_work(struct work_struct *work)
5757cdf1f1f1SJacob Keller {
5758cdf1f1f1SJacob Keller 	struct ice_ring_container *rc;
5759cdf1f1f1SJacob Keller 	struct dim *dim;
5760d8eb7ad5SJesse Brandeburg 	u16 itr;
5761cdf1f1f1SJacob Keller 
5762cdf1f1f1SJacob Keller 	dim = container_of(work, struct dim, work);
5763d8eb7ad5SJesse Brandeburg 	rc = (struct ice_ring_container *)dim->priv;
5764cdf1f1f1SJacob Keller 
5765d8eb7ad5SJesse Brandeburg 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
5766cdf1f1f1SJacob Keller 
5767cdf1f1f1SJacob Keller 	/* look up the values in our local table */
5768cdf1f1f1SJacob Keller 	itr = rx_profile[dim->profile_ix].itr;
5769cdf1f1f1SJacob Keller 
5770d8eb7ad5SJesse Brandeburg 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
5771cdf1f1f1SJacob Keller 	ice_write_itr(rc, itr);
5772cdf1f1f1SJacob Keller 
5773cdf1f1f1SJacob Keller 	dim->state = DIM_START_MEASURE;
5774cdf1f1f1SJacob Keller }
5775cdf1f1f1SJacob Keller 
5776d8eb7ad5SJesse Brandeburg #define ICE_DIM_DEFAULT_PROFILE_IX 1
5777d8eb7ad5SJesse Brandeburg 
5778d8eb7ad5SJesse Brandeburg /**
5779d8eb7ad5SJesse Brandeburg  * ice_init_moderation - set up interrupt moderation
5780d8eb7ad5SJesse Brandeburg  * @q_vector: the vector containing rings to be configured
5781d8eb7ad5SJesse Brandeburg  *
5782d8eb7ad5SJesse Brandeburg  * Set up interrupt moderation registers, with the intent to do the right thing
5783d8eb7ad5SJesse Brandeburg  * when called from reset or from probe, and whether or not dynamic moderation
5784d8eb7ad5SJesse Brandeburg  * is enabled or not. Take special care to write all the registers in both
5785d8eb7ad5SJesse Brandeburg  * dynamic moderation mode or not in order to make sure hardware is in a known
5786d8eb7ad5SJesse Brandeburg  * state.
5787d8eb7ad5SJesse Brandeburg  */
5788d8eb7ad5SJesse Brandeburg static void ice_init_moderation(struct ice_q_vector *q_vector)
5789d8eb7ad5SJesse Brandeburg {
5790d8eb7ad5SJesse Brandeburg 	struct ice_ring_container *rc;
5791d8eb7ad5SJesse Brandeburg 	bool tx_dynamic, rx_dynamic;
5792d8eb7ad5SJesse Brandeburg 
5793d8eb7ad5SJesse Brandeburg 	rc = &q_vector->tx;
5794d8eb7ad5SJesse Brandeburg 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
5795d8eb7ad5SJesse Brandeburg 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5796d8eb7ad5SJesse Brandeburg 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
5797d8eb7ad5SJesse Brandeburg 	rc->dim.priv = rc;
5798d8eb7ad5SJesse Brandeburg 	tx_dynamic = ITR_IS_DYNAMIC(rc);
5799d8eb7ad5SJesse Brandeburg 
5800d8eb7ad5SJesse Brandeburg 	/* set the initial TX ITR to match the above */
5801d8eb7ad5SJesse Brandeburg 	ice_write_itr(rc, tx_dynamic ?
5802d8eb7ad5SJesse Brandeburg 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
5803d8eb7ad5SJesse Brandeburg 
5804d8eb7ad5SJesse Brandeburg 	rc = &q_vector->rx;
5805d8eb7ad5SJesse Brandeburg 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
5806d8eb7ad5SJesse Brandeburg 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5807d8eb7ad5SJesse Brandeburg 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
5808d8eb7ad5SJesse Brandeburg 	rc->dim.priv = rc;
5809d8eb7ad5SJesse Brandeburg 	rx_dynamic = ITR_IS_DYNAMIC(rc);
5810d8eb7ad5SJesse Brandeburg 
5811d8eb7ad5SJesse Brandeburg 	/* set the initial RX ITR to match the above */
5812d8eb7ad5SJesse Brandeburg 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
5813d8eb7ad5SJesse Brandeburg 				       rc->itr_setting);
5814d8eb7ad5SJesse Brandeburg 
5815d8eb7ad5SJesse Brandeburg 	ice_set_q_vector_intrl(q_vector);
5816d8eb7ad5SJesse Brandeburg }
5817d8eb7ad5SJesse Brandeburg 
5818cdedef59SAnirudh Venkataramanan /**
58192b245cb2SAnirudh Venkataramanan  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
58202b245cb2SAnirudh Venkataramanan  * @vsi: the VSI being configured
58212b245cb2SAnirudh Venkataramanan  */
58222b245cb2SAnirudh Venkataramanan static void ice_napi_enable_all(struct ice_vsi *vsi)
58232b245cb2SAnirudh Venkataramanan {
58242b245cb2SAnirudh Venkataramanan 	int q_idx;
58252b245cb2SAnirudh Venkataramanan 
58262b245cb2SAnirudh Venkataramanan 	if (!vsi->netdev)
58272b245cb2SAnirudh Venkataramanan 		return;
58282b245cb2SAnirudh Venkataramanan 
58290c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, q_idx) {
5830eec90376SYoung Xiao 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5831eec90376SYoung Xiao 
5832d8eb7ad5SJesse Brandeburg 		ice_init_moderation(q_vector);
5833cdf1f1f1SJacob Keller 
5834e72bba21SMaciej Fijalkowski 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
5835eec90376SYoung Xiao 			napi_enable(&q_vector->napi);
5836eec90376SYoung Xiao 	}
58372b245cb2SAnirudh Venkataramanan }
58382b245cb2SAnirudh Venkataramanan 
/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
 *
 * Return 0 on success and negative value on error
 */
static int ice_up_complete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int err;

	ice_vsi_cfg_msix(vsi);

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	err = ice_vsi_start_all_rx_rings(vsi);
	if (err)
		return err;

	/* clear the down flag before (re)enabling NAPI and interrupts */
	clear_bit(ICE_VSI_DOWN, vsi->state);
	ice_napi_enable_all(vsi);
	ice_vsi_ena_irq(vsi);

	/* only report link and start the netdev queues when the port is
	 * already reporting link up
	 */
	if (vsi->port_info &&
	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
	    vsi->netdev) {
		ice_print_link_msg(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
		/* NOTE(review): PTP link-change notification is skipped on
		 * E810 — presumably E810 does not need per-port PTP link
		 * tracking; confirm against the ice_ptp implementation
		 */
		if (!ice_is_e810(&pf->hw))
			ice_ptp_link_change(pf, pf->hw.pf_id, true);
	}

	/* clear this now, and the first stats read will be used as baseline */
	vsi->stat_offsets_loaded = false;

	ice_service_task_schedule(pf);

	return 0;
}
5881cdedef59SAnirudh Venkataramanan 
/**
 * ice_up - Bring the connection back up after being down
 * @vsi: VSI being configured
 */
int ice_up(struct ice_vsi *vsi)
{
	int err = ice_vsi_cfg(vsi);

	if (err)
		return err;

	return ice_up_complete(vsi);
}
5896fcea6f3dSAnirudh Venkataramanan 
/**
 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
 * @syncp: pointer to u64_stats_sync
 * @stats: stats that pkts and bytes count will be taken from
 * @pkts: packets stats counter
 * @bytes: bytes stats counter
 *
 * This function fetches stats from the ring considering the atomic operations
 * that needs to be performed to read u64 values in 32 bit machine.
 *
 * NOTE(review): @stats is passed by value, so it is snapshotted once at the
 * call site; the fetch/retry loop below then re-reads that same local copy,
 * which cannot observe the torn update the seqcount is meant to guard
 * against. Consider passing a pointer so the reads happen inside the loop.
 */
static void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
			     u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*pkts = stats.pkts;
		*bytes = stats.bytes;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}
5919fcea6f3dSAnirudh Venkataramanan 
5920fcea6f3dSAnirudh Venkataramanan /**
592149d358e0SMarta Plantykow  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
592249d358e0SMarta Plantykow  * @vsi: the VSI to be updated
59231a0f25a5SJesse Brandeburg  * @vsi_stats: the stats struct to be updated
592449d358e0SMarta Plantykow  * @rings: rings to work on
592549d358e0SMarta Plantykow  * @count: number of rings
592649d358e0SMarta Plantykow  */
592749d358e0SMarta Plantykow static void
59281a0f25a5SJesse Brandeburg ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
59291a0f25a5SJesse Brandeburg 			     struct rtnl_link_stats64 *vsi_stats,
59301a0f25a5SJesse Brandeburg 			     struct ice_tx_ring **rings, u16 count)
593149d358e0SMarta Plantykow {
593249d358e0SMarta Plantykow 	u16 i;
593349d358e0SMarta Plantykow 
593449d358e0SMarta Plantykow 	for (i = 0; i < count; i++) {
5935e72bba21SMaciej Fijalkowski 		struct ice_tx_ring *ring;
5936e72bba21SMaciej Fijalkowski 		u64 pkts = 0, bytes = 0;
593749d358e0SMarta Plantykow 
593849d358e0SMarta Plantykow 		ring = READ_ONCE(rings[i]);
5939e72bba21SMaciej Fijalkowski 		if (ring)
5940e72bba21SMaciej Fijalkowski 			ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
594149d358e0SMarta Plantykow 		vsi_stats->tx_packets += pkts;
594249d358e0SMarta Plantykow 		vsi_stats->tx_bytes += bytes;
594349d358e0SMarta Plantykow 		vsi->tx_restart += ring->tx_stats.restart_q;
594449d358e0SMarta Plantykow 		vsi->tx_busy += ring->tx_stats.tx_busy;
594549d358e0SMarta Plantykow 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
594649d358e0SMarta Plantykow 	}
594749d358e0SMarta Plantykow }
594849d358e0SMarta Plantykow 
/**
 * ice_update_vsi_ring_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 *
 * Sum packet/byte counters from all Tx, Rx and (if enabled) XDP Tx rings
 * into vsi->net_stats, and refresh the VSI-level extended counters
 * (Tx restart/busy/linearize and Rx buffer/page allocation failures).
 */
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *vsi_stats;
	u64 pkts, bytes;
	int i;

	/* accumulate into a scratch struct and publish at the end so
	 * vsi->net_stats is never observed half-updated; GFP_ATOMIC since
	 * this may run in atomic context (NOTE(review): atomic-context
	 * requirement inferred from the flag — confirm against callers)
	 */
	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
	if (!vsi_stats)
		return;

	/* reset non-netdev (extended) stats */
	vsi->tx_restart = 0;
	vsi->tx_busy = 0;
	vsi->tx_linearize = 0;
	vsi->rx_buf_failed = 0;
	vsi->rx_page_failed = 0;

	rcu_read_lock();

	/* update Tx rings counters */
	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
				     vsi->num_txq);

	/* update Rx rings counters */
	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);

		/* NOTE(review): ring is dereferenced without a NULL check,
		 * unlike the Tx helper — confirm Rx rings cannot disappear
		 * while this runs
		 */
		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
		vsi_stats->rx_packets += pkts;
		vsi_stats->rx_bytes += bytes;
		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
	}

	/* update XDP Tx rings counters */
	if (ice_is_xdp_ena_vsi(vsi))
		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
					     vsi->num_xdp_txq);

	rcu_read_unlock();

	/* publish the aggregated packet/byte counts */
	vsi->net_stats.tx_packets = vsi_stats->tx_packets;
	vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
	vsi->net_stats.rx_packets = vsi_stats->rx_packets;
	vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;

	kfree(vsi_stats);
}
6001fcea6f3dSAnirudh Venkataramanan 
/**
 * ice_update_vsi_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 *
 * Refresh ring-based and hardware counters for @vsi and fold them into
 * the netdev-visible rtnl_link_stats64. Does nothing while the VSI is
 * down or a configuration change/reset is in progress.
 */
void ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
	struct ice_eth_stats *cur_es = &vsi->eth_stats;
	struct ice_pf *pf = vsi->back;

	/* skip while down or while configuration is in flight */
	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* get stats as recorded by Tx/Rx rings */
	ice_update_vsi_ring_stats(vsi);

	/* get VSI stats as recorded by the hardware */
	ice_update_eth_stats(vsi);

	cur_ns->tx_errors = cur_es->tx_errors;
	cur_ns->rx_dropped = cur_es->rx_discards;
	cur_ns->tx_dropped = cur_es->tx_discards;
	cur_ns->multicast = cur_es->rx_multicast;

	/* update some more netdev stats if this is main VSI */
	if (vsi->type == ICE_VSI_PF) {
		cur_ns->rx_crc_errors = pf->stats.crc_errors;
		/* rx_errors aggregates every port-level Rx error counter */
		cur_ns->rx_errors = pf->stats.crc_errors +
				    pf->stats.illegal_bytes +
				    pf->stats.rx_len_errors +
				    pf->stats.rx_undersize +
				    pf->hw_csum_rx_error +
				    pf->stats.rx_jabber +
				    pf->stats.rx_fragments +
				    pf->stats.rx_oversize;
		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
		/* record drops from the port level */
		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
	}
}
6043fcea6f3dSAnirudh Venkataramanan 
6044fcea6f3dSAnirudh Venkataramanan /**
6045fcea6f3dSAnirudh Venkataramanan  * ice_update_pf_stats - Update PF port stats counters
6046fcea6f3dSAnirudh Venkataramanan  * @pf: PF whose stats needs to be updated
6047fcea6f3dSAnirudh Venkataramanan  */
void ice_update_pf_stats(struct ice_pf *pf)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &pf->hw;
	u16 fd_ctr_base;
	u8 port;

	port = hw->port_info->lport;
	prev_ps = &pf->stats_prev;
	cur_ps = &pf->stats;

	/* Port-level Rx byte/packet counters */
	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_bytes,
			  &cur_ps->eth.rx_bytes);

	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_unicast,
			  &cur_ps->eth.rx_unicast);

	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_multicast,
			  &cur_ps->eth.rx_multicast);

	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.rx_broadcast,
			  &cur_ps->eth.rx_broadcast);

	/* Port-level Rx discards */
	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
			  &prev_ps->eth.rx_discards,
			  &cur_ps->eth.rx_discards);

	/* Port-level Tx byte/packet counters */
	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_bytes,
			  &cur_ps->eth.tx_bytes);

	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_unicast,
			  &cur_ps->eth.tx_unicast);

	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_multicast,
			  &cur_ps->eth.tx_multicast);

	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
			  &prev_ps->eth.tx_broadcast,
			  &cur_ps->eth.tx_broadcast);

	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
			  &prev_ps->tx_dropped_link_down,
			  &cur_ps->tx_dropped_link_down);

	/* Rx packet-size histogram counters */
	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);

	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);

	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);

	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);

	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);

	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);

	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);

	/* Tx packet-size histogram counters */
	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);

	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);

	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);

	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);

	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);

	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);

	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);

	/* Flow Director sideband filter match counter; the counter index is
	 * relative to this PF's counter base
	 */
	fd_ctr_base = hw->fd_ctr_base;

	ice_stat_update40(hw,
			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
			  &cur_ps->fd_sb_match);
	/* Link-level flow control (XON/XOFF) counters */
	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);

	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);

	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);

	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);

	ice_update_dcb_stats(pf);

	/* Rx error counters */
	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
			  &prev_ps->crc_errors, &cur_ps->crc_errors);

	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);

	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
			  &prev_ps->mac_local_faults,
			  &cur_ps->mac_local_faults);

	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
			  &prev_ps->mac_remote_faults,
			  &cur_ps->mac_remote_faults);

	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);

	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);

	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);

	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);

	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);

	/* report whether Flow Director is currently enabled */
	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;

	/* From now on, pf->stats_prev holds a valid snapshot for the
	 * ice_stat_update* helpers to compute deltas against
	 */
	pf->stat_prev_loaded = true;
}
6194fcea6f3dSAnirudh Venkataramanan 
6195fcea6f3dSAnirudh Venkataramanan /**
6196fcea6f3dSAnirudh Venkataramanan  * ice_get_stats64 - get statistics for network device structure
6197fcea6f3dSAnirudh Venkataramanan  * @netdev: network interface device structure
6198fcea6f3dSAnirudh Venkataramanan  * @stats: main device statistics structure
6199fcea6f3dSAnirudh Venkataramanan  */
6200fcea6f3dSAnirudh Venkataramanan static
6201fcea6f3dSAnirudh Venkataramanan void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6202fcea6f3dSAnirudh Venkataramanan {
6203fcea6f3dSAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
6204fcea6f3dSAnirudh Venkataramanan 	struct rtnl_link_stats64 *vsi_stats;
6205fcea6f3dSAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
6206fcea6f3dSAnirudh Venkataramanan 
6207fcea6f3dSAnirudh Venkataramanan 	vsi_stats = &vsi->net_stats;
6208fcea6f3dSAnirudh Venkataramanan 
62093d57fd10SDave Ertman 	if (!vsi->num_txq || !vsi->num_rxq)
6210fcea6f3dSAnirudh Venkataramanan 		return;
62113d57fd10SDave Ertman 
6212fcea6f3dSAnirudh Venkataramanan 	/* netdev packet/byte stats come from ring counter. These are obtained
6213fcea6f3dSAnirudh Venkataramanan 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
62143d57fd10SDave Ertman 	 * But, only call the update routine and read the registers if VSI is
62153d57fd10SDave Ertman 	 * not down.
6216fcea6f3dSAnirudh Venkataramanan 	 */
6217e97fb1aeSAnirudh Venkataramanan 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6218fcea6f3dSAnirudh Venkataramanan 		ice_update_vsi_ring_stats(vsi);
6219fcea6f3dSAnirudh Venkataramanan 	stats->tx_packets = vsi_stats->tx_packets;
6220fcea6f3dSAnirudh Venkataramanan 	stats->tx_bytes = vsi_stats->tx_bytes;
6221fcea6f3dSAnirudh Venkataramanan 	stats->rx_packets = vsi_stats->rx_packets;
6222fcea6f3dSAnirudh Venkataramanan 	stats->rx_bytes = vsi_stats->rx_bytes;
6223fcea6f3dSAnirudh Venkataramanan 
6224fcea6f3dSAnirudh Venkataramanan 	/* The rest of the stats can be read from the hardware but instead we
6225fcea6f3dSAnirudh Venkataramanan 	 * just return values that the watchdog task has already obtained from
6226fcea6f3dSAnirudh Venkataramanan 	 * the hardware.
6227fcea6f3dSAnirudh Venkataramanan 	 */
6228fcea6f3dSAnirudh Venkataramanan 	stats->multicast = vsi_stats->multicast;
6229fcea6f3dSAnirudh Venkataramanan 	stats->tx_errors = vsi_stats->tx_errors;
6230fcea6f3dSAnirudh Venkataramanan 	stats->tx_dropped = vsi_stats->tx_dropped;
6231fcea6f3dSAnirudh Venkataramanan 	stats->rx_errors = vsi_stats->rx_errors;
6232fcea6f3dSAnirudh Venkataramanan 	stats->rx_dropped = vsi_stats->rx_dropped;
6233fcea6f3dSAnirudh Venkataramanan 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6234fcea6f3dSAnirudh Venkataramanan 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6235fcea6f3dSAnirudh Venkataramanan }
6236fcea6f3dSAnirudh Venkataramanan 
6237fcea6f3dSAnirudh Venkataramanan /**
62382b245cb2SAnirudh Venkataramanan  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
62392b245cb2SAnirudh Venkataramanan  * @vsi: VSI having NAPI disabled
62402b245cb2SAnirudh Venkataramanan  */
62412b245cb2SAnirudh Venkataramanan static void ice_napi_disable_all(struct ice_vsi *vsi)
62422b245cb2SAnirudh Venkataramanan {
62432b245cb2SAnirudh Venkataramanan 	int q_idx;
62442b245cb2SAnirudh Venkataramanan 
62452b245cb2SAnirudh Venkataramanan 	if (!vsi->netdev)
62462b245cb2SAnirudh Venkataramanan 		return;
62472b245cb2SAnirudh Venkataramanan 
62480c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, q_idx) {
6249eec90376SYoung Xiao 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6250eec90376SYoung Xiao 
6251e72bba21SMaciej Fijalkowski 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6252eec90376SYoung Xiao 			napi_disable(&q_vector->napi);
6253cdf1f1f1SJacob Keller 
6254cdf1f1f1SJacob Keller 		cancel_work_sync(&q_vector->tx.dim.work);
6255cdf1f1f1SJacob Keller 		cancel_work_sync(&q_vector->rx.dim.work);
6256eec90376SYoung Xiao 	}
62572b245cb2SAnirudh Venkataramanan }
62582b245cb2SAnirudh Venkataramanan 
62592b245cb2SAnirudh Venkataramanan /**
6260cdedef59SAnirudh Venkataramanan  * ice_down - Shutdown the connection
6261cdedef59SAnirudh Venkataramanan  * @vsi: The VSI being stopped
626221c6e36bSJesse Brandeburg  *
626321c6e36bSJesse Brandeburg  * Caller of this function is expected to set the vsi->state ICE_DOWN bit
6264cdedef59SAnirudh Venkataramanan  */
int ice_down(struct ice_vsi *vsi)
{
	int i, tx_err, rx_err, link_err = 0;

	/* The caller must have set ICE_VSI_DOWN before invoking ice_down() */
	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		/* NOTE(review): the PTP link-down notification is skipped on
		 * E810 devices here — presumably E810 handles that path
		 * elsewhere; confirm against the PTP code.
		 */
		if (!ice_is_e810(&vsi->back->hw))
			ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
		/* stop the stack from queueing new Tx work before the rings
		 * are stopped below
		 */
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ice_eswitch_stop_all_tx_queues(vsi->back);
	}

	ice_vsi_dis_irq(vsi);

	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (tx_err)
		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, tx_err);
	/* XDP Tx rings are stopped separately, and only if the LAN Tx rings
	 * stopped cleanly
	 */
	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
		if (tx_err)
			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
				   vsi->vsi_num, tx_err);
	}

	rx_err = ice_vsi_stop_all_rx_rings(vsi);
	if (rx_err)
		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, rx_err);

	ice_napi_disable_all(vsi);

	/* optionally force the physical link down when the interface closes */
	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		link_err = ice_force_phys_link_state(vsi, false);
		if (link_err)
			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
	}

	/* rings are stopped; release any buffers still held on them */
	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	ice_for_each_rxq(vsi, i)
		ice_clean_rx_ring(vsi->rx_rings[i]);

	/* collapse all partial failures into a single -EIO for the caller */
	if (tx_err || rx_err || link_err) {
		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);
		return -EIO;
	}

	return 0;
}
6321cdedef59SAnirudh Venkataramanan 
6322cdedef59SAnirudh Venkataramanan /**
6323cdedef59SAnirudh Venkataramanan  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6324cdedef59SAnirudh Venkataramanan  * @vsi: VSI having resources allocated
6325cdedef59SAnirudh Venkataramanan  *
6326cdedef59SAnirudh Venkataramanan  * Return 0 on success, negative on failure
6327cdedef59SAnirudh Venkataramanan  */
63280e674aebSAnirudh Venkataramanan int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6329cdedef59SAnirudh Venkataramanan {
6330dab0588fSJesse Brandeburg 	int i, err = 0;
6331cdedef59SAnirudh Venkataramanan 
6332cdedef59SAnirudh Venkataramanan 	if (!vsi->num_txq) {
63339a946843SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6334cdedef59SAnirudh Venkataramanan 			vsi->vsi_num);
6335cdedef59SAnirudh Venkataramanan 		return -EINVAL;
6336cdedef59SAnirudh Venkataramanan 	}
6337cdedef59SAnirudh Venkataramanan 
6338cdedef59SAnirudh Venkataramanan 	ice_for_each_txq(vsi, i) {
6339e72bba21SMaciej Fijalkowski 		struct ice_tx_ring *ring = vsi->tx_rings[i];
6340eb0ee8abSMichal Swiatkowski 
6341eb0ee8abSMichal Swiatkowski 		if (!ring)
6342eb0ee8abSMichal Swiatkowski 			return -EINVAL;
6343eb0ee8abSMichal Swiatkowski 
63441c54c839SGrzegorz Nitka 		if (vsi->netdev)
6345eb0ee8abSMichal Swiatkowski 			ring->netdev = vsi->netdev;
6346eb0ee8abSMichal Swiatkowski 		err = ice_setup_tx_ring(ring);
6347cdedef59SAnirudh Venkataramanan 		if (err)
6348cdedef59SAnirudh Venkataramanan 			break;
6349cdedef59SAnirudh Venkataramanan 	}
6350cdedef59SAnirudh Venkataramanan 
6351cdedef59SAnirudh Venkataramanan 	return err;
6352cdedef59SAnirudh Venkataramanan }
6353cdedef59SAnirudh Venkataramanan 
6354cdedef59SAnirudh Venkataramanan /**
6355cdedef59SAnirudh Venkataramanan  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6356cdedef59SAnirudh Venkataramanan  * @vsi: VSI having resources allocated
6357cdedef59SAnirudh Venkataramanan  *
6358cdedef59SAnirudh Venkataramanan  * Return 0 on success, negative on failure
6359cdedef59SAnirudh Venkataramanan  */
63600e674aebSAnirudh Venkataramanan int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6361cdedef59SAnirudh Venkataramanan {
6362dab0588fSJesse Brandeburg 	int i, err = 0;
6363cdedef59SAnirudh Venkataramanan 
6364cdedef59SAnirudh Venkataramanan 	if (!vsi->num_rxq) {
63659a946843SAnirudh Venkataramanan 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6366cdedef59SAnirudh Venkataramanan 			vsi->vsi_num);
6367cdedef59SAnirudh Venkataramanan 		return -EINVAL;
6368cdedef59SAnirudh Venkataramanan 	}
6369cdedef59SAnirudh Venkataramanan 
6370cdedef59SAnirudh Venkataramanan 	ice_for_each_rxq(vsi, i) {
6371e72bba21SMaciej Fijalkowski 		struct ice_rx_ring *ring = vsi->rx_rings[i];
6372eb0ee8abSMichal Swiatkowski 
6373eb0ee8abSMichal Swiatkowski 		if (!ring)
6374eb0ee8abSMichal Swiatkowski 			return -EINVAL;
6375eb0ee8abSMichal Swiatkowski 
63761c54c839SGrzegorz Nitka 		if (vsi->netdev)
6377eb0ee8abSMichal Swiatkowski 			ring->netdev = vsi->netdev;
6378eb0ee8abSMichal Swiatkowski 		err = ice_setup_rx_ring(ring);
6379cdedef59SAnirudh Venkataramanan 		if (err)
6380cdedef59SAnirudh Venkataramanan 			break;
6381cdedef59SAnirudh Venkataramanan 	}
6382cdedef59SAnirudh Venkataramanan 
6383cdedef59SAnirudh Venkataramanan 	return err;
6384cdedef59SAnirudh Venkataramanan }
6385cdedef59SAnirudh Venkataramanan 
6386cdedef59SAnirudh Venkataramanan /**
6387148beb61SHenry Tieman  * ice_vsi_open_ctrl - open control VSI for use
6388148beb61SHenry Tieman  * @vsi: the VSI to open
6389148beb61SHenry Tieman  *
6390148beb61SHenry Tieman  * Initialization of the Control VSI
6391148beb61SHenry Tieman  *
6392148beb61SHenry Tieman  * Returns 0 on success, negative value on error
6393148beb61SHenry Tieman  */
int ice_vsi_open_ctrl(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	/* program the VSI/ring configuration into hardware */
	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	/* request MSI-X vectors under a "<driver>-<device>:ctrl" name so the
	 * control VSI's interrupts are identifiable in /proc/interrupts
	 */
	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
		 dev_driver_string(dev), dev_name(dev));
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	ice_vsi_cfg_msix(vsi);

	err = ice_vsi_start_all_rx_rings(vsi);
	if (err)
		goto err_up_complete;

	/* mark the VSI up and enable its interrupts */
	clear_bit(ICE_VSI_DOWN, vsi->state);
	ice_vsi_ena_irq(vsi);

	return 0;

err_up_complete:
	ice_down(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
6441148beb61SHenry Tieman 
6442148beb61SHenry Tieman /**
6443cdedef59SAnirudh Venkataramanan  * ice_vsi_open - Called when a network interface is made active
6444cdedef59SAnirudh Venkataramanan  * @vsi: the VSI to open
6445cdedef59SAnirudh Venkataramanan  *
6446cdedef59SAnirudh Venkataramanan  * Initialization of the VSI
6447cdedef59SAnirudh Venkataramanan  *
6448cdedef59SAnirudh Venkataramanan  * Returns 0 on success, negative value on error
6449cdedef59SAnirudh Venkataramanan  */
int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	/* program the VSI/ring configuration into hardware */
	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	/* request MSI-X vectors named "<driver>-<netdev>" for this VSI */
	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* only the PF VSI exposes its queues to the network stack */
	if (vsi->type == ICE_VSI_PF) {
		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
		if (err)
			goto err_set_qs;

		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
		if (err)
			goto err_set_qs;
	}

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

	/* unwind in reverse order of the steps above */
err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
6503cdedef59SAnirudh Venkataramanan 
6504cdedef59SAnirudh Venkataramanan /**
65050f9d5027SAnirudh Venkataramanan  * ice_vsi_release_all - Delete all VSIs
65060f9d5027SAnirudh Venkataramanan  * @pf: PF from which all VSIs are being removed
65070f9d5027SAnirudh Venkataramanan  */
65080f9d5027SAnirudh Venkataramanan static void ice_vsi_release_all(struct ice_pf *pf)
65090f9d5027SAnirudh Venkataramanan {
65100f9d5027SAnirudh Venkataramanan 	int err, i;
65110f9d5027SAnirudh Venkataramanan 
65120f9d5027SAnirudh Venkataramanan 	if (!pf->vsi)
65130f9d5027SAnirudh Venkataramanan 		return;
65140f9d5027SAnirudh Venkataramanan 
651580ed404aSBrett Creeley 	ice_for_each_vsi(pf, i) {
65160f9d5027SAnirudh Venkataramanan 		if (!pf->vsi[i])
65170f9d5027SAnirudh Venkataramanan 			continue;
65180f9d5027SAnirudh Venkataramanan 
6519fbc7b27aSKiran Patil 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
6520fbc7b27aSKiran Patil 			continue;
6521fbc7b27aSKiran Patil 
65220f9d5027SAnirudh Venkataramanan 		err = ice_vsi_release(pf->vsi[i]);
65230f9d5027SAnirudh Venkataramanan 		if (err)
652419cce2c6SAnirudh Venkataramanan 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
65250f9d5027SAnirudh Venkataramanan 				i, err, pf->vsi[i]->vsi_num);
65260f9d5027SAnirudh Venkataramanan 	}
65270f9d5027SAnirudh Venkataramanan }
65280f9d5027SAnirudh Venkataramanan 
65290f9d5027SAnirudh Venkataramanan /**
6530462acf6aSTony Nguyen  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6531462acf6aSTony Nguyen  * @pf: pointer to the PF instance
6532462acf6aSTony Nguyen  * @type: VSI type to rebuild
6533462acf6aSTony Nguyen  *
6534462acf6aSTony Nguyen  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
65350f9d5027SAnirudh Venkataramanan  */
6536462acf6aSTony Nguyen static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
65370f9d5027SAnirudh Venkataramanan {
65384015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
6539462acf6aSTony Nguyen 	int i, err;
65400f9d5027SAnirudh Venkataramanan 
654180ed404aSBrett Creeley 	ice_for_each_vsi(pf, i) {
65424425e053SKrzysztof Kazimierczak 		struct ice_vsi *vsi = pf->vsi[i];
65430f9d5027SAnirudh Venkataramanan 
6544462acf6aSTony Nguyen 		if (!vsi || vsi->type != type)
65450f9d5027SAnirudh Venkataramanan 			continue;
65460f9d5027SAnirudh Venkataramanan 
6547462acf6aSTony Nguyen 		/* rebuild the VSI */
654887324e74SHenry Tieman 		err = ice_vsi_rebuild(vsi, true);
65490f9d5027SAnirudh Venkataramanan 		if (err) {
655019cce2c6SAnirudh Venkataramanan 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6551964674f1SAnirudh Venkataramanan 				err, vsi->idx, ice_vsi_type_str(type));
65520f9d5027SAnirudh Venkataramanan 			return err;
65530f9d5027SAnirudh Venkataramanan 		}
65540f9d5027SAnirudh Venkataramanan 
6555462acf6aSTony Nguyen 		/* replay filters for the VSI */
65562ccc1c1cSTony Nguyen 		err = ice_replay_vsi(&pf->hw, vsi->idx);
65572ccc1c1cSTony Nguyen 		if (err) {
65585f87ec48STony Nguyen 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
65592ccc1c1cSTony Nguyen 				err, vsi->idx, ice_vsi_type_str(type));
6560c1484691STony Nguyen 			return err;
6561334cb062SAnirudh Venkataramanan 		}
6562334cb062SAnirudh Venkataramanan 
6563334cb062SAnirudh Venkataramanan 		/* Re-map HW VSI number, using VSI handle that has been
6564334cb062SAnirudh Venkataramanan 		 * previously validated in ice_replay_vsi() call above
6565334cb062SAnirudh Venkataramanan 		 */
6566462acf6aSTony Nguyen 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6567334cb062SAnirudh Venkataramanan 
6568462acf6aSTony Nguyen 		/* enable the VSI */
6569462acf6aSTony Nguyen 		err = ice_ena_vsi(vsi, false);
6570462acf6aSTony Nguyen 		if (err) {
657119cce2c6SAnirudh Venkataramanan 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6572964674f1SAnirudh Venkataramanan 				err, vsi->idx, ice_vsi_type_str(type));
6573462acf6aSTony Nguyen 			return err;
6574334cb062SAnirudh Venkataramanan 		}
6575334cb062SAnirudh Venkataramanan 
65764015d11eSBrett Creeley 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
65774015d11eSBrett Creeley 			 ice_vsi_type_str(type));
6578462acf6aSTony Nguyen 	}
6579462acf6aSTony Nguyen 
6580334cb062SAnirudh Venkataramanan 	return 0;
6581334cb062SAnirudh Venkataramanan }
6582334cb062SAnirudh Venkataramanan 
6583334cb062SAnirudh Venkataramanan /**
6584462acf6aSTony Nguyen  * ice_update_pf_netdev_link - Update PF netdev link status
6585462acf6aSTony Nguyen  * @pf: pointer to the PF instance
6586462acf6aSTony Nguyen  */
6587462acf6aSTony Nguyen static void ice_update_pf_netdev_link(struct ice_pf *pf)
6588462acf6aSTony Nguyen {
6589462acf6aSTony Nguyen 	bool link_up;
6590462acf6aSTony Nguyen 	int i;
6591462acf6aSTony Nguyen 
6592462acf6aSTony Nguyen 	ice_for_each_vsi(pf, i) {
6593462acf6aSTony Nguyen 		struct ice_vsi *vsi = pf->vsi[i];
6594462acf6aSTony Nguyen 
6595462acf6aSTony Nguyen 		if (!vsi || vsi->type != ICE_VSI_PF)
6596462acf6aSTony Nguyen 			return;
6597462acf6aSTony Nguyen 
6598462acf6aSTony Nguyen 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6599462acf6aSTony Nguyen 		if (link_up) {
6600462acf6aSTony Nguyen 			netif_carrier_on(pf->vsi[i]->netdev);
6601462acf6aSTony Nguyen 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6602462acf6aSTony Nguyen 		} else {
6603462acf6aSTony Nguyen 			netif_carrier_off(pf->vsi[i]->netdev);
6604462acf6aSTony Nguyen 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6605462acf6aSTony Nguyen 		}
6606462acf6aSTony Nguyen 	}
6607462acf6aSTony Nguyen }
6608462acf6aSTony Nguyen 
6609462acf6aSTony Nguyen /**
66100b28b702SAnirudh Venkataramanan  * ice_rebuild - rebuild after reset
66112f2da36eSAnirudh Venkataramanan  * @pf: PF to rebuild
6612462acf6aSTony Nguyen  * @reset_type: type of reset
661312bb018cSBrett Creeley  *
661412bb018cSBrett Creeley  * Do not rebuild VF VSI in this flow because that is already handled via
661512bb018cSBrett Creeley  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
661612bb018cSBrett Creeley  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
661712bb018cSBrett Creeley  * to reset/rebuild all the VF VSI twice.
66180b28b702SAnirudh Venkataramanan  */
6619462acf6aSTony Nguyen static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
66200b28b702SAnirudh Venkataramanan {
66214015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
66220b28b702SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
6623462acf6aSTony Nguyen 	int err;
66240b28b702SAnirudh Venkataramanan 
66257e408e07SAnirudh Venkataramanan 	if (test_bit(ICE_DOWN, pf->state))
66260b28b702SAnirudh Venkataramanan 		goto clear_recovery;
66270b28b702SAnirudh Venkataramanan 
6628462acf6aSTony Nguyen 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
66290b28b702SAnirudh Venkataramanan 
6630399e27dbSJacob Keller 	if (reset_type == ICE_RESET_EMPR) {
6631399e27dbSJacob Keller 		/* If an EMP reset has occurred, any previously pending flash
6632399e27dbSJacob Keller 		 * update will have completed. We no longer know whether or
6633399e27dbSJacob Keller 		 * not the NVM update EMP reset is restricted.
6634399e27dbSJacob Keller 		 */
6635399e27dbSJacob Keller 		pf->fw_emp_reset_disabled = false;
6636399e27dbSJacob Keller 	}
6637399e27dbSJacob Keller 
66382ccc1c1cSTony Nguyen 	err = ice_init_all_ctrlq(hw);
66392ccc1c1cSTony Nguyen 	if (err) {
66402ccc1c1cSTony Nguyen 		dev_err(dev, "control queues init failed %d\n", err);
66410f9d5027SAnirudh Venkataramanan 		goto err_init_ctrlq;
66420b28b702SAnirudh Venkataramanan 	}
66430b28b702SAnirudh Venkataramanan 
6644462acf6aSTony Nguyen 	/* if DDP was previously loaded successfully */
6645462acf6aSTony Nguyen 	if (!ice_is_safe_mode(pf)) {
6646462acf6aSTony Nguyen 		/* reload the SW DB of filter tables */
6647462acf6aSTony Nguyen 		if (reset_type == ICE_RESET_PFR)
6648462acf6aSTony Nguyen 			ice_fill_blk_tbls(hw);
6649462acf6aSTony Nguyen 		else
6650462acf6aSTony Nguyen 			/* Reload DDP Package after CORER/GLOBR reset */
6651462acf6aSTony Nguyen 			ice_load_pkg(NULL, pf);
6652462acf6aSTony Nguyen 	}
6653462acf6aSTony Nguyen 
66542ccc1c1cSTony Nguyen 	err = ice_clear_pf_cfg(hw);
66552ccc1c1cSTony Nguyen 	if (err) {
66562ccc1c1cSTony Nguyen 		dev_err(dev, "clear PF configuration failed %d\n", err);
66570f9d5027SAnirudh Venkataramanan 		goto err_init_ctrlq;
66580b28b702SAnirudh Venkataramanan 	}
66590b28b702SAnirudh Venkataramanan 
6660fc0f39bcSBrett Creeley 	if (pf->first_sw->dflt_vsi_ena)
666119cce2c6SAnirudh Venkataramanan 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6662fc0f39bcSBrett Creeley 	/* clear the default VSI configuration if it exists */
6663fc0f39bcSBrett Creeley 	pf->first_sw->dflt_vsi = NULL;
6664fc0f39bcSBrett Creeley 	pf->first_sw->dflt_vsi_ena = false;
6665fc0f39bcSBrett Creeley 
66660b28b702SAnirudh Venkataramanan 	ice_clear_pxe_mode(hw);
66670b28b702SAnirudh Venkataramanan 
66682ccc1c1cSTony Nguyen 	err = ice_init_nvm(hw);
66692ccc1c1cSTony Nguyen 	if (err) {
66702ccc1c1cSTony Nguyen 		dev_err(dev, "ice_init_nvm failed %d\n", err);
667197a4ec01SJacob Keller 		goto err_init_ctrlq;
667297a4ec01SJacob Keller 	}
667397a4ec01SJacob Keller 
66742ccc1c1cSTony Nguyen 	err = ice_get_caps(hw);
66752ccc1c1cSTony Nguyen 	if (err) {
66762ccc1c1cSTony Nguyen 		dev_err(dev, "ice_get_caps failed %d\n", err);
66770f9d5027SAnirudh Venkataramanan 		goto err_init_ctrlq;
66780b28b702SAnirudh Venkataramanan 	}
66790b28b702SAnirudh Venkataramanan 
66802ccc1c1cSTony Nguyen 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
66812ccc1c1cSTony Nguyen 	if (err) {
66822ccc1c1cSTony Nguyen 		dev_err(dev, "set_mac_cfg failed %d\n", err);
668342449105SAnirudh Venkataramanan 		goto err_init_ctrlq;
668442449105SAnirudh Venkataramanan 	}
668542449105SAnirudh Venkataramanan 
66860f9d5027SAnirudh Venkataramanan 	err = ice_sched_init_port(hw->port_info);
66870f9d5027SAnirudh Venkataramanan 	if (err)
66880f9d5027SAnirudh Venkataramanan 		goto err_sched_init_port;
66890f9d5027SAnirudh Venkataramanan 
66900b28b702SAnirudh Venkataramanan 	/* start misc vector */
66910b28b702SAnirudh Venkataramanan 	err = ice_req_irq_msix_misc(pf);
66920b28b702SAnirudh Venkataramanan 	if (err) {
66930b28b702SAnirudh Venkataramanan 		dev_err(dev, "misc vector setup failed: %d\n", err);
6694462acf6aSTony Nguyen 		goto err_sched_init_port;
66950b28b702SAnirudh Venkataramanan 	}
66960b28b702SAnirudh Venkataramanan 
669783af0039SHenry Tieman 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
669883af0039SHenry Tieman 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
669983af0039SHenry Tieman 		if (!rd32(hw, PFQF_FD_SIZE)) {
670083af0039SHenry Tieman 			u16 unused, guar, b_effort;
670183af0039SHenry Tieman 
670283af0039SHenry Tieman 			guar = hw->func_caps.fd_fltr_guar;
670383af0039SHenry Tieman 			b_effort = hw->func_caps.fd_fltr_best_effort;
670483af0039SHenry Tieman 
670583af0039SHenry Tieman 			/* force guaranteed filter pool for PF */
670683af0039SHenry Tieman 			ice_alloc_fd_guar_item(hw, &unused, guar);
670783af0039SHenry Tieman 			/* force shared filter pool for PF */
670883af0039SHenry Tieman 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
670983af0039SHenry Tieman 		}
671083af0039SHenry Tieman 	}
671183af0039SHenry Tieman 
6712462acf6aSTony Nguyen 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6713462acf6aSTony Nguyen 		ice_dcb_rebuild(pf);
6714462acf6aSTony Nguyen 
671506c16d89SJacob Keller 	/* If the PF previously had enabled PTP, PTP init needs to happen before
671606c16d89SJacob Keller 	 * the VSI rebuild. If not, this causes the PTP link status events to
671706c16d89SJacob Keller 	 * fail.
671806c16d89SJacob Keller 	 */
671906c16d89SJacob Keller 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
672048096710SKarol Kolacinski 		ice_ptp_reset(pf);
672106c16d89SJacob Keller 
6722462acf6aSTony Nguyen 	/* rebuild PF VSI */
6723462acf6aSTony Nguyen 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
67240f9d5027SAnirudh Venkataramanan 	if (err) {
6725462acf6aSTony Nguyen 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
67260f9d5027SAnirudh Venkataramanan 		goto err_vsi_rebuild;
67270f9d5027SAnirudh Venkataramanan 	}
67280b28b702SAnirudh Venkataramanan 
672948096710SKarol Kolacinski 	/* configure PTP timestamping after VSI rebuild */
673048096710SKarol Kolacinski 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
673148096710SKarol Kolacinski 		ice_ptp_cfg_timestamp(pf, false);
673248096710SKarol Kolacinski 
6733b3be918dSGrzegorz Nitka 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
6734b3be918dSGrzegorz Nitka 	if (err) {
6735b3be918dSGrzegorz Nitka 		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
6736b3be918dSGrzegorz Nitka 		goto err_vsi_rebuild;
6737b3be918dSGrzegorz Nitka 	}
6738b3be918dSGrzegorz Nitka 
6739fbc7b27aSKiran Patil 	if (reset_type == ICE_RESET_PFR) {
6740fbc7b27aSKiran Patil 		err = ice_rebuild_channels(pf);
6741fbc7b27aSKiran Patil 		if (err) {
6742fbc7b27aSKiran Patil 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
6743fbc7b27aSKiran Patil 				err);
6744fbc7b27aSKiran Patil 			goto err_vsi_rebuild;
6745fbc7b27aSKiran Patil 		}
6746fbc7b27aSKiran Patil 	}
6747fbc7b27aSKiran Patil 
674883af0039SHenry Tieman 	/* If Flow Director is active */
674983af0039SHenry Tieman 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
675083af0039SHenry Tieman 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
675183af0039SHenry Tieman 		if (err) {
675283af0039SHenry Tieman 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
675383af0039SHenry Tieman 			goto err_vsi_rebuild;
675483af0039SHenry Tieman 		}
675583af0039SHenry Tieman 
675683af0039SHenry Tieman 		/* replay HW Flow Director recipes */
675783af0039SHenry Tieman 		if (hw->fdir_prof)
675883af0039SHenry Tieman 			ice_fdir_replay_flows(hw);
675983af0039SHenry Tieman 
676083af0039SHenry Tieman 		/* replay Flow Director filters */
676183af0039SHenry Tieman 		ice_fdir_replay_fltrs(pf);
676228bf2672SBrett Creeley 
676328bf2672SBrett Creeley 		ice_rebuild_arfs(pf);
676483af0039SHenry Tieman 	}
676583af0039SHenry Tieman 
6766462acf6aSTony Nguyen 	ice_update_pf_netdev_link(pf);
6767462acf6aSTony Nguyen 
6768462acf6aSTony Nguyen 	/* tell the firmware we are up */
67692ccc1c1cSTony Nguyen 	err = ice_send_version(pf);
67702ccc1c1cSTony Nguyen 	if (err) {
67715f87ec48STony Nguyen 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
67722ccc1c1cSTony Nguyen 			err);
6773462acf6aSTony Nguyen 		goto err_vsi_rebuild;
6774ce317dd9SAnirudh Venkataramanan 	}
6775462acf6aSTony Nguyen 
6776462acf6aSTony Nguyen 	ice_replay_post(hw);
6777ce317dd9SAnirudh Venkataramanan 
67780f9d5027SAnirudh Venkataramanan 	/* if we get here, reset flow is successful */
67797e408e07SAnirudh Venkataramanan 	clear_bit(ICE_RESET_FAILED, pf->state);
6780f9f5301eSDave Ertman 
6781f9f5301eSDave Ertman 	ice_plug_aux_dev(pf);
67820b28b702SAnirudh Venkataramanan 	return;
67830b28b702SAnirudh Venkataramanan 
67840f9d5027SAnirudh Venkataramanan err_vsi_rebuild:
67850f9d5027SAnirudh Venkataramanan err_sched_init_port:
67860f9d5027SAnirudh Venkataramanan 	ice_sched_cleanup_all(hw);
67870f9d5027SAnirudh Venkataramanan err_init_ctrlq:
67880b28b702SAnirudh Venkataramanan 	ice_shutdown_all_ctrlq(hw);
67897e408e07SAnirudh Venkataramanan 	set_bit(ICE_RESET_FAILED, pf->state);
67900b28b702SAnirudh Venkataramanan clear_recovery:
67910f9d5027SAnirudh Venkataramanan 	/* set this bit in PF state to control service task scheduling */
67927e408e07SAnirudh Venkataramanan 	set_bit(ICE_NEEDS_RESTART, pf->state);
67930f9d5027SAnirudh Venkataramanan 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
67940b28b702SAnirudh Venkataramanan }
67950b28b702SAnirudh Venkataramanan 
67960b28b702SAnirudh Venkataramanan /**
679723b44513SMaciej Fijalkowski  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
679823b44513SMaciej Fijalkowski  * @vsi: Pointer to VSI structure
679923b44513SMaciej Fijalkowski  */
680023b44513SMaciej Fijalkowski static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
680123b44513SMaciej Fijalkowski {
680223b44513SMaciej Fijalkowski 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
680323b44513SMaciej Fijalkowski 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
680423b44513SMaciej Fijalkowski 	else
680523b44513SMaciej Fijalkowski 		return ICE_RXBUF_3072;
680623b44513SMaciej Fijalkowski }
680723b44513SMaciej Fijalkowski 
680823b44513SMaciej Fijalkowski /**
6809e94d4478SAnirudh Venkataramanan  * ice_change_mtu - NDO callback to change the MTU
6810e94d4478SAnirudh Venkataramanan  * @netdev: network interface device structure
6811e94d4478SAnirudh Venkataramanan  * @new_mtu: new value for maximum frame size
6812e94d4478SAnirudh Venkataramanan  *
6813e94d4478SAnirudh Venkataramanan  * Returns 0 on success, negative on failure
6814e94d4478SAnirudh Venkataramanan  */
6815e94d4478SAnirudh Venkataramanan static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6816e94d4478SAnirudh Venkataramanan {
6817e94d4478SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
6818e94d4478SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
6819e94d4478SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
6820348048e7SDave Ertman 	struct iidc_event *event;
6821e94d4478SAnirudh Venkataramanan 	u8 count = 0;
6822348048e7SDave Ertman 	int err = 0;
6823e94d4478SAnirudh Venkataramanan 
682422bef5e7SJesse Brandeburg 	if (new_mtu == (int)netdev->mtu) {
68252f2da36eSAnirudh Venkataramanan 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6826e94d4478SAnirudh Venkataramanan 		return 0;
6827e94d4478SAnirudh Venkataramanan 	}
6828e94d4478SAnirudh Venkataramanan 
6829efc2214bSMaciej Fijalkowski 	if (ice_is_xdp_ena_vsi(vsi)) {
683023b44513SMaciej Fijalkowski 		int frame_size = ice_max_xdp_frame_size(vsi);
6831efc2214bSMaciej Fijalkowski 
6832efc2214bSMaciej Fijalkowski 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6833efc2214bSMaciej Fijalkowski 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
683423b44513SMaciej Fijalkowski 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6835efc2214bSMaciej Fijalkowski 			return -EINVAL;
6836efc2214bSMaciej Fijalkowski 		}
6837efc2214bSMaciej Fijalkowski 	}
6838efc2214bSMaciej Fijalkowski 
6839e94d4478SAnirudh Venkataramanan 	/* if a reset is in progress, wait for some time for it to complete */
6840e94d4478SAnirudh Venkataramanan 	do {
68415df7e45dSDave Ertman 		if (ice_is_reset_in_progress(pf->state)) {
6842e94d4478SAnirudh Venkataramanan 			count++;
6843e94d4478SAnirudh Venkataramanan 			usleep_range(1000, 2000);
6844e94d4478SAnirudh Venkataramanan 		} else {
6845e94d4478SAnirudh Venkataramanan 			break;
6846e94d4478SAnirudh Venkataramanan 		}
6847e94d4478SAnirudh Venkataramanan 
6848e94d4478SAnirudh Venkataramanan 	} while (count < 100);
6849e94d4478SAnirudh Venkataramanan 
6850e94d4478SAnirudh Venkataramanan 	if (count == 100) {
68512f2da36eSAnirudh Venkataramanan 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6852e94d4478SAnirudh Venkataramanan 		return -EBUSY;
6853e94d4478SAnirudh Venkataramanan 	}
6854e94d4478SAnirudh Venkataramanan 
6855348048e7SDave Ertman 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6856348048e7SDave Ertman 	if (!event)
6857348048e7SDave Ertman 		return -ENOMEM;
6858348048e7SDave Ertman 
6859348048e7SDave Ertman 	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6860348048e7SDave Ertman 	ice_send_event_to_aux(pf, event);
6861348048e7SDave Ertman 	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6862348048e7SDave Ertman 
686322bef5e7SJesse Brandeburg 	netdev->mtu = (unsigned int)new_mtu;
6864e94d4478SAnirudh Venkataramanan 
6865e94d4478SAnirudh Venkataramanan 	/* if VSI is up, bring it down and then back up */
6866e97fb1aeSAnirudh Venkataramanan 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6867e94d4478SAnirudh Venkataramanan 		err = ice_down(vsi);
6868e94d4478SAnirudh Venkataramanan 		if (err) {
6869fe6cd890SMitch Williams 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6870348048e7SDave Ertman 			goto event_after;
6871e94d4478SAnirudh Venkataramanan 		}
6872e94d4478SAnirudh Venkataramanan 
6873e94d4478SAnirudh Venkataramanan 		err = ice_up(vsi);
6874e94d4478SAnirudh Venkataramanan 		if (err) {
68752f2da36eSAnirudh Venkataramanan 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6876348048e7SDave Ertman 			goto event_after;
6877e94d4478SAnirudh Venkataramanan 		}
6878e94d4478SAnirudh Venkataramanan 	}
6879e94d4478SAnirudh Venkataramanan 
6880bda5b7dbSTony Nguyen 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6881348048e7SDave Ertman event_after:
6882348048e7SDave Ertman 	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6883348048e7SDave Ertman 	ice_send_event_to_aux(pf, event);
6884348048e7SDave Ertman 	kfree(event);
6885348048e7SDave Ertman 
6886348048e7SDave Ertman 	return err;
6887e94d4478SAnirudh Venkataramanan }
6888e94d4478SAnirudh Venkataramanan 
6889e94d4478SAnirudh Venkataramanan /**
6890a7605370SArnd Bergmann  * ice_eth_ioctl - Access the hwtstamp interface
689177a78115SJacob Keller  * @netdev: network interface device structure
689277a78115SJacob Keller  * @ifr: interface request data
689377a78115SJacob Keller  * @cmd: ioctl command
689477a78115SJacob Keller  */
6895a7605370SArnd Bergmann static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
689677a78115SJacob Keller {
689777a78115SJacob Keller 	struct ice_netdev_priv *np = netdev_priv(netdev);
689877a78115SJacob Keller 	struct ice_pf *pf = np->vsi->back;
689977a78115SJacob Keller 
690077a78115SJacob Keller 	switch (cmd) {
690177a78115SJacob Keller 	case SIOCGHWTSTAMP:
690277a78115SJacob Keller 		return ice_ptp_get_ts_config(pf, ifr);
690377a78115SJacob Keller 	case SIOCSHWTSTAMP:
690477a78115SJacob Keller 		return ice_ptp_set_ts_config(pf, ifr);
690577a78115SJacob Keller 	default:
690677a78115SJacob Keller 		return -EOPNOTSUPP;
690777a78115SJacob Keller 	}
690877a78115SJacob Keller }
690977a78115SJacob Keller 
691077a78115SJacob Keller /**
69110fee3577SLihong Yang  * ice_aq_str - convert AQ err code to a string
69120fee3577SLihong Yang  * @aq_err: the AQ error code to convert
69130fee3577SLihong Yang  */
69140fee3577SLihong Yang const char *ice_aq_str(enum ice_aq_err aq_err)
69150fee3577SLihong Yang {
69160fee3577SLihong Yang 	switch (aq_err) {
69170fee3577SLihong Yang 	case ICE_AQ_RC_OK:
69180fee3577SLihong Yang 		return "OK";
69190fee3577SLihong Yang 	case ICE_AQ_RC_EPERM:
69200fee3577SLihong Yang 		return "ICE_AQ_RC_EPERM";
69210fee3577SLihong Yang 	case ICE_AQ_RC_ENOENT:
69220fee3577SLihong Yang 		return "ICE_AQ_RC_ENOENT";
69230fee3577SLihong Yang 	case ICE_AQ_RC_ENOMEM:
69240fee3577SLihong Yang 		return "ICE_AQ_RC_ENOMEM";
69250fee3577SLihong Yang 	case ICE_AQ_RC_EBUSY:
69260fee3577SLihong Yang 		return "ICE_AQ_RC_EBUSY";
69270fee3577SLihong Yang 	case ICE_AQ_RC_EEXIST:
69280fee3577SLihong Yang 		return "ICE_AQ_RC_EEXIST";
69290fee3577SLihong Yang 	case ICE_AQ_RC_EINVAL:
69300fee3577SLihong Yang 		return "ICE_AQ_RC_EINVAL";
69310fee3577SLihong Yang 	case ICE_AQ_RC_ENOSPC:
69320fee3577SLihong Yang 		return "ICE_AQ_RC_ENOSPC";
69330fee3577SLihong Yang 	case ICE_AQ_RC_ENOSYS:
69340fee3577SLihong Yang 		return "ICE_AQ_RC_ENOSYS";
6935b5e19a64SChinh T Cao 	case ICE_AQ_RC_EMODE:
6936b5e19a64SChinh T Cao 		return "ICE_AQ_RC_EMODE";
69370fee3577SLihong Yang 	case ICE_AQ_RC_ENOSEC:
69380fee3577SLihong Yang 		return "ICE_AQ_RC_ENOSEC";
69390fee3577SLihong Yang 	case ICE_AQ_RC_EBADSIG:
69400fee3577SLihong Yang 		return "ICE_AQ_RC_EBADSIG";
69410fee3577SLihong Yang 	case ICE_AQ_RC_ESVN:
69420fee3577SLihong Yang 		return "ICE_AQ_RC_ESVN";
69430fee3577SLihong Yang 	case ICE_AQ_RC_EBADMAN:
69440fee3577SLihong Yang 		return "ICE_AQ_RC_EBADMAN";
69450fee3577SLihong Yang 	case ICE_AQ_RC_EBADBUF:
69460fee3577SLihong Yang 		return "ICE_AQ_RC_EBADBUF";
69470fee3577SLihong Yang 	}
69480fee3577SLihong Yang 
69490fee3577SLihong Yang 	return "ICE_AQ_RC_UNKNOWN";
69500fee3577SLihong Yang }
69510fee3577SLihong Yang 
69520fee3577SLihong Yang /**
6953b66a972aSBrett Creeley  * ice_set_rss_lut - Set RSS LUT
6954d76a60baSAnirudh Venkataramanan  * @vsi: Pointer to VSI structure
6955d76a60baSAnirudh Venkataramanan  * @lut: Lookup table
6956d76a60baSAnirudh Venkataramanan  * @lut_size: Lookup table size
6957d76a60baSAnirudh Venkataramanan  *
6958d76a60baSAnirudh Venkataramanan  * Returns 0 on success, negative on failure
6959d76a60baSAnirudh Venkataramanan  */
6960b66a972aSBrett Creeley int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6961d76a60baSAnirudh Venkataramanan {
6962b66a972aSBrett Creeley 	struct ice_aq_get_set_rss_lut_params params = {};
6963b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
69645e24d598STony Nguyen 	int status;
6965d76a60baSAnirudh Venkataramanan 
6966b66a972aSBrett Creeley 	if (!lut)
6967b66a972aSBrett Creeley 		return -EINVAL;
6968d76a60baSAnirudh Venkataramanan 
6969b66a972aSBrett Creeley 	params.vsi_handle = vsi->idx;
6970b66a972aSBrett Creeley 	params.lut_size = lut_size;
6971b66a972aSBrett Creeley 	params.lut_type = vsi->rss_lut_type;
6972b66a972aSBrett Creeley 	params.lut = lut;
6973d76a60baSAnirudh Venkataramanan 
6974b66a972aSBrett Creeley 	status = ice_aq_set_rss_lut(hw, &params);
6975c1484691STony Nguyen 	if (status)
69765f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
69775518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
6978d76a60baSAnirudh Venkataramanan 
6979c1484691STony Nguyen 	return status;
6980d76a60baSAnirudh Venkataramanan }
6981d76a60baSAnirudh Venkataramanan 
6982d76a60baSAnirudh Venkataramanan /**
6983b66a972aSBrett Creeley  * ice_set_rss_key - Set RSS key
6984b66a972aSBrett Creeley  * @vsi: Pointer to the VSI structure
6985b66a972aSBrett Creeley  * @seed: RSS hash seed
6986b66a972aSBrett Creeley  *
6987b66a972aSBrett Creeley  * Returns 0 on success, negative on failure
6988b66a972aSBrett Creeley  */
6989b66a972aSBrett Creeley int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6990b66a972aSBrett Creeley {
6991b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
69925e24d598STony Nguyen 	int status;
6993b66a972aSBrett Creeley 
6994b66a972aSBrett Creeley 	if (!seed)
6995b66a972aSBrett Creeley 		return -EINVAL;
6996b66a972aSBrett Creeley 
6997b66a972aSBrett Creeley 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6998c1484691STony Nguyen 	if (status)
69995f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
70005518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
7001b66a972aSBrett Creeley 
7002c1484691STony Nguyen 	return status;
7003b66a972aSBrett Creeley }
7004b66a972aSBrett Creeley 
7005b66a972aSBrett Creeley /**
7006b66a972aSBrett Creeley  * ice_get_rss_lut - Get RSS LUT
7007d76a60baSAnirudh Venkataramanan  * @vsi: Pointer to VSI structure
7008d76a60baSAnirudh Venkataramanan  * @lut: Buffer to store the lookup table entries
7009d76a60baSAnirudh Venkataramanan  * @lut_size: Size of buffer to store the lookup table entries
7010d76a60baSAnirudh Venkataramanan  *
7011d76a60baSAnirudh Venkataramanan  * Returns 0 on success, negative on failure
7012d76a60baSAnirudh Venkataramanan  */
7013b66a972aSBrett Creeley int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7014d76a60baSAnirudh Venkataramanan {
7015b66a972aSBrett Creeley 	struct ice_aq_get_set_rss_lut_params params = {};
7016b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
70175e24d598STony Nguyen 	int status;
7018d76a60baSAnirudh Venkataramanan 
7019b66a972aSBrett Creeley 	if (!lut)
7020b66a972aSBrett Creeley 		return -EINVAL;
7021d76a60baSAnirudh Venkataramanan 
7022b66a972aSBrett Creeley 	params.vsi_handle = vsi->idx;
7023b66a972aSBrett Creeley 	params.lut_size = lut_size;
7024b66a972aSBrett Creeley 	params.lut_type = vsi->rss_lut_type;
7025b66a972aSBrett Creeley 	params.lut = lut;
7026b66a972aSBrett Creeley 
7027b66a972aSBrett Creeley 	status = ice_aq_get_rss_lut(hw, &params);
7028c1484691STony Nguyen 	if (status)
70295f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
70305518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
7031b66a972aSBrett Creeley 
7032c1484691STony Nguyen 	return status;
7033d76a60baSAnirudh Venkataramanan }
7034d76a60baSAnirudh Venkataramanan 
7035b66a972aSBrett Creeley /**
7036b66a972aSBrett Creeley  * ice_get_rss_key - Get RSS key
7037b66a972aSBrett Creeley  * @vsi: Pointer to VSI structure
7038b66a972aSBrett Creeley  * @seed: Buffer to store the key in
7039b66a972aSBrett Creeley  *
7040b66a972aSBrett Creeley  * Returns 0 on success, negative on failure
7041b66a972aSBrett Creeley  */
7042b66a972aSBrett Creeley int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7043b66a972aSBrett Creeley {
7044b66a972aSBrett Creeley 	struct ice_hw *hw = &vsi->back->hw;
70455e24d598STony Nguyen 	int status;
7046e3c53928SBrett Creeley 
7047b66a972aSBrett Creeley 	if (!seed)
7048b66a972aSBrett Creeley 		return -EINVAL;
7049b66a972aSBrett Creeley 
7050b66a972aSBrett Creeley 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7051c1484691STony Nguyen 	if (status)
70525f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
70535518ac2aSTony Nguyen 			status, ice_aq_str(hw->adminq.sq_last_status));
7054d76a60baSAnirudh Venkataramanan 
7055c1484691STony Nguyen 	return status;
7056d76a60baSAnirudh Venkataramanan }
7057d76a60baSAnirudh Venkataramanan 
7058d76a60baSAnirudh Venkataramanan /**
7059b1edc14aSMd Fahad Iqbal Polash  * ice_bridge_getlink - Get the hardware bridge mode
7060b1edc14aSMd Fahad Iqbal Polash  * @skb: skb buff
7061f9867df6SAnirudh Venkataramanan  * @pid: process ID
7062b1edc14aSMd Fahad Iqbal Polash  * @seq: RTNL message seq
7063b1edc14aSMd Fahad Iqbal Polash  * @dev: the netdev being configured
7064b1edc14aSMd Fahad Iqbal Polash  * @filter_mask: filter mask passed in
7065b1edc14aSMd Fahad Iqbal Polash  * @nlflags: netlink flags passed in
7066b1edc14aSMd Fahad Iqbal Polash  *
7067b1edc14aSMd Fahad Iqbal Polash  * Return the bridge mode (VEB/VEPA)
7068b1edc14aSMd Fahad Iqbal Polash  */
7069b1edc14aSMd Fahad Iqbal Polash static int
7070b1edc14aSMd Fahad Iqbal Polash ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7071b1edc14aSMd Fahad Iqbal Polash 		   struct net_device *dev, u32 filter_mask, int nlflags)
7072b1edc14aSMd Fahad Iqbal Polash {
7073b1edc14aSMd Fahad Iqbal Polash 	struct ice_netdev_priv *np = netdev_priv(dev);
7074b1edc14aSMd Fahad Iqbal Polash 	struct ice_vsi *vsi = np->vsi;
7075b1edc14aSMd Fahad Iqbal Polash 	struct ice_pf *pf = vsi->back;
7076b1edc14aSMd Fahad Iqbal Polash 	u16 bmode;
7077b1edc14aSMd Fahad Iqbal Polash 
7078b1edc14aSMd Fahad Iqbal Polash 	bmode = pf->first_sw->bridge_mode;
7079b1edc14aSMd Fahad Iqbal Polash 
7080b1edc14aSMd Fahad Iqbal Polash 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7081b1edc14aSMd Fahad Iqbal Polash 				       filter_mask, NULL);
7082b1edc14aSMd Fahad Iqbal Polash }
7083b1edc14aSMd Fahad Iqbal Polash 
7084b1edc14aSMd Fahad Iqbal Polash /**
7085b1edc14aSMd Fahad Iqbal Polash  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7086b1edc14aSMd Fahad Iqbal Polash  * @vsi: Pointer to VSI structure
7087b1edc14aSMd Fahad Iqbal Polash  * @bmode: Hardware bridge mode (VEB/VEPA)
7088b1edc14aSMd Fahad Iqbal Polash  *
7089b1edc14aSMd Fahad Iqbal Polash  * Returns 0 on success, negative on failure
7090b1edc14aSMd Fahad Iqbal Polash  */
7091b1edc14aSMd Fahad Iqbal Polash static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7092b1edc14aSMd Fahad Iqbal Polash {
7093b1edc14aSMd Fahad Iqbal Polash 	struct ice_aqc_vsi_props *vsi_props;
7094b1edc14aSMd Fahad Iqbal Polash 	struct ice_hw *hw = &vsi->back->hw;
7095198a666aSBruce Allan 	struct ice_vsi_ctx *ctxt;
70962ccc1c1cSTony Nguyen 	int ret;
7097b1edc14aSMd Fahad Iqbal Polash 
7098b1edc14aSMd Fahad Iqbal Polash 	vsi_props = &vsi->info;
7099198a666aSBruce Allan 
71009efe35d0STony Nguyen 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7101198a666aSBruce Allan 	if (!ctxt)
7102198a666aSBruce Allan 		return -ENOMEM;
7103198a666aSBruce Allan 
7104198a666aSBruce Allan 	ctxt->info = vsi->info;
7105b1edc14aSMd Fahad Iqbal Polash 
7106b1edc14aSMd Fahad Iqbal Polash 	if (bmode == BRIDGE_MODE_VEB)
7107b1edc14aSMd Fahad Iqbal Polash 		/* change from VEPA to VEB mode */
7108198a666aSBruce Allan 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7109b1edc14aSMd Fahad Iqbal Polash 	else
7110b1edc14aSMd Fahad Iqbal Polash 		/* change from VEB to VEPA mode */
7111198a666aSBruce Allan 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7112198a666aSBruce Allan 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
71135726ca0eSAnirudh Venkataramanan 
71142ccc1c1cSTony Nguyen 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
71152ccc1c1cSTony Nguyen 	if (ret) {
71165f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
71172ccc1c1cSTony Nguyen 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7118198a666aSBruce Allan 		goto out;
7119b1edc14aSMd Fahad Iqbal Polash 	}
7120b1edc14aSMd Fahad Iqbal Polash 	/* Update sw flags for book keeping */
7121198a666aSBruce Allan 	vsi_props->sw_flags = ctxt->info.sw_flags;
7122b1edc14aSMd Fahad Iqbal Polash 
7123198a666aSBruce Allan out:
71249efe35d0STony Nguyen 	kfree(ctxt);
7125198a666aSBruce Allan 	return ret;
7126b1edc14aSMd Fahad Iqbal Polash }
7127b1edc14aSMd Fahad Iqbal Polash 
7128b1edc14aSMd Fahad Iqbal Polash /**
7129b1edc14aSMd Fahad Iqbal Polash  * ice_bridge_setlink - Set the hardware bridge mode
7130b1edc14aSMd Fahad Iqbal Polash  * @dev: the netdev being configured
7131b1edc14aSMd Fahad Iqbal Polash  * @nlh: RTNL message
7132b1edc14aSMd Fahad Iqbal Polash  * @flags: bridge setlink flags
71332fd527b7SPetr Machata  * @extack: netlink extended ack
7134b1edc14aSMd Fahad Iqbal Polash  *
7135b1edc14aSMd Fahad Iqbal Polash  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7136b1edc14aSMd Fahad Iqbal Polash  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
7137b1edc14aSMd Fahad Iqbal Polash  * not already set for all VSIs connected to this switch. And also update the
7138b1edc14aSMd Fahad Iqbal Polash  * unicast switch filter rules for the corresponding switch of the netdev.
7139b1edc14aSMd Fahad Iqbal Polash  */
7140b1edc14aSMd Fahad Iqbal Polash static int
7141b1edc14aSMd Fahad Iqbal Polash ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
71423d505147SBruce Allan 		   u16 __always_unused flags,
71433d505147SBruce Allan 		   struct netlink_ext_ack __always_unused *extack)
7144b1edc14aSMd Fahad Iqbal Polash {
7145b1edc14aSMd Fahad Iqbal Polash 	struct ice_netdev_priv *np = netdev_priv(dev);
7146b1edc14aSMd Fahad Iqbal Polash 	struct ice_pf *pf = np->vsi->back;
7147b1edc14aSMd Fahad Iqbal Polash 	struct nlattr *attr, *br_spec;
7148b1edc14aSMd Fahad Iqbal Polash 	struct ice_hw *hw = &pf->hw;
7149b1edc14aSMd Fahad Iqbal Polash 	struct ice_sw *pf_sw;
7150b1edc14aSMd Fahad Iqbal Polash 	int rem, v, err = 0;
7151b1edc14aSMd Fahad Iqbal Polash 
7152b1edc14aSMd Fahad Iqbal Polash 	pf_sw = pf->first_sw;
7153b1edc14aSMd Fahad Iqbal Polash 	/* find the attribute in the netlink message */
7154b1edc14aSMd Fahad Iqbal Polash 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7155b1edc14aSMd Fahad Iqbal Polash 
7156b1edc14aSMd Fahad Iqbal Polash 	nla_for_each_nested(attr, br_spec, rem) {
7157b1edc14aSMd Fahad Iqbal Polash 		__u16 mode;
7158b1edc14aSMd Fahad Iqbal Polash 
7159b1edc14aSMd Fahad Iqbal Polash 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7160b1edc14aSMd Fahad Iqbal Polash 			continue;
7161b1edc14aSMd Fahad Iqbal Polash 		mode = nla_get_u16(attr);
7162b1edc14aSMd Fahad Iqbal Polash 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7163b1edc14aSMd Fahad Iqbal Polash 			return -EINVAL;
7164b1edc14aSMd Fahad Iqbal Polash 		/* Continue  if bridge mode is not being flipped */
7165b1edc14aSMd Fahad Iqbal Polash 		if (mode == pf_sw->bridge_mode)
7166b1edc14aSMd Fahad Iqbal Polash 			continue;
7167b1edc14aSMd Fahad Iqbal Polash 		/* Iterates through the PF VSI list and update the loopback
7168b1edc14aSMd Fahad Iqbal Polash 		 * mode of the VSI
7169b1edc14aSMd Fahad Iqbal Polash 		 */
7170b1edc14aSMd Fahad Iqbal Polash 		ice_for_each_vsi(pf, v) {
7171b1edc14aSMd Fahad Iqbal Polash 			if (!pf->vsi[v])
7172b1edc14aSMd Fahad Iqbal Polash 				continue;
7173b1edc14aSMd Fahad Iqbal Polash 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7174b1edc14aSMd Fahad Iqbal Polash 			if (err)
7175b1edc14aSMd Fahad Iqbal Polash 				return err;
7176b1edc14aSMd Fahad Iqbal Polash 		}
7177b1edc14aSMd Fahad Iqbal Polash 
7178b1edc14aSMd Fahad Iqbal Polash 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7179b1edc14aSMd Fahad Iqbal Polash 		/* Update the unicast switch filter rules for the corresponding
7180b1edc14aSMd Fahad Iqbal Polash 		 * switch of the netdev
7181b1edc14aSMd Fahad Iqbal Polash 		 */
71822ccc1c1cSTony Nguyen 		err = ice_update_sw_rule_bridge_mode(hw);
71832ccc1c1cSTony Nguyen 		if (err) {
71845f87ec48STony Nguyen 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
71852ccc1c1cSTony Nguyen 				   mode, err,
71860fee3577SLihong Yang 				   ice_aq_str(hw->adminq.sq_last_status));
7187b1edc14aSMd Fahad Iqbal Polash 			/* revert hw->evb_veb */
7188b1edc14aSMd Fahad Iqbal Polash 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7189c1484691STony Nguyen 			return err;
7190b1edc14aSMd Fahad Iqbal Polash 		}
7191b1edc14aSMd Fahad Iqbal Polash 
7192b1edc14aSMd Fahad Iqbal Polash 		pf_sw->bridge_mode = mode;
7193b1edc14aSMd Fahad Iqbal Polash 	}
7194b1edc14aSMd Fahad Iqbal Polash 
7195b1edc14aSMd Fahad Iqbal Polash 	return 0;
7196b1edc14aSMd Fahad Iqbal Polash }
7197b1edc14aSMd Fahad Iqbal Polash 
/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 *
 * Invoked by the stack's Tx watchdog when @txqueue has stalled. Requests a
 * progressively heavier reset (PF -> CORE -> GLOBAL) on each successive
 * timeout; the escalation level drops back to 1 after 20 seconds without a
 * new timeout.
 */
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_tx_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs
	 * to. If yes then Tx timeout is not caused by a hung queue, no
	 * need to reset and rebuild
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		/* read HW head of the hung queue for the diagnostic log */
		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

	/* escalate: each repeat timeout requests the next bigger reset;
	 * past GLOBR the device is considered unrecoverable
	 */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	/* service task performs the actual reset */
	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
7279b3969fd7SSudheer Mogilappagari 
7280b3969fd7SSudheer Mogilappagari /**
72810d08a441SKiran Patil  * ice_setup_tc_cls_flower - flower classifier offloads
72820d08a441SKiran Patil  * @np: net device to configure
72830d08a441SKiran Patil  * @filter_dev: device on which filter is added
72840d08a441SKiran Patil  * @cls_flower: offload data
72850d08a441SKiran Patil  */
72860d08a441SKiran Patil static int
72870d08a441SKiran Patil ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
72880d08a441SKiran Patil 			struct net_device *filter_dev,
72890d08a441SKiran Patil 			struct flow_cls_offload *cls_flower)
72900d08a441SKiran Patil {
72910d08a441SKiran Patil 	struct ice_vsi *vsi = np->vsi;
72920d08a441SKiran Patil 
72930d08a441SKiran Patil 	if (cls_flower->common.chain_index)
72940d08a441SKiran Patil 		return -EOPNOTSUPP;
72950d08a441SKiran Patil 
72960d08a441SKiran Patil 	switch (cls_flower->command) {
72970d08a441SKiran Patil 	case FLOW_CLS_REPLACE:
72980d08a441SKiran Patil 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
72990d08a441SKiran Patil 	case FLOW_CLS_DESTROY:
73000d08a441SKiran Patil 		return ice_del_cls_flower(vsi, cls_flower);
73010d08a441SKiran Patil 	default:
73020d08a441SKiran Patil 		return -EINVAL;
73030d08a441SKiran Patil 	}
73040d08a441SKiran Patil }
73050d08a441SKiran Patil 
73060d08a441SKiran Patil /**
73070d08a441SKiran Patil  * ice_setup_tc_block_cb - callback handler registered for TC block
73080d08a441SKiran Patil  * @type: TC SETUP type
73090d08a441SKiran Patil  * @type_data: TC flower offload data that contains user input
73100d08a441SKiran Patil  * @cb_priv: netdev private data
73110d08a441SKiran Patil  */
73120d08a441SKiran Patil static int
73130d08a441SKiran Patil ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
73140d08a441SKiran Patil {
73150d08a441SKiran Patil 	struct ice_netdev_priv *np = cb_priv;
73160d08a441SKiran Patil 
73170d08a441SKiran Patil 	switch (type) {
73180d08a441SKiran Patil 	case TC_SETUP_CLSFLOWER:
73190d08a441SKiran Patil 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
73200d08a441SKiran Patil 					       type_data);
73210d08a441SKiran Patil 	default:
73220d08a441SKiran Patil 		return -EOPNOTSUPP;
73230d08a441SKiran Patil 	}
73240d08a441SKiran Patil }
73250d08a441SKiran Patil 
7326fbc7b27aSKiran Patil /**
7327fbc7b27aSKiran Patil  * ice_validate_mqprio_qopt - Validate TCF input parameters
7328fbc7b27aSKiran Patil  * @vsi: Pointer to VSI
7329fbc7b27aSKiran Patil  * @mqprio_qopt: input parameters for mqprio queue configuration
7330fbc7b27aSKiran Patil  *
7331fbc7b27aSKiran Patil  * This function validates MQPRIO params, such as qcount (power of 2 wherever
7332fbc7b27aSKiran Patil  * needed), and make sure user doesn't specify qcount and BW rate limit
7333fbc7b27aSKiran Patil  * for TCs, which are more than "num_tc"
7334fbc7b27aSKiran Patil  */
7335fbc7b27aSKiran Patil static int
7336fbc7b27aSKiran Patil ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7337fbc7b27aSKiran Patil 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
7338fbc7b27aSKiran Patil {
7339fbc7b27aSKiran Patil 	u64 sum_max_rate = 0, sum_min_rate = 0;
7340fbc7b27aSKiran Patil 	int non_power_of_2_qcount = 0;
7341fbc7b27aSKiran Patil 	struct ice_pf *pf = vsi->back;
7342fbc7b27aSKiran Patil 	int max_rss_q_cnt = 0;
7343fbc7b27aSKiran Patil 	struct device *dev;
7344fbc7b27aSKiran Patil 	int i, speed;
7345fbc7b27aSKiran Patil 	u8 num_tc;
7346fbc7b27aSKiran Patil 
7347fbc7b27aSKiran Patil 	if (vsi->type != ICE_VSI_PF)
7348fbc7b27aSKiran Patil 		return -EINVAL;
7349fbc7b27aSKiran Patil 
7350fbc7b27aSKiran Patil 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7351fbc7b27aSKiran Patil 	    mqprio_qopt->qopt.num_tc < 1 ||
7352fbc7b27aSKiran Patil 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7353fbc7b27aSKiran Patil 		return -EINVAL;
7354fbc7b27aSKiran Patil 
7355fbc7b27aSKiran Patil 	dev = ice_pf_to_dev(pf);
7356fbc7b27aSKiran Patil 	vsi->ch_rss_size = 0;
7357fbc7b27aSKiran Patil 	num_tc = mqprio_qopt->qopt.num_tc;
7358fbc7b27aSKiran Patil 
7359fbc7b27aSKiran Patil 	for (i = 0; num_tc; i++) {
7360fbc7b27aSKiran Patil 		int qcount = mqprio_qopt->qopt.count[i];
7361fbc7b27aSKiran Patil 		u64 max_rate, min_rate, rem;
7362fbc7b27aSKiran Patil 
7363fbc7b27aSKiran Patil 		if (!qcount)
7364fbc7b27aSKiran Patil 			return -EINVAL;
7365fbc7b27aSKiran Patil 
7366fbc7b27aSKiran Patil 		if (is_power_of_2(qcount)) {
7367fbc7b27aSKiran Patil 			if (non_power_of_2_qcount &&
7368fbc7b27aSKiran Patil 			    qcount > non_power_of_2_qcount) {
7369fbc7b27aSKiran Patil 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7370fbc7b27aSKiran Patil 					qcount, non_power_of_2_qcount);
7371fbc7b27aSKiran Patil 				return -EINVAL;
7372fbc7b27aSKiran Patil 			}
7373fbc7b27aSKiran Patil 			if (qcount > max_rss_q_cnt)
7374fbc7b27aSKiran Patil 				max_rss_q_cnt = qcount;
7375fbc7b27aSKiran Patil 		} else {
7376fbc7b27aSKiran Patil 			if (non_power_of_2_qcount &&
7377fbc7b27aSKiran Patil 			    qcount != non_power_of_2_qcount) {
7378fbc7b27aSKiran Patil 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7379fbc7b27aSKiran Patil 					qcount, non_power_of_2_qcount);
7380fbc7b27aSKiran Patil 				return -EINVAL;
7381fbc7b27aSKiran Patil 			}
7382fbc7b27aSKiran Patil 			if (qcount < max_rss_q_cnt) {
7383fbc7b27aSKiran Patil 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7384fbc7b27aSKiran Patil 					qcount, max_rss_q_cnt);
7385fbc7b27aSKiran Patil 				return -EINVAL;
7386fbc7b27aSKiran Patil 			}
7387fbc7b27aSKiran Patil 			max_rss_q_cnt = qcount;
7388fbc7b27aSKiran Patil 			non_power_of_2_qcount = qcount;
7389fbc7b27aSKiran Patil 		}
7390fbc7b27aSKiran Patil 
7391fbc7b27aSKiran Patil 		/* TC command takes input in K/N/Gbps or K/M/Gbit etc but
7392fbc7b27aSKiran Patil 		 * converts the bandwidth rate limit into Bytes/s when
7393fbc7b27aSKiran Patil 		 * passing it down to the driver. So convert input bandwidth
7394fbc7b27aSKiran Patil 		 * from Bytes/s to Kbps
7395fbc7b27aSKiran Patil 		 */
7396fbc7b27aSKiran Patil 		max_rate = mqprio_qopt->max_rate[i];
7397fbc7b27aSKiran Patil 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7398fbc7b27aSKiran Patil 		sum_max_rate += max_rate;
7399fbc7b27aSKiran Patil 
7400fbc7b27aSKiran Patil 		/* min_rate is minimum guaranteed rate and it can't be zero */
7401fbc7b27aSKiran Patil 		min_rate = mqprio_qopt->min_rate[i];
7402fbc7b27aSKiran Patil 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7403fbc7b27aSKiran Patil 		sum_min_rate += min_rate;
7404fbc7b27aSKiran Patil 
7405fbc7b27aSKiran Patil 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7406fbc7b27aSKiran Patil 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7407fbc7b27aSKiran Patil 				min_rate, ICE_MIN_BW_LIMIT);
7408fbc7b27aSKiran Patil 			return -EINVAL;
7409fbc7b27aSKiran Patil 		}
7410fbc7b27aSKiran Patil 
7411fbc7b27aSKiran Patil 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7412fbc7b27aSKiran Patil 		if (rem) {
7413fbc7b27aSKiran Patil 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7414fbc7b27aSKiran Patil 				i, ICE_MIN_BW_LIMIT);
7415fbc7b27aSKiran Patil 			return -EINVAL;
7416fbc7b27aSKiran Patil 		}
7417fbc7b27aSKiran Patil 
7418fbc7b27aSKiran Patil 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7419fbc7b27aSKiran Patil 		if (rem) {
7420fbc7b27aSKiran Patil 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7421fbc7b27aSKiran Patil 				i, ICE_MIN_BW_LIMIT);
7422fbc7b27aSKiran Patil 			return -EINVAL;
7423fbc7b27aSKiran Patil 		}
7424fbc7b27aSKiran Patil 
7425fbc7b27aSKiran Patil 		/* min_rate can't be more than max_rate, except when max_rate
7426fbc7b27aSKiran Patil 		 * is zero (implies max_rate sought is max line rate). In such
7427fbc7b27aSKiran Patil 		 * a case min_rate can be more than max.
7428fbc7b27aSKiran Patil 		 */
7429fbc7b27aSKiran Patil 		if (max_rate && min_rate > max_rate) {
7430fbc7b27aSKiran Patil 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7431fbc7b27aSKiran Patil 				min_rate, max_rate);
7432fbc7b27aSKiran Patil 			return -EINVAL;
7433fbc7b27aSKiran Patil 		}
7434fbc7b27aSKiran Patil 
7435fbc7b27aSKiran Patil 		if (i >= mqprio_qopt->qopt.num_tc - 1)
7436fbc7b27aSKiran Patil 			break;
7437fbc7b27aSKiran Patil 		if (mqprio_qopt->qopt.offset[i + 1] !=
7438fbc7b27aSKiran Patil 		    (mqprio_qopt->qopt.offset[i] + qcount))
7439fbc7b27aSKiran Patil 			return -EINVAL;
7440fbc7b27aSKiran Patil 	}
7441fbc7b27aSKiran Patil 	if (vsi->num_rxq <
7442fbc7b27aSKiran Patil 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7443fbc7b27aSKiran Patil 		return -EINVAL;
7444fbc7b27aSKiran Patil 	if (vsi->num_txq <
7445fbc7b27aSKiran Patil 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7446fbc7b27aSKiran Patil 		return -EINVAL;
7447fbc7b27aSKiran Patil 
7448fbc7b27aSKiran Patil 	speed = ice_get_link_speed_kbps(vsi);
7449fbc7b27aSKiran Patil 	if (sum_max_rate && sum_max_rate > (u64)speed) {
7450fbc7b27aSKiran Patil 		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
7451fbc7b27aSKiran Patil 			sum_max_rate, speed);
7452fbc7b27aSKiran Patil 		return -EINVAL;
7453fbc7b27aSKiran Patil 	}
7454fbc7b27aSKiran Patil 	if (sum_min_rate && sum_min_rate > (u64)speed) {
7455fbc7b27aSKiran Patil 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7456fbc7b27aSKiran Patil 			sum_min_rate, speed);
7457fbc7b27aSKiran Patil 		return -EINVAL;
7458fbc7b27aSKiran Patil 	}
7459fbc7b27aSKiran Patil 
7460fbc7b27aSKiran Patil 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
7461fbc7b27aSKiran Patil 	vsi->ch_rss_size = max_rss_q_cnt;
7462fbc7b27aSKiran Patil 
7463fbc7b27aSKiran Patil 	return 0;
7464fbc7b27aSKiran Patil }
7465fbc7b27aSKiran Patil 
7466fbc7b27aSKiran Patil /**
746740319796SKiran Patil  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
746840319796SKiran Patil  * @pf: ptr to PF device
746940319796SKiran Patil  * @vsi: ptr to VSI
747040319796SKiran Patil  */
747140319796SKiran Patil static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
747240319796SKiran Patil {
747340319796SKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
747440319796SKiran Patil 	bool added = false;
747540319796SKiran Patil 	struct ice_hw *hw;
747640319796SKiran Patil 	int flow;
747740319796SKiran Patil 
747840319796SKiran Patil 	if (!(vsi->num_gfltr || vsi->num_bfltr))
747940319796SKiran Patil 		return -EINVAL;
748040319796SKiran Patil 
748140319796SKiran Patil 	hw = &pf->hw;
748240319796SKiran Patil 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
748340319796SKiran Patil 		struct ice_fd_hw_prof *prof;
748440319796SKiran Patil 		int tun, status;
748540319796SKiran Patil 		u64 entry_h;
748640319796SKiran Patil 
748740319796SKiran Patil 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
748840319796SKiran Patil 		      hw->fdir_prof[flow]->cnt))
748940319796SKiran Patil 			continue;
749040319796SKiran Patil 
749140319796SKiran Patil 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
749240319796SKiran Patil 			enum ice_flow_priority prio;
749340319796SKiran Patil 			u64 prof_id;
749440319796SKiran Patil 
749540319796SKiran Patil 			/* add this VSI to FDir profile for this flow */
749640319796SKiran Patil 			prio = ICE_FLOW_PRIO_NORMAL;
749740319796SKiran Patil 			prof = hw->fdir_prof[flow];
749840319796SKiran Patil 			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
749940319796SKiran Patil 			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
750040319796SKiran Patil 						    prof->vsi_h[0], vsi->idx,
750140319796SKiran Patil 						    prio, prof->fdir_seg[tun],
750240319796SKiran Patil 						    &entry_h);
750340319796SKiran Patil 			if (status) {
750440319796SKiran Patil 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
750540319796SKiran Patil 					vsi->idx, flow);
750640319796SKiran Patil 				continue;
750740319796SKiran Patil 			}
750840319796SKiran Patil 
750940319796SKiran Patil 			prof->entry_h[prof->cnt][tun] = entry_h;
751040319796SKiran Patil 		}
751140319796SKiran Patil 
751240319796SKiran Patil 		/* store VSI for filter replay and delete */
751340319796SKiran Patil 		prof->vsi_h[prof->cnt] = vsi->idx;
751440319796SKiran Patil 		prof->cnt++;
751540319796SKiran Patil 
751640319796SKiran Patil 		added = true;
751740319796SKiran Patil 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
751840319796SKiran Patil 			flow);
751940319796SKiran Patil 	}
752040319796SKiran Patil 
752140319796SKiran Patil 	if (!added)
752240319796SKiran Patil 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
752340319796SKiran Patil 
752440319796SKiran Patil 	return 0;
752540319796SKiran Patil }
752640319796SKiran Patil 
752740319796SKiran Patil /**
7528fbc7b27aSKiran Patil  * ice_add_channel - add a channel by adding VSI
7529fbc7b27aSKiran Patil  * @pf: ptr to PF device
7530fbc7b27aSKiran Patil  * @sw_id: underlying HW switching element ID
7531fbc7b27aSKiran Patil  * @ch: ptr to channel structure
7532fbc7b27aSKiran Patil  *
7533fbc7b27aSKiran Patil  * Add a channel (VSI) using add_vsi and queue_map
7534fbc7b27aSKiran Patil  */
7535fbc7b27aSKiran Patil static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
7536fbc7b27aSKiran Patil {
7537fbc7b27aSKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
7538fbc7b27aSKiran Patil 	struct ice_vsi *vsi;
7539fbc7b27aSKiran Patil 
7540fbc7b27aSKiran Patil 	if (ch->type != ICE_VSI_CHNL) {
7541fbc7b27aSKiran Patil 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
7542fbc7b27aSKiran Patil 		return -EINVAL;
7543fbc7b27aSKiran Patil 	}
7544fbc7b27aSKiran Patil 
7545fbc7b27aSKiran Patil 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
7546fbc7b27aSKiran Patil 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
7547fbc7b27aSKiran Patil 		dev_err(dev, "create chnl VSI failure\n");
7548fbc7b27aSKiran Patil 		return -EINVAL;
7549fbc7b27aSKiran Patil 	}
7550fbc7b27aSKiran Patil 
755140319796SKiran Patil 	ice_add_vsi_to_fdir(pf, vsi);
755240319796SKiran Patil 
7553fbc7b27aSKiran Patil 	ch->sw_id = sw_id;
7554fbc7b27aSKiran Patil 	ch->vsi_num = vsi->vsi_num;
7555fbc7b27aSKiran Patil 	ch->info.mapping_flags = vsi->info.mapping_flags;
7556fbc7b27aSKiran Patil 	ch->ch_vsi = vsi;
7557fbc7b27aSKiran Patil 	/* set the back pointer of channel for newly created VSI */
7558fbc7b27aSKiran Patil 	vsi->ch = ch;
7559fbc7b27aSKiran Patil 
7560fbc7b27aSKiran Patil 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
7561fbc7b27aSKiran Patil 	       sizeof(vsi->info.q_mapping));
7562fbc7b27aSKiran Patil 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
7563fbc7b27aSKiran Patil 	       sizeof(vsi->info.tc_mapping));
7564fbc7b27aSKiran Patil 
7565fbc7b27aSKiran Patil 	return 0;
7566fbc7b27aSKiran Patil }
7567fbc7b27aSKiran Patil 
7568fbc7b27aSKiran Patil /**
7569fbc7b27aSKiran Patil  * ice_chnl_cfg_res
7570fbc7b27aSKiran Patil  * @vsi: the VSI being setup
7571fbc7b27aSKiran Patil  * @ch: ptr to channel structure
7572fbc7b27aSKiran Patil  *
7573fbc7b27aSKiran Patil  * Configure channel specific resources such as rings, vector.
7574fbc7b27aSKiran Patil  */
7575fbc7b27aSKiran Patil static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
7576fbc7b27aSKiran Patil {
7577fbc7b27aSKiran Patil 	int i;
7578fbc7b27aSKiran Patil 
7579fbc7b27aSKiran Patil 	for (i = 0; i < ch->num_txq; i++) {
7580fbc7b27aSKiran Patil 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
7581fbc7b27aSKiran Patil 		struct ice_ring_container *rc;
7582fbc7b27aSKiran Patil 		struct ice_tx_ring *tx_ring;
7583fbc7b27aSKiran Patil 		struct ice_rx_ring *rx_ring;
7584fbc7b27aSKiran Patil 
7585fbc7b27aSKiran Patil 		tx_ring = vsi->tx_rings[ch->base_q + i];
7586fbc7b27aSKiran Patil 		rx_ring = vsi->rx_rings[ch->base_q + i];
7587fbc7b27aSKiran Patil 		if (!tx_ring || !rx_ring)
7588fbc7b27aSKiran Patil 			continue;
7589fbc7b27aSKiran Patil 
7590fbc7b27aSKiran Patil 		/* setup ring being channel enabled */
7591fbc7b27aSKiran Patil 		tx_ring->ch = ch;
7592fbc7b27aSKiran Patil 		rx_ring->ch = ch;
7593fbc7b27aSKiran Patil 
7594fbc7b27aSKiran Patil 		/* following code block sets up vector specific attributes */
7595fbc7b27aSKiran Patil 		tx_q_vector = tx_ring->q_vector;
7596fbc7b27aSKiran Patil 		rx_q_vector = rx_ring->q_vector;
7597fbc7b27aSKiran Patil 		if (!tx_q_vector && !rx_q_vector)
7598fbc7b27aSKiran Patil 			continue;
7599fbc7b27aSKiran Patil 
7600fbc7b27aSKiran Patil 		if (tx_q_vector) {
7601fbc7b27aSKiran Patil 			tx_q_vector->ch = ch;
7602fbc7b27aSKiran Patil 			/* setup Tx and Rx ITR setting if DIM is off */
7603fbc7b27aSKiran Patil 			rc = &tx_q_vector->tx;
7604fbc7b27aSKiran Patil 			if (!ITR_IS_DYNAMIC(rc))
7605fbc7b27aSKiran Patil 				ice_write_itr(rc, rc->itr_setting);
7606fbc7b27aSKiran Patil 		}
7607fbc7b27aSKiran Patil 		if (rx_q_vector) {
7608fbc7b27aSKiran Patil 			rx_q_vector->ch = ch;
7609fbc7b27aSKiran Patil 			/* setup Tx and Rx ITR setting if DIM is off */
7610fbc7b27aSKiran Patil 			rc = &rx_q_vector->rx;
7611fbc7b27aSKiran Patil 			if (!ITR_IS_DYNAMIC(rc))
7612fbc7b27aSKiran Patil 				ice_write_itr(rc, rc->itr_setting);
7613fbc7b27aSKiran Patil 		}
7614fbc7b27aSKiran Patil 	}
7615fbc7b27aSKiran Patil 
7616fbc7b27aSKiran Patil 	/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
7617fbc7b27aSKiran Patil 	 * GLINT_ITR register would have written to perform in-context
7618fbc7b27aSKiran Patil 	 * update, hence perform flush
7619fbc7b27aSKiran Patil 	 */
7620fbc7b27aSKiran Patil 	if (ch->num_txq || ch->num_rxq)
7621fbc7b27aSKiran Patil 		ice_flush(&vsi->back->hw);
7622fbc7b27aSKiran Patil }
7623fbc7b27aSKiran Patil 
/**
 * ice_cfg_chnl_all_res - configure channel resources
 * @vsi: pte to main_vsi
 * @ch: ptr to channel structure
 *
 * This function configures channel specific resources such as flow-director
 * counter index, and other resources such as queues, vectors, ITR settings.
 * Currently all of that is delegated to ice_chnl_cfg_res().
 */
static void
ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	/* queues, vectors and ITR settings for the channel's vectors */
	ice_chnl_cfg_res(vsi, ch);
}
7640fbc7b27aSKiran Patil 
7641fbc7b27aSKiran Patil /**
7642fbc7b27aSKiran Patil  * ice_setup_hw_channel - setup new channel
7643fbc7b27aSKiran Patil  * @pf: ptr to PF device
7644fbc7b27aSKiran Patil  * @vsi: the VSI being setup
7645fbc7b27aSKiran Patil  * @ch: ptr to channel structure
7646fbc7b27aSKiran Patil  * @sw_id: underlying HW switching element ID
7647fbc7b27aSKiran Patil  * @type: type of channel to be created (VMDq2/VF)
7648fbc7b27aSKiran Patil  *
7649fbc7b27aSKiran Patil  * Setup new channel (VSI) based on specified type (VMDq2/VF)
7650fbc7b27aSKiran Patil  * and configures Tx rings accordingly
7651fbc7b27aSKiran Patil  */
7652fbc7b27aSKiran Patil static int
7653fbc7b27aSKiran Patil ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
7654fbc7b27aSKiran Patil 		     struct ice_channel *ch, u16 sw_id, u8 type)
7655fbc7b27aSKiran Patil {
7656fbc7b27aSKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
7657fbc7b27aSKiran Patil 	int ret;
7658fbc7b27aSKiran Patil 
7659fbc7b27aSKiran Patil 	ch->base_q = vsi->next_base_q;
7660fbc7b27aSKiran Patil 	ch->type = type;
7661fbc7b27aSKiran Patil 
7662fbc7b27aSKiran Patil 	ret = ice_add_channel(pf, sw_id, ch);
7663fbc7b27aSKiran Patil 	if (ret) {
7664fbc7b27aSKiran Patil 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
7665fbc7b27aSKiran Patil 		return ret;
7666fbc7b27aSKiran Patil 	}
7667fbc7b27aSKiran Patil 
7668fbc7b27aSKiran Patil 	/* configure/setup ADQ specific resources */
7669fbc7b27aSKiran Patil 	ice_cfg_chnl_all_res(vsi, ch);
7670fbc7b27aSKiran Patil 
7671fbc7b27aSKiran Patil 	/* make sure to update the next_base_q so that subsequent channel's
7672fbc7b27aSKiran Patil 	 * (aka ADQ) VSI queue map is correct
7673fbc7b27aSKiran Patil 	 */
7674fbc7b27aSKiran Patil 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
7675fbc7b27aSKiran Patil 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
7676fbc7b27aSKiran Patil 		ch->num_rxq);
7677fbc7b27aSKiran Patil 
7678fbc7b27aSKiran Patil 	return 0;
7679fbc7b27aSKiran Patil }
7680fbc7b27aSKiran Patil 
7681fbc7b27aSKiran Patil /**
7682fbc7b27aSKiran Patil  * ice_setup_channel - setup new channel using uplink element
7683fbc7b27aSKiran Patil  * @pf: ptr to PF device
7684fbc7b27aSKiran Patil  * @vsi: the VSI being setup
7685fbc7b27aSKiran Patil  * @ch: ptr to channel structure
7686fbc7b27aSKiran Patil  *
7687fbc7b27aSKiran Patil  * Setup new channel (VSI) based on specified type (VMDq2/VF)
7688fbc7b27aSKiran Patil  * and uplink switching element
7689fbc7b27aSKiran Patil  */
7690fbc7b27aSKiran Patil static bool
7691fbc7b27aSKiran Patil ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
7692fbc7b27aSKiran Patil 		  struct ice_channel *ch)
7693fbc7b27aSKiran Patil {
7694fbc7b27aSKiran Patil 	struct device *dev = ice_pf_to_dev(pf);
7695fbc7b27aSKiran Patil 	u16 sw_id;
7696fbc7b27aSKiran Patil 	int ret;
7697fbc7b27aSKiran Patil 
7698fbc7b27aSKiran Patil 	if (vsi->type != ICE_VSI_PF) {
7699fbc7b27aSKiran Patil 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
7700fbc7b27aSKiran Patil 		return false;
7701fbc7b27aSKiran Patil 	}
7702fbc7b27aSKiran Patil 
7703fbc7b27aSKiran Patil 	sw_id = pf->first_sw->sw_id;
7704fbc7b27aSKiran Patil 
7705fbc7b27aSKiran Patil 	/* create channel (VSI) */
7706fbc7b27aSKiran Patil 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
7707fbc7b27aSKiran Patil 	if (ret) {
7708fbc7b27aSKiran Patil 		dev_err(dev, "failed to setup hw_channel\n");
7709fbc7b27aSKiran Patil 		return false;
7710fbc7b27aSKiran Patil 	}
7711fbc7b27aSKiran Patil 	dev_dbg(dev, "successfully created channel()\n");
7712fbc7b27aSKiran Patil 
7713fbc7b27aSKiran Patil 	return ch->ch_vsi ? true : false;
7714fbc7b27aSKiran Patil }
7715fbc7b27aSKiran Patil 
7716fbc7b27aSKiran Patil /**
7717fbc7b27aSKiran Patil  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
7718fbc7b27aSKiran Patil  * @vsi: VSI to be configured
7719fbc7b27aSKiran Patil  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
7720fbc7b27aSKiran Patil  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
7721fbc7b27aSKiran Patil  */
7722fbc7b27aSKiran Patil static int
7723fbc7b27aSKiran Patil ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
7724fbc7b27aSKiran Patil {
7725fbc7b27aSKiran Patil 	int err;
7726fbc7b27aSKiran Patil 
7727fbc7b27aSKiran Patil 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
7728fbc7b27aSKiran Patil 	if (err)
7729fbc7b27aSKiran Patil 		return err;
7730fbc7b27aSKiran Patil 
7731fbc7b27aSKiran Patil 	return ice_set_max_bw_limit(vsi, max_tx_rate);
7732fbc7b27aSKiran Patil }
7733fbc7b27aSKiran Patil 
7734fbc7b27aSKiran Patil /**
7735fbc7b27aSKiran Patil  * ice_create_q_channel - function to create channel
7736fbc7b27aSKiran Patil  * @vsi: VSI to be configured
7737fbc7b27aSKiran Patil  * @ch: ptr to channel (it contains channel specific params)
7738fbc7b27aSKiran Patil  *
7739fbc7b27aSKiran Patil  * This function creates channel (VSI) using num_queues specified by user,
7740fbc7b27aSKiran Patil  * reconfigs RSS if needed.
7741fbc7b27aSKiran Patil  */
7742fbc7b27aSKiran Patil static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
7743fbc7b27aSKiran Patil {
7744fbc7b27aSKiran Patil 	struct ice_pf *pf = vsi->back;
7745fbc7b27aSKiran Patil 	struct device *dev;
7746fbc7b27aSKiran Patil 
7747fbc7b27aSKiran Patil 	if (!ch)
7748fbc7b27aSKiran Patil 		return -EINVAL;
7749fbc7b27aSKiran Patil 
7750fbc7b27aSKiran Patil 	dev = ice_pf_to_dev(pf);
7751fbc7b27aSKiran Patil 	if (!ch->num_txq || !ch->num_rxq) {
7752fbc7b27aSKiran Patil 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
7753fbc7b27aSKiran Patil 		return -EINVAL;
7754fbc7b27aSKiran Patil 	}
7755fbc7b27aSKiran Patil 
7756fbc7b27aSKiran Patil 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
7757fbc7b27aSKiran Patil 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
7758fbc7b27aSKiran Patil 			vsi->cnt_q_avail, ch->num_txq);
7759fbc7b27aSKiran Patil 		return -EINVAL;
7760fbc7b27aSKiran Patil 	}
7761fbc7b27aSKiran Patil 
7762fbc7b27aSKiran Patil 	if (!ice_setup_channel(pf, vsi, ch)) {
7763fbc7b27aSKiran Patil 		dev_info(dev, "Failed to setup channel\n");
7764fbc7b27aSKiran Patil 		return -EINVAL;
7765fbc7b27aSKiran Patil 	}
7766fbc7b27aSKiran Patil 	/* configure BW rate limit */
7767fbc7b27aSKiran Patil 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
7768fbc7b27aSKiran Patil 		int ret;
7769fbc7b27aSKiran Patil 
7770fbc7b27aSKiran Patil 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
7771fbc7b27aSKiran Patil 				       ch->min_tx_rate);
7772fbc7b27aSKiran Patil 		if (ret)
7773fbc7b27aSKiran Patil 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
7774fbc7b27aSKiran Patil 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
7775fbc7b27aSKiran Patil 		else
7776fbc7b27aSKiran Patil 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
7777fbc7b27aSKiran Patil 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
7778fbc7b27aSKiran Patil 	}
7779fbc7b27aSKiran Patil 
7780fbc7b27aSKiran Patil 	vsi->cnt_q_avail -= ch->num_txq;
7781fbc7b27aSKiran Patil 
7782fbc7b27aSKiran Patil 	return 0;
7783fbc7b27aSKiran Patil }
7784fbc7b27aSKiran Patil 
7785fbc7b27aSKiran Patil /**
77869fea7498SKiran Patil  * ice_rem_all_chnl_fltrs - removes all channel filters
77879fea7498SKiran Patil  * @pf: ptr to PF, TC-flower based filter are tracked at PF level
77889fea7498SKiran Patil  *
77899fea7498SKiran Patil  * Remove all advanced switch filters only if they are channel specific
77909fea7498SKiran Patil  * tc-flower based filter
77919fea7498SKiran Patil  */
77929fea7498SKiran Patil static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
77939fea7498SKiran Patil {
77949fea7498SKiran Patil 	struct ice_tc_flower_fltr *fltr;
77959fea7498SKiran Patil 	struct hlist_node *node;
77969fea7498SKiran Patil 
77979fea7498SKiran Patil 	/* to remove all channel filters, iterate an ordered list of filters */
77989fea7498SKiran Patil 	hlist_for_each_entry_safe(fltr, node,
77999fea7498SKiran Patil 				  &pf->tc_flower_fltr_list,
78009fea7498SKiran Patil 				  tc_flower_node) {
78019fea7498SKiran Patil 		struct ice_rule_query_data rule;
78029fea7498SKiran Patil 		int status;
78039fea7498SKiran Patil 
78049fea7498SKiran Patil 		/* for now process only channel specific filters */
78059fea7498SKiran Patil 		if (!ice_is_chnl_fltr(fltr))
78069fea7498SKiran Patil 			continue;
78079fea7498SKiran Patil 
78089fea7498SKiran Patil 		rule.rid = fltr->rid;
78099fea7498SKiran Patil 		rule.rule_id = fltr->rule_id;
78109fea7498SKiran Patil 		rule.vsi_handle = fltr->dest_id;
78119fea7498SKiran Patil 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
78129fea7498SKiran Patil 		if (status) {
78139fea7498SKiran Patil 			if (status == -ENOENT)
78149fea7498SKiran Patil 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
78159fea7498SKiran Patil 					rule.rule_id);
78169fea7498SKiran Patil 			else
78179fea7498SKiran Patil 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
78189fea7498SKiran Patil 					status);
78199fea7498SKiran Patil 		} else if (fltr->dest_vsi) {
78209fea7498SKiran Patil 			/* update advanced switch filter count */
78219fea7498SKiran Patil 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
78229fea7498SKiran Patil 				u32 flags = fltr->flags;
78239fea7498SKiran Patil 
78249fea7498SKiran Patil 				fltr->dest_vsi->num_chnl_fltr--;
78259fea7498SKiran Patil 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
78269fea7498SKiran Patil 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
78279fea7498SKiran Patil 					pf->num_dmac_chnl_fltrs--;
78289fea7498SKiran Patil 			}
78299fea7498SKiran Patil 		}
78309fea7498SKiran Patil 
78319fea7498SKiran Patil 		hlist_del(&fltr->tc_flower_node);
78329fea7498SKiran Patil 		kfree(fltr);
78339fea7498SKiran Patil 	}
78349fea7498SKiran Patil }
78359fea7498SKiran Patil 
/**
 * ice_remove_q_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 * @rem_fltr: delete advanced switch filter or not
 *
 * Remove queue channels for the TCs. Tears down each channel's backing
 * VSI (scheduler, FW, PF arrays) and resets the main VSI's per-TC state.
 */
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
{
	struct ice_channel *ch, *ch_tmp;
	struct ice_pf *pf = vsi->back;
	int i;

	/* remove all tc-flower based filter if they are channel filters only */
	if (rem_fltr)
		ice_rem_all_chnl_fltrs(pf);

	/* remove ntuple filters since queue configuration is being changed.
	 * NOTE(review): assumes vsi->netdev is non-NULL here (main VSI) —
	 * confirm against callers
	 */
	if  (vsi->netdev->features & NETIF_F_NTUPLE) {
		struct ice_hw *hw = &pf->hw;

		mutex_lock(&hw->fdir_fltr_lock);
		ice_fdir_del_all_fltrs(vsi);
		mutex_unlock(&hw->fdir_fltr_lock);
	}

	/* perform cleanup for channels if they exist */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		list_del(&ch->list);
		ch_vsi = ch->ch_vsi;
		/* a channel without a backing VSI only needs its memory freed */
		if (!ch_vsi) {
			kfree(ch);
			continue;
		}

		/* Reset queue contexts: detach the channel from the main
		 * VSI's rings and their vectors
		 */
		for (i = 0; i < ch->num_rxq; i++) {
			struct ice_tx_ring *tx_ring;
			struct ice_rx_ring *rx_ring;

			tx_ring = vsi->tx_rings[ch->base_q + i];
			rx_ring = vsi->rx_rings[ch->base_q + i];
			if (tx_ring) {
				tx_ring->ch = NULL;
				if (tx_ring->q_vector)
					tx_ring->q_vector->ch = NULL;
			}
			if (rx_ring) {
				rx_ring->ch = NULL;
				if (rx_ring->q_vector)
					rx_ring->q_vector->ch = NULL;
			}
		}

		/* Release FD resources for the channel VSI */
		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);

		/* clear the VSI from scheduler tree */
		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);

		/* Delete VSI from FW */
		ice_vsi_delete(ch->ch_vsi);

		/* Delete VSI from PF and HW VSI arrays */
		ice_vsi_clear(ch->ch_vsi);

		/* free the channel */
		kfree(ch);
	}

	/* clear the channel VSI map which is stored in main VSI */
	ice_for_each_chnl_tc(i)
		vsi->tc_map_vsi[i] = NULL;

	/* reset main VSI's all TC information */
	vsi->all_enatc = 0;
	vsi->all_numtc = 0;
}
7916fbc7b27aSKiran Patil 
/**
 * ice_rebuild_channels - rebuild channel
 * @pf: ptr to PF
 *
 * Recreate channel VSIs and replay filters
 *
 * Return: 0 on success (or when there is nothing to rebuild), negative
 * error code on failure. On failure all channels are torn down again.
 */
static int ice_rebuild_channels(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi;
	bool rem_adv_fltr = true;
	struct ice_channel *ch;
	struct ice_vsi *vsi;
	int tc_idx = 1;
	int i, err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return 0;

	/* ADQ (mqprio channel mode) inactive, or only the default TC was
	 * configured: nothing to rebuild
	 */
	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
	    main_vsi->old_numtc == 1)
		return 0; /* nothing to be done */

	/* reconfigure main VSI based on old value of TC and cached values
	 * for MQPRIO opts
	 */
	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
	if (err) {
		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
			main_vsi->old_ena_tc, main_vsi->vsi_num);
		return err;
	}

	/* rebuild ADQ VSIs */
	ice_for_each_vsi(pf, i) {
		enum ice_vsi_type type;

		vsi = pf->vsi[i];
		if (!vsi || vsi->type != ICE_VSI_CHNL)
			continue;

		type = vsi->type;

		/* rebuild ADQ VSI */
		err = ice_vsi_rebuild(vsi, true);
		if (err) {
			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
				ice_vsi_type_str(type), vsi->idx, err);
			goto cleanup;
		}

		/* Re-map HW VSI number, using the VSI handle that was just
		 * re-validated by the ice_vsi_rebuild() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
				ice_vsi_type_str(type), err, vsi->idx);
			/* replay did not complete, so skip advanced filter
			 * removal during the channel teardown below
			 */
			rem_adv_fltr = false;
			goto cleanup;
		}
		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
			 ice_vsi_type_str(type), vsi->idx);

		/* store ADQ VSI at correct TC index in main VSI's
		 * map of TC to VSI
		 */
		main_vsi->tc_map_vsi[tc_idx++] = vsi;
	}

	/* ADQ VSI(s) has been rebuilt successfully, so setup
	 * channel for main VSI's Tx and Rx rings
	 */
	list_for_each_entry(ch, &main_vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		ch_vsi = ch->ch_vsi;
		if (!ch_vsi)
			continue;

		/* reconfig channel resources */
		ice_cfg_chnl_all_res(main_vsi, ch);

		/* replay BW rate limit if it is non-zero */
		if (!ch->max_tx_rate && !ch->min_tx_rate)
			continue;

		/* a BW replay failure is logged but deliberately does not
		 * abort the rebuild (best-effort)
		 */
		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (err)
			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				err, ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
		else
			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
	}

	/* reconfig RSS for main VSI */
	if (main_vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(main_vsi);

	return 0;

cleanup:
	ice_remove_q_channels(main_vsi, rem_adv_fltr);
	return err;
}
8030fbc7b27aSKiran Patil 
8031fbc7b27aSKiran Patil /**
8032fbc7b27aSKiran Patil  * ice_create_q_channels - Add queue channel for the given TCs
8033fbc7b27aSKiran Patil  * @vsi: VSI to be configured
8034fbc7b27aSKiran Patil  *
8035fbc7b27aSKiran Patil  * Configures queue channel mapping to the given TCs
8036fbc7b27aSKiran Patil  */
8037fbc7b27aSKiran Patil static int ice_create_q_channels(struct ice_vsi *vsi)
8038fbc7b27aSKiran Patil {
8039fbc7b27aSKiran Patil 	struct ice_pf *pf = vsi->back;
8040fbc7b27aSKiran Patil 	struct ice_channel *ch;
8041fbc7b27aSKiran Patil 	int ret = 0, i;
8042fbc7b27aSKiran Patil 
8043fbc7b27aSKiran Patil 	ice_for_each_chnl_tc(i) {
8044fbc7b27aSKiran Patil 		if (!(vsi->all_enatc & BIT(i)))
8045fbc7b27aSKiran Patil 			continue;
8046fbc7b27aSKiran Patil 
8047fbc7b27aSKiran Patil 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8048fbc7b27aSKiran Patil 		if (!ch) {
8049fbc7b27aSKiran Patil 			ret = -ENOMEM;
8050fbc7b27aSKiran Patil 			goto err_free;
8051fbc7b27aSKiran Patil 		}
8052fbc7b27aSKiran Patil 		INIT_LIST_HEAD(&ch->list);
8053fbc7b27aSKiran Patil 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8054fbc7b27aSKiran Patil 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8055fbc7b27aSKiran Patil 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8056fbc7b27aSKiran Patil 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8057fbc7b27aSKiran Patil 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8058fbc7b27aSKiran Patil 
8059fbc7b27aSKiran Patil 		/* convert to Kbits/s */
8060fbc7b27aSKiran Patil 		if (ch->max_tx_rate)
8061fbc7b27aSKiran Patil 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8062fbc7b27aSKiran Patil 						  ICE_BW_KBPS_DIVISOR);
8063fbc7b27aSKiran Patil 		if (ch->min_tx_rate)
8064fbc7b27aSKiran Patil 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8065fbc7b27aSKiran Patil 						  ICE_BW_KBPS_DIVISOR);
8066fbc7b27aSKiran Patil 
8067fbc7b27aSKiran Patil 		ret = ice_create_q_channel(vsi, ch);
8068fbc7b27aSKiran Patil 		if (ret) {
8069fbc7b27aSKiran Patil 			dev_err(ice_pf_to_dev(pf),
8070fbc7b27aSKiran Patil 				"failed creating channel TC:%d\n", i);
8071fbc7b27aSKiran Patil 			kfree(ch);
8072fbc7b27aSKiran Patil 			goto err_free;
8073fbc7b27aSKiran Patil 		}
8074fbc7b27aSKiran Patil 		list_add_tail(&ch->list, &vsi->ch_list);
8075fbc7b27aSKiran Patil 		vsi->tc_map_vsi[i] = ch->ch_vsi;
8076fbc7b27aSKiran Patil 		dev_dbg(ice_pf_to_dev(pf),
8077fbc7b27aSKiran Patil 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8078fbc7b27aSKiran Patil 	}
8079fbc7b27aSKiran Patil 	return 0;
8080fbc7b27aSKiran Patil 
8081fbc7b27aSKiran Patil err_free:
8082fbc7b27aSKiran Patil 	ice_remove_q_channels(vsi, false);
8083fbc7b27aSKiran Patil 
8084fbc7b27aSKiran Patil 	return ret;
8085fbc7b27aSKiran Patil }
8086fbc7b27aSKiran Patil 
/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 *
 * Return: 0 on success, negative error code on failure. The VSI is paused
 * while it is rebuilt for the new queue layout and resumed before return.
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	/* hw == 0: no HW offload requested (qdisc delete path); drop the
	 * ADQ flag and fall through to reconfigure the default layout
	 */
	if (!hw) {
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:

	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	/* qdisc delete path: tear down existing channels and their filters */
	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ice_remove_q_channels(vsi, true);

	/* NOTE(review): same condition as the if above — the two blocks look
	 * mergeable; confirm no intervening state change is relied upon
	 */
	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, same like ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		/* after the loop, offset/qcount hold the values of the last
		 * enabled TC, so req_txq/req_rxq span all requested queues
		 */
		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away original rss_size info, so that it gets reused
		 * from ice_vsi_rebuild during tc-qdisc delete stage - to
		 * determine what should be the rss_size for main VSI
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuild main VSI using correct number of queues */
	ret = ice_vsi_rebuild(vsi, false);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, false)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			/* note: VSI is left paused on this double failure */
			return ret;
		}
	}

	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		/* set TC0 rate limit if specified */
		if (max_tx_rate || min_tx_rate) {
			/* convert to Kbits/s */
			if (max_tx_rate)
				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
			if (min_tx_rate)
				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);

			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
			if (!ret) {
				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
			} else {
				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
				goto exit;
			}
		}
		ret = ice_create_q_channels(vsi);
		if (ret) {
			netdev_err(netdev, "failed configuring queue channels\n");
			goto exit;
		} else {
			netdev_dbg(netdev, "successfully configured channels\n");
		}
	}

	if (vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(vsi);

exit:
	/* if error, reset the all_numtc and all_enatc */
	if (ret) {
		vsi->all_numtc = 0;
		vsi->all_enatc = 0;
	}
	/* resume VSI */
	ice_ena_vsi(vsi, true);

	return ret;
}
8255fbc7b27aSKiran Patil 
82560d08a441SKiran Patil static LIST_HEAD(ice_block_cb_list);
82570d08a441SKiran Patil 
82580d08a441SKiran Patil static int
82590d08a441SKiran Patil ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
82600d08a441SKiran Patil 	     void *type_data)
82610d08a441SKiran Patil {
82620d08a441SKiran Patil 	struct ice_netdev_priv *np = netdev_priv(netdev);
8263fbc7b27aSKiran Patil 	struct ice_pf *pf = np->vsi->back;
8264fbc7b27aSKiran Patil 	int err;
82650d08a441SKiran Patil 
82660d08a441SKiran Patil 	switch (type) {
82670d08a441SKiran Patil 	case TC_SETUP_BLOCK:
82680d08a441SKiran Patil 		return flow_block_cb_setup_simple(type_data,
82690d08a441SKiran Patil 						  &ice_block_cb_list,
82700d08a441SKiran Patil 						  ice_setup_tc_block_cb,
82710d08a441SKiran Patil 						  np, np, true);
8272fbc7b27aSKiran Patil 	case TC_SETUP_QDISC_MQPRIO:
8273fbc7b27aSKiran Patil 		/* setup traffic classifier for receive side */
8274fbc7b27aSKiran Patil 		mutex_lock(&pf->tc_mutex);
8275fbc7b27aSKiran Patil 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8276fbc7b27aSKiran Patil 		mutex_unlock(&pf->tc_mutex);
8277fbc7b27aSKiran Patil 		return err;
82780d08a441SKiran Patil 	default:
82790d08a441SKiran Patil 		return -EOPNOTSUPP;
82800d08a441SKiran Patil 	}
82810d08a441SKiran Patil 	return -EOPNOTSUPP;
82820d08a441SKiran Patil }
82830d08a441SKiran Patil 
8284195bb48fSMichal Swiatkowski static struct ice_indr_block_priv *
8285195bb48fSMichal Swiatkowski ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8286195bb48fSMichal Swiatkowski 			   struct net_device *netdev)
8287195bb48fSMichal Swiatkowski {
8288195bb48fSMichal Swiatkowski 	struct ice_indr_block_priv *cb_priv;
8289195bb48fSMichal Swiatkowski 
8290195bb48fSMichal Swiatkowski 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8291195bb48fSMichal Swiatkowski 		if (!cb_priv->netdev)
8292195bb48fSMichal Swiatkowski 			return NULL;
8293195bb48fSMichal Swiatkowski 		if (cb_priv->netdev == netdev)
8294195bb48fSMichal Swiatkowski 			return cb_priv;
8295195bb48fSMichal Swiatkowski 	}
8296195bb48fSMichal Swiatkowski 	return NULL;
8297195bb48fSMichal Swiatkowski }
8298195bb48fSMichal Swiatkowski 
8299195bb48fSMichal Swiatkowski static int
8300195bb48fSMichal Swiatkowski ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8301195bb48fSMichal Swiatkowski 			void *indr_priv)
8302195bb48fSMichal Swiatkowski {
8303195bb48fSMichal Swiatkowski 	struct ice_indr_block_priv *priv = indr_priv;
8304195bb48fSMichal Swiatkowski 	struct ice_netdev_priv *np = priv->np;
8305195bb48fSMichal Swiatkowski 
8306195bb48fSMichal Swiatkowski 	switch (type) {
8307195bb48fSMichal Swiatkowski 	case TC_SETUP_CLSFLOWER:
8308195bb48fSMichal Swiatkowski 		return ice_setup_tc_cls_flower(np, priv->netdev,
8309195bb48fSMichal Swiatkowski 					       (struct flow_cls_offload *)
8310195bb48fSMichal Swiatkowski 					       type_data);
8311195bb48fSMichal Swiatkowski 	default:
8312195bb48fSMichal Swiatkowski 		return -EOPNOTSUPP;
8313195bb48fSMichal Swiatkowski 	}
8314195bb48fSMichal Swiatkowski }
8315195bb48fSMichal Swiatkowski 
/**
 * ice_indr_setup_tc_block - bind/unbind an indirect TC flow block
 * @netdev: device the flow block is attached to (tunnel or VLAN upper)
 * @sch: qdisc the block belongs to
 * @np: our netdev private data
 * @f: flow block offload request (bind or unbind)
 * @data: opaque data passed through to the flow core
 * @cleanup: flow core callback used to release the block_cb on teardown
 *
 * Handles FLOW_BLOCK_BIND/UNBIND for devices we do not own directly:
 * tunnel devices, or VLAN devices stacked on top of our netdev.
 */
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	/* only offload for tunnel devices or VLANs on top of our netdev */
	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	/* only ingress clsact blocks are supported */
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		/* refuse a second bind for the same netdev */
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			/* undo the list insertion on allocation failure */
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		/* flow core releases indr_priv via the unbind cleanup path */
		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
8382195bb48fSMichal Swiatkowski 
8383195bb48fSMichal Swiatkowski static int
8384195bb48fSMichal Swiatkowski ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
8385195bb48fSMichal Swiatkowski 		     void *cb_priv, enum tc_setup_type type, void *type_data,
8386195bb48fSMichal Swiatkowski 		     void *data,
8387195bb48fSMichal Swiatkowski 		     void (*cleanup)(struct flow_block_cb *block_cb))
8388195bb48fSMichal Swiatkowski {
8389195bb48fSMichal Swiatkowski 	switch (type) {
8390195bb48fSMichal Swiatkowski 	case TC_SETUP_BLOCK:
8391195bb48fSMichal Swiatkowski 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
8392195bb48fSMichal Swiatkowski 					       data, cleanup);
8393195bb48fSMichal Swiatkowski 
8394195bb48fSMichal Swiatkowski 	default:
8395195bb48fSMichal Swiatkowski 		return -EOPNOTSUPP;
8396195bb48fSMichal Swiatkowski 	}
8397195bb48fSMichal Swiatkowski }
8398195bb48fSMichal Swiatkowski 
83990d08a441SKiran Patil /**
8400cdedef59SAnirudh Venkataramanan  * ice_open - Called when a network interface becomes active
8401cdedef59SAnirudh Venkataramanan  * @netdev: network interface device structure
8402cdedef59SAnirudh Venkataramanan  *
8403cdedef59SAnirudh Venkataramanan  * The open entry point is called when a network interface is made
8404cdedef59SAnirudh Venkataramanan  * active by the system (IFF_UP). At this point all resources needed
8405cdedef59SAnirudh Venkataramanan  * for transmit and receive operations are allocated, the interrupt
8406cdedef59SAnirudh Venkataramanan  * handler is registered with the OS, the netdev watchdog is enabled,
8407cdedef59SAnirudh Venkataramanan  * and the stack is notified that the interface is ready.
8408cdedef59SAnirudh Venkataramanan  *
8409cdedef59SAnirudh Venkataramanan  * Returns 0 on success, negative value on failure
8410cdedef59SAnirudh Venkataramanan  */
84110e674aebSAnirudh Venkataramanan int ice_open(struct net_device *netdev)
8412cdedef59SAnirudh Venkataramanan {
8413cdedef59SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
8414e95fc857SKrzysztof Goreczny 	struct ice_pf *pf = np->vsi->back;
8415e95fc857SKrzysztof Goreczny 
8416e95fc857SKrzysztof Goreczny 	if (ice_is_reset_in_progress(pf->state)) {
8417e95fc857SKrzysztof Goreczny 		netdev_err(netdev, "can't open net device while reset is in progress");
8418e95fc857SKrzysztof Goreczny 		return -EBUSY;
8419e95fc857SKrzysztof Goreczny 	}
8420e95fc857SKrzysztof Goreczny 
8421e95fc857SKrzysztof Goreczny 	return ice_open_internal(netdev);
8422e95fc857SKrzysztof Goreczny }
8423e95fc857SKrzysztof Goreczny 
/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be used directly except for ice_open and reset
 * handling routine
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	/* Refuse to come up if the driver was left in a state that can only
	 * be cleared by an unload/reload cycle.
	 */
	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	/* Start with carrier off; link-up is reported once the PHY/VSI are
	 * configured below.
	 */
	netif_carrier_off(netdev);

	/* Refresh cached link/PHY information from firmware before deciding
	 * how to configure the PHY.
	 */
	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	/* Surface any link configuration error firmware reported */
	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		/* user PHY configuration is initialized only while
		 * ICE_PHY_INIT_COMPLETE is not yet set (set elsewhere once
		 * initialization succeeds)
		 */
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		/* no media: remember it in pf->flags and force link down */
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* Bring up rings/interrupts; failure is logged but the tunnel
	 * refresh below still runs, and err is propagated to the caller.
	 */
	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}
8490cdedef59SAnirudh Venkataramanan 
8491cdedef59SAnirudh Venkataramanan /**
8492cdedef59SAnirudh Venkataramanan  * ice_stop - Disables a network interface
8493cdedef59SAnirudh Venkataramanan  * @netdev: network interface device structure
8494cdedef59SAnirudh Venkataramanan  *
8495cdedef59SAnirudh Venkataramanan  * The stop entry point is called when an interface is de-activated by the OS,
8496cdedef59SAnirudh Venkataramanan  * and the netdevice enters the DOWN state. The hardware is still under the
8497cdedef59SAnirudh Venkataramanan  * driver's control, but the netdev interface is disabled.
8498cdedef59SAnirudh Venkataramanan  *
8499cdedef59SAnirudh Venkataramanan  * Returns success only - not allowed to fail
8500cdedef59SAnirudh Venkataramanan  */
85010e674aebSAnirudh Venkataramanan int ice_stop(struct net_device *netdev)
8502cdedef59SAnirudh Venkataramanan {
8503cdedef59SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
8504cdedef59SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
8505e95fc857SKrzysztof Goreczny 	struct ice_pf *pf = vsi->back;
8506e95fc857SKrzysztof Goreczny 
8507e95fc857SKrzysztof Goreczny 	if (ice_is_reset_in_progress(pf->state)) {
8508e95fc857SKrzysztof Goreczny 		netdev_err(netdev, "can't stop net device while reset is in progress");
8509e95fc857SKrzysztof Goreczny 		return -EBUSY;
8510e95fc857SKrzysztof Goreczny 	}
8511cdedef59SAnirudh Venkataramanan 
8512cdedef59SAnirudh Venkataramanan 	ice_vsi_close(vsi);
8513cdedef59SAnirudh Venkataramanan 
8514cdedef59SAnirudh Venkataramanan 	return 0;
8515cdedef59SAnirudh Venkataramanan }
8516cdedef59SAnirudh Venkataramanan 
8517e94d4478SAnirudh Venkataramanan /**
8518e94d4478SAnirudh Venkataramanan  * ice_features_check - Validate encapsulated packet conforms to limits
8519e94d4478SAnirudh Venkataramanan  * @skb: skb buffer
8520e94d4478SAnirudh Venkataramanan  * @netdev: This port's netdev
8521e94d4478SAnirudh Venkataramanan  * @features: Offload features that the stack believes apply
8522e94d4478SAnirudh Venkataramanan  */
8523e94d4478SAnirudh Venkataramanan static netdev_features_t
8524e94d4478SAnirudh Venkataramanan ice_features_check(struct sk_buff *skb,
8525e94d4478SAnirudh Venkataramanan 		   struct net_device __always_unused *netdev,
8526e94d4478SAnirudh Venkataramanan 		   netdev_features_t features)
8527e94d4478SAnirudh Venkataramanan {
8528e94d4478SAnirudh Venkataramanan 	size_t len;
8529e94d4478SAnirudh Venkataramanan 
8530e94d4478SAnirudh Venkataramanan 	/* No point in doing any of this if neither checksum nor GSO are
8531e94d4478SAnirudh Venkataramanan 	 * being requested for this frame. We can rule out both by just
8532e94d4478SAnirudh Venkataramanan 	 * checking for CHECKSUM_PARTIAL
8533e94d4478SAnirudh Venkataramanan 	 */
8534e94d4478SAnirudh Venkataramanan 	if (skb->ip_summed != CHECKSUM_PARTIAL)
8535e94d4478SAnirudh Venkataramanan 		return features;
8536e94d4478SAnirudh Venkataramanan 
8537e94d4478SAnirudh Venkataramanan 	/* We cannot support GSO if the MSS is going to be less than
8538e94d4478SAnirudh Venkataramanan 	 * 64 bytes. If it is then we need to drop support for GSO.
8539e94d4478SAnirudh Venkataramanan 	 */
8540e94d4478SAnirudh Venkataramanan 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
8541e94d4478SAnirudh Venkataramanan 		features &= ~NETIF_F_GSO_MASK;
8542e94d4478SAnirudh Venkataramanan 
8543e94d4478SAnirudh Venkataramanan 	len = skb_network_header(skb) - skb->data;
8544a4e82a81STony Nguyen 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
8545e94d4478SAnirudh Venkataramanan 		goto out_rm_features;
8546e94d4478SAnirudh Venkataramanan 
8547e94d4478SAnirudh Venkataramanan 	len = skb_transport_header(skb) - skb_network_header(skb);
8548a4e82a81STony Nguyen 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8549e94d4478SAnirudh Venkataramanan 		goto out_rm_features;
8550e94d4478SAnirudh Venkataramanan 
8551e94d4478SAnirudh Venkataramanan 	if (skb->encapsulation) {
8552e94d4478SAnirudh Venkataramanan 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
8553a4e82a81STony Nguyen 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
8554e94d4478SAnirudh Venkataramanan 			goto out_rm_features;
8555e94d4478SAnirudh Venkataramanan 
8556e94d4478SAnirudh Venkataramanan 		len = skb_inner_transport_header(skb) -
8557e94d4478SAnirudh Venkataramanan 		      skb_inner_network_header(skb);
8558a4e82a81STony Nguyen 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8559e94d4478SAnirudh Venkataramanan 			goto out_rm_features;
8560e94d4478SAnirudh Venkataramanan 	}
8561e94d4478SAnirudh Venkataramanan 
8562e94d4478SAnirudh Venkataramanan 	return features;
8563e94d4478SAnirudh Venkataramanan out_rm_features:
8564e94d4478SAnirudh Venkataramanan 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8565e94d4478SAnirudh Venkataramanan }
8566e94d4478SAnirudh Venkataramanan 
/* Reduced netdev ops table, presumably used while the device runs without
 * full functionality ("safe mode") -- TODO confirm against where it is
 * assigned. Compared to ice_netdev_ops below it exposes only the basic
 * lifecycle/datapath callbacks: no VF, VLAN, TC, bridge or FDB handlers,
 * and XDP requests are routed to ice_xdp_safe_mode instead of ice_xdp.
 */
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};
8578462acf6aSTony Nguyen 
/* Full netdev ops table used for normal (non-safe-mode) operation */
static const struct net_device_ops ice_netdev_ops = {
	/* lifecycle and datapath */
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	/* SR-IOV VF management */
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	/* VLAN, TC and bridge offloads */
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	/* XDP and AF_XDP zero-copy */
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};
8616