// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"
#include "ice_vsi_vlan_ops.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_CHNL:
		return "ICE_VSI_CHNL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	case ICE_VSI_SWITCHDEV_CTRL:
		return "ICE_VSI_SWITCHDEV_CTRL";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This will
 * let all of the register writes complete when enabling/disabling the Rx rings
 * before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	ice_for_each_rxq(vsi, i)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

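	/* flush posted register writes so every ring enable/disable request
	 * reaches hardware before we start polling for completion
	 */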
	ice_flush(&vsi->back->hw);

	ice_for_each_rxq(vsi, i) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* txq_map needs to have enough space to track both Tx (stack) rings
	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
	 * so use num_possible_cpus() as we want to always provide XDP ring
	 * per CPU, regardless of queue count settings from user that might
	 * have come from ethtool's set_channels() callback;
	 */
	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	enum ice_vsi_type vsi_type = vsi->type;
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = vsi->vf;

	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
		return;

	switch (vsi_type) {
	case ICE_VSI_PF:
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		} else {
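			/* no user-requested Tx queue count, so bound the
			 * default by available MSI-X vectors, unassigned PF
			 * Tx queues and the number of online CPUs
			 */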
			vsi->alloc_txq = min3(pf->num_lan_msix,
					      ice_get_avail_txq_count(pf),
					      (u16)num_online_cpus());
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			} else {
				vsi->alloc_rxq = min3(pf->num_lan_msix,
						      ice_get_avail_rxq_count(pf),
						      (u16)num_online_cpus());
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
					   max_t(int, vsi->alloc_rxq,
						 vsi->alloc_txq));
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		/* The number of queues for ctrl VSI is equal to number of VFs.
		 * Each ring is associated to the corresponding VF_PR netdev.
		 */
		vsi->alloc_txq = ice_get_num_vfs(pf);
		vsi->alloc_rxq = vsi->alloc_txq;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_VF:
		if (vf->num_req_qs)
			vf->num_vf_qs = vf->num_req_qs;
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is number
		 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
		 * original vector count
		 */
		vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_CTRL:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_CHNL:
		vsi->alloc_txq = 0;
		vsi->alloc_rxq = 0;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) slot index in an array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
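		/* no free slot right after the hint, fall back to scanning
		 * the array from the beginning
		 */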
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete_from_hw - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	int status;

	ice_fltr_remove_all(vsi);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* free the ring and vector containers */
	devm_kfree(dev, vsi->q_vectors);
	vsi->q_vectors = NULL;
	devm_kfree(dev, vsi->tx_rings);
	vsi->tx_rings = NULL;
	devm_kfree(dev, vsi->rx_rings);
	vsi->rx_rings = NULL;
	devm_kfree(dev, vsi->txq_map);
	vsi->txq_map = NULL;
	devm_kfree(dev, vsi->rxq_map);
	vsi->rxq_map = NULL;
}

/**
 * ice_vsi_free_stats - Free the ring statistics structures
 * @vsi: VSI pointer
 */
static void ice_vsi_free_stats(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;
	int i;

	if (vsi->type == ICE_VSI_CHNL)
		return;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	ice_for_each_alloc_txq(vsi, i) {
		if (vsi_stat->tx_ring_stats[i]) {
			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
		}
	}

	ice_for_each_alloc_rxq(vsi, i) {
		if (vsi_stat->rx_ring_stats[i]) {
			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
		}
	}

	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat->rx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
}

/**
 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
 * @vsi: VSI which is having stats allocated
 */
static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
{
	struct ice_ring_stats **tx_ring_stats;
	struct ice_ring_stats **rx_ring_stats;
	struct ice_vsi_stats *vsi_stats;
	struct ice_pf *pf = vsi->back;
	u16 i;

	vsi_stats = pf->vsi_stats[vsi->idx];
	tx_ring_stats = vsi_stats->tx_ring_stats;
	rx_ring_stats = vsi_stats->rx_ring_stats;

	/* Allocate Tx ring stats */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_tx_ring *ring;

		ring = vsi->tx_rings[i];
		ring_stats = tx_ring_stats[i];

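		/* keep a stats structure left over from a previous (re)build;
		 * only allocate a new one for rings that do not have one yet
		 */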
		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(tx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	/* Allocate Rx ring stats */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_rx_ring *ring;

		ring = vsi->rx_rings[i];
		ring_stats = rx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(rx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	return 0;

err_out:
	ice_vsi_free_stats(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_free - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 */
static void ice_vsi_free(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi || !vsi->back)
		return;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	pf->next_vsi = vsi->idx;

	ice_vsi_free_stats(vsi);
	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);
}

void ice_vsi_delete(struct ice_vsi *vsi)
{
	ice_vsi_delete_from_hw(vsi);
	ice_vsi_free(vsi);
}

/**
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring)
		return IRQ_HANDLED;

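	/* clean at most this many Flow Director Rx descriptors per interrupt */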
#define FDIR_RX_DESC_CLEAN_BUDGET 64
	ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);

	return IRQ_HANDLED;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	q_vector->total_events++;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
	struct ice_pf *pf = q_vector->vsi->back;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		napi_schedule(&vf->repr->q_vector->napi);
	rcu_read_unlock();

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
 * @vsi: VSI pointer
 */
static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;
	if (!pf->vsi_stats)
		return -ENOENT;

	/* realloc will happen in rebuild path */
	if (pf->vsi_stats[vsi->idx])
		return 0;

	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
	if (!vsi_stat)
		return -ENOMEM;

	vsi_stat->tx_ring_stats =
		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->tx_ring_stats)
		goto err_alloc_tx;

	vsi_stat->rx_ring_stats =
		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->rx_ring_stats)
		goto err_alloc_rx;

	pf->vsi_stats[vsi->idx] = vsi_stat;

	return 0;

err_alloc_rx:
	kfree(vsi_stat->rx_ring_stats);
err_alloc_tx:
	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
	return -ENOMEM;
}

/**
 * ice_vsi_alloc_def - set default values for already allocated VSI
 * @vsi: ptr to VSI
 * @ch: ptr to channel
 */
static int
ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
	if (vsi->type != ICE_VSI_CHNL) {
		ice_vsi_set_num_qs(vsi);
		if (ice_vsi_alloc_arrays(vsi))
			return -ENOMEM;
	}

	switch (vsi->type) {
	case ICE_VSI_SWITCHDEV_CTRL:
		/* Setup eswitch MSIX irq handler for VSI */
		vsi->irq_handler = ice_eswitch_msix_clean_rings;
		break;
	case ICE_VSI_PF:
		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_CTRL:
		/* Setup ctrl VSI MSIX irq handler */
		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
		break;
	case ICE_VSI_CHNL:
		if (!ch)
			return -EINVAL;

		vsi->num_rxq = ch->num_rxq;
		vsi->num_txq = ch->num_txq;
		vsi->next_base_q = ch->base_q;
		break;
	case ICE_VSI_VF:
	case ICE_VSI_LB:
		break;
	default:
		ice_vsi_free_arrays(vsi);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 *
 * Reserves a VSI index from the PF and allocates an empty VSI structure
 * without a type. The VSI structure must later be initialized by calling
 * ice_vsi_cfg().
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->back = pf;
	set_bit(ICE_VSI_DOWN, vsi->state);

	/* fill slot and make note of the index */
	vsi->idx = pf->next_vsi;
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);

unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM on no-op and -EINVAL on an invalid channel
 * configuration
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF VSI or
	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
	 * add/delete filters so resources are not allocated to it
	 */
	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
	      vsi->type == ICE_VSI_CHNL))
		return -EPERM;

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	/* PF main VSI gets only 64 FD resources from guaranteed pool
	 * when ADQ is configured.
	 */
#define ICE_PF_VSI_GFLTR	64

	/* determine FD filter resources per VSI from shared(best effort) and
	 * dedicated pool
	 */
	if (vsi->type == ICE_VSI_PF) {
		vsi->num_gfltr = g_val;
		/* if MQPRIO is configured, main VSI doesn't get all FD
		 * resources from guaranteed pool. PF VSI gets 64 FD resources
		 */
		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
			if (g_val < ICE_PF_VSI_GFLTR)
				return -EPERM;
			/* allow bare minimum entries for PF VSI */
			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
		}

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else {
		struct ice_vsi *main_vsi;
		int numtc;

		main_vsi = ice_get_main_vsi(pf);
		if (!main_vsi)
			return -EPERM;

		if (!main_vsi->all_numtc)
			return -EINVAL;

		/* figure out ADQ numtc */
		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;

		/* only one TC but still asking resources for channels,
		 * invalid config
		 */
		if (numtc < ICE_CHNL_START_TC)
			return -EPERM;

		g_val -= ICE_PF_VSI_GFLTR;
		/* channel VSIs gets equal share from guaranteed pool */
		vsi->num_gfltr = g_val / numtc;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	}

	return 0;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
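	/* Tx and Rx queues are requested from the PF-wide avail_txqs and
	 * avail_rxqs bitmaps; contiguous mapping (ICE_VSI_MAP_CONTIG) is
	 * requested and scatter_count bounds the allocation if scattered
	 * mapping ends up being used instead
	 */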
76803f7a986SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
76903f7a986SAnirudh Venkataramanan 	struct ice_qs_cfg tx_qs_cfg = {
77003f7a986SAnirudh Venkataramanan 		.qs_mutex = &pf->avail_q_mutex,
77103f7a986SAnirudh Venkataramanan 		.pf_map = pf->avail_txqs,
77278b5713aSAnirudh Venkataramanan 		.pf_map_size = pf->max_pf_txqs,
77303f7a986SAnirudh Venkataramanan 		.q_count = vsi->alloc_txq,
77403f7a986SAnirudh Venkataramanan 		.scatter_count = ICE_MAX_SCATTER_TXQS,
77503f7a986SAnirudh Venkataramanan 		.vsi_map = vsi->txq_map,
77603f7a986SAnirudh Venkataramanan 		.vsi_map_offset = 0,
77739066dc5SBrett Creeley 		.mapping_mode = ICE_VSI_MAP_CONTIG
77803f7a986SAnirudh Venkataramanan 	};
77903f7a986SAnirudh Venkataramanan 	struct ice_qs_cfg rx_qs_cfg = {
78003f7a986SAnirudh Venkataramanan 		.qs_mutex = &pf->avail_q_mutex,
78103f7a986SAnirudh Venkataramanan 		.pf_map = pf->avail_rxqs,
78278b5713aSAnirudh Venkataramanan 		.pf_map_size = pf->max_pf_rxqs,
78303f7a986SAnirudh Venkataramanan 		.q_count = vsi->alloc_rxq,
78403f7a986SAnirudh Venkataramanan 		.scatter_count = ICE_MAX_SCATTER_RXQS,
78503f7a986SAnirudh Venkataramanan 		.vsi_map = vsi->rxq_map,
78603f7a986SAnirudh Venkataramanan 		.vsi_map_offset = 0,
78739066dc5SBrett Creeley 		.mapping_mode = ICE_VSI_MAP_CONTIG
78803f7a986SAnirudh Venkataramanan 	};
78939066dc5SBrett Creeley 	int ret;
790df0f8479SAnirudh Venkataramanan 
7910754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL)
7920754d65bSKiran Patil 		return 0;
7930754d65bSKiran Patil 
79403f7a986SAnirudh Venkataramanan 	ret = __ice_vsi_get_qs(&tx_qs_cfg);
79539066dc5SBrett Creeley 	if (ret)
796df0f8479SAnirudh Venkataramanan 		return ret;
79739066dc5SBrett Creeley 	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
79839066dc5SBrett Creeley 
79939066dc5SBrett Creeley 	ret = __ice_vsi_get_qs(&rx_qs_cfg);
80039066dc5SBrett Creeley 	if (ret)
80139066dc5SBrett Creeley 		return ret;
80239066dc5SBrett Creeley 	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
80339066dc5SBrett Creeley 
80439066dc5SBrett Creeley 	return 0;
805df0f8479SAnirudh Venkataramanan }
806df0f8479SAnirudh Venkataramanan 
807df0f8479SAnirudh Venkataramanan /**
8085153a18eSAnirudh Venkataramanan  * ice_vsi_put_qs - Release queues from VSI to PF
8095153a18eSAnirudh Venkataramanan  * @vsi: the VSI that is going to release queues
8105153a18eSAnirudh Venkataramanan  */
ice_vsi_put_qs(struct ice_vsi * vsi)811135f4b9eSJacob Keller static void ice_vsi_put_qs(struct ice_vsi *vsi)
8125153a18eSAnirudh Venkataramanan {
8135153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
8145153a18eSAnirudh Venkataramanan 	int i;
8155153a18eSAnirudh Venkataramanan 
8165153a18eSAnirudh Venkataramanan 	mutex_lock(&pf->avail_q_mutex);
8175153a18eSAnirudh Venkataramanan 
8182faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_txq(vsi, i) {
8195153a18eSAnirudh Venkataramanan 		clear_bit(vsi->txq_map[i], pf->avail_txqs);
8205153a18eSAnirudh Venkataramanan 		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
8215153a18eSAnirudh Venkataramanan 	}
8225153a18eSAnirudh Venkataramanan 
8232faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_rxq(vsi, i) {
8245153a18eSAnirudh Venkataramanan 		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
8255153a18eSAnirudh Venkataramanan 		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
8265153a18eSAnirudh Venkataramanan 	}
8275153a18eSAnirudh Venkataramanan 
8285153a18eSAnirudh Venkataramanan 	mutex_unlock(&pf->avail_q_mutex);
8295153a18eSAnirudh Venkataramanan }
8305153a18eSAnirudh Venkataramanan 
8315153a18eSAnirudh Venkataramanan /**
832462acf6aSTony Nguyen  * ice_is_safe_mode
833462acf6aSTony Nguyen  * @pf: pointer to the PF struct
834462acf6aSTony Nguyen  *
835462acf6aSTony Nguyen  * returns true if driver is in safe mode, false otherwise
836462acf6aSTony Nguyen  */
ice_is_safe_mode(struct ice_pf * pf)837462acf6aSTony Nguyen bool ice_is_safe_mode(struct ice_pf *pf)
838462acf6aSTony Nguyen {
839462acf6aSTony Nguyen 	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
840462acf6aSTony Nguyen }
841462acf6aSTony Nguyen 
842462acf6aSTony Nguyen /**
84388f62aeaSDave Ertman  * ice_is_rdma_ena
844d25a0fc4SDave Ertman  * @pf: pointer to the PF struct
845d25a0fc4SDave Ertman  *
84688f62aeaSDave Ertman  * returns true if RDMA is currently supported, false otherwise
847d25a0fc4SDave Ertman  */
ice_is_rdma_ena(struct ice_pf * pf)84888f62aeaSDave Ertman bool ice_is_rdma_ena(struct ice_pf *pf)
849d25a0fc4SDave Ertman {
85088f62aeaSDave Ertman 	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
851d25a0fc4SDave Ertman }
852d25a0fc4SDave Ertman 
853d25a0fc4SDave Ertman /**
8542c61054cSTony Nguyen  * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
8552c61054cSTony Nguyen  * @vsi: the VSI being cleaned up
8562c61054cSTony Nguyen  *
8572c61054cSTony Nguyen  * This function deletes RSS input set for all flows that were configured
8582c61054cSTony Nguyen  * for this VSI
8592c61054cSTony Nguyen  */
ice_vsi_clean_rss_flow_fld(struct ice_vsi * vsi)8602c61054cSTony Nguyen static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
8612c61054cSTony Nguyen {
8622c61054cSTony Nguyen 	struct ice_pf *pf = vsi->back;
8635e24d598STony Nguyen 	int status;
8642c61054cSTony Nguyen 
8652c61054cSTony Nguyen 	if (ice_is_safe_mode(pf))
8662c61054cSTony Nguyen 		return;
8672c61054cSTony Nguyen 
8682c61054cSTony Nguyen 	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
8692c61054cSTony Nguyen 	if (status)
8705f87ec48STony Nguyen 		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
8715f87ec48STony Nguyen 			vsi->vsi_num, status);
8722c61054cSTony Nguyen }
8732c61054cSTony Nguyen 
8742c61054cSTony Nguyen /**
8752c61054cSTony Nguyen  * ice_rss_clean - Delete RSS related VSI structures and configuration
876df0f8479SAnirudh Venkataramanan  * @vsi: the VSI being removed
877df0f8479SAnirudh Venkataramanan  */
ice_rss_clean(struct ice_vsi * vsi)878df0f8479SAnirudh Venkataramanan static void ice_rss_clean(struct ice_vsi *vsi)
879df0f8479SAnirudh Venkataramanan {
8804015d11eSBrett Creeley 	struct ice_pf *pf = vsi->back;
8814015d11eSBrett Creeley 	struct device *dev;
882df0f8479SAnirudh Venkataramanan 
8834015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
884df0f8479SAnirudh Venkataramanan 
8854015d11eSBrett Creeley 	devm_kfree(dev, vsi->rss_hkey_user);
8864015d11eSBrett Creeley 	devm_kfree(dev, vsi->rss_lut_user);
8872c61054cSTony Nguyen 
8882c61054cSTony Nguyen 	ice_vsi_clean_rss_flow_fld(vsi);
8892c61054cSTony Nguyen 	/* remove RSS replay list */
8902c61054cSTony Nguyen 	if (!ice_is_safe_mode(pf))
8912c61054cSTony Nguyen 		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
892df0f8479SAnirudh Venkataramanan }
893df0f8479SAnirudh Venkataramanan 
894df0f8479SAnirudh Venkataramanan /**
89528c2a645SAnirudh Venkataramanan  * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
89628c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
89728c2a645SAnirudh Venkataramanan  */
ice_vsi_set_rss_params(struct ice_vsi * vsi)89837bb8390SAnirudh Venkataramanan static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
89928c2a645SAnirudh Venkataramanan {
90028c2a645SAnirudh Venkataramanan 	struct ice_hw_common_caps *cap;
90128c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
902b6143c9bSPrzemek Kitszel 	u16 max_rss_size;
90328c2a645SAnirudh Venkataramanan 
90428c2a645SAnirudh Venkataramanan 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
90528c2a645SAnirudh Venkataramanan 		vsi->rss_size = 1;
90628c2a645SAnirudh Venkataramanan 		return;
90728c2a645SAnirudh Venkataramanan 	}
90828c2a645SAnirudh Venkataramanan 
90928c2a645SAnirudh Venkataramanan 	cap = &pf->hw.func_caps.common_cap;
910b6143c9bSPrzemek Kitszel 	max_rss_size = BIT(cap->rss_table_entry_width);
91128c2a645SAnirudh Venkataramanan 	switch (vsi->type) {
9120754d65bSKiran Patil 	case ICE_VSI_CHNL:
91328c2a645SAnirudh Venkataramanan 	case ICE_VSI_PF:
91428c2a645SAnirudh Venkataramanan 		/* PF VSI will inherit RSS instance of PF */
91588865fc4SKarol Kolacinski 		vsi->rss_table_size = (u16)cap->rss_table_size;
9160754d65bSKiran Patil 		if (vsi->type == ICE_VSI_CHNL)
917b6143c9bSPrzemek Kitszel 			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
9180754d65bSKiran Patil 		else
91988865fc4SKarol Kolacinski 			vsi->rss_size = min_t(u16, num_online_cpus(),
920b6143c9bSPrzemek Kitszel 					      max_rss_size);
921b6143c9bSPrzemek Kitszel 		vsi->rss_lut_type = ICE_LUT_PF;
92228c2a645SAnirudh Venkataramanan 		break;
923f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
924b6143c9bSPrzemek Kitszel 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
925b6143c9bSPrzemek Kitszel 		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
926b6143c9bSPrzemek Kitszel 		vsi->rss_lut_type = ICE_LUT_VSI;
927f66756e0SGrzegorz Nitka 		break;
9288ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
9290ca469fbSMitch Williams 		/* VF VSI will get a small RSS table.
9300ca469fbSMitch Williams 		 * For VSI_LUT, LUT size should be set to 64 bytes.
9318ede0178SAnirudh Venkataramanan 		 */
932b6143c9bSPrzemek Kitszel 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
9330ca469fbSMitch Williams 		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
934b6143c9bSPrzemek Kitszel 		vsi->rss_lut_type = ICE_LUT_VSI;
9358ede0178SAnirudh Venkataramanan 		break;
9360e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
9370e674aebSAnirudh Venkataramanan 		break;
93828c2a645SAnirudh Venkataramanan 	default:
939148beb61SHenry Tieman 		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
940148beb61SHenry Tieman 			ice_vsi_type_str(vsi->type));
94128c2a645SAnirudh Venkataramanan 		break;
94228c2a645SAnirudh Venkataramanan 	}
94328c2a645SAnirudh Venkataramanan }
94428c2a645SAnirudh Venkataramanan 
94528c2a645SAnirudh Venkataramanan /**
94628c2a645SAnirudh Venkataramanan  * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
9471babaf77SBrett Creeley  * @hw: HW structure used to determine the VLAN mode of the device
94828c2a645SAnirudh Venkataramanan  * @ctxt: the VSI context being set
94928c2a645SAnirudh Venkataramanan  *
95028c2a645SAnirudh Venkataramanan  * This initializes a default VSI context for all sections except the Queues.
95128c2a645SAnirudh Venkataramanan  */
ice_set_dflt_vsi_ctx(struct ice_hw * hw,struct ice_vsi_ctx * ctxt)9521babaf77SBrett Creeley static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
95328c2a645SAnirudh Venkataramanan {
95428c2a645SAnirudh Venkataramanan 	u32 table = 0;
95528c2a645SAnirudh Venkataramanan 
95628c2a645SAnirudh Venkataramanan 	memset(&ctxt->info, 0, sizeof(ctxt->info));
95728c2a645SAnirudh Venkataramanan 	/* VSI's should be allocated from shared pool */
95828c2a645SAnirudh Venkataramanan 	ctxt->alloc_from_pool = true;
95928c2a645SAnirudh Venkataramanan 	/* Src pruning enabled by default */
96028c2a645SAnirudh Venkataramanan 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
96128c2a645SAnirudh Venkataramanan 	/* Traffic from VSI can be sent to LAN */
96228c2a645SAnirudh Venkataramanan 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
9631babaf77SBrett Creeley 	/* allow all untagged/tagged packets by default on Tx */
9647bd527aaSBrett Creeley 	ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
9657bd527aaSBrett Creeley 				  ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
9667bd527aaSBrett Creeley 				 ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
9671babaf77SBrett Creeley 	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
9681babaf77SBrett Creeley 	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
9691babaf77SBrett Creeley 	 *
9701babaf77SBrett Creeley 	 * DVM - leave inner VLAN in packet by default
9711babaf77SBrett Creeley 	 */
9721babaf77SBrett Creeley 	if (ice_is_dvm_ena(hw)) {
9731babaf77SBrett Creeley 		ctxt->info.inner_vlan_flags |=
9746bc0e112SJesse Brandeburg 			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
9756bc0e112SJesse Brandeburg 				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
9761babaf77SBrett Creeley 		ctxt->info.outer_vlan_flags =
9771babaf77SBrett Creeley 			(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
9781babaf77SBrett Creeley 			 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
9791babaf77SBrett Creeley 			ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
9801babaf77SBrett Creeley 		ctxt->info.outer_vlan_flags |=
9811babaf77SBrett Creeley 			(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
9821babaf77SBrett Creeley 			 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
9831babaf77SBrett Creeley 			ICE_AQ_VSI_OUTER_TAG_TYPE_M;
984b33de560SMichal Swiatkowski 		ctxt->info.outer_vlan_flags |=
985b33de560SMichal Swiatkowski 			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
986b33de560SMichal Swiatkowski 				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
9871babaf77SBrett Creeley 	}
98828c2a645SAnirudh Venkataramanan 	/* Have 1:1 UP mapping for both ingress/egress tables */
98928c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
99028c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
99128c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
99228c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
99328c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
99428c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
99528c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
99628c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
99728c2a645SAnirudh Venkataramanan 	ctxt->info.ingress_table = cpu_to_le32(table);
99828c2a645SAnirudh Venkataramanan 	ctxt->info.egress_table = cpu_to_le32(table);
99928c2a645SAnirudh Venkataramanan 	/* Have 1:1 UP mapping for outer to inner UP table */
100028c2a645SAnirudh Venkataramanan 	ctxt->info.outer_up_table = cpu_to_le32(table);
100128c2a645SAnirudh Venkataramanan 	/* No Outer tag support outer_tag_flags remains to zero */
100228c2a645SAnirudh Venkataramanan }
100328c2a645SAnirudh Venkataramanan 
100428c2a645SAnirudh Venkataramanan /**
100528c2a645SAnirudh Venkataramanan  * ice_vsi_setup_q_map - Setup a VSI queue map
100628c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
100728c2a645SAnirudh Venkataramanan  * @ctxt: VSI context structure
100828c2a645SAnirudh Venkataramanan  */
ice_vsi_setup_q_map(struct ice_vsi * vsi,struct ice_vsi_ctx * ctxt)1009a632b2a4SAnatolii Gerasymenko static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
101028c2a645SAnirudh Venkataramanan {
1011a509702cSDing Hui 	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
10128134d5ffSBrett Creeley 	u16 num_txq_per_tc, num_rxq_per_tc;
101328c2a645SAnirudh Venkataramanan 	u16 qcount_tx = vsi->alloc_txq;
101428c2a645SAnirudh Venkataramanan 	u16 qcount_rx = vsi->alloc_rxq;
1015c5a2a4a3SUsha Ketineni 	u8 netdev_tc = 0;
101628c2a645SAnirudh Venkataramanan 	int i;
101728c2a645SAnirudh Venkataramanan 
10180754d65bSKiran Patil 	if (!vsi->tc_cfg.numtc) {
101928c2a645SAnirudh Venkataramanan 		/* at least TC0 should be enabled by default */
10200754d65bSKiran Patil 		vsi->tc_cfg.numtc = 1;
10210754d65bSKiran Patil 		vsi->tc_cfg.ena_tc = 1;
102228c2a645SAnirudh Venkataramanan 	}
102328c2a645SAnirudh Venkataramanan 
10248134d5ffSBrett Creeley 	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
10258134d5ffSBrett Creeley 	if (!num_rxq_per_tc)
10268134d5ffSBrett Creeley 		num_rxq_per_tc = 1;
10278134d5ffSBrett Creeley 	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
10288134d5ffSBrett Creeley 	if (!num_txq_per_tc)
10298134d5ffSBrett Creeley 		num_txq_per_tc = 1;
10308134d5ffSBrett Creeley 
10318134d5ffSBrett Creeley 	/* find the (rounded up) power-of-2 of qcount */
10328134d5ffSBrett Creeley 	pow = (u16)order_base_2(num_rxq_per_tc);
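	/* e.g. num_rxq_per_tc of 6 rounds up to pow = 3, i.e. an 8-queue span */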
103328c2a645SAnirudh Venkataramanan 
103428c2a645SAnirudh Venkataramanan 	/* TC mapping is a function of the number of Rx queues assigned to the
103528c2a645SAnirudh Venkataramanan 	 * VSI for each traffic class and the offset of these queues.
103628c2a645SAnirudh Venkataramanan 	 * The first 10 bits are the queue offset for TC0, and the next 4 bits
103728c2a645SAnirudh Venkataramanan 	 * the number of queues allocated to TC0; the queue count is a power-of-2.
103828c2a645SAnirudh Venkataramanan 	 *
103928c2a645SAnirudh Venkataramanan 	 * If a TC is not enabled, its queue offset is set to 0 and one queue
104028c2a645SAnirudh Venkataramanan 	 * is allocated; this way, traffic for that TC will be sent to the
104128c2a645SAnirudh Venkataramanan 	 * default queue.
104228c2a645SAnirudh Venkataramanan 	 *
104328c2a645SAnirudh Venkataramanan 	 * Setup number and offset of Rx queues for all TCs for the VSI
104428c2a645SAnirudh Venkataramanan 	 */
10452bdc97beSBruce Allan 	ice_for_each_traffic_class(i) {
104628c2a645SAnirudh Venkataramanan 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
104728c2a645SAnirudh Venkataramanan 			/* TC is not enabled */
104828c2a645SAnirudh Venkataramanan 			vsi->tc_cfg.tc_info[i].qoffset = 0;
1049c5a2a4a3SUsha Ketineni 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
1050c5a2a4a3SUsha Ketineni 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
1051c5a2a4a3SUsha Ketineni 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
105228c2a645SAnirudh Venkataramanan 			ctxt->info.tc_mapping[i] = 0;
105328c2a645SAnirudh Venkataramanan 			continue;
105428c2a645SAnirudh Venkataramanan 		}
105528c2a645SAnirudh Venkataramanan 
105628c2a645SAnirudh Venkataramanan 		/* TC is enabled */
105728c2a645SAnirudh Venkataramanan 		vsi->tc_cfg.tc_info[i].qoffset = offset;
10588134d5ffSBrett Creeley 		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
10598134d5ffSBrett Creeley 		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
1060c5a2a4a3SUsha Ketineni 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
106128c2a645SAnirudh Venkataramanan 
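		/* qmap packs this TC's first Rx queue (offset) and the log2 of
		 * its Rx queue count (pow) into one 16-bit TC mapping word
		 */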
106228c2a645SAnirudh Venkataramanan 		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
106328c2a645SAnirudh Venkataramanan 			ICE_AQ_VSI_TC_Q_OFFSET_M) |
106428c2a645SAnirudh Venkataramanan 			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
106528c2a645SAnirudh Venkataramanan 			 ICE_AQ_VSI_TC_Q_NUM_M);
10668134d5ffSBrett Creeley 		offset += num_rxq_per_tc;
10678134d5ffSBrett Creeley 		tx_count += num_txq_per_tc;
106828c2a645SAnirudh Venkataramanan 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
106928c2a645SAnirudh Venkataramanan 	}
107060dcc39eSKiran Patil 
107160dcc39eSKiran Patil 	/* If offset is non-zero, it was calculated correctly from the TCs
107260dcc39eSKiran Patil 	 * enabled on this VSI; otherwise qcount_rx will always be correct and
107360dcc39eSKiran Patil 	 * non-zero because it is based off the VSI's allocated Rx queues,
107460dcc39eSKiran Patil 	 * which is at least 1 (hence qcount_tx will be at least 1).
107560dcc39eSKiran Patil 	 */
107760dcc39eSKiran Patil 	if (offset)
1078a509702cSDing Hui 		rx_count = offset;
107960dcc39eSKiran Patil 	else
1080a509702cSDing Hui 		rx_count = num_rxq_per_tc;
108160dcc39eSKiran Patil 
1082a509702cSDing Hui 	if (rx_count > vsi->alloc_rxq) {
1083a632b2a4SAnatolii Gerasymenko 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
1084a509702cSDing Hui 			rx_count, vsi->alloc_rxq);
1085a509702cSDing Hui 		return -EINVAL;
1086a509702cSDing Hui 	}
1087a509702cSDing Hui 
1088a509702cSDing Hui 	if (tx_count > vsi->alloc_txq) {
1089a509702cSDing Hui 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
1090a509702cSDing Hui 			tx_count, vsi->alloc_txq);
1091a632b2a4SAnatolii Gerasymenko 		return -EINVAL;
1092a632b2a4SAnatolii Gerasymenko 	}
1093a632b2a4SAnatolii Gerasymenko 
1094c5a2a4a3SUsha Ketineni 	vsi->num_txq = tx_count;
1095a509702cSDing Hui 	vsi->num_rxq = rx_count;
109628c2a645SAnirudh Venkataramanan 
10978ede0178SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
10989a946843SAnirudh Venkataramanan 		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
10998ede0178SAnirudh Venkataramanan 		/* since there is a chance that num_rxq could have been changed
11008ede0178SAnirudh Venkataramanan 		 * in the above for loop, make num_txq equal to num_rxq.
11018ede0178SAnirudh Venkataramanan 		 */
11028ede0178SAnirudh Venkataramanan 		vsi->num_txq = vsi->num_rxq;
11038ede0178SAnirudh Venkataramanan 	}
11048ede0178SAnirudh Venkataramanan 
110528c2a645SAnirudh Venkataramanan 	/* Rx queue mapping */
110628c2a645SAnirudh Venkataramanan 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
110728c2a645SAnirudh Venkataramanan 	/* q_mapping buffer holds the info for the first queue allocated for
110828c2a645SAnirudh Venkataramanan 	 * this VSI in the PF space and also the number of queues associated
110928c2a645SAnirudh Venkataramanan 	 * with this VSI.
111028c2a645SAnirudh Venkataramanan 	 */
111128c2a645SAnirudh Venkataramanan 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
111228c2a645SAnirudh Venkataramanan 	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
1113a632b2a4SAnatolii Gerasymenko 
1114a632b2a4SAnatolii Gerasymenko 	return 0;
111528c2a645SAnirudh Venkataramanan }
111628c2a645SAnirudh Venkataramanan 
111728c2a645SAnirudh Venkataramanan /**
1118148beb61SHenry Tieman  * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1119148beb61SHenry Tieman  * @ctxt: the VSI context being set
1120148beb61SHenry Tieman  * @vsi: the VSI being configured
1121148beb61SHenry Tieman  */
1122148beb61SHenry Tieman static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1123148beb61SHenry Tieman {
1124148beb61SHenry Tieman 	u8 dflt_q_group, dflt_q_prio;
1125148beb61SHenry Tieman 	u16 dflt_q, report_q, val;
1126148beb61SHenry Tieman 
1127da62c5ffSQi Zhang 	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
112840319796SKiran Patil 	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1129148beb61SHenry Tieman 		return;
1130148beb61SHenry Tieman 
1131148beb61SHenry Tieman 	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1132148beb61SHenry Tieman 	ctxt->info.valid_sections |= cpu_to_le16(val);
1133148beb61SHenry Tieman 	dflt_q = 0;
1134148beb61SHenry Tieman 	dflt_q_group = 0;
1135148beb61SHenry Tieman 	report_q = 0;
1136148beb61SHenry Tieman 	dflt_q_prio = 0;
1137148beb61SHenry Tieman 
1138148beb61SHenry Tieman 	/* enable flow director filtering/programming */
1139148beb61SHenry Tieman 	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
1140148beb61SHenry Tieman 	ctxt->info.fd_options = cpu_to_le16(val);
1141148beb61SHenry Tieman 	/* max of allocated flow director filters */
1142148beb61SHenry Tieman 	ctxt->info.max_fd_fltr_dedicated =
1143148beb61SHenry Tieman 			cpu_to_le16(vsi->num_gfltr);
1144148beb61SHenry Tieman 	/* max of shared flow director filters any VSI may program */
1145148beb61SHenry Tieman 	ctxt->info.max_fd_fltr_shared =
1146148beb61SHenry Tieman 			cpu_to_le16(vsi->num_bfltr);
1147148beb61SHenry Tieman 	/* default queue index within the VSI of the default FD */
1148148beb61SHenry Tieman 	val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
1149148beb61SHenry Tieman 	       ICE_AQ_VSI_FD_DEF_Q_M);
1150148beb61SHenry Tieman 	/* target queue or queue group to the FD filter */
1151148beb61SHenry Tieman 	val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
1152148beb61SHenry Tieman 		ICE_AQ_VSI_FD_DEF_GRP_M);
1153148beb61SHenry Tieman 	ctxt->info.fd_def_q = cpu_to_le16(val);
1154148beb61SHenry Tieman 	/* queue index on which FD filter completion is reported */
1155148beb61SHenry Tieman 	val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
1156148beb61SHenry Tieman 	       ICE_AQ_VSI_FD_REPORT_Q_M);
1157148beb61SHenry Tieman 	/* priority of the default qindex action */
1158148beb61SHenry Tieman 	val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
1159148beb61SHenry Tieman 		ICE_AQ_VSI_FD_DEF_PRIORITY_M);
1160148beb61SHenry Tieman 	ctxt->info.fd_report_opt = cpu_to_le16(val);
1161148beb61SHenry Tieman }
1162148beb61SHenry Tieman 
1163148beb61SHenry Tieman /**
116428c2a645SAnirudh Venkataramanan  * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
116528c2a645SAnirudh Venkataramanan  * @ctxt: the VSI context being set
116628c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
116728c2a645SAnirudh Venkataramanan  */
116828c2a645SAnirudh Venkataramanan static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
116928c2a645SAnirudh Venkataramanan {
117028c2a645SAnirudh Venkataramanan 	u8 lut_type, hash_type;
11714015d11eSBrett Creeley 	struct device *dev;
1172819d8998SJesse Brandeburg 	struct ice_pf *pf;
1173819d8998SJesse Brandeburg 
1174819d8998SJesse Brandeburg 	pf = vsi->back;
11754015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
117628c2a645SAnirudh Venkataramanan 
117728c2a645SAnirudh Venkataramanan 	switch (vsi->type) {
11780754d65bSKiran Patil 	case ICE_VSI_CHNL:
117928c2a645SAnirudh Venkataramanan 	case ICE_VSI_PF:
118028c2a645SAnirudh Venkataramanan 		/* PF VSI will inherit RSS instance of PF */
118128c2a645SAnirudh Venkataramanan 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
1182334a1227SAhmed Zaki 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
118328c2a645SAnirudh Venkataramanan 		break;
11848ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
11858ede0178SAnirudh Venkataramanan 		/* VF VSI will get a small RSS table which is a VSI LUT type */
11868ede0178SAnirudh Venkataramanan 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
1187334a1227SAhmed Zaki 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
11888ede0178SAnirudh Venkataramanan 		break;
1189148beb61SHenry Tieman 	default:
11904015d11eSBrett Creeley 		dev_dbg(dev, "Unsupported VSI type %s\n",
1191964674f1SAnirudh Venkataramanan 			ice_vsi_type_str(vsi->type));
11920e674aebSAnirudh Venkataramanan 		return;
119328c2a645SAnirudh Venkataramanan 	}
119428c2a645SAnirudh Venkataramanan 
119528c2a645SAnirudh Venkataramanan 	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
119628c2a645SAnirudh Venkataramanan 				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
1197242e3450SJesse Brandeburg 				(hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
119828c2a645SAnirudh Venkataramanan }
119928c2a645SAnirudh Venkataramanan 
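/**
 * ice_chnl_vsi_setup_q_map - Setup a channel VSI queue map
 * @vsi: the channel VSI being configured
 * @ctxt: VSI context structure
 *
 * Map all of the channel VSI's Rx queues into TC 0, capping the count at the
 * number of LAN MSI-X vectors, and point the contiguous queue mapping at the
 * VSI's base queue.
 */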
12000754d65bSKiran Patil static void
12010754d65bSKiran Patil ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
12020754d65bSKiran Patil {
12030754d65bSKiran Patil 	struct ice_pf *pf = vsi->back;
12040754d65bSKiran Patil 	u16 qcount, qmap;
12050754d65bSKiran Patil 	u8 offset = 0;
12060754d65bSKiran Patil 	int pow;
12070754d65bSKiran Patil 
12080754d65bSKiran Patil 	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
12090754d65bSKiran Patil 
12100754d65bSKiran Patil 	pow = order_base_2(qcount);
12110754d65bSKiran Patil 	qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
12120754d65bSKiran Patil 		 ICE_AQ_VSI_TC_Q_OFFSET_M) |
12130754d65bSKiran Patil 		 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
12140754d65bSKiran Patil 		   ICE_AQ_VSI_TC_Q_NUM_M);
12150754d65bSKiran Patil 
12160754d65bSKiran Patil 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
12170754d65bSKiran Patil 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
12180754d65bSKiran Patil 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
12190754d65bSKiran Patil 	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
12200754d65bSKiran Patil }
12210754d65bSKiran Patil 
122228c2a645SAnirudh Venkataramanan /**
122345f5478cSJan Sokolowski  * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
122445f5478cSJan Sokolowski  * @vsi: VSI to check whether or not VLAN pruning is enabled.
122545f5478cSJan Sokolowski  *
122645f5478cSJan Sokolowski  * returns true if Rx VLAN pruning is enabled and false otherwise.
122745f5478cSJan Sokolowski  */
122845f5478cSJan Sokolowski static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
122945f5478cSJan Sokolowski {
1230e528e5b2SJan Sokolowski 	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
123145f5478cSJan Sokolowski }
123245f5478cSJan Sokolowski 
123345f5478cSJan Sokolowski /**
123428c2a645SAnirudh Venkataramanan  * ice_vsi_init - Create and initialize a VSI
123528c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
12365e509ab2SJacob Keller  * @vsi_flags: VSI configuration flags
12375e509ab2SJacob Keller  *
12385e509ab2SJacob Keller  * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
12395e509ab2SJacob Keller  * reconfigure an existing context.
124028c2a645SAnirudh Venkataramanan  *
124128c2a645SAnirudh Venkataramanan  * This initializes a VSI context depending on the VSI type to be added and
124228c2a645SAnirudh Venkataramanan  * passes it down to the add_vsi aq command to create a new VSI.
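 *
 * Return: 0 on success, negative error code on failure.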
124328c2a645SAnirudh Venkataramanan  */
12445e509ab2SJacob Keller static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
124528c2a645SAnirudh Venkataramanan {
124628c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
124728c2a645SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
1248198a666aSBruce Allan 	struct ice_vsi_ctx *ctxt;
124987324e74SHenry Tieman 	struct device *dev;
125028c2a645SAnirudh Venkataramanan 	int ret = 0;
125128c2a645SAnirudh Venkataramanan 
125287324e74SHenry Tieman 	dev = ice_pf_to_dev(pf);
12539efe35d0STony Nguyen 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1254198a666aSBruce Allan 	if (!ctxt)
1255198a666aSBruce Allan 		return -ENOMEM;
1256198a666aSBruce Allan 
125728c2a645SAnirudh Venkataramanan 	switch (vsi->type) {
1258148beb61SHenry Tieman 	case ICE_VSI_CTRL:
12590e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
126028c2a645SAnirudh Venkataramanan 	case ICE_VSI_PF:
1261198a666aSBruce Allan 		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
126228c2a645SAnirudh Venkataramanan 		break;
1263f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
12640754d65bSKiran Patil 	case ICE_VSI_CHNL:
1265f66756e0SGrzegorz Nitka 		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
1266f66756e0SGrzegorz Nitka 		break;
12678ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
1268198a666aSBruce Allan 		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
12698ede0178SAnirudh Venkataramanan 		/* VF number here is the absolute VF number (0-255) */
1270b03d519dSJacob Keller 		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
12718ede0178SAnirudh Venkataramanan 		break;
127228c2a645SAnirudh Venkataramanan 	default:
12739efe35d0STony Nguyen 		ret = -ENODEV;
12749efe35d0STony Nguyen 		goto out;
127528c2a645SAnirudh Venkataramanan 	}
127628c2a645SAnirudh Venkataramanan 
12770754d65bSKiran Patil 	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
12780754d65bSKiran Patil 	 * prune enabled
12790754d65bSKiran Patil 	 */
12800754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL) {
12810754d65bSKiran Patil 		struct ice_vsi *main_vsi;
12820754d65bSKiran Patil 
12830754d65bSKiran Patil 		main_vsi = ice_get_main_vsi(pf);
12840754d65bSKiran Patil 		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
12850754d65bSKiran Patil 			ctxt->info.sw_flags2 |=
12860754d65bSKiran Patil 				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
12870754d65bSKiran Patil 		else
12880754d65bSKiran Patil 			ctxt->info.sw_flags2 &=
12890754d65bSKiran Patil 				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
12900754d65bSKiran Patil 	}
12910754d65bSKiran Patil 
12921babaf77SBrett Creeley 	ice_set_dflt_vsi_ctx(hw, ctxt);
1293148beb61SHenry Tieman 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
1294148beb61SHenry Tieman 		ice_set_fd_vsi_ctx(ctxt, vsi);
129528c2a645SAnirudh Venkataramanan 	/* if the switch is in VEB mode, allow VSI loopback */
129628c2a645SAnirudh Venkataramanan 	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1297198a666aSBruce Allan 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
129828c2a645SAnirudh Venkataramanan 
129928c2a645SAnirudh Venkataramanan 	/* Set LUT type and HASH type if RSS is enabled */
1300148beb61SHenry Tieman 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
1301148beb61SHenry Tieman 	    vsi->type != ICE_VSI_CTRL) {
1302198a666aSBruce Allan 		ice_set_rss_vsi_ctx(ctxt, vsi);
130387324e74SHenry Tieman 		/* if updating the VSI context, make sure to set valid_sections
130487324e74SHenry Tieman 		 * to indicate which section of the VSI context is being updated
130587324e74SHenry Tieman 		 */
13065e509ab2SJacob Keller 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
130787324e74SHenry Tieman 			ctxt->info.valid_sections |=
130887324e74SHenry Tieman 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
130987324e74SHenry Tieman 	}
131028c2a645SAnirudh Venkataramanan 
1311198a666aSBruce Allan 	ctxt->info.sw_id = vsi->port_info->sw_id;
13120754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL) {
13130754d65bSKiran Patil 		ice_chnl_vsi_setup_q_map(vsi, ctxt);
13140754d65bSKiran Patil 	} else {
1315a632b2a4SAnatolii Gerasymenko 		ret = ice_vsi_setup_q_map(vsi, ctxt);
1316a632b2a4SAnatolii Gerasymenko 		if (ret)
1317a632b2a4SAnatolii Gerasymenko 			goto out;
1318a632b2a4SAnatolii Gerasymenko 
13195e509ab2SJacob Keller 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
13206624e780SMichal Swiatkowski 			/* means the VSI is being updated */
132187324e74SHenry Tieman 			/* must indicate which sections of the VSI context
132287324e74SHenry Tieman 			 * are being modified
132387324e74SHenry Tieman 			 */
132487324e74SHenry Tieman 			ctxt->info.valid_sections |=
132587324e74SHenry Tieman 				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
13260754d65bSKiran Patil 	}
132728c2a645SAnirudh Venkataramanan 
13280c3a6101SDave Ertman 	/* Allow control frames out of main VSI */
13290c3a6101SDave Ertman 	if (vsi->type == ICE_VSI_PF) {
13300c3a6101SDave Ertman 		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
13310c3a6101SDave Ertman 		ctxt->info.valid_sections |=
13320c3a6101SDave Ertman 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
13330c3a6101SDave Ertman 	}
13340c3a6101SDave Ertman 
13355e509ab2SJacob Keller 	if (vsi_flags & ICE_VSI_FLAG_INIT) {
1336198a666aSBruce Allan 		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
133728c2a645SAnirudh Venkataramanan 		if (ret) {
133887324e74SHenry Tieman 			dev_err(dev, "Add VSI failed, err %d\n", ret);
13399efe35d0STony Nguyen 			ret = -EIO;
13409efe35d0STony Nguyen 			goto out;
134128c2a645SAnirudh Venkataramanan 		}
134287324e74SHenry Tieman 	} else {
134387324e74SHenry Tieman 		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
134487324e74SHenry Tieman 		if (ret) {
134587324e74SHenry Tieman 			dev_err(dev, "Update VSI failed, err %d\n", ret);
134687324e74SHenry Tieman 			ret = -EIO;
134787324e74SHenry Tieman 			goto out;
134887324e74SHenry Tieman 		}
134987324e74SHenry Tieman 	}
135028c2a645SAnirudh Venkataramanan 
135128c2a645SAnirudh Venkataramanan 	/* keep context for update VSI operations */
1352198a666aSBruce Allan 	vsi->info = ctxt->info;
135328c2a645SAnirudh Venkataramanan 
135428c2a645SAnirudh Venkataramanan 	/* record VSI number returned */
1355198a666aSBruce Allan 	vsi->vsi_num = ctxt->vsi_num;
135628c2a645SAnirudh Venkataramanan 
13579efe35d0STony Nguyen out:
13589efe35d0STony Nguyen 	kfree(ctxt);
135928c2a645SAnirudh Venkataramanan 	return ret;
136028c2a645SAnirudh Venkataramanan }
136128c2a645SAnirudh Venkataramanan 
136228c2a645SAnirudh Venkataramanan /**
136328c2a645SAnirudh Venkataramanan  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
136428c2a645SAnirudh Venkataramanan  * @vsi: the VSI having rings deallocated
136528c2a645SAnirudh Venkataramanan  */
1366df0f8479SAnirudh Venkataramanan static void ice_vsi_clear_rings(struct ice_vsi *vsi)
136728c2a645SAnirudh Venkataramanan {
136828c2a645SAnirudh Venkataramanan 	int i;
136928c2a645SAnirudh Venkataramanan 
1370f6a07271SJacob Keller 	/* Avoid stale references by clearing map from vector to ring */
1371f6a07271SJacob Keller 	if (vsi->q_vectors) {
1372f6a07271SJacob Keller 		ice_for_each_q_vector(vsi, i) {
1373f6a07271SJacob Keller 			struct ice_q_vector *q_vector = vsi->q_vectors[i];
1374f6a07271SJacob Keller 
1375f6a07271SJacob Keller 			if (q_vector) {
1376e72bba21SMaciej Fijalkowski 				q_vector->tx.tx_ring = NULL;
1377e72bba21SMaciej Fijalkowski 				q_vector->rx.rx_ring = NULL;
1378f6a07271SJacob Keller 			}
1379f6a07271SJacob Keller 		}
1380f6a07271SJacob Keller 	}
1381f6a07271SJacob Keller 
138228c2a645SAnirudh Venkataramanan 	if (vsi->tx_rings) {
13832faf63b6SMaciej Fijalkowski 		ice_for_each_alloc_txq(vsi, i) {
138428c2a645SAnirudh Venkataramanan 			if (vsi->tx_rings[i]) {
138528c2a645SAnirudh Venkataramanan 				kfree_rcu(vsi->tx_rings[i], rcu);
1386b1d95cc2SCiara Loftus 				WRITE_ONCE(vsi->tx_rings[i], NULL);
138728c2a645SAnirudh Venkataramanan 			}
138828c2a645SAnirudh Venkataramanan 		}
138928c2a645SAnirudh Venkataramanan 	}
139028c2a645SAnirudh Venkataramanan 	if (vsi->rx_rings) {
13912faf63b6SMaciej Fijalkowski 		ice_for_each_alloc_rxq(vsi, i) {
139228c2a645SAnirudh Venkataramanan 			if (vsi->rx_rings[i]) {
139328c2a645SAnirudh Venkataramanan 				kfree_rcu(vsi->rx_rings[i], rcu);
1394b1d95cc2SCiara Loftus 				WRITE_ONCE(vsi->rx_rings[i], NULL);
139528c2a645SAnirudh Venkataramanan 			}
139628c2a645SAnirudh Venkataramanan 		}
139728c2a645SAnirudh Venkataramanan 	}
139828c2a645SAnirudh Venkataramanan }
139928c2a645SAnirudh Venkataramanan 
140028c2a645SAnirudh Venkataramanan /**
140128c2a645SAnirudh Venkataramanan  * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
140228c2a645SAnirudh Venkataramanan  * @vsi: VSI which is having rings allocated
140328c2a645SAnirudh Venkataramanan  */
140437bb8390SAnirudh Venkataramanan static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
140528c2a645SAnirudh Venkataramanan {
14060d54d8f7SBrett Creeley 	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
140728c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
14084015d11eSBrett Creeley 	struct device *dev;
140988865fc4SKarol Kolacinski 	u16 i;
141028c2a645SAnirudh Venkataramanan 
14114015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
1412d337f2afSAnirudh Venkataramanan 	/* Allocate Tx rings */
14132faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_txq(vsi, i) {
1414e72bba21SMaciej Fijalkowski 		struct ice_tx_ring *ring;
141528c2a645SAnirudh Venkataramanan 
141628c2a645SAnirudh Venkataramanan 		/* allocate with kzalloc(), free with kfree_rcu() */
141728c2a645SAnirudh Venkataramanan 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
141828c2a645SAnirudh Venkataramanan 
141928c2a645SAnirudh Venkataramanan 		if (!ring)
142028c2a645SAnirudh Venkataramanan 			goto err_out;
142128c2a645SAnirudh Venkataramanan 
142228c2a645SAnirudh Venkataramanan 		ring->q_index = i;
142328c2a645SAnirudh Venkataramanan 		ring->reg_idx = vsi->txq_map[i];
142428c2a645SAnirudh Venkataramanan 		ring->vsi = vsi;
1425ea9b847cSJacob Keller 		ring->tx_tstamps = &pf->ptp.port.tx;
14264015d11eSBrett Creeley 		ring->dev = dev;
1427ad71b256SBrett Creeley 		ring->count = vsi->num_tx_desc;
1428ccfee182SAnatolii Gerasymenko 		ring->txq_teid = ICE_INVAL_TEID;
14290d54d8f7SBrett Creeley 		if (dvm_ena)
14300d54d8f7SBrett Creeley 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
14310d54d8f7SBrett Creeley 		else
14320d54d8f7SBrett Creeley 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
1433b1d95cc2SCiara Loftus 		WRITE_ONCE(vsi->tx_rings[i], ring);
143428c2a645SAnirudh Venkataramanan 	}
143528c2a645SAnirudh Venkataramanan 
1436d337f2afSAnirudh Venkataramanan 	/* Allocate Rx rings */
14372faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_rxq(vsi, i) {
1438e72bba21SMaciej Fijalkowski 		struct ice_rx_ring *ring;
143928c2a645SAnirudh Venkataramanan 
144028c2a645SAnirudh Venkataramanan 		/* allocate with kzalloc(), free with kfree_rcu() */
144128c2a645SAnirudh Venkataramanan 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
144228c2a645SAnirudh Venkataramanan 		if (!ring)
144328c2a645SAnirudh Venkataramanan 			goto err_out;
144428c2a645SAnirudh Venkataramanan 
144528c2a645SAnirudh Venkataramanan 		ring->q_index = i;
144628c2a645SAnirudh Venkataramanan 		ring->reg_idx = vsi->rxq_map[i];
144728c2a645SAnirudh Venkataramanan 		ring->vsi = vsi;
144828c2a645SAnirudh Venkataramanan 		ring->netdev = vsi->netdev;
14494015d11eSBrett Creeley 		ring->dev = dev;
1450ad71b256SBrett Creeley 		ring->count = vsi->num_rx_desc;
1451cf6b82fdSJacob Keller 		ring->cached_phctime = pf->ptp.cached_phc_time;
1452b1d95cc2SCiara Loftus 		WRITE_ONCE(vsi->rx_rings[i], ring);
145328c2a645SAnirudh Venkataramanan 	}
145428c2a645SAnirudh Venkataramanan 
145528c2a645SAnirudh Venkataramanan 	return 0;
145628c2a645SAnirudh Venkataramanan 
145728c2a645SAnirudh Venkataramanan err_out:
145828c2a645SAnirudh Venkataramanan 	ice_vsi_clear_rings(vsi);
145928c2a645SAnirudh Venkataramanan 	return -ENOMEM;
146028c2a645SAnirudh Venkataramanan }
146128c2a645SAnirudh Venkataramanan 
146228c2a645SAnirudh Venkataramanan /**
1463492af0abSMd Fahad Iqbal Polash  * ice_vsi_manage_rss_lut - disable/enable RSS
1464492af0abSMd Fahad Iqbal Polash  * @vsi: the VSI being changed
1465492af0abSMd Fahad Iqbal Polash  * @ena: boolean value indicating if this is an enable or disable request
1466492af0abSMd Fahad Iqbal Polash  *
1467492af0abSMd Fahad Iqbal Polash  * In the event of a disable request for RSS, this function will zero out the
1468492af0abSMd Fahad Iqbal Polash  * RSS LUT, while in the event of an enable request for RSS, it will
1469492af0abSMd Fahad Iqbal Polash  * reconfigure the RSS LUT.
1470492af0abSMd Fahad Iqbal Polash  */
14714fe36226SPaul M Stillwell Jr void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1472492af0abSMd Fahad Iqbal Polash {
1473492af0abSMd Fahad Iqbal Polash 	u8 *lut;
1474492af0abSMd Fahad Iqbal Polash 
14759efe35d0STony Nguyen 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1476492af0abSMd Fahad Iqbal Polash 	if (!lut)
14774fe36226SPaul M Stillwell Jr 		return;
1478492af0abSMd Fahad Iqbal Polash 
1479492af0abSMd Fahad Iqbal Polash 	if (ena) {
1480492af0abSMd Fahad Iqbal Polash 		if (vsi->rss_lut_user)
1481492af0abSMd Fahad Iqbal Polash 			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1482492af0abSMd Fahad Iqbal Polash 		else
1483492af0abSMd Fahad Iqbal Polash 			ice_fill_rss_lut(lut, vsi->rss_table_size,
1484492af0abSMd Fahad Iqbal Polash 					 vsi->rss_size);
1485492af0abSMd Fahad Iqbal Polash 	}
1486492af0abSMd Fahad Iqbal Polash 
14874fe36226SPaul M Stillwell Jr 	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
14889efe35d0STony Nguyen 	kfree(lut);
1489492af0abSMd Fahad Iqbal Polash }
1490492af0abSMd Fahad Iqbal Polash 
1491492af0abSMd Fahad Iqbal Polash /**
1492dddd406dSJesse Brandeburg  * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1493dddd406dSJesse Brandeburg  * @vsi: VSI to be configured
1494dddd406dSJesse Brandeburg  * @disable: set to true to have FCS / CRC in the frame data
1495dddd406dSJesse Brandeburg  */
1496dddd406dSJesse Brandeburg void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
1497dddd406dSJesse Brandeburg {
1498dddd406dSJesse Brandeburg 	int i;
1499dddd406dSJesse Brandeburg 
1500dddd406dSJesse Brandeburg 	ice_for_each_rxq(vsi, i)
1501dddd406dSJesse Brandeburg 		if (disable)
1502dddd406dSJesse Brandeburg 			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1503dddd406dSJesse Brandeburg 		else
1504dddd406dSJesse Brandeburg 			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1505dddd406dSJesse Brandeburg }
1506dddd406dSJesse Brandeburg 
1507dddd406dSJesse Brandeburg /**
150837bb8390SAnirudh Venkataramanan  * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
150937bb8390SAnirudh Venkataramanan  * @vsi: VSI to be configured
151037bb8390SAnirudh Venkataramanan  */
15110754d65bSKiran Patil int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
151237bb8390SAnirudh Venkataramanan {
151337bb8390SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
15144015d11eSBrett Creeley 	struct device *dev;
1515b66a972aSBrett Creeley 	u8 *lut, *key;
1516b66a972aSBrett Creeley 	int err;
151737bb8390SAnirudh Venkataramanan 
15184015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
15190754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
15200754d65bSKiran Patil 	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
15210754d65bSKiran Patil 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
15220754d65bSKiran Patil 	} else {
152388865fc4SKarol Kolacinski 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
152437bb8390SAnirudh Venkataramanan 
15250754d65bSKiran Patil 		/* If orig_rss_size is valid and less than the determined
15260754d65bSKiran Patil 		 * main VSI's rss_size, update the main VSI's rss_size to be
15270754d65bSKiran Patil 		 * orig_rss_size so that when tc-qdisc is deleted, the main
15280754d65bSKiran Patil 		 * VSI RSS table gets programmed to be correct (whatever it
15290754d65bSKiran Patil 		 * was to begin with, prior to setup-tc for ADQ config).
15300754d65bSKiran Patil 		 */
15310754d65bSKiran Patil 		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
15320754d65bSKiran Patil 		    vsi->orig_rss_size <= vsi->num_rxq) {
15330754d65bSKiran Patil 			vsi->rss_size = vsi->orig_rss_size;
15340754d65bSKiran Patil 			/* now orig_rss_size is used, reset it to zero */
15350754d65bSKiran Patil 			vsi->orig_rss_size = 0;
15360754d65bSKiran Patil 		}
15370754d65bSKiran Patil 	}
15380754d65bSKiran Patil 
15399efe35d0STony Nguyen 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
154037bb8390SAnirudh Venkataramanan 	if (!lut)
154137bb8390SAnirudh Venkataramanan 		return -ENOMEM;
154237bb8390SAnirudh Venkataramanan 
154337bb8390SAnirudh Venkataramanan 	if (vsi->rss_lut_user)
154437bb8390SAnirudh Venkataramanan 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
154537bb8390SAnirudh Venkataramanan 	else
154637bb8390SAnirudh Venkataramanan 		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
154737bb8390SAnirudh Venkataramanan 
1548b66a972aSBrett Creeley 	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1549b66a972aSBrett Creeley 	if (err) {
1550b66a972aSBrett Creeley 		dev_err(dev, "set_rss_lut failed, error %d\n", err);
155137bb8390SAnirudh Venkataramanan 		goto ice_vsi_cfg_rss_exit;
155237bb8390SAnirudh Venkataramanan 	}
155337bb8390SAnirudh Venkataramanan 
1554b66a972aSBrett Creeley 	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
155537bb8390SAnirudh Venkataramanan 	if (!key) {
155637bb8390SAnirudh Venkataramanan 		err = -ENOMEM;
155737bb8390SAnirudh Venkataramanan 		goto ice_vsi_cfg_rss_exit;
155837bb8390SAnirudh Venkataramanan 	}
155937bb8390SAnirudh Venkataramanan 
156037bb8390SAnirudh Venkataramanan 	if (vsi->rss_hkey_user)
1561b66a972aSBrett Creeley 		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
156237bb8390SAnirudh Venkataramanan 	else
1563b66a972aSBrett Creeley 		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
156437bb8390SAnirudh Venkataramanan 
1565b66a972aSBrett Creeley 	err = ice_set_rss_key(vsi, key);
1566b66a972aSBrett Creeley 	if (err)
1567b66a972aSBrett Creeley 		dev_err(dev, "set_rss_key failed, error %d\n", err);
156837bb8390SAnirudh Venkataramanan 
15699efe35d0STony Nguyen 	kfree(key);
157037bb8390SAnirudh Venkataramanan ice_vsi_cfg_rss_exit:
15719efe35d0STony Nguyen 	kfree(lut);
157237bb8390SAnirudh Venkataramanan 	return err;
157337bb8390SAnirudh Venkataramanan }
157437bb8390SAnirudh Venkataramanan 
157537bb8390SAnirudh Venkataramanan /**
15761c01c8c6SMd Fahad Iqbal Polash  * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
15771c01c8c6SMd Fahad Iqbal Polash  * @vsi: VSI to be configured
15781c01c8c6SMd Fahad Iqbal Polash  *
15791c01c8c6SMd Fahad Iqbal Polash  * This function will only be called during the VF VSI setup. Upon successful
15801c01c8c6SMd Fahad Iqbal Polash  * completion of package download, this function will configure default RSS
15811c01c8c6SMd Fahad Iqbal Polash  * input sets for VF VSI.
15821c01c8c6SMd Fahad Iqbal Polash  */
15831c01c8c6SMd Fahad Iqbal Polash static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
15841c01c8c6SMd Fahad Iqbal Polash {
15851c01c8c6SMd Fahad Iqbal Polash 	struct ice_pf *pf = vsi->back;
15861c01c8c6SMd Fahad Iqbal Polash 	struct device *dev;
15875518ac2aSTony Nguyen 	int status;
15881c01c8c6SMd Fahad Iqbal Polash 
15891c01c8c6SMd Fahad Iqbal Polash 	dev = ice_pf_to_dev(pf);
15901c01c8c6SMd Fahad Iqbal Polash 	if (ice_is_safe_mode(pf)) {
15911c01c8c6SMd Fahad Iqbal Polash 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
15921c01c8c6SMd Fahad Iqbal Polash 			vsi->vsi_num);
15931c01c8c6SMd Fahad Iqbal Polash 		return;
15941c01c8c6SMd Fahad Iqbal Polash 	}
15951c01c8c6SMd Fahad Iqbal Polash 
15961c01c8c6SMd Fahad Iqbal Polash 	status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
15971c01c8c6SMd Fahad Iqbal Polash 	if (status)
15985f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
15995f87ec48STony Nguyen 			vsi->vsi_num, status);
16001c01c8c6SMd Fahad Iqbal Polash }
16011c01c8c6SMd Fahad Iqbal Polash 
16021c01c8c6SMd Fahad Iqbal Polash /**
1603c90ed40cSTony Nguyen  * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1604c90ed40cSTony Nguyen  * @vsi: VSI to be configured
1605c90ed40cSTony Nguyen  *
1606c90ed40cSTony Nguyen  * This function will only be called after successful download package call
1607c90ed40cSTony Nguyen  * during initialization of PF. Since the downloaded package will erase the
1608c90ed40cSTony Nguyen  * RSS section, this function will configure RSS input sets for different
1609c90ed40cSTony Nguyen  * flow types. The last profile added has the highest priority, therefore 2
1610c90ed40cSTony Nguyen  * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
1611c90ed40cSTony Nguyen  * (i.e. IPv4 src/dst TCP src/dst port).
1612c90ed40cSTony Nguyen  */
1613c90ed40cSTony Nguyen static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1614c90ed40cSTony Nguyen {
1615c90ed40cSTony Nguyen 	u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
1616c90ed40cSTony Nguyen 	struct ice_pf *pf = vsi->back;
1617c90ed40cSTony Nguyen 	struct ice_hw *hw = &pf->hw;
1618c90ed40cSTony Nguyen 	struct device *dev;
16195518ac2aSTony Nguyen 	int status;
1620c90ed40cSTony Nguyen 
1621c90ed40cSTony Nguyen 	dev = ice_pf_to_dev(pf);
1622c90ed40cSTony Nguyen 	if (ice_is_safe_mode(pf)) {
1623c90ed40cSTony Nguyen 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1624c90ed40cSTony Nguyen 			vsi_num);
1625c90ed40cSTony Nguyen 		return;
1626c90ed40cSTony Nguyen 	}
1627c90ed40cSTony Nguyen 	/* configure RSS for IPv4 with input set IP src/dst */
1628c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1629c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_IPV4);
1630c90ed40cSTony Nguyen 	if (status)
16315f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
16325f87ec48STony Nguyen 			vsi_num, status);
1633c90ed40cSTony Nguyen 
1634c90ed40cSTony Nguyen 	/* configure RSS for IPv6 with input set IPv6 src/dst */
1635c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1636c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_IPV6);
1637c90ed40cSTony Nguyen 	if (status)
16385f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
16395f87ec48STony Nguyen 			vsi_num, status);
1640c90ed40cSTony Nguyen 
1641c90ed40cSTony Nguyen 	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1642c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
1643c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1644c90ed40cSTony Nguyen 	if (status)
16455f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
16465f87ec48STony Nguyen 			vsi_num, status);
1647c90ed40cSTony Nguyen 
1648c90ed40cSTony Nguyen 	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1649c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
1650c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1651c90ed40cSTony Nguyen 	if (status)
16525f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
16535f87ec48STony Nguyen 			vsi_num, status);
1654c90ed40cSTony Nguyen 
1655c90ed40cSTony Nguyen 	/* configure RSS for sctp4 with input set IP src/dst */
1656c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1657c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1658c90ed40cSTony Nguyen 	if (status)
16595f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
16605f87ec48STony Nguyen 			vsi_num, status);
1661c90ed40cSTony Nguyen 
1662c90ed40cSTony Nguyen 	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1663c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
1664c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1665c90ed40cSTony Nguyen 	if (status)
16665f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
16675f87ec48STony Nguyen 			vsi_num, status);
1668c90ed40cSTony Nguyen 
1669c90ed40cSTony Nguyen 	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1670c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
1671c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1672c90ed40cSTony Nguyen 	if (status)
16735f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
16745f87ec48STony Nguyen 			vsi_num, status);
1675c90ed40cSTony Nguyen 
1676c90ed40cSTony Nguyen 	/* configure RSS for sctp6 with input set IPv6 src/dst */
1677c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1678c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1679c90ed40cSTony Nguyen 	if (status)
16805f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
16815f87ec48STony Nguyen 			vsi_num, status);
168286006f99SJesse Brandeburg 
168386006f99SJesse Brandeburg 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
168486006f99SJesse Brandeburg 				 ICE_FLOW_SEG_HDR_ESP);
168586006f99SJesse Brandeburg 	if (status)
168686006f99SJesse Brandeburg 		dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
168786006f99SJesse Brandeburg 			vsi_num, status);
1688c90ed40cSTony Nguyen }
1689c90ed40cSTony Nguyen 
1690c90ed40cSTony Nguyen /**
169145f5478cSJan Sokolowski  * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
169245f5478cSJan Sokolowski  * @vsi: VSI
169345f5478cSJan Sokolowski  */
169445f5478cSJan Sokolowski static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
169545f5478cSJan Sokolowski {
169645f5478cSJan Sokolowski 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
169745f5478cSJan Sokolowski 		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
169845f5478cSJan Sokolowski 		vsi->rx_buf_len = ICE_RXBUF_1664;
169945f5478cSJan Sokolowski #if (PAGE_SIZE < 8192)
170045f5478cSJan Sokolowski 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
170145f5478cSJan Sokolowski 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
170245f5478cSJan Sokolowski 		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
170345f5478cSJan Sokolowski 		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
170445f5478cSJan Sokolowski #endif
170545f5478cSJan Sokolowski 	} else {
170645f5478cSJan Sokolowski 		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
170745f5478cSJan Sokolowski 		vsi->rx_buf_len = ICE_RXBUF_3072;
170845f5478cSJan Sokolowski 	}
170945f5478cSJan Sokolowski }
171045f5478cSJan Sokolowski 
171145f5478cSJan Sokolowski /**
1712769c500dSAkeem G Abodunrin  * ice_pf_state_is_nominal - checks the PF for nominal state
1713769c500dSAkeem G Abodunrin  * @pf: pointer to PF to check
1714769c500dSAkeem G Abodunrin  *
1715769c500dSAkeem G Abodunrin  * Check the PF's state for a collection of bits that would indicate
1716769c500dSAkeem G Abodunrin  * the PF is in a state that would inhibit normal operation for
1717769c500dSAkeem G Abodunrin  * driver functionality.
1718769c500dSAkeem G Abodunrin  *
1719769c500dSAkeem G Abodunrin  * Returns true if PF is in a nominal state, false otherwise
1720769c500dSAkeem G Abodunrin  */
1721769c500dSAkeem G Abodunrin bool ice_pf_state_is_nominal(struct ice_pf *pf)
1722769c500dSAkeem G Abodunrin {
17237e408e07SAnirudh Venkataramanan 	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
1724769c500dSAkeem G Abodunrin 
1725769c500dSAkeem G Abodunrin 	if (!pf)
1726769c500dSAkeem G Abodunrin 		return false;
1727769c500dSAkeem G Abodunrin 
17287e408e07SAnirudh Venkataramanan 	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
17297e408e07SAnirudh Venkataramanan 	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
1730769c500dSAkeem G Abodunrin 		return false;
1731769c500dSAkeem G Abodunrin 
1732769c500dSAkeem G Abodunrin 	return true;
1733769c500dSAkeem G Abodunrin }
1734769c500dSAkeem G Abodunrin 
1735769c500dSAkeem G Abodunrin /**
173645d3d428SAnirudh Venkataramanan  * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
173745d3d428SAnirudh Venkataramanan  * @vsi: the VSI to be updated
173845d3d428SAnirudh Venkataramanan  */
173945d3d428SAnirudh Venkataramanan void ice_update_eth_stats(struct ice_vsi *vsi)
174045d3d428SAnirudh Venkataramanan {
174145d3d428SAnirudh Venkataramanan 	struct ice_eth_stats *prev_es, *cur_es;
174245d3d428SAnirudh Venkataramanan 	struct ice_hw *hw = &vsi->back->hw;
17432fd5e433SBenjamin Mikailenko 	struct ice_pf *pf = vsi->back;
174445d3d428SAnirudh Venkataramanan 	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */
174545d3d428SAnirudh Venkataramanan 
174645d3d428SAnirudh Venkataramanan 	prev_es = &vsi->eth_stats_prev;
174745d3d428SAnirudh Venkataramanan 	cur_es = &vsi->eth_stats;
174845d3d428SAnirudh Venkataramanan 
17492fd5e433SBenjamin Mikailenko 	if (ice_is_reset_in_progress(pf->state))
17502fd5e433SBenjamin Mikailenko 		vsi->stat_offsets_loaded = false;
17512fd5e433SBenjamin Mikailenko 
175236517fd3SJacob Keller 	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
175336517fd3SJacob Keller 			  &prev_es->rx_bytes, &cur_es->rx_bytes);
175445d3d428SAnirudh Venkataramanan 
175536517fd3SJacob Keller 	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
175636517fd3SJacob Keller 			  &prev_es->rx_unicast, &cur_es->rx_unicast);
175745d3d428SAnirudh Venkataramanan 
175836517fd3SJacob Keller 	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
175936517fd3SJacob Keller 			  &prev_es->rx_multicast, &cur_es->rx_multicast);
176045d3d428SAnirudh Venkataramanan 
176136517fd3SJacob Keller 	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
176236517fd3SJacob Keller 			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);
176345d3d428SAnirudh Venkataramanan 
176445d3d428SAnirudh Venkataramanan 	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
176545d3d428SAnirudh Venkataramanan 			  &prev_es->rx_discards, &cur_es->rx_discards);
176645d3d428SAnirudh Venkataramanan 
176736517fd3SJacob Keller 	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
176836517fd3SJacob Keller 			  &prev_es->tx_bytes, &cur_es->tx_bytes);
176945d3d428SAnirudh Venkataramanan 
177036517fd3SJacob Keller 	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
177136517fd3SJacob Keller 			  &prev_es->tx_unicast, &cur_es->tx_unicast);
177245d3d428SAnirudh Venkataramanan 
177336517fd3SJacob Keller 	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
177436517fd3SJacob Keller 			  &prev_es->tx_multicast, &cur_es->tx_multicast);
177545d3d428SAnirudh Venkataramanan 
177636517fd3SJacob Keller 	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
177736517fd3SJacob Keller 			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);
177845d3d428SAnirudh Venkataramanan 
177945d3d428SAnirudh Venkataramanan 	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
178045d3d428SAnirudh Venkataramanan 			  &prev_es->tx_errors, &cur_es->tx_errors);
178145d3d428SAnirudh Venkataramanan 
178245d3d428SAnirudh Venkataramanan 	vsi->stat_offsets_loaded = true;
178345d3d428SAnirudh Venkataramanan }
178445d3d428SAnirudh Venkataramanan 
178545d3d428SAnirudh Venkataramanan /**
1786401ce33bSBrett Creeley  * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1787401ce33bSBrett Creeley  * @hw: HW pointer
1788401ce33bSBrett Creeley  * @pf_q: index of the Rx queue in the PF's queue space
1789401ce33bSBrett Creeley  * @rxdid: flexible descriptor RXDID
1790401ce33bSBrett Creeley  * @prio: priority for the RXDID for this queue
179177a78115SJacob Keller  * @ena_ts: true to enable timestamp and false to disable timestamp
1792401ce33bSBrett Creeley  */
1793401ce33bSBrett Creeley void
179477a78115SJacob Keller ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
179577a78115SJacob Keller 			bool ena_ts)
1796401ce33bSBrett Creeley {
1797401ce33bSBrett Creeley 	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
1798401ce33bSBrett Creeley 
1799401ce33bSBrett Creeley 	/* clear any previous values */
1800401ce33bSBrett Creeley 	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
1801401ce33bSBrett Creeley 		    QRXFLXP_CNTXT_RXDID_PRIO_M |
1802401ce33bSBrett Creeley 		    QRXFLXP_CNTXT_TS_M);
1803401ce33bSBrett Creeley 
1804401ce33bSBrett Creeley 	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
1805401ce33bSBrett Creeley 		QRXFLXP_CNTXT_RXDID_IDX_M;
1806401ce33bSBrett Creeley 
1807401ce33bSBrett Creeley 	regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
1808401ce33bSBrett Creeley 		QRXFLXP_CNTXT_RXDID_PRIO_M;
1809401ce33bSBrett Creeley 
181077a78115SJacob Keller 	if (ena_ts)
181177a78115SJacob Keller 		/* Enable TimeSync on this queue */
181277a78115SJacob Keller 		regval |= QRXFLXP_CNTXT_TS_M;
181377a78115SJacob Keller 
1814401ce33bSBrett Creeley 	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
1815401ce33bSBrett Creeley }
1816401ce33bSBrett Creeley 
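/**
 * ice_vsi_cfg_single_rxq - configure a single Rx queue of a VSI
 * @vsi: the VSI that the queue belongs to
 * @q_idx: index of the Rx queue within the VSI
 *
 * Return: 0 on success, -EINVAL if q_idx is out of range, or the error
 * returned by ice_vsi_cfg_rxq().
 */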
18177ad15440SBrett Creeley int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
18187ad15440SBrett Creeley {
18197ad15440SBrett Creeley 	if (q_idx >= vsi->num_rxq)
18207ad15440SBrett Creeley 		return -EINVAL;
18217ad15440SBrett Creeley 
18227ad15440SBrett Creeley 	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
18237ad15440SBrett Creeley }
18247ad15440SBrett Creeley 
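/**
 * ice_vsi_cfg_single_txq - configure a single Tx queue of a VSI
 * @vsi: the VSI that the queue belongs to
 * @tx_rings: Tx ring array that the queue is taken from
 * @q_idx: index of the Tx queue within the VSI
 *
 * Return: 0 on success, -EINVAL for a bad index or ring array, -ENOMEM if the
 * queue group buffer cannot be allocated, or the error returned by
 * ice_vsi_cfg_txq().
 */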
1825e72bba21SMaciej Fijalkowski int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
18267ad15440SBrett Creeley {
18277ad15440SBrett Creeley 	struct ice_aqc_add_tx_qgrp *qg_buf;
18287ad15440SBrett Creeley 	int err;
18297ad15440SBrett Creeley 
18307ad15440SBrett Creeley 	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
18317ad15440SBrett Creeley 		return -EINVAL;
18327ad15440SBrett Creeley 
18337ad15440SBrett Creeley 	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
18347ad15440SBrett Creeley 	if (!qg_buf)
18357ad15440SBrett Creeley 		return -ENOMEM;
18367ad15440SBrett Creeley 
18377ad15440SBrett Creeley 	qg_buf->num_txqs = 1;
18387ad15440SBrett Creeley 
18397ad15440SBrett Creeley 	err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
18407ad15440SBrett Creeley 	kfree(qg_buf);
18417ad15440SBrett Creeley 	return err;
18427ad15440SBrett Creeley }
18437ad15440SBrett Creeley 
1844401ce33bSBrett Creeley /**
184572adf242SAnirudh Venkataramanan  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
184672adf242SAnirudh Venkataramanan  * @vsi: the VSI being configured
184772adf242SAnirudh Venkataramanan  *
184872adf242SAnirudh Venkataramanan  * Return 0 on success and a negative value on error
184972adf242SAnirudh Venkataramanan  * Configure the Rx VSI for operation.
185072adf242SAnirudh Venkataramanan  */
185172adf242SAnirudh Venkataramanan int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
185272adf242SAnirudh Venkataramanan {
185372adf242SAnirudh Venkataramanan 	u16 i;
185472adf242SAnirudh Venkataramanan 
18558ede0178SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_VF)
18568ede0178SAnirudh Venkataramanan 		goto setup_rings;
18578ede0178SAnirudh Venkataramanan 
1858efc2214bSMaciej Fijalkowski 	ice_vsi_cfg_frame_size(vsi);
18598ede0178SAnirudh Venkataramanan setup_rings:
186072adf242SAnirudh Venkataramanan 	/* set up individual rings */
186143c7f919SKrzysztof Kazimierczak 	ice_for_each_rxq(vsi, i) {
186243c7f919SKrzysztof Kazimierczak 		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
186372adf242SAnirudh Venkataramanan 
186443c7f919SKrzysztof Kazimierczak 		if (err)
186572adf242SAnirudh Venkataramanan 			return err;
186672adf242SAnirudh Venkataramanan 	}
18671553f4f7SBrett Creeley 
18681553f4f7SBrett Creeley 	return 0;
18691553f4f7SBrett Creeley }
187072adf242SAnirudh Venkataramanan 
187172adf242SAnirudh Venkataramanan /**
187272adf242SAnirudh Venkataramanan  * ice_vsi_cfg_txqs - Configure the VSI for Tx
187372adf242SAnirudh Venkataramanan  * @vsi: the VSI being configured
187403f7a986SAnirudh Venkataramanan  * @rings: Tx ring array to be configured
18752e84f6b3SMaciej Fijalkowski  * @count: number of Tx ring array elements
187672adf242SAnirudh Venkataramanan  *
187772adf242SAnirudh Venkataramanan  * Return 0 on success and a negative value on error
187872adf242SAnirudh Venkataramanan  * Configure the Tx VSI for operation.
187972adf242SAnirudh Venkataramanan  */
188003f7a986SAnirudh Venkataramanan static int
1881e72bba21SMaciej Fijalkowski ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
188272adf242SAnirudh Venkataramanan {
188372adf242SAnirudh Venkataramanan 	struct ice_aqc_add_tx_qgrp *qg_buf;
1884e75d1b2cSMaciej Fijalkowski 	u16 q_idx = 0;
1885d02f734cSMaciej Fijalkowski 	int err = 0;
188672adf242SAnirudh Venkataramanan 
188766486d89SBruce Allan 	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
188872adf242SAnirudh Venkataramanan 	if (!qg_buf)
188972adf242SAnirudh Venkataramanan 		return -ENOMEM;
189072adf242SAnirudh Venkataramanan 
189172adf242SAnirudh Venkataramanan 	qg_buf->num_txqs = 1;
189272adf242SAnirudh Venkataramanan 
18932e84f6b3SMaciej Fijalkowski 	for (q_idx = 0; q_idx < count; q_idx++) {
1894e75d1b2cSMaciej Fijalkowski 		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
1895d02f734cSMaciej Fijalkowski 		if (err)
189672adf242SAnirudh Venkataramanan 			goto err_cfg_txqs;
1897e75d1b2cSMaciej Fijalkowski 	}
1898c5a2a4a3SUsha Ketineni 
189972adf242SAnirudh Venkataramanan err_cfg_txqs:
1900e75d1b2cSMaciej Fijalkowski 	kfree(qg_buf);
190172adf242SAnirudh Venkataramanan 	return err;
190272adf242SAnirudh Venkataramanan }
190372adf242SAnirudh Venkataramanan 
190472adf242SAnirudh Venkataramanan /**
190503f7a986SAnirudh Venkataramanan  * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
190603f7a986SAnirudh Venkataramanan  * @vsi: the VSI being configured
190703f7a986SAnirudh Venkataramanan  *
190803f7a986SAnirudh Venkataramanan  * Return 0 on success and a negative value on error
190903f7a986SAnirudh Venkataramanan  * Configure the Tx VSI for operation.
191003f7a986SAnirudh Venkataramanan  */
191103f7a986SAnirudh Venkataramanan int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
191203f7a986SAnirudh Venkataramanan {
19132e84f6b3SMaciej Fijalkowski 	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
191403f7a986SAnirudh Venkataramanan }
191503f7a986SAnirudh Venkataramanan 
191603f7a986SAnirudh Venkataramanan /**
1917efc2214bSMaciej Fijalkowski  * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1918efc2214bSMaciej Fijalkowski  * @vsi: the VSI being configured
1919efc2214bSMaciej Fijalkowski  *
1920efc2214bSMaciej Fijalkowski  * Return 0 on success and a negative value on error
1921efc2214bSMaciej Fijalkowski  * Configure the Tx queues dedicated for XDP in given VSI for operation.
1922efc2214bSMaciej Fijalkowski  */
1923efc2214bSMaciej Fijalkowski int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1924efc2214bSMaciej Fijalkowski {
19252d4238f5SKrzysztof Kazimierczak 	int ret;
19262d4238f5SKrzysztof Kazimierczak 	int i;
19272d4238f5SKrzysztof Kazimierczak 
19282e84f6b3SMaciej Fijalkowski 	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
19292d4238f5SKrzysztof Kazimierczak 	if (ret)
19302d4238f5SKrzysztof Kazimierczak 		return ret;
19312d4238f5SKrzysztof Kazimierczak 
19329ead7e74SMaciej Fijalkowski 	ice_for_each_rxq(vsi, i)
19339ead7e74SMaciej Fijalkowski 		ice_tx_xsk_pool(vsi, i);
19342d4238f5SKrzysztof Kazimierczak 
1935c4a9c8e7SMichal Swiatkowski 	return 0;
1936efc2214bSMaciej Fijalkowski }
1937efc2214bSMaciej Fijalkowski 
1938efc2214bSMaciej Fijalkowski /**
19399e4ab4c2SBrett Creeley  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
19409e4ab4c2SBrett Creeley  * @intrl: interrupt rate limit in usecs
19419e4ab4c2SBrett Creeley  * @gran: interrupt rate limit granularity in usecs
19429e4ab4c2SBrett Creeley  *
19439e4ab4c2SBrett Creeley  * This function converts a decimal interrupt rate limit in usecs to the format
19449e4ab4c2SBrett Creeley  * expected by firmware.
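 *
 * For example, intrl = 50 usecs with a granularity of 4 usecs yields a
 * register value of 12 with GLINT_RATE_INTRL_ENA_M set.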
19459e4ab4c2SBrett Creeley  */
1946b8b47723SJesse Brandeburg static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
19479e4ab4c2SBrett Creeley {
19489e4ab4c2SBrett Creeley 	u32 val = intrl / gran;
19499e4ab4c2SBrett Creeley 
19509e4ab4c2SBrett Creeley 	if (val)
19519e4ab4c2SBrett Creeley 		return val | GLINT_RATE_INTRL_ENA_M;
19529e4ab4c2SBrett Creeley 	return 0;
19539e4ab4c2SBrett Creeley }
19549e4ab4c2SBrett Creeley 
19559e4ab4c2SBrett Creeley /**
1956b8b47723SJesse Brandeburg  * ice_write_intrl - write throttle rate limit to interrupt specific register
1957b8b47723SJesse Brandeburg  * @q_vector: pointer to interrupt specific structure
1958b8b47723SJesse Brandeburg  * @intrl: throttle rate limit in microseconds to write
1959b8b47723SJesse Brandeburg  */
1960b8b47723SJesse Brandeburg void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
1961b8b47723SJesse Brandeburg {
1962b8b47723SJesse Brandeburg 	struct ice_hw *hw = &q_vector->vsi->back->hw;
1963b8b47723SJesse Brandeburg 
1964b8b47723SJesse Brandeburg 	wr32(hw, GLINT_RATE(q_vector->reg_idx),
1965b8b47723SJesse Brandeburg 	     ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
1966b8b47723SJesse Brandeburg }
1967b8b47723SJesse Brandeburg 
ice_pull_qvec_from_rc(struct ice_ring_container * rc)1968e72bba21SMaciej Fijalkowski static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
1969e72bba21SMaciej Fijalkowski {
1970e72bba21SMaciej Fijalkowski 	switch (rc->type) {
1971e72bba21SMaciej Fijalkowski 	case ICE_RX_CONTAINER:
1972e72bba21SMaciej Fijalkowski 		if (rc->rx_ring)
1973e72bba21SMaciej Fijalkowski 			return rc->rx_ring->q_vector;
1974e72bba21SMaciej Fijalkowski 		break;
1975e72bba21SMaciej Fijalkowski 	case ICE_TX_CONTAINER:
1976e72bba21SMaciej Fijalkowski 		if (rc->tx_ring)
1977e72bba21SMaciej Fijalkowski 			return rc->tx_ring->q_vector;
1978370764e6SNathan Chancellor 		break;
1979e72bba21SMaciej Fijalkowski 	default:
1980e72bba21SMaciej Fijalkowski 		break;
1981e72bba21SMaciej Fijalkowski 	}
1982e72bba21SMaciej Fijalkowski 
1983e72bba21SMaciej Fijalkowski 	return NULL;
1984e72bba21SMaciej Fijalkowski }
1985e72bba21SMaciej Fijalkowski 
1986b8b47723SJesse Brandeburg /**
1987b8b47723SJesse Brandeburg  * __ice_write_itr - write throttle rate to register
1988b8b47723SJesse Brandeburg  * @q_vector: pointer to interrupt data structure
1989b8b47723SJesse Brandeburg  * @rc: pointer to ring container
1990b8b47723SJesse Brandeburg  * @itr: throttle rate in microseconds to write
1991b8b47723SJesse Brandeburg  */
__ice_write_itr(struct ice_q_vector * q_vector,struct ice_ring_container * rc,u16 itr)1992b8b47723SJesse Brandeburg static void __ice_write_itr(struct ice_q_vector *q_vector,
1993b8b47723SJesse Brandeburg 			    struct ice_ring_container *rc, u16 itr)
1994b8b47723SJesse Brandeburg {
1995b8b47723SJesse Brandeburg 	struct ice_hw *hw = &q_vector->vsi->back->hw;
1996b8b47723SJesse Brandeburg 
1997b8b47723SJesse Brandeburg 	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1998b8b47723SJesse Brandeburg 	     ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
1999b8b47723SJesse Brandeburg }
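
/* Example (illustrative, not compiled): assuming the 2 usec ITR granularity
 * implied by ITR_REG_ALIGN and ICE_ITR_GRAN_S, a requested throttle of
 * 50 usecs is aligned to 50 and written to GLINT_ITR as 50 >> 1 = 25; the
 * hardware counts in 2 usec units, so the vector waits at least 50 usecs
 * between interrupts for this ring container.
 */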
2000b8b47723SJesse Brandeburg 
2001b8b47723SJesse Brandeburg /**
2002b8b47723SJesse Brandeburg  * ice_write_itr - write throttle rate to queue specific register
2003b8b47723SJesse Brandeburg  * @rc: pointer to ring container
2004b8b47723SJesse Brandeburg  * @itr: throttle rate in microseconds to write
2005b8b47723SJesse Brandeburg  */
ice_write_itr(struct ice_ring_container * rc,u16 itr)2006b8b47723SJesse Brandeburg void ice_write_itr(struct ice_ring_container *rc, u16 itr)
2007b8b47723SJesse Brandeburg {
2008b8b47723SJesse Brandeburg 	struct ice_q_vector *q_vector;
2009b8b47723SJesse Brandeburg 
2010e72bba21SMaciej Fijalkowski 	q_vector = ice_pull_qvec_from_rc(rc);
2011e72bba21SMaciej Fijalkowski 	if (!q_vector)
2012b8b47723SJesse Brandeburg 		return;
2013b8b47723SJesse Brandeburg 
2014b8b47723SJesse Brandeburg 	__ice_write_itr(q_vector, rc, itr);
2015b8b47723SJesse Brandeburg }
2016b8b47723SJesse Brandeburg 
2017b8b47723SJesse Brandeburg /**
2018d8eb7ad5SJesse Brandeburg  * ice_set_q_vector_intrl - set up interrupt rate limiting
2019d8eb7ad5SJesse Brandeburg  * @q_vector: the vector to be configured
2020d8eb7ad5SJesse Brandeburg  *
2021d8eb7ad5SJesse Brandeburg  * Interrupt rate limiting is local to the vector, not per-queue, so we must
2022d8eb7ad5SJesse Brandeburg  * detect if either ring container has dynamic moderation enabled to decide
2023d8eb7ad5SJesse Brandeburg  * what to set the interrupt rate limit to via INTRL settings. In the case that
2024d8eb7ad5SJesse Brandeburg  * dynamic moderation is disabled on both, write the value with the cached
2025d8eb7ad5SJesse Brandeburg  * setting to make sure INTRL register matches the user visible value.
2026d8eb7ad5SJesse Brandeburg  */
ice_set_q_vector_intrl(struct ice_q_vector * q_vector)2027d8eb7ad5SJesse Brandeburg void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
2028d8eb7ad5SJesse Brandeburg {
2029d8eb7ad5SJesse Brandeburg 	if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
2030d8eb7ad5SJesse Brandeburg 		/* If dynamic moderation is enabled on either container, cap
2031d8eb7ad5SJesse Brandeburg 		 * each vector at 4 usecs, i.e. 250,000 ints/sec. This keeps
2032d8eb7ad5SJesse Brandeburg 		 * latency low while still staying below 500,000 interrupts
2033d8eb7ad5SJesse Brandeburg 		 * per second, which reduces CPU load a bit at the lowest
2034d8eb7ad5SJesse Brandeburg 		 * latency setting. The 4 here is a value in microseconds.
2035d8eb7ad5SJesse Brandeburg 		 */
2036d8eb7ad5SJesse Brandeburg 		ice_write_intrl(q_vector, 4);
2037d8eb7ad5SJesse Brandeburg 	} else {
2038d8eb7ad5SJesse Brandeburg 		ice_write_intrl(q_vector, q_vector->intrl);
2039d8eb7ad5SJesse Brandeburg 	}
2040d8eb7ad5SJesse Brandeburg }
2041d8eb7ad5SJesse Brandeburg 
2042d8eb7ad5SJesse Brandeburg /**
204372adf242SAnirudh Venkataramanan  * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
204472adf242SAnirudh Venkataramanan  * @vsi: the VSI being configured
2045047e52c0SAnirudh Venkataramanan  *
2046047e52c0SAnirudh Venkataramanan  * This configures MSIX mode interrupts for the PF VSI, and should not be used
2047047e52c0SAnirudh Venkataramanan  * for the VF VSI.
204872adf242SAnirudh Venkataramanan  */
ice_vsi_cfg_msix(struct ice_vsi * vsi)204972adf242SAnirudh Venkataramanan void ice_vsi_cfg_msix(struct ice_vsi *vsi)
205072adf242SAnirudh Venkataramanan {
205172adf242SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
205272adf242SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
205388865fc4SKarol Kolacinski 	u16 txq = 0, rxq = 0;
2054d2b464a7SBrett Creeley 	int i, q;
205572adf242SAnirudh Venkataramanan 
20562faf63b6SMaciej Fijalkowski 	ice_for_each_q_vector(vsi, i) {
205772adf242SAnirudh Venkataramanan 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2058b07833a0SBrett Creeley 		u16 reg_idx = q_vector->reg_idx;
205972adf242SAnirudh Venkataramanan 
2060b07833a0SBrett Creeley 		ice_cfg_itr(hw, q_vector);
20619e4ab4c2SBrett Creeley 
206272adf242SAnirudh Venkataramanan 		/* Both the Transmit Queue Interrupt Cause Control register
206372adf242SAnirudh Venkataramanan 		 * and the Receive Queue Interrupt Cause Control register
206472adf242SAnirudh Venkataramanan 		 * expect the MSIX_INDX field to be the vector index
206572adf242SAnirudh Venkataramanan 		 * within the function space and not the absolute
206672adf242SAnirudh Venkataramanan 		 * vector index across PF or across device.
206772adf242SAnirudh Venkataramanan 		 * For SR-IOV VF VSIs the queue vector index always starts
206872adf242SAnirudh Venkataramanan 		 * at 1 since the first vector index (0) is used for OICR
206972adf242SAnirudh Venkataramanan 		 * in VF space. Since VMDq and other PF VSIs are within
207072adf242SAnirudh Venkataramanan 		 * the PF function space, use the vector index that is
207172adf242SAnirudh Venkataramanan 		 * tracked for this PF.
207272adf242SAnirudh Venkataramanan 		 */
207372adf242SAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_tx; q++) {
2074047e52c0SAnirudh Venkataramanan 			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
2075047e52c0SAnirudh Venkataramanan 					      q_vector->tx.itr_idx);
207672adf242SAnirudh Venkataramanan 			txq++;
207772adf242SAnirudh Venkataramanan 		}
207872adf242SAnirudh Venkataramanan 
207972adf242SAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_rx; q++) {
2080047e52c0SAnirudh Venkataramanan 			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
2081047e52c0SAnirudh Venkataramanan 					      q_vector->rx.itr_idx);
208272adf242SAnirudh Venkataramanan 			rxq++;
208372adf242SAnirudh Venkataramanan 		}
208472adf242SAnirudh Venkataramanan 	}
208572adf242SAnirudh Venkataramanan }
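
/* Example (illustrative, not compiled): for a PF VSI with two q_vectors that
 * each own two Tx and two Rx rings, the loop above programs
 *
 *	q_vector[0] (reg_idx N)   -> Tx queues 0-1, Rx queues 0-1
 *	q_vector[1] (reg_idx N+1, assuming contiguous vectors)
 *	                          -> Tx queues 2-3, Rx queues 2-3
 *
 * where txq/rxq are VSI-relative counters and reg_idx is the PF-relative
 * vector index described in the comment above.
 */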
208672adf242SAnirudh Venkataramanan 
208772adf242SAnirudh Venkataramanan /**
208813a6233bSBrett Creeley  * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
208913a6233bSBrett Creeley  * @vsi: the VSI whose rings are to be enabled
209072adf242SAnirudh Venkataramanan  *
209172adf242SAnirudh Venkataramanan  * Returns 0 on success and a negative value on error
209272adf242SAnirudh Venkataramanan  */
ice_vsi_start_all_rx_rings(struct ice_vsi * vsi)209313a6233bSBrett Creeley int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
209472adf242SAnirudh Venkataramanan {
209513a6233bSBrett Creeley 	return ice_vsi_ctrl_all_rx_rings(vsi, true);
209672adf242SAnirudh Venkataramanan }
209772adf242SAnirudh Venkataramanan 
209872adf242SAnirudh Venkataramanan /**
209913a6233bSBrett Creeley  * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
210013a6233bSBrett Creeley  * @vsi: the VSI whose rings are to be disabled
210172adf242SAnirudh Venkataramanan  *
210272adf242SAnirudh Venkataramanan  * Returns 0 on success and a negative value on error
210372adf242SAnirudh Venkataramanan  */
ice_vsi_stop_all_rx_rings(struct ice_vsi * vsi)210413a6233bSBrett Creeley int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
210572adf242SAnirudh Venkataramanan {
210613a6233bSBrett Creeley 	return ice_vsi_ctrl_all_rx_rings(vsi, false);
210772adf242SAnirudh Venkataramanan }
210872adf242SAnirudh Venkataramanan 
210972adf242SAnirudh Venkataramanan /**
2110d02f734cSMaciej Fijalkowski  * ice_vsi_stop_tx_rings - Disable Tx rings
2111d02f734cSMaciej Fijalkowski  * @vsi: the VSI being configured
2112d02f734cSMaciej Fijalkowski  * @rst_src: reset source
2113d02f734cSMaciej Fijalkowski  * @rel_vmvf_num: Relative ID of VF/VM
2114d02f734cSMaciej Fijalkowski  * @rings: Tx ring array to be stopped
21152e84f6b3SMaciej Fijalkowski  * @count: number of Tx ring array elements
2116d02f734cSMaciej Fijalkowski  */
2117d02f734cSMaciej Fijalkowski static int
ice_vsi_stop_tx_rings(struct ice_vsi * vsi,enum ice_disq_rst_src rst_src,u16 rel_vmvf_num,struct ice_tx_ring ** rings,u16 count)2118d02f734cSMaciej Fijalkowski ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2119e72bba21SMaciej Fijalkowski 		      u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
2120d02f734cSMaciej Fijalkowski {
2121e75d1b2cSMaciej Fijalkowski 	u16 q_idx;
2122d02f734cSMaciej Fijalkowski 
2123d02f734cSMaciej Fijalkowski 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2124d02f734cSMaciej Fijalkowski 		return -EINVAL;
2125d02f734cSMaciej Fijalkowski 
21262e84f6b3SMaciej Fijalkowski 	for (q_idx = 0; q_idx < count; q_idx++) {
2127d02f734cSMaciej Fijalkowski 		struct ice_txq_meta txq_meta = { };
2128e75d1b2cSMaciej Fijalkowski 		int status;
2129d02f734cSMaciej Fijalkowski 
2130d02f734cSMaciej Fijalkowski 		if (!rings || !rings[q_idx])
2131d02f734cSMaciej Fijalkowski 			return -EINVAL;
2132d02f734cSMaciej Fijalkowski 
2133d02f734cSMaciej Fijalkowski 		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
2134e75d1b2cSMaciej Fijalkowski 		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
2135d02f734cSMaciej Fijalkowski 					      rings[q_idx], &txq_meta);
2136d02f734cSMaciej Fijalkowski 
2137d02f734cSMaciej Fijalkowski 		if (status)
2138d02f734cSMaciej Fijalkowski 			return status;
2139bb87ee0eSAnirudh Venkataramanan 	}
214072adf242SAnirudh Venkataramanan 
2141d02f734cSMaciej Fijalkowski 	return 0;
214272adf242SAnirudh Venkataramanan }
21435153a18eSAnirudh Venkataramanan 
21445153a18eSAnirudh Venkataramanan /**
214503f7a986SAnirudh Venkataramanan  * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
214603f7a986SAnirudh Venkataramanan  * @vsi: the VSI being configured
214703f7a986SAnirudh Venkataramanan  * @rst_src: reset source
2148f9867df6SAnirudh Venkataramanan  * @rel_vmvf_num: Relative ID of VF/VM
214903f7a986SAnirudh Venkataramanan  */
2150c8b7abddSBruce Allan int
ice_vsi_stop_lan_tx_rings(struct ice_vsi * vsi,enum ice_disq_rst_src rst_src,u16 rel_vmvf_num)2151c8b7abddSBruce Allan ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2152c8b7abddSBruce Allan 			  u16 rel_vmvf_num)
215303f7a986SAnirudh Venkataramanan {
21542e84f6b3SMaciej Fijalkowski 	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
215503f7a986SAnirudh Venkataramanan }
215603f7a986SAnirudh Venkataramanan 
215703f7a986SAnirudh Venkataramanan /**
2158efc2214bSMaciej Fijalkowski  * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2159efc2214bSMaciej Fijalkowski  * @vsi: the VSI being configured
2160efc2214bSMaciej Fijalkowski  */
ice_vsi_stop_xdp_tx_rings(struct ice_vsi * vsi)2161efc2214bSMaciej Fijalkowski int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2162efc2214bSMaciej Fijalkowski {
21632e84f6b3SMaciej Fijalkowski 	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2164efc2214bSMaciej Fijalkowski }
2165efc2214bSMaciej Fijalkowski 
2166efc2214bSMaciej Fijalkowski /**
2167f23df522SNorbert Zulinski  * ice_vsi_is_rx_queue_active - check if any Rx queue of a VSI is active
2168f23df522SNorbert Zulinski  * @vsi: the VSI being configured
2169f23df522SNorbert Zulinski  *
2170f23df522SNorbert Zulinski  * Return true if at least one queue is active.
2171f23df522SNorbert Zulinski  */
ice_vsi_is_rx_queue_active(struct ice_vsi * vsi)2172f23df522SNorbert Zulinski bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
2173f23df522SNorbert Zulinski {
2174f23df522SNorbert Zulinski 	struct ice_pf *pf = vsi->back;
2175f23df522SNorbert Zulinski 	struct ice_hw *hw = &pf->hw;
2176f23df522SNorbert Zulinski 	int i;
2177f23df522SNorbert Zulinski 
2178f23df522SNorbert Zulinski 	ice_for_each_rxq(vsi, i) {
2179f23df522SNorbert Zulinski 		u32 rx_reg;
2180f23df522SNorbert Zulinski 		int pf_q;
2181f23df522SNorbert Zulinski 
2182f23df522SNorbert Zulinski 		pf_q = vsi->rxq_map[i];
2183f23df522SNorbert Zulinski 		rx_reg = rd32(hw, QRX_CTRL(pf_q));
2184f23df522SNorbert Zulinski 		if (rx_reg & QRX_CTRL_QENA_STAT_M)
2185f23df522SNorbert Zulinski 			return true;
2186f23df522SNorbert Zulinski 	}
2187f23df522SNorbert Zulinski 
2188f23df522SNorbert Zulinski 	return false;
2189f23df522SNorbert Zulinski }
2190f23df522SNorbert Zulinski 
ice_vsi_set_tc_cfg(struct ice_vsi * vsi)21917b9ffc76SAnirudh Venkataramanan static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
21927b9ffc76SAnirudh Venkataramanan {
21930754d65bSKiran Patil 	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
21940754d65bSKiran Patil 		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
21950754d65bSKiran Patil 		vsi->tc_cfg.numtc = 1;
21960754d65bSKiran Patil 		return;
21970754d65bSKiran Patil 	}
21987b9ffc76SAnirudh Venkataramanan 
21990754d65bSKiran Patil 	/* set VSI TC information based on DCB config */
22000754d65bSKiran Patil 	ice_vsi_set_dcb_tc_cfg(vsi);
22017b9ffc76SAnirudh Venkataramanan }
22027b9ffc76SAnirudh Venkataramanan 
22035153a18eSAnirudh Venkataramanan /**
22042e0e6228SDave Ertman  * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
22052e0e6228SDave Ertman  * @vsi: the VSI being configured
22062e0e6228SDave Ertman  * @tx: bool to determine Tx or Rx rule
22072e0e6228SDave Ertman  * @create: bool to determine create or remove Rule
22082e0e6228SDave Ertman  */
ice_cfg_sw_lldp(struct ice_vsi * vsi,bool tx,bool create)22092e0e6228SDave Ertman void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
22102e0e6228SDave Ertman {
22115e24d598STony Nguyen 	int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
22121b8f15b6SMichal Swiatkowski 			enum ice_sw_fwd_act_type act);
22132e0e6228SDave Ertman 	struct ice_pf *pf = vsi->back;
22144015d11eSBrett Creeley 	struct device *dev;
22155518ac2aSTony Nguyen 	int status;
22162e0e6228SDave Ertman 
22174015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
22181b8f15b6SMichal Swiatkowski 	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
22192e0e6228SDave Ertman 
222034295a36SDave Ertman 	if (tx) {
22211b8f15b6SMichal Swiatkowski 		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
22221b8f15b6SMichal Swiatkowski 				  ICE_DROP_PACKET);
222334295a36SDave Ertman 	} else {
222434295a36SDave Ertman 		if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
222534295a36SDave Ertman 			status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
222634295a36SDave Ertman 							  create);
222734295a36SDave Ertman 		} else {
222834295a36SDave Ertman 			status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
222934295a36SDave Ertman 					  ICE_FWD_TO_VSI);
223034295a36SDave Ertman 		}
223134295a36SDave Ertman 	}
22322e0e6228SDave Ertman 
22332e0e6228SDave Ertman 	if (status)
22345f87ec48STony Nguyen 		dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
22352e0e6228SDave Ertman 			create ? "adding" : "removing", tx ? "TX" : "RX",
22365f87ec48STony Nguyen 			vsi->vsi_num, status);
22372e0e6228SDave Ertman }
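
/* Example (illustrative): in this file the rules are installed and removed
 * with
 *
 *	ice_cfg_sw_lldp(vsi, true, true);	ice_vsi_setup(), PF VSI
 *	ice_cfg_sw_lldp(vsi, false, false);	ice_vsi_decfg()
 *
 * i.e. a Tx drop rule for ETH_P_LLDP is added when the PF VSI is set up, and
 * the Rx rule is removed again at teardown when the FW LLDP agent is not
 * running.
 */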
22382e0e6228SDave Ertman 
2239d95276ceSAkeem G Abodunrin /**
2240b126bd6bSKiran Patil  * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2241b126bd6bSKiran Patil  * @vsi: pointer to the VSI
2242b126bd6bSKiran Patil  *
2243b126bd6bSKiran Patil  * This function will allocate a new scheduler aggregator node if needed and
2244b126bd6bSKiran Patil  * will move the specified VSI into it.
2245b126bd6bSKiran Patil  */
ice_set_agg_vsi(struct ice_vsi * vsi)2246b126bd6bSKiran Patil static void ice_set_agg_vsi(struct ice_vsi *vsi)
2247b126bd6bSKiran Patil {
2248b126bd6bSKiran Patil 	struct device *dev = ice_pf_to_dev(vsi->back);
2249b126bd6bSKiran Patil 	struct ice_agg_node *agg_node_iter = NULL;
2250b126bd6bSKiran Patil 	u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2251b126bd6bSKiran Patil 	struct ice_agg_node *agg_node = NULL;
2252b126bd6bSKiran Patil 	int node_offset, max_agg_nodes = 0;
2253b126bd6bSKiran Patil 	struct ice_port_info *port_info;
2254b126bd6bSKiran Patil 	struct ice_pf *pf = vsi->back;
2255b126bd6bSKiran Patil 	u32 agg_node_id_start = 0;
22565e24d598STony Nguyen 	int status;
2257b126bd6bSKiran Patil 
2258b126bd6bSKiran Patil 	/* create (as needed) scheduler aggregator node and move VSI into
2259b126bd6bSKiran Patil 	 * corresponding aggregator node
2260b126bd6bSKiran Patil 	 * - the PF aggregator node contains VSIs of type _PF and _CTRL
2261b126bd6bSKiran Patil 	 * - VF aggregator nodes will contain VF VSIs
2262b126bd6bSKiran Patil 	 */
2263b126bd6bSKiran Patil 	port_info = pf->hw.port_info;
2264b126bd6bSKiran Patil 	if (!port_info)
2265b126bd6bSKiran Patil 		return;
2266b126bd6bSKiran Patil 
2267b126bd6bSKiran Patil 	switch (vsi->type) {
2268b126bd6bSKiran Patil 	case ICE_VSI_CTRL:
22690754d65bSKiran Patil 	case ICE_VSI_CHNL:
2270b126bd6bSKiran Patil 	case ICE_VSI_LB:
2271b126bd6bSKiran Patil 	case ICE_VSI_PF:
2272f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
2273b126bd6bSKiran Patil 		max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2274b126bd6bSKiran Patil 		agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2275b126bd6bSKiran Patil 		agg_node_iter = &pf->pf_agg_node[0];
2276b126bd6bSKiran Patil 		break;
2277b126bd6bSKiran Patil 	case ICE_VSI_VF:
2278b126bd6bSKiran Patil 		/* A user can create 'n' VFs on a given PF, but an aggregator
2279b126bd6bSKiran Patil 		 * node can hold at most 64 children. The following code handles
2280b126bd6bSKiran Patil 		 * the aggregator node(s) for VF VSIs: it either selects an
2281b126bd6bSKiran Patil 		 * agg_node that was already created (provided num_vsis < 64),
2282b126bd6bSKiran Patil 		 * or it selects the next available node, which will be created
2283b126bd6bSKiran Patil 		 */
2284b126bd6bSKiran Patil 		max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2285b126bd6bSKiran Patil 		agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2286b126bd6bSKiran Patil 		agg_node_iter = &pf->vf_agg_node[0];
2287b126bd6bSKiran Patil 		break;
2288b126bd6bSKiran Patil 	default:
2289b126bd6bSKiran Patil 		/* other VSI type, handle later if needed */
2290b126bd6bSKiran Patil 		dev_dbg(dev, "unexpected VSI type %s\n",
2291b126bd6bSKiran Patil 			ice_vsi_type_str(vsi->type));
2292b126bd6bSKiran Patil 		return;
2293b126bd6bSKiran Patil 	}
2294b126bd6bSKiran Patil 
2295b126bd6bSKiran Patil 	/* find the appropriate aggregator node */
2296b126bd6bSKiran Patil 	for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2297b126bd6bSKiran Patil 		/* see if we can find space in previously created
2298b126bd6bSKiran Patil 		 * node if num_vsis < 64, otherwise skip
2299b126bd6bSKiran Patil 		 */
2300b126bd6bSKiran Patil 		if (agg_node_iter->num_vsis &&
2301b126bd6bSKiran Patil 		    agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2302b126bd6bSKiran Patil 			agg_node_iter++;
2303b126bd6bSKiran Patil 			continue;
2304b126bd6bSKiran Patil 		}
2305b126bd6bSKiran Patil 
2306b126bd6bSKiran Patil 		if (agg_node_iter->valid &&
2307b126bd6bSKiran Patil 		    agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2308b126bd6bSKiran Patil 			agg_id = agg_node_iter->agg_id;
2309b126bd6bSKiran Patil 			agg_node = agg_node_iter;
2310b126bd6bSKiran Patil 			break;
2311b126bd6bSKiran Patil 		}
2312b126bd6bSKiran Patil 
2313b126bd6bSKiran Patil 		/* find unclaimed agg_id */
2314b126bd6bSKiran Patil 		if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2315b126bd6bSKiran Patil 			agg_id = node_offset + agg_node_id_start;
2316b126bd6bSKiran Patil 			agg_node = agg_node_iter;
2317b126bd6bSKiran Patil 			break;
2318b126bd6bSKiran Patil 		}
2319b126bd6bSKiran Patil 		/* move to next agg_node */
2320b126bd6bSKiran Patil 		agg_node_iter++;
2321b126bd6bSKiran Patil 	}
2322b126bd6bSKiran Patil 
2323b126bd6bSKiran Patil 	if (!agg_node)
2324b126bd6bSKiran Patil 		return;
2325b126bd6bSKiran Patil 
2326b126bd6bSKiran Patil 	/* if selected aggregator node was not created, create it */
2327b126bd6bSKiran Patil 	if (!agg_node->valid) {
2328b126bd6bSKiran Patil 		status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2329b126bd6bSKiran Patil 				     (u8)vsi->tc_cfg.ena_tc);
2330b126bd6bSKiran Patil 		if (status) {
2331b126bd6bSKiran Patil 			dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2332b126bd6bSKiran Patil 				agg_id);
2333b126bd6bSKiran Patil 			return;
2334b126bd6bSKiran Patil 		}
2335138f9f50SJulia Lawall 		/* aggregator node is created, store the needed info */
2336b126bd6bSKiran Patil 		agg_node->valid = true;
2337b126bd6bSKiran Patil 		agg_node->agg_id = agg_id;
2338b126bd6bSKiran Patil 	}
2339b126bd6bSKiran Patil 
2340b126bd6bSKiran Patil 	/* move VSI to corresponding aggregator node */
2341b126bd6bSKiran Patil 	status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2342b126bd6bSKiran Patil 				     (u8)vsi->tc_cfg.ena_tc);
2343b126bd6bSKiran Patil 	if (status) {
2344b126bd6bSKiran Patil 		dev_err(dev, "unable to move VSI idx %u into aggregator %u node\n",
2345b126bd6bSKiran Patil 			vsi->idx, agg_id);
2346b126bd6bSKiran Patil 		return;
2347b126bd6bSKiran Patil 	}
2348b126bd6bSKiran Patil 
2349b126bd6bSKiran Patil 	/* keep active children count for aggregator node */
2350b126bd6bSKiran Patil 	agg_node->num_vsis++;
2351b126bd6bSKiran Patil 
2352b126bd6bSKiran Patil 	/* cache the 'agg_id' in the VSI so that, after a reset, the VSI will
2353b126bd6bSKiran Patil 	 * be moved back into its aggregator node
2354b126bd6bSKiran Patil 	 */
2355b126bd6bSKiran Patil 	vsi->agg_node = agg_node;
2356b126bd6bSKiran Patil 	dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
2357b126bd6bSKiran Patil 		vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2358b126bd6bSKiran Patil 		vsi->agg_node->num_vsis);
2359b126bd6bSKiran Patil }
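
/* Example (illustrative, not compiled): assuming all VF aggregator nodes
 * start out unclaimed, the first VF VSI finds pf->vf_agg_node[0] with an
 * invalid agg_id, claims agg_id = ICE_VF_AGG_NODE_ID_START and creates the
 * node; VF VSIs 2 through 64 reuse that node, and the 65th VF VSI sees
 * num_vsis == 64, skips to vf_agg_node[1] and claims
 * agg_id = ICE_VF_AGG_NODE_ID_START + 1.
 */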
2360b126bd6bSKiran Patil 
ice_vsi_cfg_tc_lan(struct ice_pf * pf,struct ice_vsi * vsi)23616624e780SMichal Swiatkowski static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
236237bb8390SAnirudh Venkataramanan {
236337bb8390SAnirudh Venkataramanan 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
23644015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
236537bb8390SAnirudh Venkataramanan 	int ret, i;
236637bb8390SAnirudh Venkataramanan 
23676624e780SMichal Swiatkowski 	/* configure VSI nodes based on number of queues and TCs */
23686624e780SMichal Swiatkowski 	ice_for_each_traffic_class(i) {
23696624e780SMichal Swiatkowski 		if (!(vsi->tc_cfg.ena_tc & BIT(i)))
23706624e780SMichal Swiatkowski 			continue;
23715743020dSAkeem G Abodunrin 
23726624e780SMichal Swiatkowski 		if (vsi->type == ICE_VSI_CHNL) {
23736624e780SMichal Swiatkowski 			if (!vsi->alloc_txq && vsi->num_txq)
23746624e780SMichal Swiatkowski 				max_txqs[i] = vsi->num_txq;
23756624e780SMichal Swiatkowski 			else
23766624e780SMichal Swiatkowski 				max_txqs[i] = pf->num_lan_tx;
23776624e780SMichal Swiatkowski 		} else {
23786624e780SMichal Swiatkowski 			max_txqs[i] = vsi->alloc_txq;
23796624e780SMichal Swiatkowski 		}
238079733dfcSLarysa Zaremba 
238179733dfcSLarysa Zaremba 		if (vsi->type == ICE_VSI_PF)
238279733dfcSLarysa Zaremba 			max_txqs[i] += vsi->num_xdp_txq;
238337bb8390SAnirudh Venkataramanan 	}
238437bb8390SAnirudh Venkataramanan 
23856624e780SMichal Swiatkowski 	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
23866624e780SMichal Swiatkowski 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
23876624e780SMichal Swiatkowski 			      max_txqs);
23886624e780SMichal Swiatkowski 	if (ret) {
23896624e780SMichal Swiatkowski 		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
23906624e780SMichal Swiatkowski 			vsi->vsi_num, ret);
23916624e780SMichal Swiatkowski 		return ret;
23926624e780SMichal Swiatkowski 	}
23936624e780SMichal Swiatkowski 
23946624e780SMichal Swiatkowski 	return 0;
23956624e780SMichal Swiatkowski }
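
/* Example (illustrative, not compiled): for a PF VSI with alloc_txq = 16 and
 * num_xdp_txq = 16, every TC set in vsi->tc_cfg.ena_tc is reported to the
 * scheduler with max_txqs[i] = 16 + 16 = 32, while TCs that are not enabled
 * stay at 0.
 */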
23966624e780SMichal Swiatkowski 
23976624e780SMichal Swiatkowski /**
23986624e780SMichal Swiatkowski  * ice_vsi_cfg_def - configure default VSI based on the type
23996624e780SMichal Swiatkowski  * @vsi: pointer to VSI
24005e509ab2SJacob Keller  * @params: the parameters to configure this VSI with
24016624e780SMichal Swiatkowski  */
24026624e780SMichal Swiatkowski static int
ice_vsi_cfg_def(struct ice_vsi * vsi,struct ice_vsi_cfg_params * params)24035e509ab2SJacob Keller ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
24046624e780SMichal Swiatkowski {
24056624e780SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(vsi->back);
24066624e780SMichal Swiatkowski 	struct ice_pf *pf = vsi->back;
24076624e780SMichal Swiatkowski 	int ret;
24086624e780SMichal Swiatkowski 
240937bb8390SAnirudh Venkataramanan 	vsi->vsw = pf->first_sw;
2410d95276ceSAkeem G Abodunrin 
24115e509ab2SJacob Keller 	ret = ice_vsi_alloc_def(vsi, params->ch);
24126624e780SMichal Swiatkowski 	if (ret)
24136624e780SMichal Swiatkowski 		return ret;
24146624e780SMichal Swiatkowski 
24156624e780SMichal Swiatkowski 	/* allocate memory for Tx/Rx ring stat pointers */
2416c4a9c8e7SMichal Swiatkowski 	ret = ice_vsi_alloc_stat_arrays(vsi);
2417c4a9c8e7SMichal Swiatkowski 	if (ret)
24186624e780SMichal Swiatkowski 		goto unroll_vsi_alloc;
24196624e780SMichal Swiatkowski 
2420148beb61SHenry Tieman 	ice_alloc_fd_res(vsi);
2421148beb61SHenry Tieman 
2422c4a9c8e7SMichal Swiatkowski 	ret = ice_vsi_get_qs(vsi);
2423c4a9c8e7SMichal Swiatkowski 	if (ret) {
242437bb8390SAnirudh Venkataramanan 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
242537bb8390SAnirudh Venkataramanan 			vsi->idx);
24266624e780SMichal Swiatkowski 		goto unroll_vsi_alloc_stat;
242737bb8390SAnirudh Venkataramanan 	}
242837bb8390SAnirudh Venkataramanan 
242937bb8390SAnirudh Venkataramanan 	/* set RSS capabilities */
243037bb8390SAnirudh Venkataramanan 	ice_vsi_set_rss_params(vsi);
243137bb8390SAnirudh Venkataramanan 
2432f9867df6SAnirudh Venkataramanan 	/* set TC configuration */
2433c5a2a4a3SUsha Ketineni 	ice_vsi_set_tc_cfg(vsi);
2434c5a2a4a3SUsha Ketineni 
243537bb8390SAnirudh Venkataramanan 	/* create the VSI */
24365e509ab2SJacob Keller 	ret = ice_vsi_init(vsi, params->flags);
243737bb8390SAnirudh Venkataramanan 	if (ret)
243837bb8390SAnirudh Venkataramanan 		goto unroll_get_qs;
243937bb8390SAnirudh Venkataramanan 
2440bc42afa9SBrett Creeley 	ice_vsi_init_vlan_ops(vsi);
2441bc42afa9SBrett Creeley 
244237bb8390SAnirudh Venkataramanan 	switch (vsi->type) {
2443148beb61SHenry Tieman 	case ICE_VSI_CTRL:
2444f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
244537bb8390SAnirudh Venkataramanan 	case ICE_VSI_PF:
244637bb8390SAnirudh Venkataramanan 		ret = ice_vsi_alloc_q_vectors(vsi);
244737bb8390SAnirudh Venkataramanan 		if (ret)
244837bb8390SAnirudh Venkataramanan 			goto unroll_vsi_init;
244937bb8390SAnirudh Venkataramanan 
245037bb8390SAnirudh Venkataramanan 		ret = ice_vsi_alloc_rings(vsi);
245137bb8390SAnirudh Venkataramanan 		if (ret)
245237bb8390SAnirudh Venkataramanan 			goto unroll_vector_base;
245337bb8390SAnirudh Venkataramanan 
2454288ecf49SBenjamin Mikailenko 		ret = ice_vsi_alloc_ring_stats(vsi);
2455288ecf49SBenjamin Mikailenko 		if (ret)
2456288ecf49SBenjamin Mikailenko 			goto unroll_vector_base;
2457288ecf49SBenjamin Mikailenko 
245837bb8390SAnirudh Venkataramanan 		ice_vsi_map_rings_to_vectors(vsi);
2459ab7470bcSAhmed Zaki 		vsi->stat_offsets_loaded = false;
2460ab7470bcSAhmed Zaki 
24616624e780SMichal Swiatkowski 		if (ice_is_xdp_ena_vsi(vsi)) {
24626624e780SMichal Swiatkowski 			ret = ice_vsi_determine_xdp_res(vsi);
24636624e780SMichal Swiatkowski 			if (ret)
24646624e780SMichal Swiatkowski 				goto unroll_vector_base;
2465649b63f5SLarysa Zaremba 			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
2466649b63f5SLarysa Zaremba 						    ICE_XDP_CFG_PART);
24676624e780SMichal Swiatkowski 			if (ret)
24686624e780SMichal Swiatkowski 				goto unroll_vector_base;
24696624e780SMichal Swiatkowski 		}
247037bb8390SAnirudh Venkataramanan 
2471148beb61SHenry Tieman 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
2472148beb61SHenry Tieman 		if (vsi->type != ICE_VSI_CTRL)
2473148beb61SHenry Tieman 			/* Do not exit if configuring RSS had an issue, at
2474148beb61SHenry Tieman 			 * least receive traffic on first queue. Hence no
2475148beb61SHenry Tieman 			 * need to capture return value
247637bb8390SAnirudh Venkataramanan 			 */
2477c90ed40cSTony Nguyen 			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
247837bb8390SAnirudh Venkataramanan 				ice_vsi_cfg_rss_lut_key(vsi);
2479c90ed40cSTony Nguyen 				ice_vsi_set_rss_flow_fld(vsi);
2480c90ed40cSTony Nguyen 			}
248128bf2672SBrett Creeley 		ice_init_arfs(vsi);
248237bb8390SAnirudh Venkataramanan 		break;
24830754d65bSKiran Patil 	case ICE_VSI_CHNL:
24840754d65bSKiran Patil 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
24850754d65bSKiran Patil 			ice_vsi_cfg_rss_lut_key(vsi);
24860754d65bSKiran Patil 			ice_vsi_set_rss_flow_fld(vsi);
24870754d65bSKiran Patil 		}
24880754d65bSKiran Patil 		break;
24898ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
24908ede0178SAnirudh Venkataramanan 		/* The VF driver will take care of creating the netdev for this
24918ede0178SAnirudh Venkataramanan 		 * VSI type and of mapping queues to vectors through Virtchnl;
24928ede0178SAnirudh Venkataramanan 		 * the PF driver only creates a VSI and the corresponding
24938ede0178SAnirudh Venkataramanan 		 * structures for bookkeeping purposes
24948ede0178SAnirudh Venkataramanan 		 */
24958ede0178SAnirudh Venkataramanan 		ret = ice_vsi_alloc_q_vectors(vsi);
24968ede0178SAnirudh Venkataramanan 		if (ret)
24978ede0178SAnirudh Venkataramanan 			goto unroll_vsi_init;
24988ede0178SAnirudh Venkataramanan 
24998ede0178SAnirudh Venkataramanan 		ret = ice_vsi_alloc_rings(vsi);
25008ede0178SAnirudh Venkataramanan 		if (ret)
25018ede0178SAnirudh Venkataramanan 			goto unroll_alloc_q_vector;
25028ede0178SAnirudh Venkataramanan 
2503288ecf49SBenjamin Mikailenko 		ret = ice_vsi_alloc_ring_stats(vsi);
2504288ecf49SBenjamin Mikailenko 		if (ret)
2505288ecf49SBenjamin Mikailenko 			goto unroll_vector_base;
2506ab7470bcSAhmed Zaki 
2507ab7470bcSAhmed Zaki 		vsi->stat_offsets_loaded = false;
2508ab7470bcSAhmed Zaki 
25093a9e32bbSMd Fahad Iqbal Polash 		/* Do not exit if configuring RSS had an issue, at least
25103a9e32bbSMd Fahad Iqbal Polash 		 * receive traffic on first queue. Hence no need to capture
25113a9e32bbSMd Fahad Iqbal Polash 		 * return value
25123a9e32bbSMd Fahad Iqbal Polash 		 */
25131c01c8c6SMd Fahad Iqbal Polash 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
25143a9e32bbSMd Fahad Iqbal Polash 			ice_vsi_cfg_rss_lut_key(vsi);
25151c01c8c6SMd Fahad Iqbal Polash 			ice_vsi_set_vf_rss_flow_fld(vsi);
25161c01c8c6SMd Fahad Iqbal Polash 		}
25178ede0178SAnirudh Venkataramanan 		break;
25180e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
25190e674aebSAnirudh Venkataramanan 		ret = ice_vsi_alloc_rings(vsi);
25200e674aebSAnirudh Venkataramanan 		if (ret)
25210e674aebSAnirudh Venkataramanan 			goto unroll_vsi_init;
2522288ecf49SBenjamin Mikailenko 
2523288ecf49SBenjamin Mikailenko 		ret = ice_vsi_alloc_ring_stats(vsi);
2524288ecf49SBenjamin Mikailenko 		if (ret)
2525288ecf49SBenjamin Mikailenko 			goto unroll_vector_base;
2526288ecf49SBenjamin Mikailenko 
25270e674aebSAnirudh Venkataramanan 		break;
252837bb8390SAnirudh Venkataramanan 	default:
2529df17b7e0SAnirudh Venkataramanan 		/* clean up the resources and exit */
2530c4a9c8e7SMichal Swiatkowski 		ret = -EINVAL;
253137bb8390SAnirudh Venkataramanan 		goto unroll_vsi_init;
253237bb8390SAnirudh Venkataramanan 	}
253337bb8390SAnirudh Venkataramanan 
25346624e780SMichal Swiatkowski 	return 0;
253537bb8390SAnirudh Venkataramanan 
25366624e780SMichal Swiatkowski unroll_vector_base:
25376624e780SMichal Swiatkowski 	/* reclaim SW interrupts back to the common pool */
25386624e780SMichal Swiatkowski unroll_alloc_q_vector:
25396624e780SMichal Swiatkowski 	ice_vsi_free_q_vectors(vsi);
25406624e780SMichal Swiatkowski unroll_vsi_init:
2541227bf450SMichal Swiatkowski 	ice_vsi_delete_from_hw(vsi);
25426624e780SMichal Swiatkowski unroll_get_qs:
25436624e780SMichal Swiatkowski 	ice_vsi_put_qs(vsi);
25446624e780SMichal Swiatkowski unroll_vsi_alloc_stat:
25456624e780SMichal Swiatkowski 	ice_vsi_free_stats(vsi);
25466624e780SMichal Swiatkowski unroll_vsi_alloc:
25476624e780SMichal Swiatkowski 	ice_vsi_free_arrays(vsi);
25486624e780SMichal Swiatkowski 	return ret;
25496624e780SMichal Swiatkowski }
25506624e780SMichal Swiatkowski 
25516624e780SMichal Swiatkowski /**
2552e1588197SJacob Keller  * ice_vsi_cfg - configure a previously allocated VSI
25536624e780SMichal Swiatkowski  * @vsi: pointer to VSI
25545e509ab2SJacob Keller  * @params: parameters used to configure this VSI
25556624e780SMichal Swiatkowski  */
ice_vsi_cfg(struct ice_vsi * vsi,struct ice_vsi_cfg_params * params)25565e509ab2SJacob Keller int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
25576624e780SMichal Swiatkowski {
2558e1588197SJacob Keller 	struct ice_pf *pf = vsi->back;
25596624e780SMichal Swiatkowski 	int ret;
25606624e780SMichal Swiatkowski 
2561e1588197SJacob Keller 	if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
2562e1588197SJacob Keller 		return -EINVAL;
2563e1588197SJacob Keller 
2564e1588197SJacob Keller 	vsi->type = params->type;
2565e1588197SJacob Keller 	vsi->port_info = params->pi;
2566e1588197SJacob Keller 
2567e1588197SJacob Keller 	/* For VSIs which don't have a connected VF, this will be NULL */
2568e1588197SJacob Keller 	vsi->vf = params->vf;
2569e1588197SJacob Keller 
25705e509ab2SJacob Keller 	ret = ice_vsi_cfg_def(vsi, params);
25716624e780SMichal Swiatkowski 	if (ret)
25726624e780SMichal Swiatkowski 		return ret;
25736624e780SMichal Swiatkowski 
25746624e780SMichal Swiatkowski 	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
25756624e780SMichal Swiatkowski 	if (ret)
25766624e780SMichal Swiatkowski 		ice_vsi_decfg(vsi);
25776624e780SMichal Swiatkowski 
2578e1588197SJacob Keller 	if (vsi->type == ICE_VSI_CTRL) {
2579e1588197SJacob Keller 		if (vsi->vf) {
2580e1588197SJacob Keller 			WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2581e1588197SJacob Keller 			vsi->vf->ctrl_vsi_idx = vsi->idx;
2582e1588197SJacob Keller 		} else {
2583e1588197SJacob Keller 			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
2584e1588197SJacob Keller 			pf->ctrl_vsi_idx = vsi->idx;
2585e1588197SJacob Keller 		}
2586e1588197SJacob Keller 	}
2587e1588197SJacob Keller 
25886624e780SMichal Swiatkowski 	return ret;
25896624e780SMichal Swiatkowski }
25906624e780SMichal Swiatkowski 
25916624e780SMichal Swiatkowski /**
25926624e780SMichal Swiatkowski  * ice_vsi_decfg - remove all VSI configuration
25936624e780SMichal Swiatkowski  * @vsi: pointer to VSI
25946624e780SMichal Swiatkowski  */
ice_vsi_decfg(struct ice_vsi * vsi)25956624e780SMichal Swiatkowski void ice_vsi_decfg(struct ice_vsi *vsi)
25966624e780SMichal Swiatkowski {
25976624e780SMichal Swiatkowski 	struct ice_pf *pf = vsi->back;
25986624e780SMichal Swiatkowski 	int err;
25996624e780SMichal Swiatkowski 
26006624e780SMichal Swiatkowski 	/* The Rx rule only exists, and thus only needs removing, if the LLDP
26016624e780SMichal Swiatkowski 	 * FW engine is currently stopped
26026624e780SMichal Swiatkowski 	 */
26036624e780SMichal Swiatkowski 	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
26046624e780SMichal Swiatkowski 	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
26056624e780SMichal Swiatkowski 		ice_cfg_sw_lldp(vsi, false, false);
26066624e780SMichal Swiatkowski 
26076624e780SMichal Swiatkowski 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
26086624e780SMichal Swiatkowski 	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
26096624e780SMichal Swiatkowski 	if (err)
26106624e780SMichal Swiatkowski 		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
26116624e780SMichal Swiatkowski 			vsi->vsi_num, err);
26126624e780SMichal Swiatkowski 
26136624e780SMichal Swiatkowski 	if (ice_is_xdp_ena_vsi(vsi))
26146624e780SMichal Swiatkowski 		/* return value check can be skipped here, it always returns
26156624e780SMichal Swiatkowski 		 * 0 if reset is in progress
26166624e780SMichal Swiatkowski 		 */
2617649b63f5SLarysa Zaremba 		ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
26186624e780SMichal Swiatkowski 
26196624e780SMichal Swiatkowski 	ice_vsi_clear_rings(vsi);
26206624e780SMichal Swiatkowski 	ice_vsi_free_q_vectors(vsi);
26216624e780SMichal Swiatkowski 	ice_vsi_put_qs(vsi);
26226624e780SMichal Swiatkowski 	ice_vsi_free_arrays(vsi);
26236624e780SMichal Swiatkowski 
26246624e780SMichal Swiatkowski 	/* SR-IOV determines needed MSIX resources all at once instead of per
26256624e780SMichal Swiatkowski 	 * VSI since when VFs are spawned we know how many VFs there are and how
26266624e780SMichal Swiatkowski 	 * many interrupts each VF needs. SR-IOV MSIX resources are also
26276624e780SMichal Swiatkowski 	 * cleared in the same manner.
26286624e780SMichal Swiatkowski 	 */
26296624e780SMichal Swiatkowski 
26306624e780SMichal Swiatkowski 	if (vsi->type == ICE_VSI_VF &&
26316624e780SMichal Swiatkowski 	    vsi->agg_node && vsi->agg_node->valid)
26326624e780SMichal Swiatkowski 		vsi->agg_node->num_vsis--;
26330754d65bSKiran Patil }
26340754d65bSKiran Patil 
26356624e780SMichal Swiatkowski /**
26366624e780SMichal Swiatkowski  * ice_vsi_setup - Set up a VSI by a given type
26376624e780SMichal Swiatkowski  * @pf: board private structure
26385e509ab2SJacob Keller  * @params: parameters to use when creating the VSI
26396624e780SMichal Swiatkowski  *
26406624e780SMichal Swiatkowski  * This allocates the sw VSI structure and its queue resources.
26416624e780SMichal Swiatkowski  *
26426624e780SMichal Swiatkowski  * Returns pointer to the successfully allocated and configured VSI sw struct on
26436624e780SMichal Swiatkowski  * success, NULL on failure.
26446624e780SMichal Swiatkowski  */
26456624e780SMichal Swiatkowski struct ice_vsi *
ice_vsi_setup(struct ice_pf * pf,struct ice_vsi_cfg_params * params)26465e509ab2SJacob Keller ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
26476624e780SMichal Swiatkowski {
26486624e780SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(pf);
26496624e780SMichal Swiatkowski 	struct ice_vsi *vsi;
26506624e780SMichal Swiatkowski 	int ret;
26516624e780SMichal Swiatkowski 
26525e509ab2SJacob Keller 	/* ice_vsi_setup can only initialize a new VSI, and we must have
26535e509ab2SJacob Keller 	 * a port_info structure for it.
26545e509ab2SJacob Keller 	 */
26555e509ab2SJacob Keller 	if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
26565e509ab2SJacob Keller 	    WARN_ON(!params->pi))
26575e509ab2SJacob Keller 		return NULL;
26585e509ab2SJacob Keller 
2659e1588197SJacob Keller 	vsi = ice_vsi_alloc(pf);
26606624e780SMichal Swiatkowski 	if (!vsi) {
26616624e780SMichal Swiatkowski 		dev_err(dev, "could not allocate VSI\n");
26626624e780SMichal Swiatkowski 		return NULL;
266337bb8390SAnirudh Venkataramanan 	}
266437bb8390SAnirudh Venkataramanan 
26655e509ab2SJacob Keller 	ret = ice_vsi_cfg(vsi, params);
26666624e780SMichal Swiatkowski 	if (ret)
26676624e780SMichal Swiatkowski 		goto err_vsi_cfg;
26686624e780SMichal Swiatkowski 
2669d95276ceSAkeem G Abodunrin 	/* Add a switch rule of lookup type ETHERTYPE to drop all Tx Flow
2670d95276ceSAkeem G Abodunrin 	 * Control frames from VSIs and to restrict a malicious VF from
2671d95276ceSAkeem G Abodunrin 	 * sending out PAUSE or PFC frames. If enabled, FW can still send FC
2672d95276ceSAkeem G Abodunrin 	 * frames. The rule is added once for the PF VSI in order to create
2673d95276ceSAkeem G Abodunrin 	 * the appropriate recipe, since the VSI/VSI list is ignored with the
2674241c8cf0SPaul Greenwalt 	 * drop action... Also add rules to handle LLDP Tx packets. Tx LLDP
2675241c8cf0SPaul Greenwalt 	 * packets need to be dropped so that VFs cannot send LLDP packets to
2676241c8cf0SPaul Greenwalt 	 * reconfigure DCB settings in the HW.
2677d95276ceSAkeem G Abodunrin 	 */
26786624e780SMichal Swiatkowski 	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
26791b8f15b6SMichal Swiatkowski 		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
26801b8f15b6SMichal Swiatkowski 				 ICE_DROP_PACKET);
26812e0e6228SDave Ertman 		ice_cfg_sw_lldp(vsi, true, true);
2682462acf6aSTony Nguyen 	}
26832e0e6228SDave Ertman 
2684b126bd6bSKiran Patil 	if (!vsi->agg_node)
2685b126bd6bSKiran Patil 		ice_set_agg_vsi(vsi);
26866624e780SMichal Swiatkowski 
268737bb8390SAnirudh Venkataramanan 	return vsi;
268837bb8390SAnirudh Venkataramanan 
26896624e780SMichal Swiatkowski err_vsi_cfg:
26900db66d20SMichal Swiatkowski 	ice_vsi_free(vsi);
269137bb8390SAnirudh Venkataramanan 
269237bb8390SAnirudh Venkataramanan 	return NULL;
269337bb8390SAnirudh Venkataramanan }
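
/* Example (illustrative, not compiled): a minimal sketch of how a caller
 * might fill ice_vsi_cfg_params for a brand new PF VSI. The field names are
 * taken from their use in this file; everything else is an assumption.
 *
 *	struct ice_vsi_cfg_params params = {};
 *	struct ice_vsi *vsi;
 *
 *	params.type = ICE_VSI_PF;
 *	params.pi = pf->hw.port_info;
 *	params.flags = ICE_VSI_FLAG_INIT;
 *
 *	vsi = ice_vsi_setup(pf, &params);
 *	if (!vsi)
 *		return -ENOMEM;
 */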
269437bb8390SAnirudh Venkataramanan 
269537bb8390SAnirudh Venkataramanan /**
26965153a18eSAnirudh Venkataramanan  * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
26975153a18eSAnirudh Venkataramanan  * @vsi: the VSI being cleaned up
26985153a18eSAnirudh Venkataramanan  */
ice_vsi_release_msix(struct ice_vsi * vsi)26995153a18eSAnirudh Venkataramanan static void ice_vsi_release_msix(struct ice_vsi *vsi)
27005153a18eSAnirudh Venkataramanan {
27015153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
27025153a18eSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
27035153a18eSAnirudh Venkataramanan 	u32 txq = 0;
27045153a18eSAnirudh Venkataramanan 	u32 rxq = 0;
27055153a18eSAnirudh Venkataramanan 	int i, q;
27065153a18eSAnirudh Venkataramanan 
27072faf63b6SMaciej Fijalkowski 	ice_for_each_q_vector(vsi, i) {
27085153a18eSAnirudh Venkataramanan 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
27095153a18eSAnirudh Venkataramanan 
2710b8b47723SJesse Brandeburg 		ice_write_intrl(q_vector, 0);
27115153a18eSAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_tx; q++) {
2712b8b47723SJesse Brandeburg 			ice_write_itr(&q_vector->tx, 0);
27135153a18eSAnirudh Venkataramanan 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2714efc2214bSMaciej Fijalkowski 			if (ice_is_xdp_ena_vsi(vsi)) {
2715efc2214bSMaciej Fijalkowski 				u32 xdp_txq = txq + vsi->num_xdp_txq;
2716efc2214bSMaciej Fijalkowski 
2717efc2214bSMaciej Fijalkowski 				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2718efc2214bSMaciej Fijalkowski 			}
27195153a18eSAnirudh Venkataramanan 			txq++;
27205153a18eSAnirudh Venkataramanan 		}
27215153a18eSAnirudh Venkataramanan 
27225153a18eSAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_rx; q++) {
2723b8b47723SJesse Brandeburg 			ice_write_itr(&q_vector->rx, 0);
27245153a18eSAnirudh Venkataramanan 			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
27255153a18eSAnirudh Venkataramanan 			rxq++;
27265153a18eSAnirudh Venkataramanan 		}
27275153a18eSAnirudh Venkataramanan 	}
27285153a18eSAnirudh Venkataramanan 
27295153a18eSAnirudh Venkataramanan 	ice_flush(hw);
27305153a18eSAnirudh Venkataramanan }
27315153a18eSAnirudh Venkataramanan 
27325153a18eSAnirudh Venkataramanan /**
27335153a18eSAnirudh Venkataramanan  * ice_vsi_free_irq - Free the IRQ association with the OS
27345153a18eSAnirudh Venkataramanan  * @vsi: the VSI being configured
27355153a18eSAnirudh Venkataramanan  */
ice_vsi_free_irq(struct ice_vsi * vsi)27365153a18eSAnirudh Venkataramanan void ice_vsi_free_irq(struct ice_vsi *vsi)
27375153a18eSAnirudh Venkataramanan {
27385153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
27395153a18eSAnirudh Venkataramanan 	int i;
27405153a18eSAnirudh Venkataramanan 
27415153a18eSAnirudh Venkataramanan 	if (!vsi->q_vectors || !vsi->irqs_ready)
27425153a18eSAnirudh Venkataramanan 		return;
27435153a18eSAnirudh Venkataramanan 
2744eb0208ecSPreethi Banala 	ice_vsi_release_msix(vsi);
27458ede0178SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_VF)
27468ede0178SAnirudh Venkataramanan 		return;
2747eb0208ecSPreethi Banala 
27485153a18eSAnirudh Venkataramanan 	vsi->irqs_ready = false;
2749d7442f51SAlexander Lobakin 	ice_free_cpu_rx_rmap(vsi);
2750d7442f51SAlexander Lobakin 
27510c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, i) {
27525153a18eSAnirudh Venkataramanan 		int irq_num;
27535153a18eSAnirudh Venkataramanan 
27544aad5335SPiotr Raczynski 		irq_num = vsi->q_vectors[i]->irq.virq;
27555153a18eSAnirudh Venkataramanan 
27565153a18eSAnirudh Venkataramanan 		/* free only the irqs that were actually requested */
27575153a18eSAnirudh Venkataramanan 		if (!vsi->q_vectors[i] ||
27585153a18eSAnirudh Venkataramanan 		    !(vsi->q_vectors[i]->num_ring_tx ||
27595153a18eSAnirudh Venkataramanan 		      vsi->q_vectors[i]->num_ring_rx))
27605153a18eSAnirudh Venkataramanan 			continue;
27615153a18eSAnirudh Venkataramanan 
27625153a18eSAnirudh Venkataramanan 		/* clear the affinity notifier in the IRQ descriptor */
2763d7442f51SAlexander Lobakin 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
27645153a18eSAnirudh Venkataramanan 			irq_set_affinity_notifier(irq_num, NULL);
27655153a18eSAnirudh Venkataramanan 
27665153a18eSAnirudh Venkataramanan 		/* clear the affinity_mask in the IRQ descriptor */
27675153a18eSAnirudh Venkataramanan 		irq_set_affinity_hint(irq_num, NULL);
27685153a18eSAnirudh Venkataramanan 		synchronize_irq(irq_num);
27694015d11eSBrett Creeley 		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
27705153a18eSAnirudh Venkataramanan 	}
27715153a18eSAnirudh Venkataramanan }
27725153a18eSAnirudh Venkataramanan 
27735153a18eSAnirudh Venkataramanan /**
27745153a18eSAnirudh Venkataramanan  * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
27755153a18eSAnirudh Venkataramanan  * @vsi: the VSI having resources freed
27765153a18eSAnirudh Venkataramanan  */
ice_vsi_free_tx_rings(struct ice_vsi * vsi)27775153a18eSAnirudh Venkataramanan void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
27785153a18eSAnirudh Venkataramanan {
27795153a18eSAnirudh Venkataramanan 	int i;
27805153a18eSAnirudh Venkataramanan 
27815153a18eSAnirudh Venkataramanan 	if (!vsi->tx_rings)
27825153a18eSAnirudh Venkataramanan 		return;
27835153a18eSAnirudh Venkataramanan 
27845153a18eSAnirudh Venkataramanan 	ice_for_each_txq(vsi, i)
27855153a18eSAnirudh Venkataramanan 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
27865153a18eSAnirudh Venkataramanan 			ice_free_tx_ring(vsi->tx_rings[i]);
27875153a18eSAnirudh Venkataramanan }
27885153a18eSAnirudh Venkataramanan 
27895153a18eSAnirudh Venkataramanan /**
27905153a18eSAnirudh Venkataramanan  * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
27915153a18eSAnirudh Venkataramanan  * @vsi: the VSI having resources freed
27925153a18eSAnirudh Venkataramanan  */
ice_vsi_free_rx_rings(struct ice_vsi * vsi)27935153a18eSAnirudh Venkataramanan void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
27945153a18eSAnirudh Venkataramanan {
27955153a18eSAnirudh Venkataramanan 	int i;
27965153a18eSAnirudh Venkataramanan 
27975153a18eSAnirudh Venkataramanan 	if (!vsi->rx_rings)
27985153a18eSAnirudh Venkataramanan 		return;
27995153a18eSAnirudh Venkataramanan 
28005153a18eSAnirudh Venkataramanan 	ice_for_each_rxq(vsi, i)
28015153a18eSAnirudh Venkataramanan 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
28025153a18eSAnirudh Venkataramanan 			ice_free_rx_ring(vsi->rx_rings[i]);
28035153a18eSAnirudh Venkataramanan }
28045153a18eSAnirudh Venkataramanan 
28055153a18eSAnirudh Venkataramanan /**
280607309a0eSAnirudh Venkataramanan  * ice_vsi_close - Shut down a VSI
280707309a0eSAnirudh Venkataramanan  * @vsi: the VSI being shut down
280807309a0eSAnirudh Venkataramanan  */
ice_vsi_close(struct ice_vsi * vsi)280907309a0eSAnirudh Venkataramanan void ice_vsi_close(struct ice_vsi *vsi)
281007309a0eSAnirudh Venkataramanan {
2811e97fb1aeSAnirudh Venkataramanan 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
281207309a0eSAnirudh Venkataramanan 		ice_down(vsi);
281307309a0eSAnirudh Venkataramanan 
281407309a0eSAnirudh Venkataramanan 	ice_vsi_free_irq(vsi);
281507309a0eSAnirudh Venkataramanan 	ice_vsi_free_tx_rings(vsi);
281607309a0eSAnirudh Venkataramanan 	ice_vsi_free_rx_rings(vsi);
281707309a0eSAnirudh Venkataramanan }
281807309a0eSAnirudh Venkataramanan 
281907309a0eSAnirudh Venkataramanan /**
28209d614b64SAnirudh Venkataramanan  * ice_ena_vsi - resume a VSI
28219d614b64SAnirudh Venkataramanan  * @vsi: the VSI being resume
28229d614b64SAnirudh Venkataramanan  * @locked: is the rtnl_lock already held
28239d614b64SAnirudh Venkataramanan  */
ice_ena_vsi(struct ice_vsi * vsi,bool locked)28249d614b64SAnirudh Venkataramanan int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
28259d614b64SAnirudh Venkataramanan {
28269d614b64SAnirudh Venkataramanan 	int err = 0;
28279d614b64SAnirudh Venkataramanan 
2828e97fb1aeSAnirudh Venkataramanan 	if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
28299d614b64SAnirudh Venkataramanan 		return 0;
28309d614b64SAnirudh Venkataramanan 
2831e97fb1aeSAnirudh Venkataramanan 	clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
28329d614b64SAnirudh Venkataramanan 
28339d614b64SAnirudh Venkataramanan 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
28349d614b64SAnirudh Venkataramanan 		if (netif_running(vsi->netdev)) {
28359d614b64SAnirudh Venkataramanan 			if (!locked)
28369d614b64SAnirudh Venkataramanan 				rtnl_lock();
28379d614b64SAnirudh Venkataramanan 
2838e95fc857SKrzysztof Goreczny 			err = ice_open_internal(vsi->netdev);
28399d614b64SAnirudh Venkataramanan 
28409d614b64SAnirudh Venkataramanan 			if (!locked)
28419d614b64SAnirudh Venkataramanan 				rtnl_unlock();
28429d614b64SAnirudh Venkataramanan 		}
2843148beb61SHenry Tieman 	} else if (vsi->type == ICE_VSI_CTRL) {
2844148beb61SHenry Tieman 		err = ice_vsi_open_ctrl(vsi);
28459d614b64SAnirudh Venkataramanan 	}
28469d614b64SAnirudh Venkataramanan 
28479d614b64SAnirudh Venkataramanan 	return err;
28489d614b64SAnirudh Venkataramanan }
28499d614b64SAnirudh Venkataramanan 
28509d614b64SAnirudh Venkataramanan /**
28519d614b64SAnirudh Venkataramanan  * ice_dis_vsi - pause a VSI
28529d614b64SAnirudh Venkataramanan  * @vsi: the VSI being paused
28539d614b64SAnirudh Venkataramanan  * @locked: is the rtnl_lock already held
28549d614b64SAnirudh Venkataramanan  */
ice_dis_vsi(struct ice_vsi * vsi,bool locked)28559d614b64SAnirudh Venkataramanan void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
28569d614b64SAnirudh Venkataramanan {
2857e97fb1aeSAnirudh Venkataramanan 	if (test_bit(ICE_VSI_DOWN, vsi->state))
28589d614b64SAnirudh Venkataramanan 		return;
28599d614b64SAnirudh Venkataramanan 
2860e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
28619d614b64SAnirudh Venkataramanan 
28629d614b64SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
28639d614b64SAnirudh Venkataramanan 		if (netif_running(vsi->netdev)) {
28649d614b64SAnirudh Venkataramanan 			if (!locked)
28659d614b64SAnirudh Venkataramanan 				rtnl_lock();
28669d614b64SAnirudh Venkataramanan 
2867e95fc857SKrzysztof Goreczny 			ice_vsi_close(vsi);
28689d614b64SAnirudh Venkataramanan 
28699d614b64SAnirudh Venkataramanan 			if (!locked)
28709d614b64SAnirudh Venkataramanan 				rtnl_unlock();
28719d614b64SAnirudh Venkataramanan 		} else {
28729d614b64SAnirudh Venkataramanan 			ice_vsi_close(vsi);
28739d614b64SAnirudh Venkataramanan 		}
2874f66756e0SGrzegorz Nitka 	} else if (vsi->type == ICE_VSI_CTRL ||
2875f66756e0SGrzegorz Nitka 		   vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
2876148beb61SHenry Tieman 		ice_vsi_close(vsi);
28779d614b64SAnirudh Venkataramanan 	}
28789d614b64SAnirudh Venkataramanan }
28799d614b64SAnirudh Venkataramanan 
28809d614b64SAnirudh Venkataramanan /**
28815153a18eSAnirudh Venkataramanan  * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
28825153a18eSAnirudh Venkataramanan  * @vsi: the VSI being un-configured
28835153a18eSAnirudh Venkataramanan  */
ice_vsi_dis_irq(struct ice_vsi * vsi)28845153a18eSAnirudh Venkataramanan void ice_vsi_dis_irq(struct ice_vsi *vsi)
28855153a18eSAnirudh Venkataramanan {
28865153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
28875153a18eSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
28885153a18eSAnirudh Venkataramanan 	u32 val;
28895153a18eSAnirudh Venkataramanan 	int i;
28905153a18eSAnirudh Venkataramanan 
28915153a18eSAnirudh Venkataramanan 	/* disable interrupt causation from each queue */
28925153a18eSAnirudh Venkataramanan 	if (vsi->tx_rings) {
28935153a18eSAnirudh Venkataramanan 		ice_for_each_txq(vsi, i) {
28945153a18eSAnirudh Venkataramanan 			if (vsi->tx_rings[i]) {
28955153a18eSAnirudh Venkataramanan 				u16 reg;
28965153a18eSAnirudh Venkataramanan 
28975153a18eSAnirudh Venkataramanan 				reg = vsi->tx_rings[i]->reg_idx;
28985153a18eSAnirudh Venkataramanan 				val = rd32(hw, QINT_TQCTL(reg));
28995153a18eSAnirudh Venkataramanan 				val &= ~QINT_TQCTL_CAUSE_ENA_M;
29005153a18eSAnirudh Venkataramanan 				wr32(hw, QINT_TQCTL(reg), val);
29015153a18eSAnirudh Venkataramanan 			}
29025153a18eSAnirudh Venkataramanan 		}
29035153a18eSAnirudh Venkataramanan 	}
29045153a18eSAnirudh Venkataramanan 
29055153a18eSAnirudh Venkataramanan 	if (vsi->rx_rings) {
29065153a18eSAnirudh Venkataramanan 		ice_for_each_rxq(vsi, i) {
29075153a18eSAnirudh Venkataramanan 			if (vsi->rx_rings[i]) {
29085153a18eSAnirudh Venkataramanan 				u16 reg;
29095153a18eSAnirudh Venkataramanan 
29105153a18eSAnirudh Venkataramanan 				reg = vsi->rx_rings[i]->reg_idx;
29115153a18eSAnirudh Venkataramanan 				val = rd32(hw, QINT_RQCTL(reg));
29125153a18eSAnirudh Venkataramanan 				val &= ~QINT_RQCTL_CAUSE_ENA_M;
29135153a18eSAnirudh Venkataramanan 				wr32(hw, QINT_RQCTL(reg), val);
29145153a18eSAnirudh Venkataramanan 			}
29155153a18eSAnirudh Venkataramanan 		}
29165153a18eSAnirudh Venkataramanan 	}
29175153a18eSAnirudh Venkataramanan 
29185153a18eSAnirudh Venkataramanan 	/* disable each interrupt */
2919462acf6aSTony Nguyen 	ice_for_each_q_vector(vsi, i) {
2920462acf6aSTony Nguyen 		if (!vsi->q_vectors[i])
2921462acf6aSTony Nguyen 			continue;
2922b07833a0SBrett Creeley 		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
2923462acf6aSTony Nguyen 	}
29245153a18eSAnirudh Venkataramanan 
29255153a18eSAnirudh Venkataramanan 	ice_flush(hw);
2926b07833a0SBrett Creeley 
2927da4a9e73SBrett Creeley 	/* don't call synchronize_irq() for VFs from the host */
2928da4a9e73SBrett Creeley 	if (vsi->type == ICE_VSI_VF)
2929da4a9e73SBrett Creeley 		return;
2930da4a9e73SBrett Creeley 
29310c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, i)
29324aad5335SPiotr Raczynski 		synchronize_irq(vsi->q_vectors[i]->irq.virq);
29335153a18eSAnirudh Venkataramanan }
29345153a18eSAnirudh Venkataramanan 
29355153a18eSAnirudh Venkataramanan /**
2936df0f8479SAnirudh Venkataramanan  * ice_vsi_release - Delete a VSI and free its resources
2937df0f8479SAnirudh Venkataramanan  * @vsi: the VSI being removed
2938df0f8479SAnirudh Venkataramanan  *
2939df0f8479SAnirudh Venkataramanan  * Returns 0 on success or < 0 on error
2940df0f8479SAnirudh Venkataramanan  */
ice_vsi_release(struct ice_vsi * vsi)2941df0f8479SAnirudh Venkataramanan int ice_vsi_release(struct ice_vsi *vsi)
2942df0f8479SAnirudh Venkataramanan {
2943df0f8479SAnirudh Venkataramanan 	struct ice_pf *pf;
2944df0f8479SAnirudh Venkataramanan 
2945df0f8479SAnirudh Venkataramanan 	if (!vsi->back)
2946df0f8479SAnirudh Venkataramanan 		return -ENODEV;
2947df0f8479SAnirudh Venkataramanan 	pf = vsi->back;
2948b751930cSBrett Creeley 
2949df0f8479SAnirudh Venkataramanan 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2950df0f8479SAnirudh Venkataramanan 		ice_rss_clean(vsi);
2951df0f8479SAnirudh Venkataramanan 
2952df0f8479SAnirudh Venkataramanan 	ice_vsi_close(vsi);
29536624e780SMichal Swiatkowski 	ice_vsi_decfg(vsi);
2954aa6ccf3fSBrett Creeley 
2955df0f8479SAnirudh Venkataramanan 	/* retain SW VSI data structure since it is needed to unregister and
2956df0f8479SAnirudh Venkataramanan 	 * free VSI netdev when PF is not in reset recovery pending state,
2957df0f8479SAnirudh Venkataramanan 	 * for example during rmmod.
2958df0f8479SAnirudh Venkataramanan 	 */
29595df7e45dSDave Ertman 	if (!ice_is_reset_in_progress(pf->state))
2960227bf450SMichal Swiatkowski 		ice_vsi_delete(vsi);
2961df0f8479SAnirudh Venkataramanan 
2962df0f8479SAnirudh Venkataramanan 	return 0;
2963df0f8479SAnirudh Venkataramanan }
2964df0f8479SAnirudh Venkataramanan 
2965df0f8479SAnirudh Venkataramanan /**
296661dc79ceSMichal Swiatkowski  * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
296761dc79ceSMichal Swiatkowski  * @vsi: VSI connected with q_vectors
296861dc79ceSMichal Swiatkowski  * @coalesce: array of struct with stored coalesce
296961dc79ceSMichal Swiatkowski  *
297061dc79ceSMichal Swiatkowski  * Returns array size.
297161dc79ceSMichal Swiatkowski  */
297261dc79ceSMichal Swiatkowski static int
ice_vsi_rebuild_get_coalesce(struct ice_vsi * vsi,struct ice_coalesce_stored * coalesce)297361dc79ceSMichal Swiatkowski ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
297461dc79ceSMichal Swiatkowski 			     struct ice_coalesce_stored *coalesce)
297561dc79ceSMichal Swiatkowski {
297661dc79ceSMichal Swiatkowski 	int i;
297761dc79ceSMichal Swiatkowski 
297861dc79ceSMichal Swiatkowski 	ice_for_each_q_vector(vsi, i) {
297961dc79ceSMichal Swiatkowski 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
298061dc79ceSMichal Swiatkowski 
2981bf13502eSMichal Wilczynski 		coalesce[i].itr_tx = q_vector->tx.itr_settings;
2982bf13502eSMichal Wilczynski 		coalesce[i].itr_rx = q_vector->rx.itr_settings;
298361dc79ceSMichal Swiatkowski 		coalesce[i].intrl = q_vector->intrl;
29842ec56385SPaul M Stillwell Jr 
29852ec56385SPaul M Stillwell Jr 		if (i < vsi->num_txq)
29862ec56385SPaul M Stillwell Jr 			coalesce[i].tx_valid = true;
29872ec56385SPaul M Stillwell Jr 		if (i < vsi->num_rxq)
29882ec56385SPaul M Stillwell Jr 			coalesce[i].rx_valid = true;
298961dc79ceSMichal Swiatkowski 	}
299061dc79ceSMichal Swiatkowski 
299161dc79ceSMichal Swiatkowski 	return vsi->num_q_vectors;
299261dc79ceSMichal Swiatkowski }
299361dc79ceSMichal Swiatkowski 
299461dc79ceSMichal Swiatkowski /**
299561dc79ceSMichal Swiatkowski  * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
299661dc79ceSMichal Swiatkowski  * @vsi: VSI connected with q_vectors
299761dc79ceSMichal Swiatkowski  * @coalesce: pointer to array of struct with stored coalesce
299861dc79ceSMichal Swiatkowski  * @size: size of coalesce array
299961dc79ceSMichal Swiatkowski  *
300061dc79ceSMichal Swiatkowski  * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
300161dc79ceSMichal Swiatkowski  * ITR params in arrays. If size is 0 or coalesce wasn't stored, set coalesce
300261dc79ceSMichal Swiatkowski  * to the default values.
300361dc79ceSMichal Swiatkowski  */
300461dc79ceSMichal Swiatkowski static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi * vsi,struct ice_coalesce_stored * coalesce,int size)300561dc79ceSMichal Swiatkowski ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
300661dc79ceSMichal Swiatkowski 			     struct ice_coalesce_stored *coalesce, int size)
300761dc79ceSMichal Swiatkowski {
3008b8b47723SJesse Brandeburg 	struct ice_ring_container *rc;
300961dc79ceSMichal Swiatkowski 	int i;
301061dc79ceSMichal Swiatkowski 
301161dc79ceSMichal Swiatkowski 	if ((size && !coalesce) || !vsi)
301261dc79ceSMichal Swiatkowski 		return;
301361dc79ceSMichal Swiatkowski 
30142ec56385SPaul M Stillwell Jr 	/* There are a couple of cases that have to be handled here:
30152ec56385SPaul M Stillwell Jr 	 *   1. The case where the number of queue vectors stays the same, but
30162ec56385SPaul M Stillwell Jr 	 *      the number of Tx or Rx rings changes (the first for loop)
30172ec56385SPaul M Stillwell Jr 	 *   2. The case where the number of queue vectors increased (the
30182ec56385SPaul M Stillwell Jr 	 *      second for loop)
3019a039f6fcSBrett Creeley 	 */
30202ec56385SPaul M Stillwell Jr 	for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
30212ec56385SPaul M Stillwell Jr 		/* There are 2 cases to handle here and they are the same for
30222ec56385SPaul M Stillwell Jr 		 * both Tx and Rx:
30232ec56385SPaul M Stillwell Jr 		 *   if the entry was valid previously (coalesce[i].[tr]x_valid)
30242ec56385SPaul M Stillwell Jr 		 *   and the loop variable is less than the number of rings
30252ec56385SPaul M Stillwell Jr 		 *   allocated, then write the previous values
30262ec56385SPaul M Stillwell Jr 		 *
30272ec56385SPaul M Stillwell Jr 		 *   if the entry was not valid previously, but the number of
30282ec56385SPaul M Stillwell Jr 		 *   rings is less than are allocated (this means the number of
30292ec56385SPaul M Stillwell Jr 		 *   rings increased from previously), then write out the
30302ec56385SPaul M Stillwell Jr 		 *   values in the first element
3031b8b47723SJesse Brandeburg 		 *
3032b8b47723SJesse Brandeburg 		 *   Also, always write the ITR, even if in ITR_IS_DYNAMIC
3033b8b47723SJesse Brandeburg 		 *   mode, as there is no harm because the dynamic algorithm
3034b8b47723SJesse Brandeburg 		 *   will just overwrite it.
30352ec56385SPaul M Stillwell Jr 		 */
3036b8b47723SJesse Brandeburg 		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
3037b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->rx;
3038bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[i].itr_rx;
3039b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3040b8b47723SJesse Brandeburg 		} else if (i < vsi->alloc_rxq) {
3041b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->rx;
3042bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[0].itr_rx;
3043b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3044b8b47723SJesse Brandeburg 		}
30452ec56385SPaul M Stillwell Jr 
3046b8b47723SJesse Brandeburg 		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
3047b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->tx;
3048bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[i].itr_tx;
3049b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3050b8b47723SJesse Brandeburg 		} else if (i < vsi->alloc_txq) {
3051b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->tx;
3052bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[0].itr_tx;
3053b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3054b8b47723SJesse Brandeburg 		}
30552ec56385SPaul M Stillwell Jr 
3056b8b47723SJesse Brandeburg 		vsi->q_vectors[i]->intrl = coalesce[i].intrl;
3057d16a4f45SJesse Brandeburg 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
30582ec56385SPaul M Stillwell Jr 	}
30592ec56385SPaul M Stillwell Jr 
30602ec56385SPaul M Stillwell Jr 	/* the number of queue vectors increased so write whatever is in
30612ec56385SPaul M Stillwell Jr 	 * the first element
30622ec56385SPaul M Stillwell Jr 	 */
30632ec56385SPaul M Stillwell Jr 	for (; i < vsi->num_q_vectors; i++) {
3064b8b47723SJesse Brandeburg 		/* transmit */
3065b8b47723SJesse Brandeburg 		rc = &vsi->q_vectors[i]->tx;
3066bf13502eSMichal Wilczynski 		rc->itr_settings = coalesce[0].itr_tx;
3067b8b47723SJesse Brandeburg 		ice_write_itr(rc, rc->itr_setting);
3068b8b47723SJesse Brandeburg 
3069b8b47723SJesse Brandeburg 		/* receive */
3070b8b47723SJesse Brandeburg 		rc = &vsi->q_vectors[i]->rx;
3071bf13502eSMichal Wilczynski 		rc->itr_settings = coalesce[0].itr_rx;
3072b8b47723SJesse Brandeburg 		ice_write_itr(rc, rc->itr_setting);
3073b8b47723SJesse Brandeburg 
3074b8b47723SJesse Brandeburg 		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
3075d16a4f45SJesse Brandeburg 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
30762ec56385SPaul M Stillwell Jr 	}
307761dc79ceSMichal Swiatkowski }
307861dc79ceSMichal Swiatkowski 
307961dc79ceSMichal Swiatkowski /**
3080feddf6c0SMichal Swiatkowski  * ice_vsi_realloc_stat_arrays - Frees unused stat structures or allocates new ones
3081288ecf49SBenjamin Mikailenko  * @vsi: VSI pointer
3082288ecf49SBenjamin Mikailenko  */
3083feddf6c0SMichal Swiatkowski static int
ice_vsi_realloc_stat_arrays(struct ice_vsi * vsi)3084feddf6c0SMichal Swiatkowski ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
3085288ecf49SBenjamin Mikailenko {
3086feddf6c0SMichal Swiatkowski 	u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
3087feddf6c0SMichal Swiatkowski 	u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
3088feddf6c0SMichal Swiatkowski 	struct ice_ring_stats **tx_ring_stats;
3089feddf6c0SMichal Swiatkowski 	struct ice_ring_stats **rx_ring_stats;
3090288ecf49SBenjamin Mikailenko 	struct ice_vsi_stats *vsi_stat;
3091288ecf49SBenjamin Mikailenko 	struct ice_pf *pf = vsi->back;
3092feddf6c0SMichal Swiatkowski 	u16 prev_txq = vsi->alloc_txq;
3093feddf6c0SMichal Swiatkowski 	u16 prev_rxq = vsi->alloc_rxq;
3094288ecf49SBenjamin Mikailenko 	int i;
3095288ecf49SBenjamin Mikailenko 
3096288ecf49SBenjamin Mikailenko 	vsi_stat = pf->vsi_stats[vsi->idx];
3097288ecf49SBenjamin Mikailenko 
3098feddf6c0SMichal Swiatkowski 	if (req_txq < prev_txq) {
3099feddf6c0SMichal Swiatkowski 		for (i = req_txq; i < prev_txq; i++) {
3100288ecf49SBenjamin Mikailenko 			if (vsi_stat->tx_ring_stats[i]) {
3101288ecf49SBenjamin Mikailenko 				kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
3102288ecf49SBenjamin Mikailenko 				WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
3103288ecf49SBenjamin Mikailenko 			}
3104288ecf49SBenjamin Mikailenko 		}
3105288ecf49SBenjamin Mikailenko 	}
3106288ecf49SBenjamin Mikailenko 
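	/* keep the old pointer so the previous per-ring stats can be restored
	 * if krealloc_array() fails below
	 */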
3107ac522af8SJesse Brandeburg 	tx_ring_stats = vsi_stat->tx_ring_stats;
3108feddf6c0SMichal Swiatkowski 	vsi_stat->tx_ring_stats =
3109feddf6c0SMichal Swiatkowski 		krealloc_array(vsi_stat->tx_ring_stats, req_txq,
3110feddf6c0SMichal Swiatkowski 			       sizeof(*vsi_stat->tx_ring_stats),
3111feddf6c0SMichal Swiatkowski 			       GFP_KERNEL | __GFP_ZERO);
3112feddf6c0SMichal Swiatkowski 	if (!vsi_stat->tx_ring_stats) {
3113feddf6c0SMichal Swiatkowski 		vsi_stat->tx_ring_stats = tx_ring_stats;
3114feddf6c0SMichal Swiatkowski 		return -ENOMEM;
3115feddf6c0SMichal Swiatkowski 	}
3116feddf6c0SMichal Swiatkowski 
3117feddf6c0SMichal Swiatkowski 	if (req_rxq < prev_rxq) {
3118feddf6c0SMichal Swiatkowski 		for (i = req_rxq; i < prev_rxq; i++) {
3119288ecf49SBenjamin Mikailenko 			if (vsi_stat->rx_ring_stats[i]) {
3120288ecf49SBenjamin Mikailenko 				kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
3121288ecf49SBenjamin Mikailenko 				WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
3122288ecf49SBenjamin Mikailenko 			}
3123288ecf49SBenjamin Mikailenko 		}
3124288ecf49SBenjamin Mikailenko 	}
3125feddf6c0SMichal Swiatkowski 
3126feddf6c0SMichal Swiatkowski 	rx_ring_stats = vsi_stat->rx_ring_stats;
3127feddf6c0SMichal Swiatkowski 	vsi_stat->rx_ring_stats =
3128feddf6c0SMichal Swiatkowski 		krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
3129feddf6c0SMichal Swiatkowski 			       sizeof(*vsi_stat->rx_ring_stats),
3130feddf6c0SMichal Swiatkowski 			       GFP_KERNEL | __GFP_ZERO);
3131feddf6c0SMichal Swiatkowski 	if (!vsi_stat->rx_ring_stats) {
3132feddf6c0SMichal Swiatkowski 		vsi_stat->rx_ring_stats = rx_ring_stats;
3133feddf6c0SMichal Swiatkowski 		return -ENOMEM;
3134feddf6c0SMichal Swiatkowski 	}
3135feddf6c0SMichal Swiatkowski 
3136feddf6c0SMichal Swiatkowski 	return 0;
3137288ecf49SBenjamin Mikailenko }
3138288ecf49SBenjamin Mikailenko 
3139288ecf49SBenjamin Mikailenko /**
3140df0f8479SAnirudh Venkataramanan  * ice_vsi_rebuild - Rebuild VSI after reset
3141df0f8479SAnirudh Venkataramanan  * @vsi: VSI to be rebuilt
31425e509ab2SJacob Keller  * @vsi_flags: flags used for VSI rebuild flow
31435e509ab2SJacob Keller  *
31445e509ab2SJacob Keller  * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
31455e509ab2SJacob Keller  * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
3146df0f8479SAnirudh Venkataramanan  *
3147df0f8479SAnirudh Venkataramanan  * Returns 0 on success and negative value on failure
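 *
 * A minimal caller sketch (illustrative only, not lifted from this driver):
 *
 *	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
 *	if (err)
 *		dev_err(ice_pf_to_dev(vsi->back), "VSI rebuild failed: %d\n", err);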
3148df0f8479SAnirudh Venkataramanan  */
ice_vsi_rebuild(struct ice_vsi * vsi,u32 vsi_flags)31495e509ab2SJacob Keller int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
3150df0f8479SAnirudh Venkataramanan {
31515e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
315261dc79ceSMichal Swiatkowski 	struct ice_coalesce_stored *coalesce;
3153e40a02f0SJesse Brandeburg 	int prev_num_q_vectors;
3154c5a2a4a3SUsha Ketineni 	struct ice_pf *pf;
3155feddf6c0SMichal Swiatkowski 	int ret;
3156df0f8479SAnirudh Venkataramanan 
3157df0f8479SAnirudh Venkataramanan 	if (!vsi)
3158df0f8479SAnirudh Venkataramanan 		return -EINVAL;
3159df0f8479SAnirudh Venkataramanan 
31605e509ab2SJacob Keller 	params = ice_vsi_to_params(vsi);
31615e509ab2SJacob Keller 	params.flags = vsi_flags;
31625e509ab2SJacob Keller 
3163c5a2a4a3SUsha Ketineni 	pf = vsi->back;
31646624e780SMichal Swiatkowski 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3165b03d519dSJacob Keller 		return -EINVAL;
31667eeac889SAkeem G Abodunrin 
3167feddf6c0SMichal Swiatkowski 	ret = ice_vsi_realloc_stat_arrays(vsi);
3168feddf6c0SMichal Swiatkowski 	if (ret)
3169feddf6c0SMichal Swiatkowski 		goto err_vsi_cfg;
3170288ecf49SBenjamin Mikailenko 
31716624e780SMichal Swiatkowski 	ice_vsi_decfg(vsi);
31725e509ab2SJacob Keller 	ret = ice_vsi_cfg_def(vsi, &params);
3173ff7e9321SBrett Creeley 	if (ret)
31746624e780SMichal Swiatkowski 		goto err_vsi_cfg;
31757eeac889SAkeem G Abodunrin 
3176e40a02f0SJesse Brandeburg 	coalesce = kcalloc(vsi->num_q_vectors,
3177e40a02f0SJesse Brandeburg 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
3178e40a02f0SJesse Brandeburg 	if (!coalesce) {
3179e40a02f0SJesse Brandeburg 		ret = -ENOMEM;
		/* unwind the VSI config done above; kfree(NULL) is safe */
		goto err_vsi_cfg_tc_lan;
	}
3180e40a02f0SJesse Brandeburg 
3181e40a02f0SJesse Brandeburg 	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3182e40a02f0SJesse Brandeburg 
31836624e780SMichal Swiatkowski 	ret = ice_vsi_cfg_tc_lan(pf, vsi);
31842ccc1c1cSTony Nguyen 	if (ret) {
31855e509ab2SJacob Keller 		if (vsi_flags & ICE_VSI_FLAG_INIT) {
318687324e74SHenry Tieman 			ret = -EIO;
31876624e780SMichal Swiatkowski 			goto err_vsi_cfg_tc_lan;
3188c4a9c8e7SMichal Swiatkowski 		}
3189c4a9c8e7SMichal Swiatkowski 
31900db66d20SMichal Swiatkowski 		kfree(coalesce);
319187324e74SHenry Tieman 		return ice_schedule_reset(pf, ICE_RESET_PFR);
319287324e74SHenry Tieman 	}
3193288ecf49SBenjamin Mikailenko 
319461dc79ceSMichal Swiatkowski 	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
319561dc79ceSMichal Swiatkowski 	kfree(coalesce);
319661dc79ceSMichal Swiatkowski 
3197df0f8479SAnirudh Venkataramanan 	return 0;
3198df0f8479SAnirudh Venkataramanan 
31996624e780SMichal Swiatkowski err_vsi_cfg_tc_lan:
32006624e780SMichal Swiatkowski 	ice_vsi_decfg(vsi);
320161dc79ceSMichal Swiatkowski 	kfree(coalesce);
3202e40a02f0SJesse Brandeburg err_vsi_cfg:
3203df0f8479SAnirudh Venkataramanan 	return ret;
3204df0f8479SAnirudh Venkataramanan }
3205df0f8479SAnirudh Venkataramanan 
3206df0f8479SAnirudh Venkataramanan /**
32075df7e45dSDave Ertman  * ice_is_reset_in_progress - check for a reset in progress
32082f2da36eSAnirudh Venkataramanan  * @state: PF state field
32095153a18eSAnirudh Venkataramanan  */
ice_is_reset_in_progress(unsigned long * state)32105df7e45dSDave Ertman bool ice_is_reset_in_progress(unsigned long *state)
32115153a18eSAnirudh Venkataramanan {
32127e408e07SAnirudh Venkataramanan 	return test_bit(ICE_RESET_OICR_RECV, state) ||
32137e408e07SAnirudh Venkataramanan 	       test_bit(ICE_PFR_REQ, state) ||
32147e408e07SAnirudh Venkataramanan 	       test_bit(ICE_CORER_REQ, state) ||
32157e408e07SAnirudh Venkataramanan 	       test_bit(ICE_GLOBR_REQ, state);
32165153a18eSAnirudh Venkataramanan }
32177b9ffc76SAnirudh Venkataramanan 
32181c08052eSJacob Keller /**
32191c08052eSJacob Keller  * ice_wait_for_reset - Wait for driver to finish reset and rebuild
32201c08052eSJacob Keller  * @pf: pointer to the PF structure
32211c08052eSJacob Keller  * @timeout: length of time to wait, in jiffies
32221c08052eSJacob Keller  *
32231c08052eSJacob Keller  * Wait (sleep) for a short time until the driver finishes cleaning up from
32241c08052eSJacob Keller  * a device reset. The caller must be able to sleep. Use this to delay
32251c08052eSJacob Keller  * operations that could fail while the driver is cleaning up after a device
32261c08052eSJacob Keller  * reset.
32271c08052eSJacob Keller  *
32281c08052eSJacob Keller  * Returns 0 on success, -EBUSY if the reset is not finished within the
32291c08052eSJacob Keller  * timeout, and -ERESTARTSYS if the thread was interrupted.
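 *
 * Example (illustrative): wait up to ten seconds for the rebuild to finish:
 *
 *	err = ice_wait_for_reset(pf, 10 * HZ);
 *	if (err)
 *		dev_warn(ice_pf_to_dev(pf), "reset not finished: %d\n", err);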
32301c08052eSJacob Keller  */
ice_wait_for_reset(struct ice_pf * pf,unsigned long timeout)32311c08052eSJacob Keller int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
32321c08052eSJacob Keller {
32331c08052eSJacob Keller 	long ret;
32341c08052eSJacob Keller 
32351c08052eSJacob Keller 	ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
32361c08052eSJacob Keller 					       !ice_is_reset_in_progress(pf->state),
32371c08052eSJacob Keller 					       timeout);
32381c08052eSJacob Keller 	if (ret < 0)
32391c08052eSJacob Keller 		return ret;
32401c08052eSJacob Keller 	else if (!ret)
32411c08052eSJacob Keller 		return -EBUSY;
32421c08052eSJacob Keller 	else
32431c08052eSJacob Keller 		return 0;
32441c08052eSJacob Keller }
32451c08052eSJacob Keller 
32467b9ffc76SAnirudh Venkataramanan /**
32477b9ffc76SAnirudh Venkataramanan  * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
32487b9ffc76SAnirudh Venkataramanan  * @vsi: VSI being configured
32497b9ffc76SAnirudh Venkataramanan  * @ctx: the context buffer returned from AQ VSI update command
32507b9ffc76SAnirudh Venkataramanan  */
ice_vsi_update_q_map(struct ice_vsi * vsi,struct ice_vsi_ctx * ctx)32517b9ffc76SAnirudh Venkataramanan static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
32527b9ffc76SAnirudh Venkataramanan {
32537b9ffc76SAnirudh Venkataramanan 	vsi->info.mapping_flags = ctx->info.mapping_flags;
32547b9ffc76SAnirudh Venkataramanan 	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
32557b9ffc76SAnirudh Venkataramanan 	       sizeof(vsi->info.q_mapping));
32567b9ffc76SAnirudh Venkataramanan 	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
32577b9ffc76SAnirudh Venkataramanan 	       sizeof(vsi->info.tc_mapping));
32587b9ffc76SAnirudh Venkataramanan }
32597b9ffc76SAnirudh Venkataramanan 
32607b9ffc76SAnirudh Venkataramanan /**
32610754d65bSKiran Patil  * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
32620754d65bSKiran Patil  * @vsi: the VSI being configured
32630754d65bSKiran Patil  * @ena_tc: TC map to be enabled
32640754d65bSKiran Patil  */
ice_vsi_cfg_netdev_tc(struct ice_vsi * vsi,u8 ena_tc)32650754d65bSKiran Patil void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
32660754d65bSKiran Patil {
32670754d65bSKiran Patil 	struct net_device *netdev = vsi->netdev;
32680754d65bSKiran Patil 	struct ice_pf *pf = vsi->back;
32690754d65bSKiran Patil 	int numtc = vsi->tc_cfg.numtc;
32700754d65bSKiran Patil 	struct ice_dcbx_cfg *dcbcfg;
32710754d65bSKiran Patil 	u8 netdev_tc;
32720754d65bSKiran Patil 	int i;
32730754d65bSKiran Patil 
32740754d65bSKiran Patil 	if (!netdev)
32750754d65bSKiran Patil 		return;
32760754d65bSKiran Patil 
32770754d65bSKiran Patil 	/* CHNL VSI doesn't have its own netdev, hence no netdev_tc */
32780754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL)
32790754d65bSKiran Patil 		return;
32800754d65bSKiran Patil 
32810754d65bSKiran Patil 	if (!ena_tc) {
32820754d65bSKiran Patil 		netdev_reset_tc(netdev);
32830754d65bSKiran Patil 		return;
32840754d65bSKiran Patil 	}
32850754d65bSKiran Patil 
32860754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
32870754d65bSKiran Patil 		numtc = vsi->all_numtc;
32880754d65bSKiran Patil 
32890754d65bSKiran Patil 	if (netdev_set_num_tc(netdev, numtc))
32900754d65bSKiran Patil 		return;
32910754d65bSKiran Patil 
32920754d65bSKiran Patil 	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
32930754d65bSKiran Patil 
32940754d65bSKiran Patil 	ice_for_each_traffic_class(i)
32950754d65bSKiran Patil 		if (vsi->tc_cfg.ena_tc & BIT(i))
32960754d65bSKiran Patil 			netdev_set_tc_queue(netdev,
32970754d65bSKiran Patil 					    vsi->tc_cfg.tc_info[i].netdev_tc,
32980754d65bSKiran Patil 					    vsi->tc_cfg.tc_info[i].qcount_tx,
32990754d65bSKiran Patil 					    vsi->tc_cfg.tc_info[i].qoffset);
33000754d65bSKiran Patil 	/* setup TC queue map for CHNL TCs */
33010754d65bSKiran Patil 	ice_for_each_chnl_tc(i) {
33020754d65bSKiran Patil 		if (!(vsi->all_enatc & BIT(i)))
33030754d65bSKiran Patil 			break;
33040754d65bSKiran Patil 		if (!vsi->mqprio_qopt.qopt.count[i])
33050754d65bSKiran Patil 			break;
33060754d65bSKiran Patil 		netdev_set_tc_queue(netdev, i,
33070754d65bSKiran Patil 				    vsi->mqprio_qopt.qopt.count[i],
33080754d65bSKiran Patil 				    vsi->mqprio_qopt.qopt.offset[i]);
33090754d65bSKiran Patil 	}
33100754d65bSKiran Patil 
33110754d65bSKiran Patil 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
33120754d65bSKiran Patil 		return;
33130754d65bSKiran Patil 
33140754d65bSKiran Patil 	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
33150754d65bSKiran Patil 		u8 ets_tc = dcbcfg->etscfg.prio_table[i];
33160754d65bSKiran Patil 
33170754d65bSKiran Patil 		/* Get the mapped netdev TC# for the UP */
33180754d65bSKiran Patil 		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
33190754d65bSKiran Patil 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
33200754d65bSKiran Patil 	}
33210754d65bSKiran Patil }
33220754d65bSKiran Patil 
33230754d65bSKiran Patil /**
33240754d65bSKiran Patil  * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
33250754d65bSKiran Patil  * @vsi: the VSI being configured
33260754d65bSKiran Patil  * @ctxt: VSI context structure
33270754d65bSKiran Patil  * @ena_tc: number of traffic classes to enable
33280754d65bSKiran Patil  *
33290754d65bSKiran Patil  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
33300754d65bSKiran Patil  */
3331a632b2a4SAnatolii Gerasymenko static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi * vsi,struct ice_vsi_ctx * ctxt,u8 ena_tc)33320754d65bSKiran Patil ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
33330754d65bSKiran Patil 			   u8 ena_tc)
33340754d65bSKiran Patil {
33350754d65bSKiran Patil 	u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
33360754d65bSKiran Patil 	u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
33370754d65bSKiran Patil 	int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3338a509702cSDing Hui 	u16 new_txq, new_rxq;
33390754d65bSKiran Patil 	u8 netdev_tc = 0;
33400754d65bSKiran Patil 	int i;
33410754d65bSKiran Patil 
33420754d65bSKiran Patil 	vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
33430754d65bSKiran Patil 
33440754d65bSKiran Patil 	pow = order_base_2(tc0_qcount);
33450754d65bSKiran Patil 	qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
33460754d65bSKiran Patil 		ICE_AQ_VSI_TC_Q_OFFSET_M) |
33470754d65bSKiran Patil 		((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
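	/* Worked example (illustrative): with tc0_offset = 0 and
	 * tc0_qcount = 4, pow = order_base_2(4) = 2, so qmap encodes a queue
	 * offset of 0 and 2^2 = 4 contiguous queues for TC0.
	 */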
33480754d65bSKiran Patil 
33490754d65bSKiran Patil 	ice_for_each_traffic_class(i) {
33500754d65bSKiran Patil 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
33510754d65bSKiran Patil 			/* TC is not enabled */
33520754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].qoffset = 0;
33530754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
33540754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
33550754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
33560754d65bSKiran Patil 			ctxt->info.tc_mapping[i] = 0;
33570754d65bSKiran Patil 			continue;
33580754d65bSKiran Patil 		}
33590754d65bSKiran Patil 
33600754d65bSKiran Patil 		offset = vsi->mqprio_qopt.qopt.offset[i];
33610754d65bSKiran Patil 		qcount_rx = vsi->mqprio_qopt.qopt.count[i];
33620754d65bSKiran Patil 		qcount_tx = vsi->mqprio_qopt.qopt.count[i];
33630754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].qoffset = offset;
33640754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
33650754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
33660754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
33670754d65bSKiran Patil 	}
33680754d65bSKiran Patil 
33690754d65bSKiran Patil 	if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
33700754d65bSKiran Patil 		ice_for_each_chnl_tc(i) {
33710754d65bSKiran Patil 			if (!(vsi->all_enatc & BIT(i)))
33720754d65bSKiran Patil 				continue;
33730754d65bSKiran Patil 			offset = vsi->mqprio_qopt.qopt.offset[i];
33740754d65bSKiran Patil 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
33750754d65bSKiran Patil 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
33760754d65bSKiran Patil 		}
33770754d65bSKiran Patil 	}
33780754d65bSKiran Patil 
3379a509702cSDing Hui 	new_txq = offset + qcount_tx;
3380a509702cSDing Hui 	if (new_txq > vsi->alloc_txq) {
3381a632b2a4SAnatolii Gerasymenko 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
3382a509702cSDing Hui 			new_txq, vsi->alloc_txq);
3383a632b2a4SAnatolii Gerasymenko 		return -EINVAL;
3384a632b2a4SAnatolii Gerasymenko 	}
3385a632b2a4SAnatolii Gerasymenko 
3386a509702cSDing Hui 	new_rxq = offset + qcount_rx;
3387a509702cSDing Hui 	if (new_rxq > vsi->alloc_rxq) {
3388a632b2a4SAnatolii Gerasymenko 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
3389a509702cSDing Hui 			new_rxq, vsi->alloc_rxq);
3390a632b2a4SAnatolii Gerasymenko 		return -EINVAL;
3391a632b2a4SAnatolii Gerasymenko 	}
33920754d65bSKiran Patil 
3393a509702cSDing Hui 	/* Set actual Tx/Rx queue pairs */
3394a509702cSDing Hui 	vsi->num_txq = new_txq;
3395a509702cSDing Hui 	vsi->num_rxq = new_rxq;
3396a509702cSDing Hui 
33970754d65bSKiran Patil 	/* Setup queue TC[0].qmap for given VSI context */
33980754d65bSKiran Patil 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
33990754d65bSKiran Patil 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
34000754d65bSKiran Patil 	ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
34010754d65bSKiran Patil 
34020754d65bSKiran Patil 	/* Find the queue count available for channel VSIs and their
34030754d65bSKiran Patil 	 * starting offset
34040754d65bSKiran Patil 	 */
34050754d65bSKiran Patil 	if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
34060754d65bSKiran Patil 		vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
34070754d65bSKiran Patil 		vsi->next_base_q = tc0_qcount;
34080754d65bSKiran Patil 	}
34090754d65bSKiran Patil 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n",  vsi->num_txq);
34100754d65bSKiran Patil 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n",  vsi->num_rxq);
34110754d65bSKiran Patil 	dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
34120754d65bSKiran Patil 		vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3413a632b2a4SAnatolii Gerasymenko 
3414a632b2a4SAnatolii Gerasymenko 	return 0;
34150754d65bSKiran Patil }
34160754d65bSKiran Patil 
34170754d65bSKiran Patil /**
34187b9ffc76SAnirudh Venkataramanan  * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
34197b9ffc76SAnirudh Venkataramanan  * @vsi: VSI to be configured
34207b9ffc76SAnirudh Venkataramanan  * @ena_tc: TC bitmap
34217b9ffc76SAnirudh Venkataramanan  *
34227b9ffc76SAnirudh Venkataramanan  * VSI queues expected to be quiesced before calling this function
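 *
 * Illustrative call (not taken from this file), enabling only TC 0:
 *
 *	err = ice_vsi_cfg_tc(vsi, BIT(0));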
34237b9ffc76SAnirudh Venkataramanan  */
ice_vsi_cfg_tc(struct ice_vsi * vsi,u8 ena_tc)34247b9ffc76SAnirudh Venkataramanan int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
34257b9ffc76SAnirudh Venkataramanan {
34267b9ffc76SAnirudh Venkataramanan 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
34277b9ffc76SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
3428a509702cSDing Hui 	struct ice_tc_cfg old_tc_cfg;
34294ee656bbSTony Nguyen 	struct ice_vsi_ctx *ctx;
34304015d11eSBrett Creeley 	struct device *dev;
34317b9ffc76SAnirudh Venkataramanan 	int i, ret = 0;
34327b9ffc76SAnirudh Venkataramanan 	u8 num_tc = 0;
34337b9ffc76SAnirudh Venkataramanan 
34344015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
34350754d65bSKiran Patil 	if (vsi->tc_cfg.ena_tc == ena_tc &&
34360754d65bSKiran Patil 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3437c4a9c8e7SMichal Swiatkowski 		return 0;
34384015d11eSBrett Creeley 
34397b9ffc76SAnirudh Venkataramanan 	ice_for_each_traffic_class(i) {
34407b9ffc76SAnirudh Venkataramanan 		/* build bitmap of enabled TCs */
34417b9ffc76SAnirudh Venkataramanan 		if (ena_tc & BIT(i))
34427b9ffc76SAnirudh Venkataramanan 			num_tc++;
34437b9ffc76SAnirudh Venkataramanan 		/* populate max_txqs per TC */
3444d5a46359SAkeem G Abodunrin 		max_txqs[i] = vsi->alloc_txq;
34450754d65bSKiran Patil 		/* Update max_txqs if this is a CHNL VSI, because alloc_t[r]xq
34460754d65bSKiran Patil 		 * are zero for CHNL VSIs; use num_txq as max_txqs instead
34470754d65bSKiran Patil 		 */
34480754d65bSKiran Patil 		if (vsi->type == ICE_VSI_CHNL &&
34490754d65bSKiran Patil 		    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
34500754d65bSKiran Patil 			max_txqs[i] = vsi->num_txq;
34517b9ffc76SAnirudh Venkataramanan 	}
34527b9ffc76SAnirudh Venkataramanan 
3453a509702cSDing Hui 	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
34547b9ffc76SAnirudh Venkataramanan 	vsi->tc_cfg.ena_tc = ena_tc;
34557b9ffc76SAnirudh Venkataramanan 	vsi->tc_cfg.numtc = num_tc;
34567b9ffc76SAnirudh Venkataramanan 
34579efe35d0STony Nguyen 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
34587b9ffc76SAnirudh Venkataramanan 	if (!ctx)
34597b9ffc76SAnirudh Venkataramanan 		return -ENOMEM;
34607b9ffc76SAnirudh Venkataramanan 
34617b9ffc76SAnirudh Venkataramanan 	ctx->vf_num = 0;
34627b9ffc76SAnirudh Venkataramanan 	ctx->info = vsi->info;
34637b9ffc76SAnirudh Venkataramanan 
34640754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF &&
34650754d65bSKiran Patil 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3466a632b2a4SAnatolii Gerasymenko 		ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
34670754d65bSKiran Patil 	else
3468a632b2a4SAnatolii Gerasymenko 		ret = ice_vsi_setup_q_map(vsi, ctx);
3469a632b2a4SAnatolii Gerasymenko 
3470a509702cSDing Hui 	if (ret) {
3471a509702cSDing Hui 		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3472a632b2a4SAnatolii Gerasymenko 		goto out;
3473a509702cSDing Hui 	}
34747b9ffc76SAnirudh Venkataramanan 
34757b9ffc76SAnirudh Venkataramanan 	/* must indicate which sections of the VSI context are being modified */
34767b9ffc76SAnirudh Venkataramanan 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
34772ccc1c1cSTony Nguyen 	ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
34782ccc1c1cSTony Nguyen 	if (ret) {
34794015d11eSBrett Creeley 		dev_info(dev, "Failed VSI Update\n");
34807b9ffc76SAnirudh Venkataramanan 		goto out;
34817b9ffc76SAnirudh Venkataramanan 	}
34827b9ffc76SAnirudh Venkataramanan 
34830754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF &&
34840754d65bSKiran Patil 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
34852ccc1c1cSTony Nguyen 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
34860754d65bSKiran Patil 	else
34872ccc1c1cSTony Nguyen 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
34880754d65bSKiran Patil 				      vsi->tc_cfg.ena_tc, max_txqs);
34897b9ffc76SAnirudh Venkataramanan 
34902ccc1c1cSTony Nguyen 	if (ret) {
34915f87ec48STony Nguyen 		dev_err(dev, "VSI %d failed TC config, error %d\n",
34922ccc1c1cSTony Nguyen 			vsi->vsi_num, ret);
34937b9ffc76SAnirudh Venkataramanan 		goto out;
34947b9ffc76SAnirudh Venkataramanan 	}
34957b9ffc76SAnirudh Venkataramanan 	ice_vsi_update_q_map(vsi, ctx);
34967b9ffc76SAnirudh Venkataramanan 	vsi->info.valid_sections = 0;
34977b9ffc76SAnirudh Venkataramanan 
34987b9ffc76SAnirudh Venkataramanan 	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
34997b9ffc76SAnirudh Venkataramanan out:
35009efe35d0STony Nguyen 	kfree(ctx);
35017b9ffc76SAnirudh Venkataramanan 	return ret;
35027b9ffc76SAnirudh Venkataramanan }
3503bbb968e8SAkeem G Abodunrin 
3504bbb968e8SAkeem G Abodunrin /**
35052d4238f5SKrzysztof Kazimierczak  * ice_update_ring_stats - Update ring statistics
3506e72bba21SMaciej Fijalkowski  * @stats: stats to be updated
35072d4238f5SKrzysztof Kazimierczak  * @pkts: number of processed packets
35082d4238f5SKrzysztof Kazimierczak  * @bytes: number of processed bytes
35092d4238f5SKrzysztof Kazimierczak  *
35102d4238f5SKrzysztof Kazimierczak  * This function assumes that caller has acquired a u64_stats_sync lock.
35112d4238f5SKrzysztof Kazimierczak  */
ice_update_ring_stats(struct ice_q_stats * stats,u64 pkts,u64 bytes)3512e72bba21SMaciej Fijalkowski static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
35132d4238f5SKrzysztof Kazimierczak {
3514e72bba21SMaciej Fijalkowski 	stats->bytes += bytes;
3515e72bba21SMaciej Fijalkowski 	stats->pkts += pkts;
35162d4238f5SKrzysztof Kazimierczak }
35172d4238f5SKrzysztof Kazimierczak 
35182d4238f5SKrzysztof Kazimierczak /**
35192d4238f5SKrzysztof Kazimierczak  * ice_update_tx_ring_stats - Update Tx ring specific counters
35202d4238f5SKrzysztof Kazimierczak  * @tx_ring: ring to update
35212d4238f5SKrzysztof Kazimierczak  * @pkts: number of processed packets
35222d4238f5SKrzysztof Kazimierczak  * @bytes: number of processed bytes
35232d4238f5SKrzysztof Kazimierczak  */
ice_update_tx_ring_stats(struct ice_tx_ring * tx_ring,u64 pkts,u64 bytes)3524e72bba21SMaciej Fijalkowski void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
35252d4238f5SKrzysztof Kazimierczak {
3526288ecf49SBenjamin Mikailenko 	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
3527288ecf49SBenjamin Mikailenko 	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
3528288ecf49SBenjamin Mikailenko 	u64_stats_update_end(&tx_ring->ring_stats->syncp);
35292d4238f5SKrzysztof Kazimierczak }
35302d4238f5SKrzysztof Kazimierczak 
35312d4238f5SKrzysztof Kazimierczak /**
35322d4238f5SKrzysztof Kazimierczak  * ice_update_rx_ring_stats - Update Rx ring specific counters
35332d4238f5SKrzysztof Kazimierczak  * @rx_ring: ring to update
35342d4238f5SKrzysztof Kazimierczak  * @pkts: number of processed packets
35352d4238f5SKrzysztof Kazimierczak  * @bytes: number of processed bytes
35362d4238f5SKrzysztof Kazimierczak  */
ice_update_rx_ring_stats(struct ice_rx_ring * rx_ring,u64 pkts,u64 bytes)3537e72bba21SMaciej Fijalkowski void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
35382d4238f5SKrzysztof Kazimierczak {
3539288ecf49SBenjamin Mikailenko 	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
3540288ecf49SBenjamin Mikailenko 	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
3541288ecf49SBenjamin Mikailenko 	u64_stats_update_end(&rx_ring->ring_stats->syncp);
35422d4238f5SKrzysztof Kazimierczak }
35432d4238f5SKrzysztof Kazimierczak 
35442d4238f5SKrzysztof Kazimierczak /**
3545fc0f39bcSBrett Creeley  * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3546d7393425SMichal Wilczynski  * @pi: port info of the switch with default VSI
3547fc0f39bcSBrett Creeley  *
3548d7393425SMichal Wilczynski  * Return true if there is a single VSI in the default forwarding VSI list
3549fc0f39bcSBrett Creeley  */
ice_is_dflt_vsi_in_use(struct ice_port_info * pi)3550d7393425SMichal Wilczynski bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3551fc0f39bcSBrett Creeley {
3552d7393425SMichal Wilczynski 	bool exists = false;
3553d7393425SMichal Wilczynski 
3554d7393425SMichal Wilczynski 	ice_check_if_dflt_vsi(pi, 0, &exists);
3555d7393425SMichal Wilczynski 	return exists;
3556fc0f39bcSBrett Creeley }
3557fc0f39bcSBrett Creeley 
3558fc0f39bcSBrett Creeley /**
3559fc0f39bcSBrett Creeley  * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3560fc0f39bcSBrett Creeley  * @vsi: VSI to compare against default forwarding VSI
3561fc0f39bcSBrett Creeley  *
3562fc0f39bcSBrett Creeley  * If this VSI passed in is the default forwarding VSI then return true, else
3563fc0f39bcSBrett Creeley  * return false
3564fc0f39bcSBrett Creeley  */
ice_is_vsi_dflt_vsi(struct ice_vsi * vsi)3565d7393425SMichal Wilczynski bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3566fc0f39bcSBrett Creeley {
3567d7393425SMichal Wilczynski 	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3568fc0f39bcSBrett Creeley }
3569fc0f39bcSBrett Creeley 
3570fc0f39bcSBrett Creeley /**
3571fc0f39bcSBrett Creeley  * ice_set_dflt_vsi - set the default forwarding VSI
3572fc0f39bcSBrett Creeley  * @vsi: VSI getting set as the default forwarding VSI on the switch
3573fc0f39bcSBrett Creeley  *
3574fc0f39bcSBrett Creeley  * If the VSI passed in is already the default VSI and it's enabled just return
3575fc0f39bcSBrett Creeley  * success.
3576fc0f39bcSBrett Creeley  *
3577fc0f39bcSBrett Creeley  * Otherwise try to set the VSI passed in as the switch's default VSI and
3578fc0f39bcSBrett Creeley  * return the result.
3579fc0f39bcSBrett Creeley  */
ice_set_dflt_vsi(struct ice_vsi * vsi)3580d7393425SMichal Wilczynski int ice_set_dflt_vsi(struct ice_vsi *vsi)
3581fc0f39bcSBrett Creeley {
3582fc0f39bcSBrett Creeley 	struct device *dev;
35835518ac2aSTony Nguyen 	int status;
3584fc0f39bcSBrett Creeley 
3585d7393425SMichal Wilczynski 	if (!vsi)
3586fc0f39bcSBrett Creeley 		return -EINVAL;
3587fc0f39bcSBrett Creeley 
3588fc0f39bcSBrett Creeley 	dev = ice_pf_to_dev(vsi->back);
3589fc0f39bcSBrett Creeley 
3590776fe199SMichal Swiatkowski 	if (ice_lag_is_switchdev_running(vsi->back)) {
3591776fe199SMichal Swiatkowski 		dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
3592776fe199SMichal Swiatkowski 			vsi->vsi_num);
3593776fe199SMichal Swiatkowski 		return 0;
3594776fe199SMichal Swiatkowski 	}
3595776fe199SMichal Swiatkowski 
3596fc0f39bcSBrett Creeley 	/* the VSI passed in is already the default VSI */
3597d7393425SMichal Wilczynski 	if (ice_is_vsi_dflt_vsi(vsi)) {
3598fc0f39bcSBrett Creeley 		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3599fc0f39bcSBrett Creeley 			vsi->vsi_num);
3600fc0f39bcSBrett Creeley 		return 0;
3601fc0f39bcSBrett Creeley 	}
3602fc0f39bcSBrett Creeley 
3603d7393425SMichal Wilczynski 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3604fc0f39bcSBrett Creeley 	if (status) {
36055f87ec48STony Nguyen 		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
36065f87ec48STony Nguyen 			vsi->vsi_num, status);
3607c1484691STony Nguyen 		return status;
3608fc0f39bcSBrett Creeley 	}
3609fc0f39bcSBrett Creeley 
3610fc0f39bcSBrett Creeley 	return 0;
3611fc0f39bcSBrett Creeley }
3612fc0f39bcSBrett Creeley 
3613fc0f39bcSBrett Creeley /**
3614fc0f39bcSBrett Creeley  * ice_clear_dflt_vsi - clear the default forwarding VSI
3615d7393425SMichal Wilczynski  * @vsi: VSI to remove from filter list
3616fc0f39bcSBrett Creeley  *
3617fc0f39bcSBrett Creeley  * If the switch has no default VSI or it's not enabled then return error.
3618fc0f39bcSBrett Creeley  *
3619fc0f39bcSBrett Creeley  * Otherwise try to clear the default VSI and return the result.
3620fc0f39bcSBrett Creeley  */
ice_clear_dflt_vsi(struct ice_vsi * vsi)3621d7393425SMichal Wilczynski int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3622fc0f39bcSBrett Creeley {
3623fc0f39bcSBrett Creeley 	struct device *dev;
36245518ac2aSTony Nguyen 	int status;
3625fc0f39bcSBrett Creeley 
3626d7393425SMichal Wilczynski 	if (!vsi)
3627fc0f39bcSBrett Creeley 		return -EINVAL;
3628fc0f39bcSBrett Creeley 
3629d7393425SMichal Wilczynski 	dev = ice_pf_to_dev(vsi->back);
3630fc0f39bcSBrett Creeley 
3631fc0f39bcSBrett Creeley 	/* there is no default VSI configured */
3632d7393425SMichal Wilczynski 	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3633fc0f39bcSBrett Creeley 		return -ENODEV;
3634fc0f39bcSBrett Creeley 
3635d7393425SMichal Wilczynski 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3636fc0f39bcSBrett Creeley 				  ICE_FLTR_RX);
3637fc0f39bcSBrett Creeley 	if (status) {
36385f87ec48STony Nguyen 		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
3639d7393425SMichal Wilczynski 			vsi->vsi_num, status);
3640fc0f39bcSBrett Creeley 		return -EIO;
3641fc0f39bcSBrett Creeley 	}
3642fc0f39bcSBrett Creeley 
3643fc0f39bcSBrett Creeley 	return 0;
3644fc0f39bcSBrett Creeley }
3645d348d517SAnirudh Venkataramanan 
3646d348d517SAnirudh Venkataramanan /**
36474ecc8633SBrett Creeley  * ice_get_link_speed_mbps - get link speed in Mbps
36484ecc8633SBrett Creeley  * @vsi: the VSI whose link speed is being queried
36494ecc8633SBrett Creeley  *
36504ecc8633SBrett Creeley  * Return the current VSI link speed, or 0 if the speed is unknown.
36514ecc8633SBrett Creeley  */
ice_get_link_speed_mbps(struct ice_vsi * vsi)36524ecc8633SBrett Creeley int ice_get_link_speed_mbps(struct ice_vsi *vsi)
36534ecc8633SBrett Creeley {
36541d0e28a9SBrett Creeley 	unsigned int link_speed;
36551d0e28a9SBrett Creeley 
36561d0e28a9SBrett Creeley 	link_speed = vsi->port_info->phy.link_info.link_speed;
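	/* link_speed is a one-hot ICE_AQ_LINK_SPEED_* bitmask, so fls() - 1
	 * yields the bit index that ice_get_link_speed() expects
	 */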
36571d0e28a9SBrett Creeley 
36581d0e28a9SBrett Creeley 	return (int)ice_get_link_speed(fls(link_speed) - 1);
36594ecc8633SBrett Creeley }
36604ecc8633SBrett Creeley 
36614ecc8633SBrett Creeley /**
36624ecc8633SBrett Creeley  * ice_get_link_speed_kbps - get link speed in Kbps
36634ecc8633SBrett Creeley  * @vsi: the VSI whose link speed is being queried
36644ecc8633SBrett Creeley  *
36654ecc8633SBrett Creeley  * Return the current VSI link speed, or 0 if the speed is unknown.
36664ecc8633SBrett Creeley  */
ice_get_link_speed_kbps(struct ice_vsi * vsi)3667fbc7b27aSKiran Patil int ice_get_link_speed_kbps(struct ice_vsi *vsi)
36684ecc8633SBrett Creeley {
36694ecc8633SBrett Creeley 	int speed_mbps;
36704ecc8633SBrett Creeley 
36714ecc8633SBrett Creeley 	speed_mbps = ice_get_link_speed_mbps(vsi);
36724ecc8633SBrett Creeley 
36734ecc8633SBrett Creeley 	return speed_mbps * 1000;
36744ecc8633SBrett Creeley }
36754ecc8633SBrett Creeley 
36764ecc8633SBrett Creeley /**
36774ecc8633SBrett Creeley  * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
36784ecc8633SBrett Creeley  * @vsi: VSI to be configured
36794ecc8633SBrett Creeley  * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
36804ecc8633SBrett Creeley  *
36814ecc8633SBrett Creeley  * If min_tx_rate is specified as 0, the minimum BW limit profile is cleared;
36824ecc8633SBrett Creeley  * otherwise a non-zero value will force a minimum BW limit for the VSI
36834ecc8633SBrett Creeley  * on TC 0.
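 *
 * Illustrative call (not from this file), requesting a 100 Mbps minimum:
 *
 *	err = ice_set_min_bw_limit(vsi, 100000);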
36844ecc8633SBrett Creeley  */
ice_set_min_bw_limit(struct ice_vsi * vsi,u64 min_tx_rate)36854ecc8633SBrett Creeley int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
36864ecc8633SBrett Creeley {
36874ecc8633SBrett Creeley 	struct ice_pf *pf = vsi->back;
36884ecc8633SBrett Creeley 	struct device *dev;
36895518ac2aSTony Nguyen 	int status;
36904ecc8633SBrett Creeley 	int speed;
36914ecc8633SBrett Creeley 
36924ecc8633SBrett Creeley 	dev = ice_pf_to_dev(pf);
36934ecc8633SBrett Creeley 	if (!vsi->port_info) {
36944ecc8633SBrett Creeley 		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
36954ecc8633SBrett Creeley 			vsi->idx, vsi->type);
36964ecc8633SBrett Creeley 		return -EINVAL;
36974ecc8633SBrett Creeley 	}
36984ecc8633SBrett Creeley 
36994ecc8633SBrett Creeley 	speed = ice_get_link_speed_kbps(vsi);
37004ecc8633SBrett Creeley 	if (min_tx_rate > (u64)speed) {
37014ecc8633SBrett Creeley 		dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
37024ecc8633SBrett Creeley 			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
37034ecc8633SBrett Creeley 			speed);
37044ecc8633SBrett Creeley 		return -EINVAL;
37054ecc8633SBrett Creeley 	}
37064ecc8633SBrett Creeley 
37074ecc8633SBrett Creeley 	/* Configure min BW for VSI limit */
37084ecc8633SBrett Creeley 	if (min_tx_rate) {
37094ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
37104ecc8633SBrett Creeley 						   ICE_MIN_BW, min_tx_rate);
37114ecc8633SBrett Creeley 		if (status) {
37124ecc8633SBrett Creeley 			dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
37134ecc8633SBrett Creeley 				min_tx_rate, ice_vsi_type_str(vsi->type),
37144ecc8633SBrett Creeley 				vsi->idx);
3715c1484691STony Nguyen 			return status;
37164ecc8633SBrett Creeley 		}
37174ecc8633SBrett Creeley 
37184ecc8633SBrett Creeley 		dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
37194ecc8633SBrett Creeley 			min_tx_rate, ice_vsi_type_str(vsi->type));
37204ecc8633SBrett Creeley 	} else {
37214ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
37224ecc8633SBrett Creeley 							vsi->idx, 0,
37234ecc8633SBrett Creeley 							ICE_MIN_BW);
37244ecc8633SBrett Creeley 		if (status) {
37254ecc8633SBrett Creeley 			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
37264ecc8633SBrett Creeley 				ice_vsi_type_str(vsi->type), vsi->idx);
3727c1484691STony Nguyen 			return status;
37284ecc8633SBrett Creeley 		}
37294ecc8633SBrett Creeley 
37304ecc8633SBrett Creeley 		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
37314ecc8633SBrett Creeley 			ice_vsi_type_str(vsi->type), vsi->idx);
37324ecc8633SBrett Creeley 	}
37334ecc8633SBrett Creeley 
37344ecc8633SBrett Creeley 	return 0;
37354ecc8633SBrett Creeley }
37364ecc8633SBrett Creeley 
37374ecc8633SBrett Creeley /**
37384ecc8633SBrett Creeley  * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
37394ecc8633SBrett Creeley  * @vsi: VSI to be configured
37404ecc8633SBrett Creeley  * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
37414ecc8633SBrett Creeley  *
37424ecc8633SBrett Creeley  * If max_tx_rate is specified as 0, the maximum BW limit profile is cleared;
37434ecc8633SBrett Creeley  * otherwise a non-zero value will force a maximum BW limit for the VSI
37444ecc8633SBrett Creeley  * on TC 0.
37454ecc8633SBrett Creeley  */
ice_set_max_bw_limit(struct ice_vsi * vsi,u64 max_tx_rate)37464ecc8633SBrett Creeley int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
37474ecc8633SBrett Creeley {
37484ecc8633SBrett Creeley 	struct ice_pf *pf = vsi->back;
37494ecc8633SBrett Creeley 	struct device *dev;
37505518ac2aSTony Nguyen 	int status;
37514ecc8633SBrett Creeley 	int speed;
37524ecc8633SBrett Creeley 
37534ecc8633SBrett Creeley 	dev = ice_pf_to_dev(pf);
37544ecc8633SBrett Creeley 	if (!vsi->port_info) {
37554ecc8633SBrett Creeley 		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
37564ecc8633SBrett Creeley 			vsi->idx, vsi->type);
37574ecc8633SBrett Creeley 		return -EINVAL;
37584ecc8633SBrett Creeley 	}
37594ecc8633SBrett Creeley 
37604ecc8633SBrett Creeley 	speed = ice_get_link_speed_kbps(vsi);
37614ecc8633SBrett Creeley 	if (max_tx_rate > (u64)speed) {
37624ecc8633SBrett Creeley 		dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
37634ecc8633SBrett Creeley 			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
37644ecc8633SBrett Creeley 			speed);
37654ecc8633SBrett Creeley 		return -EINVAL;
37664ecc8633SBrett Creeley 	}
37674ecc8633SBrett Creeley 
37684ecc8633SBrett Creeley 	/* Configure max BW for VSI limit */
37694ecc8633SBrett Creeley 	if (max_tx_rate) {
37704ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
37714ecc8633SBrett Creeley 						   ICE_MAX_BW, max_tx_rate);
37724ecc8633SBrett Creeley 		if (status) {
37734ecc8633SBrett Creeley 			dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
37744ecc8633SBrett Creeley 				max_tx_rate, ice_vsi_type_str(vsi->type),
37754ecc8633SBrett Creeley 				vsi->idx);
3776c1484691STony Nguyen 			return status;
37774ecc8633SBrett Creeley 		}
37784ecc8633SBrett Creeley 
37794ecc8633SBrett Creeley 		dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
37804ecc8633SBrett Creeley 			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
37814ecc8633SBrett Creeley 	} else {
37824ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
37834ecc8633SBrett Creeley 							vsi->idx, 0,
37844ecc8633SBrett Creeley 							ICE_MAX_BW);
37854ecc8633SBrett Creeley 		if (status) {
37864ecc8633SBrett Creeley 			dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
37874ecc8633SBrett Creeley 				ice_vsi_type_str(vsi->type), vsi->idx);
3788c1484691STony Nguyen 			return status;
37894ecc8633SBrett Creeley 		}
37904ecc8633SBrett Creeley 
37914ecc8633SBrett Creeley 		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
37924ecc8633SBrett Creeley 			ice_vsi_type_str(vsi->type), vsi->idx);
37934ecc8633SBrett Creeley 	}
37944ecc8633SBrett Creeley 
37954ecc8633SBrett Creeley 	return 0;
37964ecc8633SBrett Creeley }
37974ecc8633SBrett Creeley 
37984ecc8633SBrett Creeley /**
3799d348d517SAnirudh Venkataramanan  * ice_set_link - turn on/off physical link
3800d348d517SAnirudh Venkataramanan  * @vsi: VSI to modify physical link on
3801d348d517SAnirudh Venkataramanan  * @ena: turn on/off physical link
3802d348d517SAnirudh Venkataramanan  */
ice_set_link(struct ice_vsi * vsi,bool ena)3803d348d517SAnirudh Venkataramanan int ice_set_link(struct ice_vsi *vsi, bool ena)
3804d348d517SAnirudh Venkataramanan {
3805d348d517SAnirudh Venkataramanan 	struct device *dev = ice_pf_to_dev(vsi->back);
3806d348d517SAnirudh Venkataramanan 	struct ice_port_info *pi = vsi->port_info;
3807d348d517SAnirudh Venkataramanan 	struct ice_hw *hw = pi->hw;
38085e24d598STony Nguyen 	int status;
3809d348d517SAnirudh Venkataramanan 
3810d348d517SAnirudh Venkataramanan 	if (vsi->type != ICE_VSI_PF)
3811d348d517SAnirudh Venkataramanan 		return -EINVAL;
3812d348d517SAnirudh Venkataramanan 
3813d348d517SAnirudh Venkataramanan 	status = ice_aq_set_link_restart_an(pi, ena, NULL);
3814d348d517SAnirudh Venkataramanan 
3815d348d517SAnirudh Venkataramanan 	/* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
3816d348d517SAnirudh Venkataramanan 	 * this is not a fatal error, so print a warning message and return
3817d348d517SAnirudh Venkataramanan 	 * a success code. Return an error if FW returns an error code other
3818d348d517SAnirudh Venkataramanan 	 * than ICE_AQ_RC_EMODE
3819d348d517SAnirudh Venkataramanan 	 */
3820d54699e2STony Nguyen 	if (status == -EIO) {
3821d348d517SAnirudh Venkataramanan 		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3822ad24d9ebSJonathan Toppins 			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
38235f87ec48STony Nguyen 				(ena ? "ON" : "OFF"), status,
3824d348d517SAnirudh Venkataramanan 				ice_aq_str(hw->adminq.sq_last_status));
3825d348d517SAnirudh Venkataramanan 	} else if (status) {
38265f87ec48STony Nguyen 		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
38275f87ec48STony Nguyen 			(ena ? "ON" : "OFF"), status,
3828d348d517SAnirudh Venkataramanan 			ice_aq_str(hw->adminq.sq_last_status));
3829c1484691STony Nguyen 		return status;
3830d348d517SAnirudh Venkataramanan 	}
3831d348d517SAnirudh Venkataramanan 
3832d348d517SAnirudh Venkataramanan 	return 0;
3833d348d517SAnirudh Venkataramanan }
383440b24760SAnirudh Venkataramanan 
383540b24760SAnirudh Venkataramanan /**
38363e0b5971SBrett Creeley  * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
38373e0b5971SBrett Creeley  * @vsi: VSI used to add VLAN filters
3838c31af68aSBrett Creeley  *
3839c31af68aSBrett Creeley  * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
3840c31af68aSBrett Creeley  * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
3841c31af68aSBrett Creeley  * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3842c31af68aSBrett Creeley  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3843c31af68aSBrett Creeley  *
3844c31af68aSBrett Creeley  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3845c31af68aSBrett Creeley  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3846c31af68aSBrett Creeley  * traffic in SVM, since the VLAN TPID isn't part of filtering.
3847c31af68aSBrett Creeley  *
3848c31af68aSBrett Creeley  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3849c31af68aSBrett Creeley  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3850c31af68aSBrett Creeley  * part of filtering.
38513e0b5971SBrett Creeley  */
ice_vsi_add_vlan_zero(struct ice_vsi * vsi)38523e0b5971SBrett Creeley int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
38533e0b5971SBrett Creeley {
3854c31af68aSBrett Creeley 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3855fb05ba12SBrett Creeley 	struct ice_vlan vlan;
3856c31af68aSBrett Creeley 	int err;
3857fb05ba12SBrett Creeley 
38582bfefa2dSBrett Creeley 	vlan = ICE_VLAN(0, 0, 0);
3859c31af68aSBrett Creeley 	err = vlan_ops->add_vlan(vsi, &vlan);
3860c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3861c31af68aSBrett Creeley 		return err;
3862c31af68aSBrett Creeley 
3863c31af68aSBrett Creeley 	/* in SVM both VLAN 0 filters are identical */
3864c31af68aSBrett Creeley 	if (!ice_is_dvm_ena(&vsi->back->hw))
3865c31af68aSBrett Creeley 		return 0;
3866c31af68aSBrett Creeley 
3867c31af68aSBrett Creeley 	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3868c31af68aSBrett Creeley 	err = vlan_ops->add_vlan(vsi, &vlan);
3869c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3870c31af68aSBrett Creeley 		return err;
3871c31af68aSBrett Creeley 
3872c31af68aSBrett Creeley 	return 0;
3873c31af68aSBrett Creeley }
3874c31af68aSBrett Creeley 
3875c31af68aSBrett Creeley /**
3876c31af68aSBrett Creeley  * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
3877c31af68aSBrett Creeley  * @vsi: VSI used to add VLAN filters
3878c31af68aSBrett Creeley  *
3879c31af68aSBrett Creeley  * Delete the VLAN 0 filters in the same manner that they were added in
3880c31af68aSBrett Creeley  * ice_vsi_add_vlan_zero.
3881c31af68aSBrett Creeley  */
ice_vsi_del_vlan_zero(struct ice_vsi * vsi)3882c31af68aSBrett Creeley int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3883c31af68aSBrett Creeley {
3884c31af68aSBrett Creeley 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3885c31af68aSBrett Creeley 	struct ice_vlan vlan;
3886c31af68aSBrett Creeley 	int err;
3887c31af68aSBrett Creeley 
3888c31af68aSBrett Creeley 	vlan = ICE_VLAN(0, 0, 0);
3889c31af68aSBrett Creeley 	err = vlan_ops->del_vlan(vsi, &vlan);
3890c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3891c31af68aSBrett Creeley 		return err;
3892c31af68aSBrett Creeley 
3893c31af68aSBrett Creeley 	/* in SVM both VLAN 0 filters are identical */
3894c31af68aSBrett Creeley 	if (!ice_is_dvm_ena(&vsi->back->hw))
3895c31af68aSBrett Creeley 		return 0;
3896c31af68aSBrett Creeley 
3897c31af68aSBrett Creeley 	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3898c31af68aSBrett Creeley 	err = vlan_ops->del_vlan(vsi, &vlan);
3899c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3900c31af68aSBrett Creeley 		return err;
3901c31af68aSBrett Creeley 
3902abddafd4SGrzegorz Siwik 	/* when deleting the last VLAN filter, make sure to disable the VLAN
3903abddafd4SGrzegorz Siwik 	 * promisc mode so the filter isn't left by accident
3904abddafd4SGrzegorz Siwik 	 */
3905abddafd4SGrzegorz Siwik 	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3906abddafd4SGrzegorz Siwik 				    ICE_MCAST_VLAN_PROMISC_BITS, 0);
3907c31af68aSBrett Creeley }
3908c31af68aSBrett Creeley 
3909c31af68aSBrett Creeley /**
3910c31af68aSBrett Creeley  * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
3911c31af68aSBrett Creeley  * @vsi: VSI used to get the VLAN mode
3912c31af68aSBrett Creeley  *
3913c31af68aSBrett Creeley  * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
3914c31af68aSBrett Creeley  * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
3915c31af68aSBrett Creeley  */
ice_vsi_num_zero_vlans(struct ice_vsi * vsi)3916c31af68aSBrett Creeley static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
3917c31af68aSBrett Creeley {
3918c31af68aSBrett Creeley #define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
3919c31af68aSBrett Creeley #define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
3920c31af68aSBrett Creeley 	/* no VLAN 0 filter is created when a port VLAN is active */
3921b03d519dSJacob Keller 	if (vsi->type == ICE_VSI_VF) {
3922b03d519dSJacob Keller 		if (WARN_ON(!vsi->vf))
3923c31af68aSBrett Creeley 			return 0;
3924b03d519dSJacob Keller 
3925b03d519dSJacob Keller 		if (ice_vf_is_port_vlan_ena(vsi->vf))
3926b03d519dSJacob Keller 			return 0;
3927b03d519dSJacob Keller 	}
3928b03d519dSJacob Keller 
3929c31af68aSBrett Creeley 	if (ice_is_dvm_ena(&vsi->back->hw))
3930c31af68aSBrett Creeley 		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
3931c31af68aSBrett Creeley 	else
3932c31af68aSBrett Creeley 		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
3933c31af68aSBrett Creeley }
3934c31af68aSBrett Creeley 
3935c31af68aSBrett Creeley /**
3936c31af68aSBrett Creeley  * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
3937c31af68aSBrett Creeley  * @vsi: VSI used to determine if any non-zero VLANs have been added
3938c31af68aSBrett Creeley  */
3939c31af68aSBrett Creeley bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
3940c31af68aSBrett Creeley {
3941c31af68aSBrett Creeley 	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
3942c31af68aSBrett Creeley }
3943c31af68aSBrett Creeley 
3944c31af68aSBrett Creeley /**
3945c31af68aSBrett Creeley  * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
3946c31af68aSBrett Creeley  * @vsi: VSI used to get the number of non-zero VLANs added
3947c31af68aSBrett Creeley  */
3948c31af68aSBrett Creeley u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
3949c31af68aSBrett Creeley {
3950c31af68aSBrett Creeley 	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
39513e0b5971SBrett Creeley }
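
/* Illustrative sketch only, not part of the driver: vsi->num_vlan counts
 * every VLAN filter on the VSI, including the implicit VLAN 0 filter(s), so
 * callers interested only in user-added VLANs go through the two helpers
 * above. The function below is hypothetical.
 */
static void __maybe_unused ice_example_report_vlans(struct ice_vsi *vsi)
{
	if (ice_vsi_has_non_zero_vlans(vsi))
		dev_dbg(ice_pf_to_dev(vsi->back),
			"%u non-zero VLAN filter(s) on VSI %u\n",
			ice_vsi_num_non_zero_vlans(vsi), vsi->vsi_num);
}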
39523e0b5971SBrett Creeley 
39533e0b5971SBrett Creeley /**
395440b24760SAnirudh Venkataramanan  * ice_is_feature_supported - check whether a feature is supported
395540b24760SAnirudh Venkataramanan  * @pf: pointer to the struct ice_pf instance
395640b24760SAnirudh Venkataramanan  * @f: feature enum to be checked
395740b24760SAnirudh Venkataramanan  *
395840b24760SAnirudh Venkataramanan  * returns true if the feature is supported, false otherwise
395940b24760SAnirudh Venkataramanan  */
396040b24760SAnirudh Venkataramanan bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
396140b24760SAnirudh Venkataramanan {
396240b24760SAnirudh Venkataramanan 	if (f < 0 || f >= ICE_F_MAX)
396340b24760SAnirudh Venkataramanan 		return false;
396440b24760SAnirudh Venkataramanan 
396540b24760SAnirudh Venkataramanan 	return test_bit(f, pf->features);
396640b24760SAnirudh Venkataramanan }
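
/* Illustrative sketch only, not part of the driver: feature bits set by
 * ice_init_feature_support() are meant to gate optional code paths. The
 * hypothetical helper below shows the usual pattern, using the real
 * ICE_F_PTP_EXTTS feature bit as an example.
 */
static bool __maybe_unused ice_example_can_use_extts(struct ice_pf *pf)
{
	if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
		return false;

	/* feature-specific setup would go here */
	return true;
}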
396740b24760SAnirudh Venkataramanan 
396840b24760SAnirudh Venkataramanan /**
396940b24760SAnirudh Venkataramanan  * ice_set_feature_support - mark a feature as supported
397040b24760SAnirudh Venkataramanan  * @pf: pointer to the struct ice_pf instance
397140b24760SAnirudh Venkataramanan  * @f: feature enum to set
397240b24760SAnirudh Venkataramanan  */
3973bb52f42aSDave Ertman void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
397440b24760SAnirudh Venkataramanan {
397540b24760SAnirudh Venkataramanan 	if (f < 0 || f >= ICE_F_MAX)
397640b24760SAnirudh Venkataramanan 		return;
397740b24760SAnirudh Venkataramanan 
397840b24760SAnirudh Venkataramanan 	set_bit(f, pf->features);
397940b24760SAnirudh Venkataramanan }
398040b24760SAnirudh Venkataramanan 
398140b24760SAnirudh Venkataramanan /**
3982325b2064SMaciej Machnikowski  * ice_clear_feature_support - mark a feature as not supported
3983325b2064SMaciej Machnikowski  * @pf: pointer to the struct ice_pf instance
3984325b2064SMaciej Machnikowski  * @f: feature enum to clear
3985325b2064SMaciej Machnikowski  */
3986325b2064SMaciej Machnikowski void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
3987325b2064SMaciej Machnikowski {
3988325b2064SMaciej Machnikowski 	if (f < 0 || f >= ICE_F_MAX)
3989325b2064SMaciej Machnikowski 		return;
3990325b2064SMaciej Machnikowski 
3991325b2064SMaciej Machnikowski 	clear_bit(f, pf->features);
3992325b2064SMaciej Machnikowski }
3993325b2064SMaciej Machnikowski 
3994325b2064SMaciej Machnikowski /**
399540b24760SAnirudh Venkataramanan  * ice_init_feature_support - set up the features supported by this device
399640b24760SAnirudh Venkataramanan  * @pf: pointer to the struct ice_pf instance
399740b24760SAnirudh Venkataramanan  *
399840b24760SAnirudh Venkataramanan  * called during init to set up the supported features
399940b24760SAnirudh Venkataramanan  */
400040b24760SAnirudh Venkataramanan void ice_init_feature_support(struct ice_pf *pf)
400140b24760SAnirudh Venkataramanan {
400240b24760SAnirudh Venkataramanan 	switch (pf->hw.device_id) {
400340b24760SAnirudh Venkataramanan 	case ICE_DEV_ID_E810C_BACKPLANE:
400440b24760SAnirudh Venkataramanan 	case ICE_DEV_ID_E810C_QSFP:
400540b24760SAnirudh Venkataramanan 	case ICE_DEV_ID_E810C_SFP:
400640b24760SAnirudh Venkataramanan 		ice_set_feature_support(pf, ICE_F_DSCP);
4007896a55aaSAnirudh Venkataramanan 		ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
400843113ff7SKarol Kolacinski 		if (ice_is_e810t(&pf->hw)) {
4009325b2064SMaciej Machnikowski 			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
401043113ff7SKarol Kolacinski 			if (ice_gnss_is_gps_present(&pf->hw))
401143113ff7SKarol Kolacinski 				ice_set_feature_support(pf, ICE_F_GNSS);
401243113ff7SKarol Kolacinski 		}
401340b24760SAnirudh Venkataramanan 		break;
401440b24760SAnirudh Venkataramanan 	default:
401540b24760SAnirudh Venkataramanan 		break;
401640b24760SAnirudh Venkataramanan 	}
401740b24760SAnirudh Venkataramanan }
4018ff5411efSMichal Swiatkowski 
4019ff5411efSMichal Swiatkowski /**
4020ff5411efSMichal Swiatkowski  * ice_vsi_update_security - update security block in VSI
4021ff5411efSMichal Swiatkowski  * @vsi: pointer to VSI structure
4022ff5411efSMichal Swiatkowski  * @fill: function pointer to fill ctx
4023ff5411efSMichal Swiatkowski  */
4024ff5411efSMichal Swiatkowski int
4025ff5411efSMichal Swiatkowski ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
4026ff5411efSMichal Swiatkowski {
4027ff5411efSMichal Swiatkowski 	struct ice_vsi_ctx ctx = { 0 };
4028ff5411efSMichal Swiatkowski 
4029ff5411efSMichal Swiatkowski 	ctx.info = vsi->info;
4030ff5411efSMichal Swiatkowski 	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
4031ff5411efSMichal Swiatkowski 	fill(&ctx);
4032ff5411efSMichal Swiatkowski 
4033ff5411efSMichal Swiatkowski 	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4034ff5411efSMichal Swiatkowski 		return -ENODEV;
4035ff5411efSMichal Swiatkowski 
4036ff5411efSMichal Swiatkowski 	vsi->info = ctx.info;
4037ff5411efSMichal Swiatkowski 	return 0;
4038ff5411efSMichal Swiatkowski }
4039ff5411efSMichal Swiatkowski 
4040ff5411efSMichal Swiatkowski /**
4041ff5411efSMichal Swiatkowski  * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
4042ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4043ff5411efSMichal Swiatkowski  */
4044ff5411efSMichal Swiatkowski void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
4045ff5411efSMichal Swiatkowski {
4046ff5411efSMichal Swiatkowski 	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
4047ff5411efSMichal Swiatkowski 			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4048ff5411efSMichal Swiatkowski 				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4049ff5411efSMichal Swiatkowski }
4050ff5411efSMichal Swiatkowski 
4051ff5411efSMichal Swiatkowski /**
4052ff5411efSMichal Swiatkowski  * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
4053ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4054ff5411efSMichal Swiatkowski  */
4055ff5411efSMichal Swiatkowski void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
4056ff5411efSMichal Swiatkowski {
4057ff5411efSMichal Swiatkowski 	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
4058ff5411efSMichal Swiatkowski 			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4059ff5411efSMichal Swiatkowski 				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4060ff5411efSMichal Swiatkowski }
4061ff5411efSMichal Swiatkowski 
4062ff5411efSMichal Swiatkowski /**
4063ff5411efSMichal Swiatkowski  * ice_vsi_ctx_set_allow_override - allow destination override on VSI
4064ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4065ff5411efSMichal Swiatkowski  */
4066ff5411efSMichal Swiatkowski void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
4067ff5411efSMichal Swiatkowski {
4068ff5411efSMichal Swiatkowski 	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4069ff5411efSMichal Swiatkowski }
4070ff5411efSMichal Swiatkowski 
4071ff5411efSMichal Swiatkowski /**
4072ff5411efSMichal Swiatkowski  * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
4073ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4074ff5411efSMichal Swiatkowski  */
4075ff5411efSMichal Swiatkowski void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
4076ff5411efSMichal Swiatkowski {
4077ff5411efSMichal Swiatkowski 	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4078ff5411efSMichal Swiatkowski }
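
/* Illustrative sketch only, not part of the driver: the four context helpers
 * above are intended to be passed as the "fill" callback of
 * ice_vsi_update_security(), which copies the current VSI context, lets the
 * callback adjust the security flags, and then commits the result to
 * firmware. The wrapper below is hypothetical.
 */
static int __maybe_unused
ice_example_set_antispoof(struct ice_vsi *vsi, bool ena)
{
	if (ena)
		return ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);

	return ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
}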
40796c0f4441SWojciech Drewek 
40806c0f4441SWojciech Drewek /**
40816c0f4441SWojciech Drewek  * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
40826c0f4441SWojciech Drewek  * @vsi: pointer to VSI structure
40836c0f4441SWojciech Drewek  * @set: set or unset the bit
40846c0f4441SWojciech Drewek  */
40856c0f4441SWojciech Drewek int
40866c0f4441SWojciech Drewek ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
40876c0f4441SWojciech Drewek {
40886c0f4441SWojciech Drewek 	struct ice_vsi_ctx ctx = {
40896c0f4441SWojciech Drewek 		.info	= vsi->info,
40906c0f4441SWojciech Drewek 	};
40916c0f4441SWojciech Drewek 
40926c0f4441SWojciech Drewek 	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
40936c0f4441SWojciech Drewek 	if (set)
40946c0f4441SWojciech Drewek 		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
40956c0f4441SWojciech Drewek 	else
40966c0f4441SWojciech Drewek 		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
40976c0f4441SWojciech Drewek 
40986c0f4441SWojciech Drewek 	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
40996c0f4441SWojciech Drewek 		return -ENODEV;
41006c0f4441SWojciech Drewek 
41016c0f4441SWojciech Drewek 	vsi->info = ctx.info;
41026c0f4441SWojciech Drewek 	return 0;
41036c0f4441SWojciech Drewek }
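
/* Illustrative sketch only, not part of the driver: a hypothetical caller
 * that temporarily allows local loopback on a VSI and turns it back off once
 * the work that relied on locally looped-back traffic is done.
 */
static int __maybe_unused ice_example_with_local_lb(struct ice_vsi *vsi)
{
	int err;

	err = ice_vsi_update_local_lb(vsi, true);
	if (err)
		return err;

	/* traffic that must be reflected locally would be sent here */

	return ice_vsi_update_local_lb(vsi, false);
}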