145d3d428SAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0
245d3d428SAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */
345d3d428SAnirudh Venkataramanan 
445d3d428SAnirudh Venkataramanan #include "ice.h"
5eff380aaSAnirudh Venkataramanan #include "ice_base.h"
6c90ed40cSTony Nguyen #include "ice_flow.h"
745d3d428SAnirudh Venkataramanan #include "ice_lib.h"
81b8f15b6SMichal Swiatkowski #include "ice_fltr.h"
97b9ffc76SAnirudh Venkataramanan #include "ice_dcb_lib.h"
1048d40025SJacob Keller #include "ice_devlink.h"
11c31af68aSBrett Creeley #include "ice_vsi_vlan_ops.h"
1245d3d428SAnirudh Venkataramanan 
1345d3d428SAnirudh Venkataramanan /**
14964674f1SAnirudh Venkataramanan  * ice_vsi_type_str - maps VSI type enum to string equivalents
156dae8aa0SBruce Allan  * @vsi_type: VSI type enum
16964674f1SAnirudh Venkataramanan  */
176dae8aa0SBruce Allan const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
18964674f1SAnirudh Venkataramanan {
196dae8aa0SBruce Allan 	switch (vsi_type) {
20964674f1SAnirudh Venkataramanan 	case ICE_VSI_PF:
21964674f1SAnirudh Venkataramanan 		return "ICE_VSI_PF";
22964674f1SAnirudh Venkataramanan 	case ICE_VSI_VF:
23964674f1SAnirudh Venkataramanan 		return "ICE_VSI_VF";
24148beb61SHenry Tieman 	case ICE_VSI_CTRL:
25148beb61SHenry Tieman 		return "ICE_VSI_CTRL";
260754d65bSKiran Patil 	case ICE_VSI_CHNL:
270754d65bSKiran Patil 		return "ICE_VSI_CHNL";
28964674f1SAnirudh Venkataramanan 	case ICE_VSI_LB:
29964674f1SAnirudh Venkataramanan 		return "ICE_VSI_LB";
30f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
31f66756e0SGrzegorz Nitka 		return "ICE_VSI_SWITCHDEV_CTRL";
32964674f1SAnirudh Venkataramanan 	default:
33964674f1SAnirudh Venkataramanan 		return "unknown";
34964674f1SAnirudh Venkataramanan 	}
35964674f1SAnirudh Venkataramanan }
36964674f1SAnirudh Venkataramanan 
37964674f1SAnirudh Venkataramanan /**
3813a6233bSBrett Creeley  * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
39d02f734cSMaciej Fijalkowski  * @vsi: the VSI being configured
40d02f734cSMaciej Fijalkowski  * @ena: start or stop the Rx rings
4113a6233bSBrett Creeley  *
4213a6233bSBrett Creeley  * First enable/disable all of the Rx rings, flush any remaining writes, and
4313a6233bSBrett Creeley  * then verify that they have all been enabled/disabled successfully. The
4413a6233bSBrett Creeley  * flush ensures that all of the ring control register writes are posted
4513a6233bSBrett Creeley  * before polling for the hardware state change to complete on each ring.
46d02f734cSMaciej Fijalkowski  */
4713a6233bSBrett Creeley static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
48d02f734cSMaciej Fijalkowski {
4988865fc4SKarol Kolacinski 	int ret = 0;
5088865fc4SKarol Kolacinski 	u16 i;
51d02f734cSMaciej Fijalkowski 
522faf63b6SMaciej Fijalkowski 	ice_for_each_rxq(vsi, i)
5313a6233bSBrett Creeley 		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
5413a6233bSBrett Creeley 
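	/* flush the register writes so they are posted before polling each ring below */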
5513a6233bSBrett Creeley 	ice_flush(&vsi->back->hw);
5613a6233bSBrett Creeley 
572faf63b6SMaciej Fijalkowski 	ice_for_each_rxq(vsi, i) {
5813a6233bSBrett Creeley 		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
59d02f734cSMaciej Fijalkowski 		if (ret)
60d02f734cSMaciej Fijalkowski 			break;
6172adf242SAnirudh Venkataramanan 	}
6272adf242SAnirudh Venkataramanan 
6372adf242SAnirudh Venkataramanan 	return ret;
6472adf242SAnirudh Venkataramanan }
6572adf242SAnirudh Venkataramanan 
6672adf242SAnirudh Venkataramanan /**
6728c2a645SAnirudh Venkataramanan  * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
6828c2a645SAnirudh Venkataramanan  * @vsi: VSI pointer
6928c2a645SAnirudh Venkataramanan  *
7028c2a645SAnirudh Venkataramanan  * On error: returns error code (negative)
7128c2a645SAnirudh Venkataramanan  * On success: returns 0
7228c2a645SAnirudh Venkataramanan  */
73a85a3847SBrett Creeley static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
7428c2a645SAnirudh Venkataramanan {
7528c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
764015d11eSBrett Creeley 	struct device *dev;
774015d11eSBrett Creeley 
784015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
790754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL)
800754d65bSKiran Patil 		return 0;
8128c2a645SAnirudh Venkataramanan 
8228c2a645SAnirudh Venkataramanan 	/* allocate memory for both Tx and Rx ring pointers */
834015d11eSBrett Creeley 	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
84c6dfd690SBruce Allan 				     sizeof(*vsi->tx_rings), GFP_KERNEL);
8528c2a645SAnirudh Venkataramanan 	if (!vsi->tx_rings)
8678b5713aSAnirudh Venkataramanan 		return -ENOMEM;
8728c2a645SAnirudh Venkataramanan 
884015d11eSBrett Creeley 	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
89c6dfd690SBruce Allan 				     sizeof(*vsi->rx_rings), GFP_KERNEL);
9028c2a645SAnirudh Venkataramanan 	if (!vsi->rx_rings)
9178b5713aSAnirudh Venkataramanan 		goto err_rings;
9278b5713aSAnirudh Venkataramanan 
93792b2086SMaciej Fijalkowski 	/* txq_map needs to have enough space to track both Tx (stack) rings
94792b2086SMaciej Fijalkowski 	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
95792b2086SMaciej Fijalkowski 	 * so use num_possible_cpus() since we always want one XDP ring per
96792b2086SMaciej Fijalkowski 	 * CPU, regardless of the queue count the user may have set via
97792b2086SMaciej Fijalkowski 	 * ethtool's set_channels() callback.
98792b2086SMaciej Fijalkowski 	 */
99792b2086SMaciej Fijalkowski 	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
10078b5713aSAnirudh Venkataramanan 				    sizeof(*vsi->txq_map), GFP_KERNEL);
10178b5713aSAnirudh Venkataramanan 
10278b5713aSAnirudh Venkataramanan 	if (!vsi->txq_map)
10378b5713aSAnirudh Venkataramanan 		goto err_txq_map;
10478b5713aSAnirudh Venkataramanan 
1054015d11eSBrett Creeley 	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
10678b5713aSAnirudh Venkataramanan 				    sizeof(*vsi->rxq_map), GFP_KERNEL);
10778b5713aSAnirudh Venkataramanan 	if (!vsi->rxq_map)
10878b5713aSAnirudh Venkataramanan 		goto err_rxq_map;
10978b5713aSAnirudh Venkataramanan 
1100e674aebSAnirudh Venkataramanan 	/* There is no need to allocate q_vectors for a loopback VSI. */
1110e674aebSAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_LB)
1120e674aebSAnirudh Venkataramanan 		return 0;
1130e674aebSAnirudh Venkataramanan 
11428c2a645SAnirudh Venkataramanan 	/* allocate memory for q_vector pointers */
1154015d11eSBrett Creeley 	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
116a85a3847SBrett Creeley 				      sizeof(*vsi->q_vectors), GFP_KERNEL);
11728c2a645SAnirudh Venkataramanan 	if (!vsi->q_vectors)
11828c2a645SAnirudh Venkataramanan 		goto err_vectors;
11928c2a645SAnirudh Venkataramanan 
120e102db78SMaciej Fijalkowski 	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
121e102db78SMaciej Fijalkowski 	if (!vsi->af_xdp_zc_qps)
122e102db78SMaciej Fijalkowski 		goto err_zc_qps;
123e102db78SMaciej Fijalkowski 
12428c2a645SAnirudh Venkataramanan 	return 0;
12528c2a645SAnirudh Venkataramanan 
126e102db78SMaciej Fijalkowski err_zc_qps:
127e102db78SMaciej Fijalkowski 	devm_kfree(dev, vsi->q_vectors);
12828c2a645SAnirudh Venkataramanan err_vectors:
1294015d11eSBrett Creeley 	devm_kfree(dev, vsi->rxq_map);
13078b5713aSAnirudh Venkataramanan err_rxq_map:
1314015d11eSBrett Creeley 	devm_kfree(dev, vsi->txq_map);
13278b5713aSAnirudh Venkataramanan err_txq_map:
1334015d11eSBrett Creeley 	devm_kfree(dev, vsi->rx_rings);
13478b5713aSAnirudh Venkataramanan err_rings:
1354015d11eSBrett Creeley 	devm_kfree(dev, vsi->tx_rings);
13628c2a645SAnirudh Venkataramanan 	return -ENOMEM;
13728c2a645SAnirudh Venkataramanan }
13828c2a645SAnirudh Venkataramanan 
13928c2a645SAnirudh Venkataramanan /**
140ad71b256SBrett Creeley  * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
141ad71b256SBrett Creeley  * @vsi: the VSI being configured
142ad71b256SBrett Creeley  */
143ad71b256SBrett Creeley static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
144ad71b256SBrett Creeley {
145ad71b256SBrett Creeley 	switch (vsi->type) {
146ad71b256SBrett Creeley 	case ICE_VSI_PF:
147f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
148148beb61SHenry Tieman 	case ICE_VSI_CTRL:
1490e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
150a02016deSPaul M Stillwell Jr 		/* a user could change the values of num_[tr]x_desc using
151a02016deSPaul M Stillwell Jr 		 * ethtool -G so we should keep those values instead of
152a02016deSPaul M Stillwell Jr 		 * overwriting them with the defaults.
153a02016deSPaul M Stillwell Jr 		 */
154a02016deSPaul M Stillwell Jr 		if (!vsi->num_rx_desc)
155ad71b256SBrett Creeley 			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
156a02016deSPaul M Stillwell Jr 		if (!vsi->num_tx_desc)
157ad71b256SBrett Creeley 			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
158ad71b256SBrett Creeley 		break;
159ad71b256SBrett Creeley 	default:
16019cce2c6SAnirudh Venkataramanan 		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
161ad71b256SBrett Creeley 			vsi->type);
162ad71b256SBrett Creeley 		break;
163ad71b256SBrett Creeley 	}
164ad71b256SBrett Creeley }
165ad71b256SBrett Creeley 
166ad71b256SBrett Creeley /**
167ad71b256SBrett Creeley  * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
16828c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
17128c2a645SAnirudh Venkataramanan  */
172157acda5SJacob Keller static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
17328c2a645SAnirudh Venkataramanan {
174b03d519dSJacob Keller 	enum ice_vsi_type vsi_type = vsi->type;
17528c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
176157acda5SJacob Keller 	struct ice_vf *vf = vsi->vf;
1775743020dSAkeem G Abodunrin 
178b03d519dSJacob Keller 	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
179b03d519dSJacob Keller 		return;
1805743020dSAkeem G Abodunrin 
181b03d519dSJacob Keller 	switch (vsi_type) {
18228c2a645SAnirudh Venkataramanan 	case ICE_VSI_PF:
18387324e74SHenry Tieman 		if (vsi->req_txq) {
18487324e74SHenry Tieman 			vsi->alloc_txq = vsi->req_txq;
18587324e74SHenry Tieman 			vsi->num_txq = vsi->req_txq;
186b38b7f2bSSalil Mehta 		} else {
187b38b7f2bSSalil Mehta 			vsi->alloc_txq = min3(pf->num_lan_msix,
188b38b7f2bSSalil Mehta 					      ice_get_avail_txq_count(pf),
189b38b7f2bSSalil Mehta 					      (u16)num_online_cpus());
19087324e74SHenry Tieman 		}
1918c243700SAnirudh Venkataramanan 
1928c243700SAnirudh Venkataramanan 		pf->num_lan_tx = vsi->alloc_txq;
1938c243700SAnirudh Venkataramanan 
1948c243700SAnirudh Venkataramanan 		/* only 1 Rx queue unless RSS is enabled */
19587324e74SHenry Tieman 		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
1968c243700SAnirudh Venkataramanan 			vsi->alloc_rxq = 1;
19787324e74SHenry Tieman 		} else {
19887324e74SHenry Tieman 			if (vsi->req_rxq) {
19987324e74SHenry Tieman 				vsi->alloc_rxq = vsi->req_rxq;
20087324e74SHenry Tieman 				vsi->num_rxq = vsi->req_rxq;
201b38b7f2bSSalil Mehta 			} else {
202b38b7f2bSSalil Mehta 				vsi->alloc_rxq = min3(pf->num_lan_msix,
203b38b7f2bSSalil Mehta 						      ice_get_avail_rxq_count(pf),
204b38b7f2bSSalil Mehta 						      (u16)num_online_cpus());
20587324e74SHenry Tieman 			}
20687324e74SHenry Tieman 		}
2078c243700SAnirudh Venkataramanan 
2088c243700SAnirudh Venkataramanan 		pf->num_lan_rx = vsi->alloc_rxq;
2098c243700SAnirudh Venkataramanan 
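		/* need enough vectors to cover the larger of the Tx/Rx queue
		 * counts, capped by the LAN MSI-X vectors available
		 */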
210f3fe97f6SBrett Creeley 		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
211f3fe97f6SBrett Creeley 					   max_t(int, vsi->alloc_rxq,
212f3fe97f6SBrett Creeley 						 vsi->alloc_txq));
21328c2a645SAnirudh Venkataramanan 		break;
214f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
215f66756e0SGrzegorz Nitka 		/* The number of queues for the ctrl VSI is equal to the number of VFs.
216f66756e0SGrzegorz Nitka 		 * Each ring is associated with the corresponding VF_PR netdev.
217f66756e0SGrzegorz Nitka 		 */
218fb916db1SJacob Keller 		vsi->alloc_txq = ice_get_num_vfs(pf);
219fb916db1SJacob Keller 		vsi->alloc_rxq = vsi->alloc_txq;
220f66756e0SGrzegorz Nitka 		vsi->num_q_vectors = 1;
221f66756e0SGrzegorz Nitka 		break;
2228ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
223f0457690SBrett Creeley 		if (vf->num_req_qs)
224f0457690SBrett Creeley 			vf->num_vf_qs = vf->num_req_qs;
2255743020dSAkeem G Abodunrin 		vsi->alloc_txq = vf->num_vf_qs;
2265743020dSAkeem G Abodunrin 		vsi->alloc_rxq = vf->num_vf_qs;
227000773c0SJacob Keller 		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
2288ede0178SAnirudh Venkataramanan 		 * data queue interrupts). Since vsi->num_q_vectors is the
229047e52c0SAnirudh Venkataramanan 		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from
230047e52c0SAnirudh Venkataramanan 		 * the original vector count.
2318ede0178SAnirudh Venkataramanan 		 */
232000773c0SJacob Keller 		vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
2338ede0178SAnirudh Venkataramanan 		break;
234148beb61SHenry Tieman 	case ICE_VSI_CTRL:
235148beb61SHenry Tieman 		vsi->alloc_txq = 1;
236148beb61SHenry Tieman 		vsi->alloc_rxq = 1;
237148beb61SHenry Tieman 		vsi->num_q_vectors = 1;
238148beb61SHenry Tieman 		break;
2390754d65bSKiran Patil 	case ICE_VSI_CHNL:
2400754d65bSKiran Patil 		vsi->alloc_txq = 0;
2410754d65bSKiran Patil 		vsi->alloc_rxq = 0;
2420754d65bSKiran Patil 		break;
2430e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
2440e674aebSAnirudh Venkataramanan 		vsi->alloc_txq = 1;
2450e674aebSAnirudh Venkataramanan 		vsi->alloc_rxq = 1;
2460e674aebSAnirudh Venkataramanan 		break;
24728c2a645SAnirudh Venkataramanan 	default:
248b03d519dSJacob Keller 		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
24928c2a645SAnirudh Venkataramanan 		break;
25028c2a645SAnirudh Venkataramanan 	}
251ad71b256SBrett Creeley 
252ad71b256SBrett Creeley 	ice_vsi_set_num_desc(vsi);
25328c2a645SAnirudh Venkataramanan }
25428c2a645SAnirudh Venkataramanan 
25528c2a645SAnirudh Venkataramanan /**
25628c2a645SAnirudh Venkataramanan  * ice_get_free_slot - get the next non-NULL location index in array
25728c2a645SAnirudh Venkataramanan  * @array: array to search
25828c2a645SAnirudh Venkataramanan  * @size: size of the array
25928c2a645SAnirudh Venkataramanan  * @curr: last known occupied index to be used as a search hint
26028c2a645SAnirudh Venkataramanan  *
26128c2a645SAnirudh Venkataramanan  * void * is being used to keep the functionality generic. This lets us use this
26228c2a645SAnirudh Venkataramanan  * function on any array of pointers.
26328c2a645SAnirudh Venkataramanan  */
26437bb8390SAnirudh Venkataramanan static int ice_get_free_slot(void *array, int size, int curr)
26528c2a645SAnirudh Venkataramanan {
26628c2a645SAnirudh Venkataramanan 	int **tmp_array = (int **)array;
26728c2a645SAnirudh Venkataramanan 	int next;
26828c2a645SAnirudh Venkataramanan 
26928c2a645SAnirudh Venkataramanan 	if (curr < (size - 1) && !tmp_array[curr + 1]) {
27028c2a645SAnirudh Venkataramanan 		next = curr + 1;
27128c2a645SAnirudh Venkataramanan 	} else {
27228c2a645SAnirudh Venkataramanan 		int i = 0;
27328c2a645SAnirudh Venkataramanan 
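		/* hint was not free; scan from the start for the first NULL entry */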
27428c2a645SAnirudh Venkataramanan 		while ((i < size) && (tmp_array[i]))
27528c2a645SAnirudh Venkataramanan 			i++;
27628c2a645SAnirudh Venkataramanan 		if (i == size)
27728c2a645SAnirudh Venkataramanan 			next = ICE_NO_VSI;
27828c2a645SAnirudh Venkataramanan 		else
27928c2a645SAnirudh Venkataramanan 			next = i;
28028c2a645SAnirudh Venkataramanan 	}
28128c2a645SAnirudh Venkataramanan 	return next;
28228c2a645SAnirudh Venkataramanan }
28328c2a645SAnirudh Venkataramanan 
28428c2a645SAnirudh Venkataramanan /**
285227bf450SMichal Swiatkowski  * ice_vsi_delete_from_hw - delete a VSI from the switch
2865153a18eSAnirudh Venkataramanan  * @vsi: pointer to VSI being removed
2875153a18eSAnirudh Venkataramanan  */
288227bf450SMichal Swiatkowski static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
2895153a18eSAnirudh Venkataramanan {
2905153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
291198a666aSBruce Allan 	struct ice_vsi_ctx *ctxt;
2925e24d598STony Nguyen 	int status;
2935153a18eSAnirudh Venkataramanan 
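	/* remove all switch filters associated with this VSI before freeing it in FW */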
2947d46c0e6SMichal Swiatkowski 	ice_fltr_remove_all(vsi);
2959efe35d0STony Nguyen 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
296198a666aSBruce Allan 	if (!ctxt)
297198a666aSBruce Allan 		return;
298198a666aSBruce Allan 
2998ede0178SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_VF)
300b03d519dSJacob Keller 		ctxt->vf_num = vsi->vf->vf_id;
301198a666aSBruce Allan 	ctxt->vsi_num = vsi->vsi_num;
3025153a18eSAnirudh Venkataramanan 
303198a666aSBruce Allan 	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
3045153a18eSAnirudh Venkataramanan 
305198a666aSBruce Allan 	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
3065153a18eSAnirudh Venkataramanan 	if (status)
3075f87ec48STony Nguyen 		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
3085f87ec48STony Nguyen 			vsi->vsi_num, status);
309198a666aSBruce Allan 
3109efe35d0STony Nguyen 	kfree(ctxt);
3115153a18eSAnirudh Venkataramanan }
3125153a18eSAnirudh Venkataramanan 
3135153a18eSAnirudh Venkataramanan /**
314a85a3847SBrett Creeley  * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
31507309a0eSAnirudh Venkataramanan  * @vsi: pointer to VSI being cleared
31607309a0eSAnirudh Venkataramanan  */
317a85a3847SBrett Creeley static void ice_vsi_free_arrays(struct ice_vsi *vsi)
31807309a0eSAnirudh Venkataramanan {
31907309a0eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
3204015d11eSBrett Creeley 	struct device *dev;
3214015d11eSBrett Creeley 
3224015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
32307309a0eSAnirudh Venkataramanan 
324e102db78SMaciej Fijalkowski 	bitmap_free(vsi->af_xdp_zc_qps);
325e102db78SMaciej Fijalkowski 	vsi->af_xdp_zc_qps = NULL;
32607309a0eSAnirudh Venkataramanan 	/* free the ring and vector containers */
3274015d11eSBrett Creeley 	devm_kfree(dev, vsi->q_vectors);
32807309a0eSAnirudh Venkataramanan 	vsi->q_vectors = NULL;
3294015d11eSBrett Creeley 	devm_kfree(dev, vsi->tx_rings);
33007309a0eSAnirudh Venkataramanan 	vsi->tx_rings = NULL;
3314015d11eSBrett Creeley 	devm_kfree(dev, vsi->rx_rings);
33207309a0eSAnirudh Venkataramanan 	vsi->rx_rings = NULL;
3334015d11eSBrett Creeley 	devm_kfree(dev, vsi->txq_map);
33478b5713aSAnirudh Venkataramanan 	vsi->txq_map = NULL;
3354015d11eSBrett Creeley 	devm_kfree(dev, vsi->rxq_map);
33678b5713aSAnirudh Venkataramanan 	vsi->rxq_map = NULL;
33778b5713aSAnirudh Venkataramanan }
33807309a0eSAnirudh Venkataramanan 
33907309a0eSAnirudh Venkataramanan /**
3406624e780SMichal Swiatkowski  * ice_vsi_free_stats - Free the ring statistics structures
3416624e780SMichal Swiatkowski  * @vsi: VSI pointer
3426624e780SMichal Swiatkowski  */
3436624e780SMichal Swiatkowski static void ice_vsi_free_stats(struct ice_vsi *vsi)
3446624e780SMichal Swiatkowski {
3456624e780SMichal Swiatkowski 	struct ice_vsi_stats *vsi_stat;
3466624e780SMichal Swiatkowski 	struct ice_pf *pf = vsi->back;
3476624e780SMichal Swiatkowski 	int i;
3486624e780SMichal Swiatkowski 
3496624e780SMichal Swiatkowski 	if (vsi->type == ICE_VSI_CHNL)
3506624e780SMichal Swiatkowski 		return;
3516624e780SMichal Swiatkowski 	if (!pf->vsi_stats)
3526624e780SMichal Swiatkowski 		return;
3536624e780SMichal Swiatkowski 
3546624e780SMichal Swiatkowski 	vsi_stat = pf->vsi_stats[vsi->idx];
3556624e780SMichal Swiatkowski 	if (!vsi_stat)
3566624e780SMichal Swiatkowski 		return;
3576624e780SMichal Swiatkowski 
3586624e780SMichal Swiatkowski 	ice_for_each_alloc_txq(vsi, i) {
3596624e780SMichal Swiatkowski 		if (vsi_stat->tx_ring_stats[i]) {
3606624e780SMichal Swiatkowski 			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
3616624e780SMichal Swiatkowski 			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
3626624e780SMichal Swiatkowski 		}
3636624e780SMichal Swiatkowski 	}
3646624e780SMichal Swiatkowski 
3656624e780SMichal Swiatkowski 	ice_for_each_alloc_rxq(vsi, i) {
3666624e780SMichal Swiatkowski 		if (vsi_stat->rx_ring_stats[i]) {
3676624e780SMichal Swiatkowski 			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
3686624e780SMichal Swiatkowski 			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
3696624e780SMichal Swiatkowski 		}
3706624e780SMichal Swiatkowski 	}
3716624e780SMichal Swiatkowski 
3726624e780SMichal Swiatkowski 	kfree(vsi_stat->tx_ring_stats);
3736624e780SMichal Swiatkowski 	kfree(vsi_stat->rx_ring_stats);
3746624e780SMichal Swiatkowski 	kfree(vsi_stat);
3756624e780SMichal Swiatkowski 	pf->vsi_stats[vsi->idx] = NULL;
3766624e780SMichal Swiatkowski }
3776624e780SMichal Swiatkowski 
3786624e780SMichal Swiatkowski /**
3796624e780SMichal Swiatkowski  * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
3806624e780SMichal Swiatkowski  * @vsi: VSI which is having stats allocated
3816624e780SMichal Swiatkowski  */
3826624e780SMichal Swiatkowski static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
3836624e780SMichal Swiatkowski {
3846624e780SMichal Swiatkowski 	struct ice_ring_stats **tx_ring_stats;
3856624e780SMichal Swiatkowski 	struct ice_ring_stats **rx_ring_stats;
3866624e780SMichal Swiatkowski 	struct ice_vsi_stats *vsi_stats;
3876624e780SMichal Swiatkowski 	struct ice_pf *pf = vsi->back;
3886624e780SMichal Swiatkowski 	u16 i;
3896624e780SMichal Swiatkowski 
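	/* per-ring stats live in pf->vsi_stats so they survive VSI rebuild;
	 * only allocate entries that do not already exist
	 */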
3906624e780SMichal Swiatkowski 	vsi_stats = pf->vsi_stats[vsi->idx];
3916624e780SMichal Swiatkowski 	tx_ring_stats = vsi_stats->tx_ring_stats;
3926624e780SMichal Swiatkowski 	rx_ring_stats = vsi_stats->rx_ring_stats;
3936624e780SMichal Swiatkowski 
3946624e780SMichal Swiatkowski 	/* Allocate Tx ring stats */
3956624e780SMichal Swiatkowski 	ice_for_each_alloc_txq(vsi, i) {
3966624e780SMichal Swiatkowski 		struct ice_ring_stats *ring_stats;
3976624e780SMichal Swiatkowski 		struct ice_tx_ring *ring;
3986624e780SMichal Swiatkowski 
3996624e780SMichal Swiatkowski 		ring = vsi->tx_rings[i];
4006624e780SMichal Swiatkowski 		ring_stats = tx_ring_stats[i];
4016624e780SMichal Swiatkowski 
4026624e780SMichal Swiatkowski 		if (!ring_stats) {
4036624e780SMichal Swiatkowski 			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
4046624e780SMichal Swiatkowski 			if (!ring_stats)
4056624e780SMichal Swiatkowski 				goto err_out;
4066624e780SMichal Swiatkowski 
4076624e780SMichal Swiatkowski 			WRITE_ONCE(tx_ring_stats[i], ring_stats);
4086624e780SMichal Swiatkowski 		}
4096624e780SMichal Swiatkowski 
4106624e780SMichal Swiatkowski 		ring->ring_stats = ring_stats;
4116624e780SMichal Swiatkowski 	}
4126624e780SMichal Swiatkowski 
4136624e780SMichal Swiatkowski 	/* Allocate Rx ring stats */
4146624e780SMichal Swiatkowski 	ice_for_each_alloc_rxq(vsi, i) {
4156624e780SMichal Swiatkowski 		struct ice_ring_stats *ring_stats;
4166624e780SMichal Swiatkowski 		struct ice_rx_ring *ring;
4176624e780SMichal Swiatkowski 
4186624e780SMichal Swiatkowski 		ring = vsi->rx_rings[i];
4196624e780SMichal Swiatkowski 		ring_stats = rx_ring_stats[i];
4206624e780SMichal Swiatkowski 
4216624e780SMichal Swiatkowski 		if (!ring_stats) {
4226624e780SMichal Swiatkowski 			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
4236624e780SMichal Swiatkowski 			if (!ring_stats)
4246624e780SMichal Swiatkowski 				goto err_out;
4256624e780SMichal Swiatkowski 
4266624e780SMichal Swiatkowski 			WRITE_ONCE(rx_ring_stats[i], ring_stats);
4276624e780SMichal Swiatkowski 		}
4286624e780SMichal Swiatkowski 
4296624e780SMichal Swiatkowski 		ring->ring_stats = ring_stats;
4306624e780SMichal Swiatkowski 	}
4316624e780SMichal Swiatkowski 
4326624e780SMichal Swiatkowski 	return 0;
4336624e780SMichal Swiatkowski 
4346624e780SMichal Swiatkowski err_out:
4356624e780SMichal Swiatkowski 	ice_vsi_free_stats(vsi);
4366624e780SMichal Swiatkowski 	return -ENOMEM;
4376624e780SMichal Swiatkowski }
4386624e780SMichal Swiatkowski 
4396624e780SMichal Swiatkowski /**
4400db66d20SMichal Swiatkowski  * ice_vsi_free - clean up and deallocate the provided VSI
44107309a0eSAnirudh Venkataramanan  * @vsi: pointer to VSI being cleared
44207309a0eSAnirudh Venkataramanan  *
44307309a0eSAnirudh Venkataramanan  * This deallocates the VSI's queue resources, removes it from the PF's
44407309a0eSAnirudh Venkataramanan  * VSI array if necessary, and deallocates the VSI
44507309a0eSAnirudh Venkataramanan  */
446227bf450SMichal Swiatkowski static void ice_vsi_free(struct ice_vsi *vsi)
44707309a0eSAnirudh Venkataramanan {
44807309a0eSAnirudh Venkataramanan 	struct ice_pf *pf = NULL;
4494015d11eSBrett Creeley 	struct device *dev;
45007309a0eSAnirudh Venkataramanan 
451227bf450SMichal Swiatkowski 	if (!vsi || !vsi->back)
452227bf450SMichal Swiatkowski 		return;
45307309a0eSAnirudh Venkataramanan 
45407309a0eSAnirudh Venkataramanan 	pf = vsi->back;
4554015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
45607309a0eSAnirudh Venkataramanan 
45707309a0eSAnirudh Venkataramanan 	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
4584015d11eSBrett Creeley 		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
459227bf450SMichal Swiatkowski 		return;
46007309a0eSAnirudh Venkataramanan 	}
46107309a0eSAnirudh Venkataramanan 
46207309a0eSAnirudh Venkataramanan 	mutex_lock(&pf->sw_mutex);
46307309a0eSAnirudh Venkataramanan 	/* updates the PF for this cleared VSI */
46407309a0eSAnirudh Venkataramanan 
46507309a0eSAnirudh Venkataramanan 	pf->vsi[vsi->idx] = NULL;
466da62c5ffSQi Zhang 	pf->next_vsi = vsi->idx;
46707309a0eSAnirudh Venkataramanan 
4686624e780SMichal Swiatkowski 	ice_vsi_free_stats(vsi);
469a85a3847SBrett Creeley 	ice_vsi_free_arrays(vsi);
47007309a0eSAnirudh Venkataramanan 	mutex_unlock(&pf->sw_mutex);
4714015d11eSBrett Creeley 	devm_kfree(dev, vsi);
472227bf450SMichal Swiatkowski }
47307309a0eSAnirudh Venkataramanan 
474227bf450SMichal Swiatkowski void ice_vsi_delete(struct ice_vsi *vsi)
475227bf450SMichal Swiatkowski {
476227bf450SMichal Swiatkowski 	ice_vsi_delete_from_hw(vsi);
477227bf450SMichal Swiatkowski 	ice_vsi_free(vsi);
47807309a0eSAnirudh Venkataramanan }
47907309a0eSAnirudh Venkataramanan 
48007309a0eSAnirudh Venkataramanan /**
481148beb61SHenry Tieman  * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
482148beb61SHenry Tieman  * @irq: interrupt number
483148beb61SHenry Tieman  * @data: pointer to a q_vector
484148beb61SHenry Tieman  */
485148beb61SHenry Tieman static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
486148beb61SHenry Tieman {
487148beb61SHenry Tieman 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
488148beb61SHenry Tieman 
489e72bba21SMaciej Fijalkowski 	if (!q_vector->tx.tx_ring)
490148beb61SHenry Tieman 		return IRQ_HANDLED;
491148beb61SHenry Tieman 
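	/* clean Flow Director Rx and Tx descriptors directly; this vector
	 * does not schedule NAPI
	 */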
492148beb61SHenry Tieman #define FDIR_RX_DESC_CLEAN_BUDGET 64
493e72bba21SMaciej Fijalkowski 	ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
494e72bba21SMaciej Fijalkowski 	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
495148beb61SHenry Tieman 
496148beb61SHenry Tieman 	return IRQ_HANDLED;
497148beb61SHenry Tieman }
498148beb61SHenry Tieman 
499148beb61SHenry Tieman /**
5005153a18eSAnirudh Venkataramanan  * ice_msix_clean_rings - MSIX mode Interrupt Handler
5015153a18eSAnirudh Venkataramanan  * @irq: interrupt number
5025153a18eSAnirudh Venkataramanan  * @data: pointer to a q_vector
5035153a18eSAnirudh Venkataramanan  */
504f3aaaaaaSAnirudh Venkataramanan static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
5055153a18eSAnirudh Venkataramanan {
5065153a18eSAnirudh Venkataramanan 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
5075153a18eSAnirudh Venkataramanan 
508e72bba21SMaciej Fijalkowski 	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
5095153a18eSAnirudh Venkataramanan 		return IRQ_HANDLED;
5105153a18eSAnirudh Venkataramanan 
511cdf1f1f1SJacob Keller 	q_vector->total_events++;
512cdf1f1f1SJacob Keller 
5135153a18eSAnirudh Venkataramanan 	napi_schedule(&q_vector->napi);
5145153a18eSAnirudh Venkataramanan 
5155153a18eSAnirudh Venkataramanan 	return IRQ_HANDLED;
5165153a18eSAnirudh Venkataramanan }
5175153a18eSAnirudh Venkataramanan 
518f66756e0SGrzegorz Nitka static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
519f66756e0SGrzegorz Nitka {
520f66756e0SGrzegorz Nitka 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
521f66756e0SGrzegorz Nitka 	struct ice_pf *pf = q_vector->vsi->back;
522c4c2c7dbSJacob Keller 	struct ice_vf *vf;
523c4c2c7dbSJacob Keller 	unsigned int bkt;
524f66756e0SGrzegorz Nitka 
525e72bba21SMaciej Fijalkowski 	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
526f66756e0SGrzegorz Nitka 		return IRQ_HANDLED;
527f66756e0SGrzegorz Nitka 
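	/* one eswitch vector services all VF port representors; schedule
	 * each representor's NAPI
	 */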
5283d5985a1SJacob Keller 	rcu_read_lock();
5293d5985a1SJacob Keller 	ice_for_each_vf_rcu(pf, bkt, vf)
530c4c2c7dbSJacob Keller 		napi_schedule(&vf->repr->q_vector->napi);
5313d5985a1SJacob Keller 	rcu_read_unlock();
532f66756e0SGrzegorz Nitka 
533f66756e0SGrzegorz Nitka 	return IRQ_HANDLED;
534f66756e0SGrzegorz Nitka }
535f66756e0SGrzegorz Nitka 
5365153a18eSAnirudh Venkataramanan /**
537288ecf49SBenjamin Mikailenko  * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
538288ecf49SBenjamin Mikailenko  * @vsi: VSI pointer
539288ecf49SBenjamin Mikailenko  */
540288ecf49SBenjamin Mikailenko static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
541288ecf49SBenjamin Mikailenko {
542288ecf49SBenjamin Mikailenko 	struct ice_vsi_stats *vsi_stat;
543288ecf49SBenjamin Mikailenko 	struct ice_pf *pf = vsi->back;
544288ecf49SBenjamin Mikailenko 
545288ecf49SBenjamin Mikailenko 	if (vsi->type == ICE_VSI_CHNL)
546288ecf49SBenjamin Mikailenko 		return 0;
547288ecf49SBenjamin Mikailenko 	if (!pf->vsi_stats)
548288ecf49SBenjamin Mikailenko 		return -ENOENT;
549288ecf49SBenjamin Mikailenko 
5506624e780SMichal Swiatkowski 	/* realloc will happen in the rebuild path */
5516624e780SMichal Swiatkowski 	if (pf->vsi_stats[vsi->idx])
5526624e780SMichal Swiatkowski 		return 0;
5536624e780SMichal Swiatkowski 
554288ecf49SBenjamin Mikailenko 	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
555288ecf49SBenjamin Mikailenko 	if (!vsi_stat)
556288ecf49SBenjamin Mikailenko 		return -ENOMEM;
557288ecf49SBenjamin Mikailenko 
558288ecf49SBenjamin Mikailenko 	vsi_stat->tx_ring_stats =
559288ecf49SBenjamin Mikailenko 		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
560288ecf49SBenjamin Mikailenko 			GFP_KERNEL);
561288ecf49SBenjamin Mikailenko 	if (!vsi_stat->tx_ring_stats)
562288ecf49SBenjamin Mikailenko 		goto err_alloc_tx;
563288ecf49SBenjamin Mikailenko 
564288ecf49SBenjamin Mikailenko 	vsi_stat->rx_ring_stats =
565288ecf49SBenjamin Mikailenko 		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
566288ecf49SBenjamin Mikailenko 			GFP_KERNEL);
567288ecf49SBenjamin Mikailenko 	if (!vsi_stat->rx_ring_stats)
568288ecf49SBenjamin Mikailenko 		goto err_alloc_rx;
569288ecf49SBenjamin Mikailenko 
570288ecf49SBenjamin Mikailenko 	pf->vsi_stats[vsi->idx] = vsi_stat;
571288ecf49SBenjamin Mikailenko 
572288ecf49SBenjamin Mikailenko 	return 0;
573288ecf49SBenjamin Mikailenko 
574288ecf49SBenjamin Mikailenko err_alloc_rx:
575288ecf49SBenjamin Mikailenko 	kfree(vsi_stat->rx_ring_stats);
576288ecf49SBenjamin Mikailenko err_alloc_tx:
577288ecf49SBenjamin Mikailenko 	kfree(vsi_stat->tx_ring_stats);
578288ecf49SBenjamin Mikailenko 	kfree(vsi_stat);
579288ecf49SBenjamin Mikailenko 	pf->vsi_stats[vsi->idx] = NULL;
580288ecf49SBenjamin Mikailenko 	return -ENOMEM;
581288ecf49SBenjamin Mikailenko }
582288ecf49SBenjamin Mikailenko 
583288ecf49SBenjamin Mikailenko /**
5846624e780SMichal Swiatkowski  * ice_vsi_alloc_def - set default values for already allocated VSI
5856624e780SMichal Swiatkowski  * @vsi: ptr to VSI
5866624e780SMichal Swiatkowski  * @ch: ptr to channel
5876624e780SMichal Swiatkowski  */
5886624e780SMichal Swiatkowski static int
589157acda5SJacob Keller ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
5906624e780SMichal Swiatkowski {
5916624e780SMichal Swiatkowski 	if (vsi->type != ICE_VSI_CHNL) {
592157acda5SJacob Keller 		ice_vsi_set_num_qs(vsi);
5936624e780SMichal Swiatkowski 		if (ice_vsi_alloc_arrays(vsi))
5946624e780SMichal Swiatkowski 			return -ENOMEM;
5956624e780SMichal Swiatkowski 	}
5966624e780SMichal Swiatkowski 
5976624e780SMichal Swiatkowski 	switch (vsi->type) {
5986624e780SMichal Swiatkowski 	case ICE_VSI_SWITCHDEV_CTRL:
5996624e780SMichal Swiatkowski 		/* Setup eswitch MSIX irq handler for VSI */
6006624e780SMichal Swiatkowski 		vsi->irq_handler = ice_eswitch_msix_clean_rings;
6016624e780SMichal Swiatkowski 		break;
6026624e780SMichal Swiatkowski 	case ICE_VSI_PF:
6036624e780SMichal Swiatkowski 		/* Setup default MSIX irq handler for VSI */
6046624e780SMichal Swiatkowski 		vsi->irq_handler = ice_msix_clean_rings;
6056624e780SMichal Swiatkowski 		break;
6066624e780SMichal Swiatkowski 	case ICE_VSI_CTRL:
6076624e780SMichal Swiatkowski 		/* Setup ctrl VSI MSIX irq handler */
6086624e780SMichal Swiatkowski 		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
6096624e780SMichal Swiatkowski 		break;
6106624e780SMichal Swiatkowski 	case ICE_VSI_CHNL:
6116624e780SMichal Swiatkowski 		if (!ch)
6126624e780SMichal Swiatkowski 			return -EINVAL;
6136624e780SMichal Swiatkowski 
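		/* channel (ADQ) VSIs inherit their queue layout from the channel config */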
6146624e780SMichal Swiatkowski 		vsi->num_rxq = ch->num_rxq;
6156624e780SMichal Swiatkowski 		vsi->num_txq = ch->num_txq;
6166624e780SMichal Swiatkowski 		vsi->next_base_q = ch->base_q;
6176624e780SMichal Swiatkowski 		break;
6186624e780SMichal Swiatkowski 	case ICE_VSI_VF:
6198173c2f9SMichal Swiatkowski 	case ICE_VSI_LB:
6206624e780SMichal Swiatkowski 		break;
6216624e780SMichal Swiatkowski 	default:
6226624e780SMichal Swiatkowski 		ice_vsi_free_arrays(vsi);
6236624e780SMichal Swiatkowski 		return -EINVAL;
6246624e780SMichal Swiatkowski 	}
6256624e780SMichal Swiatkowski 
6266624e780SMichal Swiatkowski 	return 0;
6276624e780SMichal Swiatkowski }
6286624e780SMichal Swiatkowski 
6296624e780SMichal Swiatkowski /**
63037bb8390SAnirudh Venkataramanan  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
63137bb8390SAnirudh Venkataramanan  * @pf: board private structure
632b03d519dSJacob Keller  *
633e1588197SJacob Keller  * Reserves a VSI index from the PF and allocates an empty VSI structure
634e1588197SJacob Keller  * without a type. The VSI structure must later be initialized by calling
635e1588197SJacob Keller  * ice_vsi_cfg().
63637bb8390SAnirudh Venkataramanan  *
63737bb8390SAnirudh Venkataramanan  * returns a pointer to a VSI on success, NULL on failure.
63837bb8390SAnirudh Venkataramanan  */
639e1588197SJacob Keller static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
64037bb8390SAnirudh Venkataramanan {
6414015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
64237bb8390SAnirudh Venkataramanan 	struct ice_vsi *vsi = NULL;
64337bb8390SAnirudh Venkataramanan 
64437bb8390SAnirudh Venkataramanan 	/* Need to protect the allocation of the VSIs at the PF level */
64537bb8390SAnirudh Venkataramanan 	mutex_lock(&pf->sw_mutex);
64637bb8390SAnirudh Venkataramanan 
64737bb8390SAnirudh Venkataramanan 	/* If we have already allocated our maximum number of VSIs,
64837bb8390SAnirudh Venkataramanan 	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
64937bb8390SAnirudh Venkataramanan 	 * is available to be populated
65037bb8390SAnirudh Venkataramanan 	 */
65137bb8390SAnirudh Venkataramanan 	if (pf->next_vsi == ICE_NO_VSI) {
6524015d11eSBrett Creeley 		dev_dbg(dev, "out of VSI slots!\n");
65337bb8390SAnirudh Venkataramanan 		goto unlock_pf;
65437bb8390SAnirudh Venkataramanan 	}
65537bb8390SAnirudh Venkataramanan 
6564015d11eSBrett Creeley 	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
65737bb8390SAnirudh Venkataramanan 	if (!vsi)
65837bb8390SAnirudh Venkataramanan 		goto unlock_pf;
65937bb8390SAnirudh Venkataramanan 
66037bb8390SAnirudh Venkataramanan 	vsi->back = pf;
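	/* a newly allocated VSI starts in the down state until it is configured */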
661e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_DOWN, vsi->state);
6629d56b7fdSJesse Brandeburg 
663148beb61SHenry Tieman 	/* fill slot and make note of the index */
664148beb61SHenry Tieman 	vsi->idx = pf->next_vsi;
66537bb8390SAnirudh Venkataramanan 	pf->vsi[pf->next_vsi] = vsi;
66637bb8390SAnirudh Venkataramanan 
66737bb8390SAnirudh Venkataramanan 	/* prepare pf->next_vsi for next use */
66837bb8390SAnirudh Venkataramanan 	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
66937bb8390SAnirudh Venkataramanan 					 pf->next_vsi);
670da62c5ffSQi Zhang 
67137bb8390SAnirudh Venkataramanan unlock_pf:
67237bb8390SAnirudh Venkataramanan 	mutex_unlock(&pf->sw_mutex);
67337bb8390SAnirudh Venkataramanan 	return vsi;
67437bb8390SAnirudh Venkataramanan }
67537bb8390SAnirudh Venkataramanan 
67637bb8390SAnirudh Venkataramanan /**
677148beb61SHenry Tieman  * ice_alloc_fd_res - Allocate FD resource for a VSI
678148beb61SHenry Tieman  * @vsi: pointer to the ice_vsi
679148beb61SHenry Tieman  *
680148beb61SHenry Tieman  * This allocates the FD resources
681148beb61SHenry Tieman  *
682148beb61SHenry Tieman  * Returns 0 on success, -EPERM on no-op or -EIO on failure
683148beb61SHenry Tieman  */
684148beb61SHenry Tieman static int ice_alloc_fd_res(struct ice_vsi *vsi)
685148beb61SHenry Tieman {
686148beb61SHenry Tieman 	struct ice_pf *pf = vsi->back;
687148beb61SHenry Tieman 	u32 g_val, b_val;
688148beb61SHenry Tieman 
68940319796SKiran Patil 	/* Flow Director filters are only allocated/assigned to the PF VSI or
69040319796SKiran Patil 	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
69140319796SKiran Patil 	 * CHNL VSI, which pass the traffic. The CTRL VSI is only used to
69240319796SKiran Patil 	 * add/delete filters, so resources are not allocated to it.
69340319796SKiran Patil 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
69440319796SKiran Patil 		return -EPERM;
69540319796SKiran Patil 
69640319796SKiran Patil 	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
69740319796SKiran Patil 	      vsi->type == ICE_VSI_CHNL))
69840319796SKiran Patil 		return -EPERM;
699148beb61SHenry Tieman 
700148beb61SHenry Tieman 	/* FD filters from guaranteed pool per VSI */
701148beb61SHenry Tieman 	g_val = pf->hw.func_caps.fd_fltr_guar;
702148beb61SHenry Tieman 	if (!g_val)
703148beb61SHenry Tieman 		return -EPERM;
704148beb61SHenry Tieman 
705148beb61SHenry Tieman 	/* FD filters from best effort pool */
706148beb61SHenry Tieman 	b_val = pf->hw.func_caps.fd_fltr_best_effort;
707148beb61SHenry Tieman 	if (!b_val)
708148beb61SHenry Tieman 		return -EPERM;
709148beb61SHenry Tieman 
71040319796SKiran Patil 	/* PF main VSI gets only 64 FD resources from guaranteed pool
71140319796SKiran Patil 	 * when ADQ is configured.
71240319796SKiran Patil 	 */
71340319796SKiran Patil #define ICE_PF_VSI_GFLTR	64
714148beb61SHenry Tieman 
71540319796SKiran Patil 	/* determine FD filter resources per VSI from shared (best effort) and
71640319796SKiran Patil 	 * dedicated pool
71740319796SKiran Patil 	 */
71840319796SKiran Patil 	if (vsi->type == ICE_VSI_PF) {
71940319796SKiran Patil 		vsi->num_gfltr = g_val;
72040319796SKiran Patil 		/* if MQPRIO is configured, main VSI doesn't get all FD
72140319796SKiran Patil 		 * resources from guaranteed pool. PF VSI gets 64 FD resources
72240319796SKiran Patil 		 */
72340319796SKiran Patil 		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
72440319796SKiran Patil 			if (g_val < ICE_PF_VSI_GFLTR)
725148beb61SHenry Tieman 				return -EPERM;
72640319796SKiran Patil 			/* allow bare minimum entries for PF VSI */
72740319796SKiran Patil 			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
72840319796SKiran Patil 		}
729148beb61SHenry Tieman 
730148beb61SHenry Tieman 		/* each VSI gets same "best_effort" quota */
731148beb61SHenry Tieman 		vsi->num_bfltr = b_val;
73240319796SKiran Patil 	} else if (vsi->type == ICE_VSI_VF) {
733da62c5ffSQi Zhang 		vsi->num_gfltr = 0;
734da62c5ffSQi Zhang 
735da62c5ffSQi Zhang 		/* each VSI gets same "best_effort" quota */
736da62c5ffSQi Zhang 		vsi->num_bfltr = b_val;
73740319796SKiran Patil 	} else {
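		/* ICE_VSI_CHNL: split the guaranteed filters remaining after
		 * the PF VSI's share evenly among the ADQ TCs
		 */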
73840319796SKiran Patil 		struct ice_vsi *main_vsi;
73940319796SKiran Patil 		int numtc;
74040319796SKiran Patil 
74140319796SKiran Patil 		main_vsi = ice_get_main_vsi(pf);
74240319796SKiran Patil 		if (!main_vsi)
74340319796SKiran Patil 			return -EPERM;
74440319796SKiran Patil 
74540319796SKiran Patil 		if (!main_vsi->all_numtc)
74640319796SKiran Patil 			return -EINVAL;
74740319796SKiran Patil 
74840319796SKiran Patil 		/* figure out ADQ numtc */
74940319796SKiran Patil 		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
75040319796SKiran Patil 
75140319796SKiran Patil 		/* only one TC but still asking resources for channels,
75240319796SKiran Patil 		 * invalid config
75340319796SKiran Patil 		 */
75440319796SKiran Patil 		if (numtc < ICE_CHNL_START_TC)
75540319796SKiran Patil 			return -EPERM;
75640319796SKiran Patil 
75740319796SKiran Patil 		g_val -= ICE_PF_VSI_GFLTR;
75840319796SKiran Patil 		/* channel VSIs get an equal share of the guaranteed pool */
75940319796SKiran Patil 		vsi->num_gfltr = g_val / numtc;
76040319796SKiran Patil 
76140319796SKiran Patil 		/* each VSI gets same "best_effort" quota */
76240319796SKiran Patil 		vsi->num_bfltr = b_val;
763da62c5ffSQi Zhang 	}
764da62c5ffSQi Zhang 
765148beb61SHenry Tieman 	return 0;
766148beb61SHenry Tieman }
767148beb61SHenry Tieman 
768148beb61SHenry Tieman /**
769df0f8479SAnirudh Venkataramanan  * ice_vsi_get_qs - Assign queues from PF to VSI
770df0f8479SAnirudh Venkataramanan  * @vsi: the VSI to assign queues to
771df0f8479SAnirudh Venkataramanan  *
772df0f8479SAnirudh Venkataramanan  * Returns 0 on success and a negative value on error
773df0f8479SAnirudh Venkataramanan  */
77437bb8390SAnirudh Venkataramanan static int ice_vsi_get_qs(struct ice_vsi *vsi)
775df0f8479SAnirudh Venkataramanan {
77603f7a986SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
77703f7a986SAnirudh Venkataramanan 	struct ice_qs_cfg tx_qs_cfg = {
77803f7a986SAnirudh Venkataramanan 		.qs_mutex = &pf->avail_q_mutex,
77903f7a986SAnirudh Venkataramanan 		.pf_map = pf->avail_txqs,
78078b5713aSAnirudh Venkataramanan 		.pf_map_size = pf->max_pf_txqs,
78103f7a986SAnirudh Venkataramanan 		.q_count = vsi->alloc_txq,
78203f7a986SAnirudh Venkataramanan 		.scatter_count = ICE_MAX_SCATTER_TXQS,
78303f7a986SAnirudh Venkataramanan 		.vsi_map = vsi->txq_map,
78403f7a986SAnirudh Venkataramanan 		.vsi_map_offset = 0,
78539066dc5SBrett Creeley 		.mapping_mode = ICE_VSI_MAP_CONTIG
78603f7a986SAnirudh Venkataramanan 	};
78703f7a986SAnirudh Venkataramanan 	struct ice_qs_cfg rx_qs_cfg = {
78803f7a986SAnirudh Venkataramanan 		.qs_mutex = &pf->avail_q_mutex,
78903f7a986SAnirudh Venkataramanan 		.pf_map = pf->avail_rxqs,
79078b5713aSAnirudh Venkataramanan 		.pf_map_size = pf->max_pf_rxqs,
79103f7a986SAnirudh Venkataramanan 		.q_count = vsi->alloc_rxq,
79203f7a986SAnirudh Venkataramanan 		.scatter_count = ICE_MAX_SCATTER_RXQS,
79303f7a986SAnirudh Venkataramanan 		.vsi_map = vsi->rxq_map,
79403f7a986SAnirudh Venkataramanan 		.vsi_map_offset = 0,
79539066dc5SBrett Creeley 		.mapping_mode = ICE_VSI_MAP_CONTIG
79603f7a986SAnirudh Venkataramanan 	};
79739066dc5SBrett Creeley 	int ret;
798df0f8479SAnirudh Venkataramanan 
7990754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL)
8000754d65bSKiran Patil 		return 0;
8010754d65bSKiran Patil 
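	/* reserve Tx and Rx queues for this VSI from the PF-wide availability bitmaps */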
80203f7a986SAnirudh Venkataramanan 	ret = __ice_vsi_get_qs(&tx_qs_cfg);
80339066dc5SBrett Creeley 	if (ret)
804df0f8479SAnirudh Venkataramanan 		return ret;
80539066dc5SBrett Creeley 	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
80639066dc5SBrett Creeley 
80739066dc5SBrett Creeley 	ret = __ice_vsi_get_qs(&rx_qs_cfg);
80839066dc5SBrett Creeley 	if (ret)
80939066dc5SBrett Creeley 		return ret;
81039066dc5SBrett Creeley 	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
81139066dc5SBrett Creeley 
81239066dc5SBrett Creeley 	return 0;
813df0f8479SAnirudh Venkataramanan }
814df0f8479SAnirudh Venkataramanan 
815df0f8479SAnirudh Venkataramanan /**
8165153a18eSAnirudh Venkataramanan  * ice_vsi_put_qs - Release queues from VSI to PF
8175153a18eSAnirudh Venkataramanan  * @vsi: the VSI that is going to release queues
8185153a18eSAnirudh Venkataramanan  */
819135f4b9eSJacob Keller static void ice_vsi_put_qs(struct ice_vsi *vsi)
8205153a18eSAnirudh Venkataramanan {
8215153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
8225153a18eSAnirudh Venkataramanan 	int i;
8235153a18eSAnirudh Venkataramanan 
8245153a18eSAnirudh Venkataramanan 	mutex_lock(&pf->avail_q_mutex);
8255153a18eSAnirudh Venkataramanan 
8262faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_txq(vsi, i) {
8275153a18eSAnirudh Venkataramanan 		clear_bit(vsi->txq_map[i], pf->avail_txqs);
8285153a18eSAnirudh Venkataramanan 		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
8295153a18eSAnirudh Venkataramanan 	}
8305153a18eSAnirudh Venkataramanan 
8312faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_rxq(vsi, i) {
8325153a18eSAnirudh Venkataramanan 		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
8335153a18eSAnirudh Venkataramanan 		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
8345153a18eSAnirudh Venkataramanan 	}
8355153a18eSAnirudh Venkataramanan 
8365153a18eSAnirudh Venkataramanan 	mutex_unlock(&pf->avail_q_mutex);
8375153a18eSAnirudh Venkataramanan }
8385153a18eSAnirudh Venkataramanan 
8395153a18eSAnirudh Venkataramanan /**
840462acf6aSTony Nguyen  * ice_is_safe_mode - check if the driver is in safe mode
841462acf6aSTony Nguyen  * @pf: pointer to the PF struct
842462acf6aSTony Nguyen  *
843462acf6aSTony Nguyen  * returns true if driver is in safe mode, false otherwise
844462acf6aSTony Nguyen  */
845462acf6aSTony Nguyen bool ice_is_safe_mode(struct ice_pf *pf)
846462acf6aSTony Nguyen {
847462acf6aSTony Nguyen 	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
848462acf6aSTony Nguyen }
849462acf6aSTony Nguyen 
850462acf6aSTony Nguyen /**
85188f62aeaSDave Ertman  * ice_is_rdma_ena - check if RDMA support is enabled
852d25a0fc4SDave Ertman  * @pf: pointer to the PF struct
853d25a0fc4SDave Ertman  *
85488f62aeaSDave Ertman  * returns true if RDMA is currently supported, false otherwise
855d25a0fc4SDave Ertman  */
85688f62aeaSDave Ertman bool ice_is_rdma_ena(struct ice_pf *pf)
857d25a0fc4SDave Ertman {
85888f62aeaSDave Ertman 	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
859d25a0fc4SDave Ertman }
860d25a0fc4SDave Ertman 
861d25a0fc4SDave Ertman /**
8622c61054cSTony Nguyen  * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
8632c61054cSTony Nguyen  * @vsi: the VSI being cleaned up
8642c61054cSTony Nguyen  *
8652c61054cSTony Nguyen  * This function deletes RSS input set for all flows that were configured
8662c61054cSTony Nguyen  * for this VSI
8672c61054cSTony Nguyen  */
8682c61054cSTony Nguyen static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
8692c61054cSTony Nguyen {
8702c61054cSTony Nguyen 	struct ice_pf *pf = vsi->back;
8715e24d598STony Nguyen 	int status;
8722c61054cSTony Nguyen 
8732c61054cSTony Nguyen 	if (ice_is_safe_mode(pf))
8742c61054cSTony Nguyen 		return;
8752c61054cSTony Nguyen 
8762c61054cSTony Nguyen 	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
8772c61054cSTony Nguyen 	if (status)
8785f87ec48STony Nguyen 		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
8795f87ec48STony Nguyen 			vsi->vsi_num, status);
8802c61054cSTony Nguyen }
8812c61054cSTony Nguyen 
8822c61054cSTony Nguyen /**
8832c61054cSTony Nguyen  * ice_rss_clean - Delete RSS related VSI structures and configuration
884df0f8479SAnirudh Venkataramanan  * @vsi: the VSI being removed
885df0f8479SAnirudh Venkataramanan  */
886df0f8479SAnirudh Venkataramanan static void ice_rss_clean(struct ice_vsi *vsi)
887df0f8479SAnirudh Venkataramanan {
8884015d11eSBrett Creeley 	struct ice_pf *pf = vsi->back;
8894015d11eSBrett Creeley 	struct device *dev;
890df0f8479SAnirudh Venkataramanan 
8914015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
892df0f8479SAnirudh Venkataramanan 
8934015d11eSBrett Creeley 	devm_kfree(dev, vsi->rss_hkey_user);
8944015d11eSBrett Creeley 	devm_kfree(dev, vsi->rss_lut_user);
8952c61054cSTony Nguyen 
8962c61054cSTony Nguyen 	ice_vsi_clean_rss_flow_fld(vsi);
8972c61054cSTony Nguyen 	/* remove RSS replay list */
8982c61054cSTony Nguyen 	if (!ice_is_safe_mode(pf))
8992c61054cSTony Nguyen 		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
900df0f8479SAnirudh Venkataramanan }
901df0f8479SAnirudh Venkataramanan 
902df0f8479SAnirudh Venkataramanan /**
90328c2a645SAnirudh Venkataramanan  * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
90428c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
90528c2a645SAnirudh Venkataramanan  */
90637bb8390SAnirudh Venkataramanan static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
90728c2a645SAnirudh Venkataramanan {
90828c2a645SAnirudh Venkataramanan 	struct ice_hw_common_caps *cap;
90928c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
910b6143c9bSPrzemek Kitszel 	u16 max_rss_size;
91128c2a645SAnirudh Venkataramanan 
91228c2a645SAnirudh Venkataramanan 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
91328c2a645SAnirudh Venkataramanan 		vsi->rss_size = 1;
91428c2a645SAnirudh Venkataramanan 		return;
91528c2a645SAnirudh Venkataramanan 	}
91628c2a645SAnirudh Venkataramanan 
91728c2a645SAnirudh Venkataramanan 	cap = &pf->hw.func_caps.common_cap;
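	/* 2^rss_table_entry_width is the maximum number of queues RSS can spread across */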
918b6143c9bSPrzemek Kitszel 	max_rss_size = BIT(cap->rss_table_entry_width);
91928c2a645SAnirudh Venkataramanan 	switch (vsi->type) {
9200754d65bSKiran Patil 	case ICE_VSI_CHNL:
92128c2a645SAnirudh Venkataramanan 	case ICE_VSI_PF:
92228c2a645SAnirudh Venkataramanan 		/* PF VSI will inherit RSS instance of PF */
92388865fc4SKarol Kolacinski 		vsi->rss_table_size = (u16)cap->rss_table_size;
9240754d65bSKiran Patil 		if (vsi->type == ICE_VSI_CHNL)
925b6143c9bSPrzemek Kitszel 			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
9260754d65bSKiran Patil 		else
92788865fc4SKarol Kolacinski 			vsi->rss_size = min_t(u16, num_online_cpus(),
928b6143c9bSPrzemek Kitszel 					      max_rss_size);
929b6143c9bSPrzemek Kitszel 		vsi->rss_lut_type = ICE_LUT_PF;
93028c2a645SAnirudh Venkataramanan 		break;
931f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
932b6143c9bSPrzemek Kitszel 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
933b6143c9bSPrzemek Kitszel 		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
934b6143c9bSPrzemek Kitszel 		vsi->rss_lut_type = ICE_LUT_VSI;
935f66756e0SGrzegorz Nitka 		break;
9368ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
9370ca469fbSMitch Williams 		/* VF VSI will get a small RSS table.
9380ca469fbSMitch Williams 		 * For VSI_LUT, LUT size should be set to 64 bytes.
9398ede0178SAnirudh Venkataramanan 		 */
940b6143c9bSPrzemek Kitszel 		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
9410ca469fbSMitch Williams 		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
942b6143c9bSPrzemek Kitszel 		vsi->rss_lut_type = ICE_LUT_VSI;
9438ede0178SAnirudh Venkataramanan 		break;
9440e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
9450e674aebSAnirudh Venkataramanan 		break;
94628c2a645SAnirudh Venkataramanan 	default:
947148beb61SHenry Tieman 		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
948148beb61SHenry Tieman 			ice_vsi_type_str(vsi->type));
94928c2a645SAnirudh Venkataramanan 		break;
95028c2a645SAnirudh Venkataramanan 	}
95128c2a645SAnirudh Venkataramanan }
95228c2a645SAnirudh Venkataramanan 
95328c2a645SAnirudh Venkataramanan /**
95428c2a645SAnirudh Venkataramanan  * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
9551babaf77SBrett Creeley  * @hw: HW structure used to determine the VLAN mode of the device
95628c2a645SAnirudh Venkataramanan  * @ctxt: the VSI context being set
95728c2a645SAnirudh Venkataramanan  *
95828c2a645SAnirudh Venkataramanan  * This initializes a default VSI context for all sections except the Queues.
95928c2a645SAnirudh Venkataramanan  */
9601babaf77SBrett Creeley static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
96128c2a645SAnirudh Venkataramanan {
96228c2a645SAnirudh Venkataramanan 	u32 table = 0;
96328c2a645SAnirudh Venkataramanan 
96428c2a645SAnirudh Venkataramanan 	memset(&ctxt->info, 0, sizeof(ctxt->info));
96528c2a645SAnirudh Venkataramanan 	/* VSIs should be allocated from the shared pool */
96628c2a645SAnirudh Venkataramanan 	ctxt->alloc_from_pool = true;
96728c2a645SAnirudh Venkataramanan 	/* Src pruning enabled by default */
96828c2a645SAnirudh Venkataramanan 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
96928c2a645SAnirudh Venkataramanan 	/* Traffic from VSI can be sent to LAN */
97028c2a645SAnirudh Venkataramanan 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
9711babaf77SBrett Creeley 	/* allow all untagged/tagged packets by default on Tx */
9727bd527aaSBrett Creeley 	ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
9737bd527aaSBrett Creeley 				  ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
9747bd527aaSBrett Creeley 				 ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
9751babaf77SBrett Creeley 	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
9761babaf77SBrett Creeley 	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
9771babaf77SBrett Creeley 	 *
9781babaf77SBrett Creeley 	 * DVM - leave inner VLAN in packet by default
9791babaf77SBrett Creeley 	 */
9801babaf77SBrett Creeley 	if (ice_is_dvm_ena(hw)) {
9811babaf77SBrett Creeley 		ctxt->info.inner_vlan_flags |=
9821babaf77SBrett Creeley 			ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
9831babaf77SBrett Creeley 		ctxt->info.outer_vlan_flags =
9841babaf77SBrett Creeley 			(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
9851babaf77SBrett Creeley 			 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
9861babaf77SBrett Creeley 			ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
9871babaf77SBrett Creeley 		ctxt->info.outer_vlan_flags |=
9881babaf77SBrett Creeley 			(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
9891babaf77SBrett Creeley 			 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
9901babaf77SBrett Creeley 			ICE_AQ_VSI_OUTER_TAG_TYPE_M;
991b33de560SMichal Swiatkowski 		ctxt->info.outer_vlan_flags |=
992b33de560SMichal Swiatkowski 			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
993b33de560SMichal Swiatkowski 				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
9941babaf77SBrett Creeley 	}
99528c2a645SAnirudh Venkataramanan 	/* Have 1:1 UP mapping for both ingress/egress tables */
99628c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
99728c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
99828c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
99928c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
100028c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
100128c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
100228c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
100328c2a645SAnirudh Venkataramanan 	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
100428c2a645SAnirudh Venkataramanan 	ctxt->info.ingress_table = cpu_to_le32(table);
100528c2a645SAnirudh Venkataramanan 	ctxt->info.egress_table = cpu_to_le32(table);
100628c2a645SAnirudh Venkataramanan 	/* Have 1:1 UP mapping for outer to inner UP table */
100728c2a645SAnirudh Venkataramanan 	ctxt->info.outer_up_table = cpu_to_le32(table);
100828c2a645SAnirudh Venkataramanan 	/* No outer tag support; outer_tag_flags remains zero */
100928c2a645SAnirudh Venkataramanan }
101028c2a645SAnirudh Venkataramanan 
101128c2a645SAnirudh Venkataramanan /**
101228c2a645SAnirudh Venkataramanan  * ice_vsi_setup_q_map - Setup a VSI queue map
101328c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
101428c2a645SAnirudh Venkataramanan  * @ctxt: VSI context structure
101528c2a645SAnirudh Venkataramanan  */
1016a632b2a4SAnatolii Gerasymenko static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
101728c2a645SAnirudh Venkataramanan {
1018a509702cSDing Hui 	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
10198134d5ffSBrett Creeley 	u16 num_txq_per_tc, num_rxq_per_tc;
102028c2a645SAnirudh Venkataramanan 	u16 qcount_tx = vsi->alloc_txq;
102128c2a645SAnirudh Venkataramanan 	u16 qcount_rx = vsi->alloc_rxq;
1022c5a2a4a3SUsha Ketineni 	u8 netdev_tc = 0;
102328c2a645SAnirudh Venkataramanan 	int i;
102428c2a645SAnirudh Venkataramanan 
10250754d65bSKiran Patil 	if (!vsi->tc_cfg.numtc) {
102628c2a645SAnirudh Venkataramanan 		/* at least TC0 should be enabled by default */
10270754d65bSKiran Patil 		vsi->tc_cfg.numtc = 1;
10280754d65bSKiran Patil 		vsi->tc_cfg.ena_tc = 1;
102928c2a645SAnirudh Venkataramanan 	}
103028c2a645SAnirudh Venkataramanan 
10318134d5ffSBrett Creeley 	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
10328134d5ffSBrett Creeley 	if (!num_rxq_per_tc)
10338134d5ffSBrett Creeley 		num_rxq_per_tc = 1;
10348134d5ffSBrett Creeley 	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
10358134d5ffSBrett Creeley 	if (!num_txq_per_tc)
10368134d5ffSBrett Creeley 		num_txq_per_tc = 1;
10378134d5ffSBrett Creeley 
10388134d5ffSBrett Creeley 	/* find the (rounded up) power-of-2 of the per-TC Rx queue count */
10398134d5ffSBrett Creeley 	pow = (u16)order_base_2(num_rxq_per_tc);
104028c2a645SAnirudh Venkataramanan 
104128c2a645SAnirudh Venkataramanan 	/* TC mapping is a function of the number of Rx queues assigned to the
104228c2a645SAnirudh Venkataramanan 	 * VSI for each traffic class and the offset of these queues.
104328c2a645SAnirudh Venkataramanan 	 * The first 10 bits are the queue offset for TC0, and the next 4 bits are
104428c2a645SAnirudh Venkataramanan 	 * the number of queues allocated to TC0. The number of queues is a power-of-2.
104528c2a645SAnirudh Venkataramanan 	 *
104628c2a645SAnirudh Venkataramanan 	 * If a TC is not enabled, its queue offset is set to 0 and one queue is
104728c2a645SAnirudh Venkataramanan 	 * allocated, so that traffic for the given TC will be sent to the default
104828c2a645SAnirudh Venkataramanan 	 * queue.
104928c2a645SAnirudh Venkataramanan 	 *
105028c2a645SAnirudh Venkataramanan 	 * Setup number and offset of Rx queues for all TCs for the VSI
105128c2a645SAnirudh Venkataramanan 	 */
10522bdc97beSBruce Allan 	ice_for_each_traffic_class(i) {
105328c2a645SAnirudh Venkataramanan 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
105428c2a645SAnirudh Venkataramanan 			/* TC is not enabled */
105528c2a645SAnirudh Venkataramanan 			vsi->tc_cfg.tc_info[i].qoffset = 0;
1056c5a2a4a3SUsha Ketineni 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
1057c5a2a4a3SUsha Ketineni 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
1058c5a2a4a3SUsha Ketineni 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
105928c2a645SAnirudh Venkataramanan 			ctxt->info.tc_mapping[i] = 0;
106028c2a645SAnirudh Venkataramanan 			continue;
106128c2a645SAnirudh Venkataramanan 		}
106228c2a645SAnirudh Venkataramanan 
106328c2a645SAnirudh Venkataramanan 		/* TC is enabled */
106428c2a645SAnirudh Venkataramanan 		vsi->tc_cfg.tc_info[i].qoffset = offset;
10658134d5ffSBrett Creeley 		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
10668134d5ffSBrett Creeley 		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
1067c5a2a4a3SUsha Ketineni 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
106828c2a645SAnirudh Venkataramanan 
106928c2a645SAnirudh Venkataramanan 		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
107028c2a645SAnirudh Venkataramanan 			ICE_AQ_VSI_TC_Q_OFFSET_M) |
107128c2a645SAnirudh Venkataramanan 			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
107228c2a645SAnirudh Venkataramanan 			 ICE_AQ_VSI_TC_Q_NUM_M);
10738134d5ffSBrett Creeley 		offset += num_rxq_per_tc;
10748134d5ffSBrett Creeley 		tx_count += num_txq_per_tc;
107528c2a645SAnirudh Venkataramanan 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
107628c2a645SAnirudh Venkataramanan 	}
107760dcc39eSKiran Patil 
107860dcc39eSKiran Patil 	/* If offset is non-zero, it was computed from the enabled TCs of this
107960dcc39eSKiran Patil 	 * VSI, so use it as the Rx queue count. Otherwise fall back to
108060dcc39eSKiran Patil 	 * num_rxq_per_tc, which is derived from the VSI's allocated Rx queues
108160dcc39eSKiran Patil 	 * and is therefore at least 1; either way rx_count is non-zero
108260dcc39eSKiran Patil 	 * (and tx_count is likewise at least 1).
108360dcc39eSKiran Patil 	 */
108460dcc39eSKiran Patil 	if (offset)
1085a509702cSDing Hui 		rx_count = offset;
108660dcc39eSKiran Patil 	else
1087a509702cSDing Hui 		rx_count = num_rxq_per_tc;
108860dcc39eSKiran Patil 
1089a509702cSDing Hui 	if (rx_count > vsi->alloc_rxq) {
1090a632b2a4SAnatolii Gerasymenko 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
1091a509702cSDing Hui 			rx_count, vsi->alloc_rxq);
1092a509702cSDing Hui 		return -EINVAL;
1093a509702cSDing Hui 	}
1094a509702cSDing Hui 
1095a509702cSDing Hui 	if (tx_count > vsi->alloc_txq) {
1096a509702cSDing Hui 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
1097a509702cSDing Hui 			tx_count, vsi->alloc_txq);
1098a632b2a4SAnatolii Gerasymenko 		return -EINVAL;
1099a632b2a4SAnatolii Gerasymenko 	}
1100a632b2a4SAnatolii Gerasymenko 
1101c5a2a4a3SUsha Ketineni 	vsi->num_txq = tx_count;
1102a509702cSDing Hui 	vsi->num_rxq = rx_count;
110328c2a645SAnirudh Venkataramanan 
11048ede0178SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
11059a946843SAnirudh Venkataramanan 		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have the same number of Tx and Rx queues. Hence making them equal\n");
11068ede0178SAnirudh Venkataramanan 		/* since there is a chance that num_rxq could have been changed
11078ede0178SAnirudh Venkataramanan 		 * in the above for loop, make num_txq equal to num_rxq.
11088ede0178SAnirudh Venkataramanan 		 */
11098ede0178SAnirudh Venkataramanan 		vsi->num_txq = vsi->num_rxq;
11108ede0178SAnirudh Venkataramanan 	}
11118ede0178SAnirudh Venkataramanan 
111228c2a645SAnirudh Venkataramanan 	/* Rx queue mapping */
111328c2a645SAnirudh Venkataramanan 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
111428c2a645SAnirudh Venkataramanan 	/* q_mapping buffer holds the info for the first queue allocated for
111528c2a645SAnirudh Venkataramanan 	 * this VSI in the PF space and also the number of queues associated
111628c2a645SAnirudh Venkataramanan 	 * with this VSI.
111728c2a645SAnirudh Venkataramanan 	 */
111828c2a645SAnirudh Venkataramanan 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
111928c2a645SAnirudh Venkataramanan 	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
1120a632b2a4SAnatolii Gerasymenko 
1121a632b2a4SAnatolii Gerasymenko 	return 0;
112228c2a645SAnirudh Venkataramanan }
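
/* A short sketch of the per-TC qmap encoding used above, reusing the
 * ICE_AQ_VSI_TC_Q_OFFSET and ICE_AQ_VSI_TC_Q_NUM shift/mask fields. For
 * example, a VSI with 16 Rx queues spread over 2 TCs gets 8 queues per TC
 * (pow = order_base_2(8) = 3), so TC0 encodes offset 0 with pow 3 and TC1
 * encodes offset 8 with pow 3. The helper below is illustrative only.
 */
static inline u16 ice_example_tc_qmap(u16 offset, u16 pow)
{
	return ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		ICE_AQ_VSI_TC_Q_OFFSET_M) |
	       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
}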
112328c2a645SAnirudh Venkataramanan 
112428c2a645SAnirudh Venkataramanan /**
1125148beb61SHenry Tieman  * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1126148beb61SHenry Tieman  * @ctxt: the VSI context being set
1127148beb61SHenry Tieman  * @vsi: the VSI being configured
1128148beb61SHenry Tieman  */
1129148beb61SHenry Tieman static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1130148beb61SHenry Tieman {
1131148beb61SHenry Tieman 	u8 dflt_q_group, dflt_q_prio;
1132148beb61SHenry Tieman 	u16 dflt_q, report_q, val;
1133148beb61SHenry Tieman 
1134da62c5ffSQi Zhang 	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
113540319796SKiran Patil 	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1136148beb61SHenry Tieman 		return;
1137148beb61SHenry Tieman 
1138148beb61SHenry Tieman 	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1139148beb61SHenry Tieman 	ctxt->info.valid_sections |= cpu_to_le16(val);
1140148beb61SHenry Tieman 	dflt_q = 0;
1141148beb61SHenry Tieman 	dflt_q_group = 0;
1142148beb61SHenry Tieman 	report_q = 0;
1143148beb61SHenry Tieman 	dflt_q_prio = 0;
1144148beb61SHenry Tieman 
1145148beb61SHenry Tieman 	/* enable flow director filtering/programming */
1146148beb61SHenry Tieman 	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
1147148beb61SHenry Tieman 	ctxt->info.fd_options = cpu_to_le16(val);
1148148beb61SHenry Tieman 	/* max number of dedicated flow director filters allocated to this VSI */
1149148beb61SHenry Tieman 	ctxt->info.max_fd_fltr_dedicated =
1150148beb61SHenry Tieman 			cpu_to_le16(vsi->num_gfltr);
1151148beb61SHenry Tieman 	/* max of shared flow director filters any VSI may program */
1152148beb61SHenry Tieman 	ctxt->info.max_fd_fltr_shared =
1153148beb61SHenry Tieman 			cpu_to_le16(vsi->num_bfltr);
1154148beb61SHenry Tieman 	/* default queue index within the VSI of the default FD */
1155148beb61SHenry Tieman 	val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
1156148beb61SHenry Tieman 	       ICE_AQ_VSI_FD_DEF_Q_M);
1157148beb61SHenry Tieman 	/* target queue or queue group to the FD filter */
1158148beb61SHenry Tieman 	val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
1159148beb61SHenry Tieman 		ICE_AQ_VSI_FD_DEF_GRP_M);
1160148beb61SHenry Tieman 	ctxt->info.fd_def_q = cpu_to_le16(val);
1161148beb61SHenry Tieman 	/* queue index on which FD filter completion is reported */
1162148beb61SHenry Tieman 	val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
1163148beb61SHenry Tieman 	       ICE_AQ_VSI_FD_REPORT_Q_M);
1164148beb61SHenry Tieman 	/* priority of the default qindex action */
1165148beb61SHenry Tieman 	val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
1166148beb61SHenry Tieman 		ICE_AQ_VSI_FD_DEF_PRIORITY_M);
1167148beb61SHenry Tieman 	ctxt->info.fd_report_opt = cpu_to_le16(val);
1168148beb61SHenry Tieman }
1169148beb61SHenry Tieman 
1170148beb61SHenry Tieman /**
117128c2a645SAnirudh Venkataramanan  * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
117228c2a645SAnirudh Venkataramanan  * @ctxt: the VSI context being set
117328c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
117428c2a645SAnirudh Venkataramanan  */
117528c2a645SAnirudh Venkataramanan static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
117628c2a645SAnirudh Venkataramanan {
117728c2a645SAnirudh Venkataramanan 	u8 lut_type, hash_type;
11784015d11eSBrett Creeley 	struct device *dev;
1179819d8998SJesse Brandeburg 	struct ice_pf *pf;
1180819d8998SJesse Brandeburg 
1181819d8998SJesse Brandeburg 	pf = vsi->back;
11824015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
118328c2a645SAnirudh Venkataramanan 
118428c2a645SAnirudh Venkataramanan 	switch (vsi->type) {
11850754d65bSKiran Patil 	case ICE_VSI_CHNL:
118628c2a645SAnirudh Venkataramanan 	case ICE_VSI_PF:
118728c2a645SAnirudh Venkataramanan 		/* PF VSI will inherit RSS instance of PF */
118828c2a645SAnirudh Venkataramanan 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
1189*334a1227SAhmed Zaki 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
119028c2a645SAnirudh Venkataramanan 		break;
11918ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
11928ede0178SAnirudh Venkataramanan 	/* VF VSI gets a small RSS table, which is a VSI LUT type */
11938ede0178SAnirudh Venkataramanan 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
1194*334a1227SAhmed Zaki 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
11958ede0178SAnirudh Venkataramanan 		break;
1196148beb61SHenry Tieman 	default:
11974015d11eSBrett Creeley 		dev_dbg(dev, "Unsupported VSI type %s\n",
1198964674f1SAnirudh Venkataramanan 			ice_vsi_type_str(vsi->type));
11990e674aebSAnirudh Venkataramanan 		return;
120028c2a645SAnirudh Venkataramanan 	}
120128c2a645SAnirudh Venkataramanan 
120228c2a645SAnirudh Venkataramanan 	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
120328c2a645SAnirudh Venkataramanan 				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
1204242e3450SJesse Brandeburg 				(hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
120528c2a645SAnirudh Venkataramanan }
120628c2a645SAnirudh Venkataramanan 
12070754d65bSKiran Patil static void
12080754d65bSKiran Patil ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
12090754d65bSKiran Patil {
12100754d65bSKiran Patil 	struct ice_pf *pf = vsi->back;
12110754d65bSKiran Patil 	u16 qcount, qmap;
12120754d65bSKiran Patil 	u8 offset = 0;
12130754d65bSKiran Patil 	int pow;
12140754d65bSKiran Patil 
12150754d65bSKiran Patil 	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
12160754d65bSKiran Patil 
12170754d65bSKiran Patil 	pow = order_base_2(qcount);
12180754d65bSKiran Patil 	qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
12190754d65bSKiran Patil 		 ICE_AQ_VSI_TC_Q_OFFSET_M) |
12200754d65bSKiran Patil 		 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
12210754d65bSKiran Patil 		   ICE_AQ_VSI_TC_Q_NUM_M);
12220754d65bSKiran Patil 
12230754d65bSKiran Patil 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
12240754d65bSKiran Patil 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
12250754d65bSKiran Patil 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
12260754d65bSKiran Patil 	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
12270754d65bSKiran Patil }
12280754d65bSKiran Patil 
122928c2a645SAnirudh Venkataramanan /**
123045f5478cSJan Sokolowski  * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
123145f5478cSJan Sokolowski  * @vsi: VSI to check whether or not VLAN pruning is enabled.
123245f5478cSJan Sokolowski  *
123345f5478cSJan Sokolowski  * Returns true if Rx VLAN pruning is enabled and false otherwise.
123445f5478cSJan Sokolowski  */
123545f5478cSJan Sokolowski static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
123645f5478cSJan Sokolowski {
1237e528e5b2SJan Sokolowski 	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
123845f5478cSJan Sokolowski }
123945f5478cSJan Sokolowski 
124045f5478cSJan Sokolowski /**
124128c2a645SAnirudh Venkataramanan  * ice_vsi_init - Create and initialize a VSI
124228c2a645SAnirudh Venkataramanan  * @vsi: the VSI being configured
12435e509ab2SJacob Keller  * @vsi_flags: VSI configuration flags
12445e509ab2SJacob Keller  *
12455e509ab2SJacob Keller  * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
12465e509ab2SJacob Keller  * reconfigure an existing context.
124728c2a645SAnirudh Venkataramanan  *
124828c2a645SAnirudh Venkataramanan  * This initializes a VSI context depending on the VSI type to be added and
124928c2a645SAnirudh Venkataramanan  * passes it down to the add_vsi aq command to create a new VSI.
125028c2a645SAnirudh Venkataramanan  */
12515e509ab2SJacob Keller static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
125228c2a645SAnirudh Venkataramanan {
125328c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
125428c2a645SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
1255198a666aSBruce Allan 	struct ice_vsi_ctx *ctxt;
125687324e74SHenry Tieman 	struct device *dev;
125728c2a645SAnirudh Venkataramanan 	int ret = 0;
125828c2a645SAnirudh Venkataramanan 
125987324e74SHenry Tieman 	dev = ice_pf_to_dev(pf);
12609efe35d0STony Nguyen 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1261198a666aSBruce Allan 	if (!ctxt)
1262198a666aSBruce Allan 		return -ENOMEM;
1263198a666aSBruce Allan 
126428c2a645SAnirudh Venkataramanan 	switch (vsi->type) {
1265148beb61SHenry Tieman 	case ICE_VSI_CTRL:
12660e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
126728c2a645SAnirudh Venkataramanan 	case ICE_VSI_PF:
1268198a666aSBruce Allan 		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
126928c2a645SAnirudh Venkataramanan 		break;
1270f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
12710754d65bSKiran Patil 	case ICE_VSI_CHNL:
1272f66756e0SGrzegorz Nitka 		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
1273f66756e0SGrzegorz Nitka 		break;
12748ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
1275198a666aSBruce Allan 		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
12768ede0178SAnirudh Venkataramanan 		/* VF number here is the absolute VF number (0-255) */
1277b03d519dSJacob Keller 		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
12788ede0178SAnirudh Venkataramanan 		break;
127928c2a645SAnirudh Venkataramanan 	default:
12809efe35d0STony Nguyen 		ret = -ENODEV;
12819efe35d0STony Nguyen 		goto out;
128228c2a645SAnirudh Venkataramanan 	}
128328c2a645SAnirudh Venkataramanan 
12840754d65bSKiran Patil 	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
12850754d65bSKiran Patil 	 * pruning enabled
12860754d65bSKiran Patil 	 */
12870754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL) {
12880754d65bSKiran Patil 		struct ice_vsi *main_vsi;
12890754d65bSKiran Patil 
12900754d65bSKiran Patil 		main_vsi = ice_get_main_vsi(pf);
12910754d65bSKiran Patil 		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
12920754d65bSKiran Patil 			ctxt->info.sw_flags2 |=
12930754d65bSKiran Patil 				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
12940754d65bSKiran Patil 		else
12950754d65bSKiran Patil 			ctxt->info.sw_flags2 &=
12960754d65bSKiran Patil 				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
12970754d65bSKiran Patil 	}
12980754d65bSKiran Patil 
12991babaf77SBrett Creeley 	ice_set_dflt_vsi_ctx(hw, ctxt);
1300148beb61SHenry Tieman 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
1301148beb61SHenry Tieman 		ice_set_fd_vsi_ctx(ctxt, vsi);
130228c2a645SAnirudh Venkataramanan 	/* if the switch is in VEB mode, allow VSI loopback */
130328c2a645SAnirudh Venkataramanan 	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1304198a666aSBruce Allan 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
130528c2a645SAnirudh Venkataramanan 
130628c2a645SAnirudh Venkataramanan 	/* Set LUT type and HASH type if RSS is enabled */
1307148beb61SHenry Tieman 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
1308148beb61SHenry Tieman 	    vsi->type != ICE_VSI_CTRL) {
1309198a666aSBruce Allan 		ice_set_rss_vsi_ctx(ctxt, vsi);
131087324e74SHenry Tieman 		/* if updating the VSI context, make sure to set valid_sections
131187324e74SHenry Tieman 		 * to indicate which sections of the VSI context are being updated
131287324e74SHenry Tieman 		 */
13135e509ab2SJacob Keller 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
131487324e74SHenry Tieman 			ctxt->info.valid_sections |=
131587324e74SHenry Tieman 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
131687324e74SHenry Tieman 	}
131728c2a645SAnirudh Venkataramanan 
1318198a666aSBruce Allan 	ctxt->info.sw_id = vsi->port_info->sw_id;
13190754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL) {
13200754d65bSKiran Patil 		ice_chnl_vsi_setup_q_map(vsi, ctxt);
13210754d65bSKiran Patil 	} else {
1322a632b2a4SAnatolii Gerasymenko 		ret = ice_vsi_setup_q_map(vsi, ctxt);
1323a632b2a4SAnatolii Gerasymenko 		if (ret)
1324a632b2a4SAnatolii Gerasymenko 			goto out;
1325a632b2a4SAnatolii Gerasymenko 
13265e509ab2SJacob Keller 		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
13276624e780SMichal Swiatkowski 			/* the VSI is being updated, so valid_sections must
132887324e74SHenry Tieman 			 * indicate which sections of the VSI context are
132987324e74SHenry Tieman 			 * being modified
133087324e74SHenry Tieman 			 */
133187324e74SHenry Tieman 			ctxt->info.valid_sections |=
133287324e74SHenry Tieman 				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
13330754d65bSKiran Patil 	}
133428c2a645SAnirudh Venkataramanan 
13350c3a6101SDave Ertman 	/* Allow control frames out of main VSI */
13360c3a6101SDave Ertman 	if (vsi->type == ICE_VSI_PF) {
13370c3a6101SDave Ertman 		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
13380c3a6101SDave Ertman 		ctxt->info.valid_sections |=
13390c3a6101SDave Ertman 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
13400c3a6101SDave Ertman 	}
13410c3a6101SDave Ertman 
13425e509ab2SJacob Keller 	if (vsi_flags & ICE_VSI_FLAG_INIT) {
1343198a666aSBruce Allan 		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
134428c2a645SAnirudh Venkataramanan 		if (ret) {
134587324e74SHenry Tieman 			dev_err(dev, "Add VSI failed, err %d\n", ret);
13469efe35d0STony Nguyen 			ret = -EIO;
13479efe35d0STony Nguyen 			goto out;
134828c2a645SAnirudh Venkataramanan 		}
134987324e74SHenry Tieman 	} else {
135087324e74SHenry Tieman 		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
135187324e74SHenry Tieman 		if (ret) {
135287324e74SHenry Tieman 			dev_err(dev, "Update VSI failed, err %d\n", ret);
135387324e74SHenry Tieman 			ret = -EIO;
135487324e74SHenry Tieman 			goto out;
135587324e74SHenry Tieman 		}
135687324e74SHenry Tieman 	}
135728c2a645SAnirudh Venkataramanan 
135828c2a645SAnirudh Venkataramanan 	/* keep context for update VSI operations */
1359198a666aSBruce Allan 	vsi->info = ctxt->info;
136028c2a645SAnirudh Venkataramanan 
136128c2a645SAnirudh Venkataramanan 	/* record VSI number returned */
1362198a666aSBruce Allan 	vsi->vsi_num = ctxt->vsi_num;
136328c2a645SAnirudh Venkataramanan 
13649efe35d0STony Nguyen out:
13659efe35d0STony Nguyen 	kfree(ctxt);
136628c2a645SAnirudh Venkataramanan 	return ret;
136728c2a645SAnirudh Venkataramanan }
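
/* Usage sketch for the flag handling above: passing ICE_VSI_FLAG_INIT in
 * vsi_flags creates a brand-new context through ice_add_vsi(), while passing 0
 * keeps vsi->idx and pushes only the sections marked valid through
 * ice_update_vsi().
 */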
136828c2a645SAnirudh Venkataramanan 
136928c2a645SAnirudh Venkataramanan /**
137028c2a645SAnirudh Venkataramanan  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
137128c2a645SAnirudh Venkataramanan  * @vsi: the VSI having rings deallocated
137228c2a645SAnirudh Venkataramanan  */
1373df0f8479SAnirudh Venkataramanan static void ice_vsi_clear_rings(struct ice_vsi *vsi)
137428c2a645SAnirudh Venkataramanan {
137528c2a645SAnirudh Venkataramanan 	int i;
137628c2a645SAnirudh Venkataramanan 
1377f6a07271SJacob Keller 	/* Avoid stale references by clearing map from vector to ring */
1378f6a07271SJacob Keller 	if (vsi->q_vectors) {
1379f6a07271SJacob Keller 		ice_for_each_q_vector(vsi, i) {
1380f6a07271SJacob Keller 			struct ice_q_vector *q_vector = vsi->q_vectors[i];
1381f6a07271SJacob Keller 
1382f6a07271SJacob Keller 			if (q_vector) {
1383e72bba21SMaciej Fijalkowski 				q_vector->tx.tx_ring = NULL;
1384e72bba21SMaciej Fijalkowski 				q_vector->rx.rx_ring = NULL;
1385f6a07271SJacob Keller 			}
1386f6a07271SJacob Keller 		}
1387f6a07271SJacob Keller 	}
1388f6a07271SJacob Keller 
138928c2a645SAnirudh Venkataramanan 	if (vsi->tx_rings) {
13902faf63b6SMaciej Fijalkowski 		ice_for_each_alloc_txq(vsi, i) {
139128c2a645SAnirudh Venkataramanan 			if (vsi->tx_rings[i]) {
139228c2a645SAnirudh Venkataramanan 				kfree_rcu(vsi->tx_rings[i], rcu);
1393b1d95cc2SCiara Loftus 				WRITE_ONCE(vsi->tx_rings[i], NULL);
139428c2a645SAnirudh Venkataramanan 			}
139528c2a645SAnirudh Venkataramanan 		}
139628c2a645SAnirudh Venkataramanan 	}
139728c2a645SAnirudh Venkataramanan 	if (vsi->rx_rings) {
13982faf63b6SMaciej Fijalkowski 		ice_for_each_alloc_rxq(vsi, i) {
139928c2a645SAnirudh Venkataramanan 			if (vsi->rx_rings[i]) {
140028c2a645SAnirudh Venkataramanan 				kfree_rcu(vsi->rx_rings[i], rcu);
1401b1d95cc2SCiara Loftus 				WRITE_ONCE(vsi->rx_rings[i], NULL);
140228c2a645SAnirudh Venkataramanan 			}
140328c2a645SAnirudh Venkataramanan 		}
140428c2a645SAnirudh Venkataramanan 	}
140528c2a645SAnirudh Venkataramanan }
140628c2a645SAnirudh Venkataramanan 
140728c2a645SAnirudh Venkataramanan /**
140828c2a645SAnirudh Venkataramanan  * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
140928c2a645SAnirudh Venkataramanan  * @vsi: VSI which is having rings allocated
141028c2a645SAnirudh Venkataramanan  */
141137bb8390SAnirudh Venkataramanan static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
141228c2a645SAnirudh Venkataramanan {
14130d54d8f7SBrett Creeley 	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
141428c2a645SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
14154015d11eSBrett Creeley 	struct device *dev;
141688865fc4SKarol Kolacinski 	u16 i;
141728c2a645SAnirudh Venkataramanan 
14184015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
1419d337f2afSAnirudh Venkataramanan 	/* Allocate Tx rings */
14202faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_txq(vsi, i) {
1421e72bba21SMaciej Fijalkowski 		struct ice_tx_ring *ring;
142228c2a645SAnirudh Venkataramanan 
142328c2a645SAnirudh Venkataramanan 		/* allocate with kzalloc(), free with kfree_rcu() */
142428c2a645SAnirudh Venkataramanan 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
142528c2a645SAnirudh Venkataramanan 
142628c2a645SAnirudh Venkataramanan 		if (!ring)
142728c2a645SAnirudh Venkataramanan 			goto err_out;
142828c2a645SAnirudh Venkataramanan 
142928c2a645SAnirudh Venkataramanan 		ring->q_index = i;
143028c2a645SAnirudh Venkataramanan 		ring->reg_idx = vsi->txq_map[i];
143128c2a645SAnirudh Venkataramanan 		ring->vsi = vsi;
1432ea9b847cSJacob Keller 		ring->tx_tstamps = &pf->ptp.port.tx;
14334015d11eSBrett Creeley 		ring->dev = dev;
1434ad71b256SBrett Creeley 		ring->count = vsi->num_tx_desc;
1435ccfee182SAnatolii Gerasymenko 		ring->txq_teid = ICE_INVAL_TEID;
14360d54d8f7SBrett Creeley 		if (dvm_ena)
14370d54d8f7SBrett Creeley 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
14380d54d8f7SBrett Creeley 		else
14390d54d8f7SBrett Creeley 			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
1440b1d95cc2SCiara Loftus 		WRITE_ONCE(vsi->tx_rings[i], ring);
144128c2a645SAnirudh Venkataramanan 	}
144228c2a645SAnirudh Venkataramanan 
1443d337f2afSAnirudh Venkataramanan 	/* Allocate Rx rings */
14442faf63b6SMaciej Fijalkowski 	ice_for_each_alloc_rxq(vsi, i) {
1445e72bba21SMaciej Fijalkowski 		struct ice_rx_ring *ring;
144628c2a645SAnirudh Venkataramanan 
144728c2a645SAnirudh Venkataramanan 		/* allocate with kzalloc(), free with kfree_rcu() */
144828c2a645SAnirudh Venkataramanan 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
144928c2a645SAnirudh Venkataramanan 		if (!ring)
145028c2a645SAnirudh Venkataramanan 			goto err_out;
145128c2a645SAnirudh Venkataramanan 
145228c2a645SAnirudh Venkataramanan 		ring->q_index = i;
145328c2a645SAnirudh Venkataramanan 		ring->reg_idx = vsi->rxq_map[i];
145428c2a645SAnirudh Venkataramanan 		ring->vsi = vsi;
145528c2a645SAnirudh Venkataramanan 		ring->netdev = vsi->netdev;
14564015d11eSBrett Creeley 		ring->dev = dev;
1457ad71b256SBrett Creeley 		ring->count = vsi->num_rx_desc;
1458cf6b82fdSJacob Keller 		ring->cached_phctime = pf->ptp.cached_phc_time;
1459b1d95cc2SCiara Loftus 		WRITE_ONCE(vsi->rx_rings[i], ring);
146028c2a645SAnirudh Venkataramanan 	}
146128c2a645SAnirudh Venkataramanan 
146228c2a645SAnirudh Venkataramanan 	return 0;
146328c2a645SAnirudh Venkataramanan 
146428c2a645SAnirudh Venkataramanan err_out:
146528c2a645SAnirudh Venkataramanan 	ice_vsi_clear_rings(vsi);
146628c2a645SAnirudh Venkataramanan 	return -ENOMEM;
146728c2a645SAnirudh Venkataramanan }
146828c2a645SAnirudh Venkataramanan 
146928c2a645SAnirudh Venkataramanan /**
1470492af0abSMd Fahad Iqbal Polash  * ice_vsi_manage_rss_lut - disable/enable RSS
1471492af0abSMd Fahad Iqbal Polash  * @vsi: the VSI being changed
1472492af0abSMd Fahad Iqbal Polash  * @ena: boolean value indicating if this is an enable or disable request
1473492af0abSMd Fahad Iqbal Polash  *
1474492af0abSMd Fahad Iqbal Polash  * In the event of disable request for RSS, this function will zero out RSS
1475492af0abSMd Fahad Iqbal Polash  * LUT, while in the event of enable request for RSS, it will reconfigure RSS
1476492af0abSMd Fahad Iqbal Polash  * LUT.
1477492af0abSMd Fahad Iqbal Polash  */
14784fe36226SPaul M Stillwell Jr void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1479492af0abSMd Fahad Iqbal Polash {
1480492af0abSMd Fahad Iqbal Polash 	u8 *lut;
1481492af0abSMd Fahad Iqbal Polash 
14829efe35d0STony Nguyen 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1483492af0abSMd Fahad Iqbal Polash 	if (!lut)
14844fe36226SPaul M Stillwell Jr 		return;
1485492af0abSMd Fahad Iqbal Polash 
1486492af0abSMd Fahad Iqbal Polash 	if (ena) {
1487492af0abSMd Fahad Iqbal Polash 		if (vsi->rss_lut_user)
1488492af0abSMd Fahad Iqbal Polash 			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1489492af0abSMd Fahad Iqbal Polash 		else
1490492af0abSMd Fahad Iqbal Polash 			ice_fill_rss_lut(lut, vsi->rss_table_size,
1491492af0abSMd Fahad Iqbal Polash 					 vsi->rss_size);
1492492af0abSMd Fahad Iqbal Polash 	}
1493492af0abSMd Fahad Iqbal Polash 
14944fe36226SPaul M Stillwell Jr 	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
14959efe35d0STony Nguyen 	kfree(lut);
1496492af0abSMd Fahad Iqbal Polash }
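
/* The default LUT written above (when no user-supplied LUT exists) is a simple
 * round-robin spread of the VSI's Rx queues across the table. A sketch of that
 * fill, equivalent to what ice_fill_rss_lut() is expected to produce; the
 * helper name is illustrative only.
 */
static inline void ice_example_fill_lut(u8 *lut, u16 table_size, u16 rss_size)
{
	u16 i;

	/* lut[i] cycles 0, 1, ..., rss_size - 1, 0, 1, ... */
	for (i = 0; i < table_size; i++)
		lut[i] = i % rss_size;
}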
1497492af0abSMd Fahad Iqbal Polash 
1498492af0abSMd Fahad Iqbal Polash /**
1499dddd406dSJesse Brandeburg  * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1500dddd406dSJesse Brandeburg  * @vsi: VSI to be configured
1501dddd406dSJesse Brandeburg  * @disable: set to true to have FCS / CRC in the frame data
1502dddd406dSJesse Brandeburg  */
1503dddd406dSJesse Brandeburg void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
1504dddd406dSJesse Brandeburg {
1505dddd406dSJesse Brandeburg 	int i;
1506dddd406dSJesse Brandeburg 
1507dddd406dSJesse Brandeburg 	ice_for_each_rxq(vsi, i)
1508dddd406dSJesse Brandeburg 		if (disable)
1509dddd406dSJesse Brandeburg 			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1510dddd406dSJesse Brandeburg 		else
1511dddd406dSJesse Brandeburg 			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1512dddd406dSJesse Brandeburg }
1513dddd406dSJesse Brandeburg 
1514dddd406dSJesse Brandeburg /**
151537bb8390SAnirudh Venkataramanan  * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
151637bb8390SAnirudh Venkataramanan  * @vsi: VSI to be configured
151737bb8390SAnirudh Venkataramanan  */
15180754d65bSKiran Patil int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
151937bb8390SAnirudh Venkataramanan {
152037bb8390SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
15214015d11eSBrett Creeley 	struct device *dev;
1522b66a972aSBrett Creeley 	u8 *lut, *key;
1523b66a972aSBrett Creeley 	int err;
152437bb8390SAnirudh Venkataramanan 
15254015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
15260754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
15270754d65bSKiran Patil 	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
15280754d65bSKiran Patil 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
15290754d65bSKiran Patil 	} else {
153088865fc4SKarol Kolacinski 		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
153137bb8390SAnirudh Venkataramanan 
15320754d65bSKiran Patil 		/* If orig_rss_size is valid and less than the determined
15330754d65bSKiran Patil 		 * main VSI's rss_size, update the main VSI's rss_size to
15340754d65bSKiran Patil 		 * orig_rss_size so that when the tc-qdisc is deleted, the
15350754d65bSKiran Patil 		 * main VSI RSS table gets programmed back to what it was
15360754d65bSKiran Patil 		 * before setup-tc configured ADQ.
15370754d65bSKiran Patil 		 */
15380754d65bSKiran Patil 		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
15390754d65bSKiran Patil 		    vsi->orig_rss_size <= vsi->num_rxq) {
15400754d65bSKiran Patil 			vsi->rss_size = vsi->orig_rss_size;
15410754d65bSKiran Patil 			/* now orig_rss_size is used, reset it to zero */
15420754d65bSKiran Patil 			vsi->orig_rss_size = 0;
15430754d65bSKiran Patil 		}
15440754d65bSKiran Patil 	}
15450754d65bSKiran Patil 
15469efe35d0STony Nguyen 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
154737bb8390SAnirudh Venkataramanan 	if (!lut)
154837bb8390SAnirudh Venkataramanan 		return -ENOMEM;
154937bb8390SAnirudh Venkataramanan 
155037bb8390SAnirudh Venkataramanan 	if (vsi->rss_lut_user)
155137bb8390SAnirudh Venkataramanan 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
155237bb8390SAnirudh Venkataramanan 	else
155337bb8390SAnirudh Venkataramanan 		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
155437bb8390SAnirudh Venkataramanan 
1555b66a972aSBrett Creeley 	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1556b66a972aSBrett Creeley 	if (err) {
1557b66a972aSBrett Creeley 		dev_err(dev, "set_rss_lut failed, error %d\n", err);
155837bb8390SAnirudh Venkataramanan 		goto ice_vsi_cfg_rss_exit;
155937bb8390SAnirudh Venkataramanan 	}
156037bb8390SAnirudh Venkataramanan 
1561b66a972aSBrett Creeley 	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
156237bb8390SAnirudh Venkataramanan 	if (!key) {
156337bb8390SAnirudh Venkataramanan 		err = -ENOMEM;
156437bb8390SAnirudh Venkataramanan 		goto ice_vsi_cfg_rss_exit;
156537bb8390SAnirudh Venkataramanan 	}
156637bb8390SAnirudh Venkataramanan 
156737bb8390SAnirudh Venkataramanan 	if (vsi->rss_hkey_user)
1568b66a972aSBrett Creeley 		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
156937bb8390SAnirudh Venkataramanan 	else
1570b66a972aSBrett Creeley 		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
157137bb8390SAnirudh Venkataramanan 
1572b66a972aSBrett Creeley 	err = ice_set_rss_key(vsi, key);
1573b66a972aSBrett Creeley 	if (err)
1574b66a972aSBrett Creeley 		dev_err(dev, "set_rss_key failed, error %d\n", err);
157537bb8390SAnirudh Venkataramanan 
15769efe35d0STony Nguyen 	kfree(key);
157737bb8390SAnirudh Venkataramanan ice_vsi_cfg_rss_exit:
15789efe35d0STony Nguyen 	kfree(lut);
157937bb8390SAnirudh Venkataramanan 	return err;
158037bb8390SAnirudh Venkataramanan }
158137bb8390SAnirudh Venkataramanan 
158237bb8390SAnirudh Venkataramanan /**
15831c01c8c6SMd Fahad Iqbal Polash  * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
15841c01c8c6SMd Fahad Iqbal Polash  * @vsi: VSI to be configured
15851c01c8c6SMd Fahad Iqbal Polash  *
15861c01c8c6SMd Fahad Iqbal Polash  * This function will only be called during the VF VSI setup. Upon successful
15871c01c8c6SMd Fahad Iqbal Polash  * completion of package download, this function will configure default RSS
15881c01c8c6SMd Fahad Iqbal Polash  * input sets for VF VSI.
15891c01c8c6SMd Fahad Iqbal Polash  */
15901c01c8c6SMd Fahad Iqbal Polash static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
15911c01c8c6SMd Fahad Iqbal Polash {
15921c01c8c6SMd Fahad Iqbal Polash 	struct ice_pf *pf = vsi->back;
15931c01c8c6SMd Fahad Iqbal Polash 	struct device *dev;
15945518ac2aSTony Nguyen 	int status;
15951c01c8c6SMd Fahad Iqbal Polash 
15961c01c8c6SMd Fahad Iqbal Polash 	dev = ice_pf_to_dev(pf);
15971c01c8c6SMd Fahad Iqbal Polash 	if (ice_is_safe_mode(pf)) {
15981c01c8c6SMd Fahad Iqbal Polash 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
15991c01c8c6SMd Fahad Iqbal Polash 			vsi->vsi_num);
16001c01c8c6SMd Fahad Iqbal Polash 		return;
16011c01c8c6SMd Fahad Iqbal Polash 	}
16021c01c8c6SMd Fahad Iqbal Polash 
16031c01c8c6SMd Fahad Iqbal Polash 	status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
16041c01c8c6SMd Fahad Iqbal Polash 	if (status)
16055f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
16065f87ec48STony Nguyen 			vsi->vsi_num, status);
16071c01c8c6SMd Fahad Iqbal Polash }
16081c01c8c6SMd Fahad Iqbal Polash 
16091c01c8c6SMd Fahad Iqbal Polash /**
1610c90ed40cSTony Nguyen  * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1611c90ed40cSTony Nguyen  * @vsi: VSI to be configured
1612c90ed40cSTony Nguyen  *
1613c90ed40cSTony Nguyen  * This function will only be called after successful download package call
1614c90ed40cSTony Nguyen  * during initialization of PF. Since the downloaded package will erase the
1615c90ed40cSTony Nguyen  * RSS section, this function will configure RSS input sets for different
1616c90ed40cSTony Nguyen  * flow types. The last profile added has the highest priority, therefore 2
1617c90ed40cSTony Nguyen  * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
1618c90ed40cSTony Nguyen  * (i.e. IPv4 src/dst TCP src/dst port).
1619c90ed40cSTony Nguyen  */
1620c90ed40cSTony Nguyen static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1621c90ed40cSTony Nguyen {
1622c90ed40cSTony Nguyen 	u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
1623c90ed40cSTony Nguyen 	struct ice_pf *pf = vsi->back;
1624c90ed40cSTony Nguyen 	struct ice_hw *hw = &pf->hw;
1625c90ed40cSTony Nguyen 	struct device *dev;
16265518ac2aSTony Nguyen 	int status;
1627c90ed40cSTony Nguyen 
1628c90ed40cSTony Nguyen 	dev = ice_pf_to_dev(pf);
1629c90ed40cSTony Nguyen 	if (ice_is_safe_mode(pf)) {
1630c90ed40cSTony Nguyen 		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1631c90ed40cSTony Nguyen 			vsi_num);
1632c90ed40cSTony Nguyen 		return;
1633c90ed40cSTony Nguyen 	}
1634c90ed40cSTony Nguyen 	/* configure RSS for IPv4 with input set IP src/dst */
1635c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1636c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_IPV4);
1637c90ed40cSTony Nguyen 	if (status)
16385f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
16395f87ec48STony Nguyen 			vsi_num, status);
1640c90ed40cSTony Nguyen 
1641c90ed40cSTony Nguyen 	/* configure RSS for IPv6 with input set IPv6 src/dst */
1642c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1643c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_IPV6);
1644c90ed40cSTony Nguyen 	if (status)
16455f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
16465f87ec48STony Nguyen 			vsi_num, status);
1647c90ed40cSTony Nguyen 
1648c90ed40cSTony Nguyen 	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1649c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
1650c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1651c90ed40cSTony Nguyen 	if (status)
16525f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
16535f87ec48STony Nguyen 			vsi_num, status);
1654c90ed40cSTony Nguyen 
1655c90ed40cSTony Nguyen 	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1656c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
1657c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1658c90ed40cSTony Nguyen 	if (status)
16595f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
16605f87ec48STony Nguyen 			vsi_num, status);
1661c90ed40cSTony Nguyen 
1662c90ed40cSTony Nguyen 	/* configure RSS for sctp4 with input set IP src/dst */
1663c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1664c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1665c90ed40cSTony Nguyen 	if (status)
16665f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
16675f87ec48STony Nguyen 			vsi_num, status);
1668c90ed40cSTony Nguyen 
1669c90ed40cSTony Nguyen 	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1670c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
1671c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1672c90ed40cSTony Nguyen 	if (status)
16735f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
16745f87ec48STony Nguyen 			vsi_num, status);
1675c90ed40cSTony Nguyen 
1676c90ed40cSTony Nguyen 	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1677c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
1678c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1679c90ed40cSTony Nguyen 	if (status)
16805f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
16815f87ec48STony Nguyen 			vsi_num, status);
1682c90ed40cSTony Nguyen 
1683c90ed40cSTony Nguyen 	/* configure RSS for sctp6 with input set IPv6 src/dst */
1684c90ed40cSTony Nguyen 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1685c90ed40cSTony Nguyen 				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1686c90ed40cSTony Nguyen 	if (status)
16875f87ec48STony Nguyen 		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
16885f87ec48STony Nguyen 			vsi_num, status);
168986006f99SJesse Brandeburg 
169086006f99SJesse Brandeburg 	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
169186006f99SJesse Brandeburg 				 ICE_FLOW_SEG_HDR_ESP);
169286006f99SJesse Brandeburg 	if (status)
169386006f99SJesse Brandeburg 		dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
169486006f99SJesse Brandeburg 			vsi_num, status);
1695c90ed40cSTony Nguyen }
1696c90ed40cSTony Nguyen 
1697c90ed40cSTony Nguyen /**
169845f5478cSJan Sokolowski  * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
169945f5478cSJan Sokolowski  * @vsi: VSI
170045f5478cSJan Sokolowski  */
170145f5478cSJan Sokolowski static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
170245f5478cSJan Sokolowski {
170345f5478cSJan Sokolowski 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
170445f5478cSJan Sokolowski 		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
170545f5478cSJan Sokolowski 		vsi->rx_buf_len = ICE_RXBUF_1664;
170645f5478cSJan Sokolowski #if (PAGE_SIZE < 8192)
170745f5478cSJan Sokolowski 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
170845f5478cSJan Sokolowski 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
170945f5478cSJan Sokolowski 		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
171045f5478cSJan Sokolowski 		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
171145f5478cSJan Sokolowski #endif
171245f5478cSJan Sokolowski 	} else {
171345f5478cSJan Sokolowski 		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
171445f5478cSJan Sokolowski 		vsi->rx_buf_len = ICE_RXBUF_3072;
171545f5478cSJan Sokolowski 	}
171645f5478cSJan Sokolowski }
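
/* Worked example for the sizing above, assuming a 4 KiB page size, legacy Rx
 * disabled and ICE_2K_TOO_SMALL_WITH_PADDING evaluating false: a 1500-byte MTU
 * takes the middle branch (1536-byte max frame and Rx buffers, minus
 * NET_IP_ALIGN), while a jumbo MTU falls through to the 3072-byte buffer path.
 */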
171745f5478cSJan Sokolowski 
171845f5478cSJan Sokolowski /**
1719769c500dSAkeem G Abodunrin  * ice_pf_state_is_nominal - checks the PF for nominal state
1720769c500dSAkeem G Abodunrin  * @pf: pointer to PF to check
1721769c500dSAkeem G Abodunrin  *
1722769c500dSAkeem G Abodunrin  * Check the PF's state for a collection of bits that would indicate
1723769c500dSAkeem G Abodunrin  * the PF is in a state that would inhibit normal operation for
1724769c500dSAkeem G Abodunrin  * driver functionality.
1725769c500dSAkeem G Abodunrin  *
1726769c500dSAkeem G Abodunrin  * Returns true if PF is in a nominal state, false otherwise
1727769c500dSAkeem G Abodunrin  */
1728769c500dSAkeem G Abodunrin bool ice_pf_state_is_nominal(struct ice_pf *pf)
1729769c500dSAkeem G Abodunrin {
17307e408e07SAnirudh Venkataramanan 	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
1731769c500dSAkeem G Abodunrin 
1732769c500dSAkeem G Abodunrin 	if (!pf)
1733769c500dSAkeem G Abodunrin 		return false;
1734769c500dSAkeem G Abodunrin 
17357e408e07SAnirudh Venkataramanan 	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
17367e408e07SAnirudh Venkataramanan 	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
1737769c500dSAkeem G Abodunrin 		return false;
1738769c500dSAkeem G Abodunrin 
1739769c500dSAkeem G Abodunrin 	return true;
1740769c500dSAkeem G Abodunrin }
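
/* Typical usage sketch: callers gate disruptive reconfiguration on a nominal
 * PF state. The function below is illustrative only, not a driver entry point.
 */
static int ice_example_try_reconfig(struct ice_pf *pf)
{
	/* bail out while a reset or similar disruptive state is pending */
	if (!ice_pf_state_is_nominal(pf))
		return -EBUSY;

	/* ...proceed with the reconfiguration... */
	return 0;
}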
1741769c500dSAkeem G Abodunrin 
1742769c500dSAkeem G Abodunrin /**
174345d3d428SAnirudh Venkataramanan  * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
174445d3d428SAnirudh Venkataramanan  * @vsi: the VSI to be updated
174545d3d428SAnirudh Venkataramanan  */
174645d3d428SAnirudh Venkataramanan void ice_update_eth_stats(struct ice_vsi *vsi)
174745d3d428SAnirudh Venkataramanan {
174845d3d428SAnirudh Venkataramanan 	struct ice_eth_stats *prev_es, *cur_es;
174945d3d428SAnirudh Venkataramanan 	struct ice_hw *hw = &vsi->back->hw;
17502fd5e433SBenjamin Mikailenko 	struct ice_pf *pf = vsi->back;
175145d3d428SAnirudh Venkataramanan 	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */
175245d3d428SAnirudh Venkataramanan 
175345d3d428SAnirudh Venkataramanan 	prev_es = &vsi->eth_stats_prev;
175445d3d428SAnirudh Venkataramanan 	cur_es = &vsi->eth_stats;
175545d3d428SAnirudh Venkataramanan 
17562fd5e433SBenjamin Mikailenko 	if (ice_is_reset_in_progress(pf->state))
17572fd5e433SBenjamin Mikailenko 		vsi->stat_offsets_loaded = false;
17582fd5e433SBenjamin Mikailenko 
175936517fd3SJacob Keller 	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
176036517fd3SJacob Keller 			  &prev_es->rx_bytes, &cur_es->rx_bytes);
176145d3d428SAnirudh Venkataramanan 
176236517fd3SJacob Keller 	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
176336517fd3SJacob Keller 			  &prev_es->rx_unicast, &cur_es->rx_unicast);
176445d3d428SAnirudh Venkataramanan 
176536517fd3SJacob Keller 	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
176636517fd3SJacob Keller 			  &prev_es->rx_multicast, &cur_es->rx_multicast);
176745d3d428SAnirudh Venkataramanan 
176836517fd3SJacob Keller 	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
176936517fd3SJacob Keller 			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);
177045d3d428SAnirudh Venkataramanan 
177145d3d428SAnirudh Venkataramanan 	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
177245d3d428SAnirudh Venkataramanan 			  &prev_es->rx_discards, &cur_es->rx_discards);
177345d3d428SAnirudh Venkataramanan 
177436517fd3SJacob Keller 	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
177536517fd3SJacob Keller 			  &prev_es->tx_bytes, &cur_es->tx_bytes);
177645d3d428SAnirudh Venkataramanan 
177736517fd3SJacob Keller 	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
177836517fd3SJacob Keller 			  &prev_es->tx_unicast, &cur_es->tx_unicast);
177945d3d428SAnirudh Venkataramanan 
178036517fd3SJacob Keller 	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
178136517fd3SJacob Keller 			  &prev_es->tx_multicast, &cur_es->tx_multicast);
178245d3d428SAnirudh Venkataramanan 
178336517fd3SJacob Keller 	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
178436517fd3SJacob Keller 			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);
178545d3d428SAnirudh Venkataramanan 
178645d3d428SAnirudh Venkataramanan 	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
178745d3d428SAnirudh Venkataramanan 			  &prev_es->tx_errors, &cur_es->tx_errors);
178845d3d428SAnirudh Venkataramanan 
178945d3d428SAnirudh Venkataramanan 	vsi->stat_offsets_loaded = true;
179045d3d428SAnirudh Venkataramanan }
179145d3d428SAnirudh Venkataramanan 
179245d3d428SAnirudh Venkataramanan /**
1793401ce33bSBrett Creeley  * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1794401ce33bSBrett Creeley  * @hw: HW pointer
1795401ce33bSBrett Creeley  * @pf_q: index of the Rx queue in the PF's queue space
1796401ce33bSBrett Creeley  * @rxdid: flexible descriptor RXDID
1797401ce33bSBrett Creeley  * @prio: priority for the RXDID for this queue
179877a78115SJacob Keller  * @ena_ts: true to enable timestamp and false to disable timestamp
1799401ce33bSBrett Creeley  */
1800401ce33bSBrett Creeley void
180177a78115SJacob Keller ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
180277a78115SJacob Keller 			bool ena_ts)
1803401ce33bSBrett Creeley {
1804401ce33bSBrett Creeley 	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
1805401ce33bSBrett Creeley 
1806401ce33bSBrett Creeley 	/* clear any previous values */
1807401ce33bSBrett Creeley 	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
1808401ce33bSBrett Creeley 		    QRXFLXP_CNTXT_RXDID_PRIO_M |
1809401ce33bSBrett Creeley 		    QRXFLXP_CNTXT_TS_M);
1810401ce33bSBrett Creeley 
1811401ce33bSBrett Creeley 	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
1812401ce33bSBrett Creeley 		QRXFLXP_CNTXT_RXDID_IDX_M;
1813401ce33bSBrett Creeley 
1814401ce33bSBrett Creeley 	regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
1815401ce33bSBrett Creeley 		QRXFLXP_CNTXT_RXDID_PRIO_M;
1816401ce33bSBrett Creeley 
181777a78115SJacob Keller 	if (ena_ts)
181877a78115SJacob Keller 		/* Enable TimeSync on this queue */
181977a78115SJacob Keller 		regval |= QRXFLXP_CNTXT_TS_M;
182077a78115SJacob Keller 
1821401ce33bSBrett Creeley 	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
1822401ce33bSBrett Creeley }
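
/* Example call sketch: program an Rx queue to use a flexible NIC descriptor
 * profile at the highest RXDID priority with timestamping enabled. The RXDID
 * value and priority 0x3 shown here are assumptions about typical usage, not a
 * prescribed configuration.
 */
static inline void ice_example_cfg_rxq_flex(struct ice_hw *hw, u16 pf_q)
{
	/* ICE_RXDID_FLEX_NIC is assumed to name the flexible NIC profile */
	ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_FLEX_NIC, 0x3, true);
}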
1823401ce33bSBrett Creeley 
18247ad15440SBrett Creeley int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
18257ad15440SBrett Creeley {
18267ad15440SBrett Creeley 	if (q_idx >= vsi->num_rxq)
18277ad15440SBrett Creeley 		return -EINVAL;
18287ad15440SBrett Creeley 
18297ad15440SBrett Creeley 	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
18307ad15440SBrett Creeley }
18317ad15440SBrett Creeley 
1832e72bba21SMaciej Fijalkowski int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
18337ad15440SBrett Creeley {
18347ad15440SBrett Creeley 	struct ice_aqc_add_tx_qgrp *qg_buf;
18357ad15440SBrett Creeley 	int err;
18367ad15440SBrett Creeley 
18377ad15440SBrett Creeley 	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
18387ad15440SBrett Creeley 		return -EINVAL;
18397ad15440SBrett Creeley 
18407ad15440SBrett Creeley 	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
18417ad15440SBrett Creeley 	if (!qg_buf)
18427ad15440SBrett Creeley 		return -ENOMEM;
18437ad15440SBrett Creeley 
18447ad15440SBrett Creeley 	qg_buf->num_txqs = 1;
18457ad15440SBrett Creeley 
18467ad15440SBrett Creeley 	err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
18477ad15440SBrett Creeley 	kfree(qg_buf);
18487ad15440SBrett Creeley 	return err;
18497ad15440SBrett Creeley }
18507ad15440SBrett Creeley 
1851401ce33bSBrett Creeley /**
185272adf242SAnirudh Venkataramanan  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
185372adf242SAnirudh Venkataramanan  * @vsi: the VSI being configured
185472adf242SAnirudh Venkataramanan  *
185572adf242SAnirudh Venkataramanan  * Return 0 on success and a negative value on error
185672adf242SAnirudh Venkataramanan  * Configure the Rx VSI for operation.
185772adf242SAnirudh Venkataramanan  */
185872adf242SAnirudh Venkataramanan int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
185972adf242SAnirudh Venkataramanan {
186072adf242SAnirudh Venkataramanan 	u16 i;
186172adf242SAnirudh Venkataramanan 
18628ede0178SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_VF)
18638ede0178SAnirudh Venkataramanan 		goto setup_rings;
18648ede0178SAnirudh Venkataramanan 
1865efc2214bSMaciej Fijalkowski 	ice_vsi_cfg_frame_size(vsi);
18668ede0178SAnirudh Venkataramanan setup_rings:
186772adf242SAnirudh Venkataramanan 	/* set up individual rings */
186843c7f919SKrzysztof Kazimierczak 	ice_for_each_rxq(vsi, i) {
186943c7f919SKrzysztof Kazimierczak 		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
187072adf242SAnirudh Venkataramanan 
187143c7f919SKrzysztof Kazimierczak 		if (err)
187272adf242SAnirudh Venkataramanan 			return err;
187372adf242SAnirudh Venkataramanan 	}
18741553f4f7SBrett Creeley 
18751553f4f7SBrett Creeley 	return 0;
18761553f4f7SBrett Creeley }
187772adf242SAnirudh Venkataramanan 
187872adf242SAnirudh Venkataramanan /**
187972adf242SAnirudh Venkataramanan  * ice_vsi_cfg_txqs - Configure the VSI for Tx
188072adf242SAnirudh Venkataramanan  * @vsi: the VSI being configured
188103f7a986SAnirudh Venkataramanan  * @rings: Tx ring array to be configured
18822e84f6b3SMaciej Fijalkowski  * @count: number of Tx ring array elements
188372adf242SAnirudh Venkataramanan  *
188472adf242SAnirudh Venkataramanan  * Return 0 on success and a negative value on error
188572adf242SAnirudh Venkataramanan  * Configure the Tx VSI for operation.
188672adf242SAnirudh Venkataramanan  */
188703f7a986SAnirudh Venkataramanan static int
1888e72bba21SMaciej Fijalkowski ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
188972adf242SAnirudh Venkataramanan {
189072adf242SAnirudh Venkataramanan 	struct ice_aqc_add_tx_qgrp *qg_buf;
1891e75d1b2cSMaciej Fijalkowski 	u16 q_idx = 0;
1892d02f734cSMaciej Fijalkowski 	int err = 0;
189372adf242SAnirudh Venkataramanan 
189466486d89SBruce Allan 	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
189572adf242SAnirudh Venkataramanan 	if (!qg_buf)
189672adf242SAnirudh Venkataramanan 		return -ENOMEM;
189772adf242SAnirudh Venkataramanan 
189872adf242SAnirudh Venkataramanan 	qg_buf->num_txqs = 1;
189972adf242SAnirudh Venkataramanan 
19002e84f6b3SMaciej Fijalkowski 	for (q_idx = 0; q_idx < count; q_idx++) {
1901e75d1b2cSMaciej Fijalkowski 		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
1902d02f734cSMaciej Fijalkowski 		if (err)
190372adf242SAnirudh Venkataramanan 			goto err_cfg_txqs;
1904e75d1b2cSMaciej Fijalkowski 	}
1905c5a2a4a3SUsha Ketineni 
190672adf242SAnirudh Venkataramanan err_cfg_txqs:
1907e75d1b2cSMaciej Fijalkowski 	kfree(qg_buf);
190872adf242SAnirudh Venkataramanan 	return err;
190972adf242SAnirudh Venkataramanan }
191072adf242SAnirudh Venkataramanan 
191172adf242SAnirudh Venkataramanan /**
191203f7a986SAnirudh Venkataramanan  * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
191303f7a986SAnirudh Venkataramanan  * @vsi: the VSI being configured
191403f7a986SAnirudh Venkataramanan  *
191503f7a986SAnirudh Venkataramanan  * Return 0 on success and a negative value on error
191603f7a986SAnirudh Venkataramanan  * Configure the Tx VSI for operation.
191703f7a986SAnirudh Venkataramanan  */
191803f7a986SAnirudh Venkataramanan int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
191903f7a986SAnirudh Venkataramanan {
19202e84f6b3SMaciej Fijalkowski 	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
192103f7a986SAnirudh Venkataramanan }
192203f7a986SAnirudh Venkataramanan 
192303f7a986SAnirudh Venkataramanan /**
1924efc2214bSMaciej Fijalkowski  * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1925efc2214bSMaciej Fijalkowski  * @vsi: the VSI being configured
1926efc2214bSMaciej Fijalkowski  *
1927efc2214bSMaciej Fijalkowski  * Return 0 on success and a negative value on error
1928efc2214bSMaciej Fijalkowski  * Configure the Tx queues dedicated for XDP in given VSI for operation.
1929efc2214bSMaciej Fijalkowski  */
1930efc2214bSMaciej Fijalkowski int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1931efc2214bSMaciej Fijalkowski {
19322d4238f5SKrzysztof Kazimierczak 	int ret;
19332d4238f5SKrzysztof Kazimierczak 	int i;
19342d4238f5SKrzysztof Kazimierczak 
19352e84f6b3SMaciej Fijalkowski 	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
19362d4238f5SKrzysztof Kazimierczak 	if (ret)
19372d4238f5SKrzysztof Kazimierczak 		return ret;
19382d4238f5SKrzysztof Kazimierczak 
19399ead7e74SMaciej Fijalkowski 	ice_for_each_rxq(vsi, i)
19409ead7e74SMaciej Fijalkowski 		ice_tx_xsk_pool(vsi, i);
19412d4238f5SKrzysztof Kazimierczak 
1942c4a9c8e7SMichal Swiatkowski 	return 0;
1943efc2214bSMaciej Fijalkowski }
1944efc2214bSMaciej Fijalkowski 
1945efc2214bSMaciej Fijalkowski /**
19469e4ab4c2SBrett Creeley  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
19479e4ab4c2SBrett Creeley  * @intrl: interrupt rate limit in usecs
19489e4ab4c2SBrett Creeley  * @gran: interrupt rate limit granularity in usecs
19499e4ab4c2SBrett Creeley  *
19509e4ab4c2SBrett Creeley  * This function converts a decimal interrupt rate limit in usecs to the format
19519e4ab4c2SBrett Creeley  * expected by firmware.
19529e4ab4c2SBrett Creeley  */
1953b8b47723SJesse Brandeburg static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
19549e4ab4c2SBrett Creeley {
19559e4ab4c2SBrett Creeley 	u32 val = intrl / gran;
19569e4ab4c2SBrett Creeley 
19579e4ab4c2SBrett Creeley 	if (val)
19589e4ab4c2SBrett Creeley 		return val | GLINT_RATE_INTRL_ENA_M;
19599e4ab4c2SBrett Creeley 	return 0;
19609e4ab4c2SBrett Creeley }
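
/* Worked example for the conversion above, assuming the 25 us granularity used
 * by ice_write_intrl(): intrl = 50 us gives 50 / 25 = 2, so the register value
 * becomes (2 | GLINT_RATE_INTRL_ENA_M); intrl = 0 leaves the value at 0, which
 * keeps interrupt rate limiting disabled.
 */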
19619e4ab4c2SBrett Creeley 
19629e4ab4c2SBrett Creeley /**
1963b8b47723SJesse Brandeburg  * ice_write_intrl - write throttle rate limit to interrupt specific register
1964b8b47723SJesse Brandeburg  * @q_vector: pointer to interrupt specific structure
1965b8b47723SJesse Brandeburg  * @intrl: throttle rate limit in microseconds to write
1966b8b47723SJesse Brandeburg  */
1967b8b47723SJesse Brandeburg void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
1968b8b47723SJesse Brandeburg {
1969b8b47723SJesse Brandeburg 	struct ice_hw *hw = &q_vector->vsi->back->hw;
1970b8b47723SJesse Brandeburg 
1971b8b47723SJesse Brandeburg 	wr32(hw, GLINT_RATE(q_vector->reg_idx),
1972b8b47723SJesse Brandeburg 	     ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
1973b8b47723SJesse Brandeburg }
1974b8b47723SJesse Brandeburg 
1975e72bba21SMaciej Fijalkowski static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
1976e72bba21SMaciej Fijalkowski {
1977e72bba21SMaciej Fijalkowski 	switch (rc->type) {
1978e72bba21SMaciej Fijalkowski 	case ICE_RX_CONTAINER:
1979e72bba21SMaciej Fijalkowski 		if (rc->rx_ring)
1980e72bba21SMaciej Fijalkowski 			return rc->rx_ring->q_vector;
1981e72bba21SMaciej Fijalkowski 		break;
1982e72bba21SMaciej Fijalkowski 	case ICE_TX_CONTAINER:
1983e72bba21SMaciej Fijalkowski 		if (rc->tx_ring)
1984e72bba21SMaciej Fijalkowski 			return rc->tx_ring->q_vector;
1985370764e6SNathan Chancellor 		break;
1986e72bba21SMaciej Fijalkowski 	default:
1987e72bba21SMaciej Fijalkowski 		break;
1988e72bba21SMaciej Fijalkowski 	}
1989e72bba21SMaciej Fijalkowski 
1990e72bba21SMaciej Fijalkowski 	return NULL;
1991e72bba21SMaciej Fijalkowski }
1992e72bba21SMaciej Fijalkowski 
1993b8b47723SJesse Brandeburg /**
1994b8b47723SJesse Brandeburg  * __ice_write_itr - write throttle rate to register
1995b8b47723SJesse Brandeburg  * @q_vector: pointer to interrupt data structure
1996b8b47723SJesse Brandeburg  * @rc: pointer to ring container
1997b8b47723SJesse Brandeburg  * @itr: throttle rate in microseconds to write
1998b8b47723SJesse Brandeburg  */
1999b8b47723SJesse Brandeburg static void __ice_write_itr(struct ice_q_vector *q_vector,
2000b8b47723SJesse Brandeburg 			    struct ice_ring_container *rc, u16 itr)
2001b8b47723SJesse Brandeburg {
2002b8b47723SJesse Brandeburg 	struct ice_hw *hw = &q_vector->vsi->back->hw;
2003b8b47723SJesse Brandeburg 
2004b8b47723SJesse Brandeburg 	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
2005b8b47723SJesse Brandeburg 	     ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
2006b8b47723SJesse Brandeburg }
2007b8b47723SJesse Brandeburg 
2008b8b47723SJesse Brandeburg /**
2009b8b47723SJesse Brandeburg  * ice_write_itr - write throttle rate to queue specific register
2010b8b47723SJesse Brandeburg  * @rc: pointer to ring container
2011b8b47723SJesse Brandeburg  * @itr: throttle rate in microseconds to write
2012b8b47723SJesse Brandeburg  */
2013b8b47723SJesse Brandeburg void ice_write_itr(struct ice_ring_container *rc, u16 itr)
2014b8b47723SJesse Brandeburg {
2015b8b47723SJesse Brandeburg 	struct ice_q_vector *q_vector;
2016b8b47723SJesse Brandeburg 
2017e72bba21SMaciej Fijalkowski 	q_vector = ice_pull_qvec_from_rc(rc);
2018e72bba21SMaciej Fijalkowski 	if (!q_vector)
2019b8b47723SJesse Brandeburg 		return;
2020b8b47723SJesse Brandeburg 
2021b8b47723SJesse Brandeburg 	__ice_write_itr(q_vector, rc, itr);
2022b8b47723SJesse Brandeburg }
2023b8b47723SJesse Brandeburg 
2024b8b47723SJesse Brandeburg /**
2025d8eb7ad5SJesse Brandeburg  * ice_set_q_vector_intrl - set up interrupt rate limiting
2026d8eb7ad5SJesse Brandeburg  * @q_vector: the vector to be configured
2027d8eb7ad5SJesse Brandeburg  *
2028d8eb7ad5SJesse Brandeburg  * Interrupt rate limiting is local to the vector, not per-queue, so we must
2029d8eb7ad5SJesse Brandeburg  * detect if either ring container has dynamic moderation enabled to decide
2030d8eb7ad5SJesse Brandeburg  * what to set the interrupt rate limit to via INTRL settings. If dynamic
2031d8eb7ad5SJesse Brandeburg  * moderation is disabled on both, write the cached setting so that the INTRL
2032d8eb7ad5SJesse Brandeburg  * register matches the user-visible value.
2033d8eb7ad5SJesse Brandeburg  */
2034d8eb7ad5SJesse Brandeburg void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
2035d8eb7ad5SJesse Brandeburg {
2036d8eb7ad5SJesse Brandeburg 	if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
2037d8eb7ad5SJesse Brandeburg 		/* if dynamic moderation is enabled, cap each vector at 4 us,
2038d8eb7ad5SJesse Brandeburg 		 * i.e. no more than 250,000 ints/sec, which still allows low
2039d8eb7ad5SJesse Brandeburg 		 * latency while staying below 500,000 interrupts per second.
2040d8eb7ad5SJesse Brandeburg 		 * This reduces CPU load a bit at the lowest latency setting.
2041d8eb7ad5SJesse Brandeburg 		 * The 4 here is a value in microseconds.
2042d8eb7ad5SJesse Brandeburg 		 */
2043d8eb7ad5SJesse Brandeburg 		ice_write_intrl(q_vector, 4);
2044d8eb7ad5SJesse Brandeburg 	} else {
2045d8eb7ad5SJesse Brandeburg 		ice_write_intrl(q_vector, q_vector->intrl);
2046d8eb7ad5SJesse Brandeburg 	}
2047d8eb7ad5SJesse Brandeburg }
2048d8eb7ad5SJesse Brandeburg 
2049d8eb7ad5SJesse Brandeburg /**
205072adf242SAnirudh Venkataramanan  * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
205172adf242SAnirudh Venkataramanan  * @vsi: the VSI being configured
2052047e52c0SAnirudh Venkataramanan  *
2053047e52c0SAnirudh Venkataramanan  * This configures MSIX mode interrupts for the PF VSI, and should not be used
2054047e52c0SAnirudh Venkataramanan  * for the VF VSI.
205572adf242SAnirudh Venkataramanan  */
205672adf242SAnirudh Venkataramanan void ice_vsi_cfg_msix(struct ice_vsi *vsi)
205772adf242SAnirudh Venkataramanan {
205872adf242SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
205972adf242SAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
206088865fc4SKarol Kolacinski 	u16 txq = 0, rxq = 0;
2061d2b464a7SBrett Creeley 	int i, q;
206272adf242SAnirudh Venkataramanan 
20632faf63b6SMaciej Fijalkowski 	ice_for_each_q_vector(vsi, i) {
206472adf242SAnirudh Venkataramanan 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2065b07833a0SBrett Creeley 		u16 reg_idx = q_vector->reg_idx;
206672adf242SAnirudh Venkataramanan 
2067b07833a0SBrett Creeley 		ice_cfg_itr(hw, q_vector);
20689e4ab4c2SBrett Creeley 
206972adf242SAnirudh Venkataramanan 		/* Both the Transmit Queue Interrupt Cause Control register
207072adf242SAnirudh Venkataramanan 		 * and the Receive Queue Interrupt Cause Control register
207172adf242SAnirudh Venkataramanan 		 * expect the MSIX_INDX field to be the vector index
207272adf242SAnirudh Venkataramanan 		 * within the function space and not the absolute
207372adf242SAnirudh Venkataramanan 		 * vector index across the PF or across the device.
207472adf242SAnirudh Venkataramanan 		 * For SR-IOV VF VSIs, the queue vector index always starts
207572adf242SAnirudh Venkataramanan 		 * at 1 since the first vector index (0) is used for OICR
207672adf242SAnirudh Venkataramanan 		 * in VF space. Since VMDq and other PF VSIs are within
207772adf242SAnirudh Venkataramanan 		 * the PF function space, use the vector index that is
207872adf242SAnirudh Venkataramanan 		 * tracked for this PF.
207972adf242SAnirudh Venkataramanan 		 */
208072adf242SAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_tx; q++) {
2081047e52c0SAnirudh Venkataramanan 			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
2082047e52c0SAnirudh Venkataramanan 					      q_vector->tx.itr_idx);
208372adf242SAnirudh Venkataramanan 			txq++;
208472adf242SAnirudh Venkataramanan 		}
208572adf242SAnirudh Venkataramanan 
208672adf242SAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_rx; q++) {
2087047e52c0SAnirudh Venkataramanan 			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
2088047e52c0SAnirudh Venkataramanan 					      q_vector->rx.itr_idx);
208972adf242SAnirudh Venkataramanan 			rxq++;
209072adf242SAnirudh Venkataramanan 		}
209172adf242SAnirudh Venkataramanan 	}
209272adf242SAnirudh Venkataramanan }
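
/* Note on the mapping above: txq and rxq are not reset between q_vectors, so
 * each vector picks up where the previous one left off. For example, if the
 * first vector serves two Tx rings, it programs Tx queues 0 and 1 and the
 * second vector starts programming at Tx queue 2.
 */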
209372adf242SAnirudh Venkataramanan 
209472adf242SAnirudh Venkataramanan /**
209513a6233bSBrett Creeley  * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
209613a6233bSBrett Creeley  * @vsi: the VSI whose rings are to be enabled
209772adf242SAnirudh Venkataramanan  *
209872adf242SAnirudh Venkataramanan  * Returns 0 on success and a negative value on error
209972adf242SAnirudh Venkataramanan  */
210013a6233bSBrett Creeley int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
210172adf242SAnirudh Venkataramanan {
210213a6233bSBrett Creeley 	return ice_vsi_ctrl_all_rx_rings(vsi, true);
210372adf242SAnirudh Venkataramanan }
210472adf242SAnirudh Venkataramanan 
210572adf242SAnirudh Venkataramanan /**
210613a6233bSBrett Creeley  * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
210713a6233bSBrett Creeley  * @vsi: the VSI whose rings are to be disabled
210872adf242SAnirudh Venkataramanan  *
210972adf242SAnirudh Venkataramanan  * Returns 0 on success and a negative value on error
211072adf242SAnirudh Venkataramanan  */
211113a6233bSBrett Creeley int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
211272adf242SAnirudh Venkataramanan {
211313a6233bSBrett Creeley 	return ice_vsi_ctrl_all_rx_rings(vsi, false);
211472adf242SAnirudh Venkataramanan }
211572adf242SAnirudh Venkataramanan 
211672adf242SAnirudh Venkataramanan /**
2117d02f734cSMaciej Fijalkowski  * ice_vsi_stop_tx_rings - Disable Tx rings
2118d02f734cSMaciej Fijalkowski  * @vsi: the VSI being configured
2119d02f734cSMaciej Fijalkowski  * @rst_src: reset source
2120d02f734cSMaciej Fijalkowski  * @rel_vmvf_num: Relative ID of VF/VM
2121d02f734cSMaciej Fijalkowski  * @rings: Tx ring array to be stopped
21222e84f6b3SMaciej Fijalkowski  * @count: number of Tx ring array elements
2123d02f734cSMaciej Fijalkowski  */
2124d02f734cSMaciej Fijalkowski static int
2125d02f734cSMaciej Fijalkowski ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2126e72bba21SMaciej Fijalkowski 		      u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
2127d02f734cSMaciej Fijalkowski {
2128e75d1b2cSMaciej Fijalkowski 	u16 q_idx;
2129d02f734cSMaciej Fijalkowski 
2130d02f734cSMaciej Fijalkowski 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2131d02f734cSMaciej Fijalkowski 		return -EINVAL;
2132d02f734cSMaciej Fijalkowski 
21332e84f6b3SMaciej Fijalkowski 	for (q_idx = 0; q_idx < count; q_idx++) {
2134d02f734cSMaciej Fijalkowski 		struct ice_txq_meta txq_meta = { };
2135e75d1b2cSMaciej Fijalkowski 		int status;
2136d02f734cSMaciej Fijalkowski 
2137d02f734cSMaciej Fijalkowski 		if (!rings || !rings[q_idx])
2138d02f734cSMaciej Fijalkowski 			return -EINVAL;
2139d02f734cSMaciej Fijalkowski 
2140d02f734cSMaciej Fijalkowski 		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
2141e75d1b2cSMaciej Fijalkowski 		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
2142d02f734cSMaciej Fijalkowski 					      rings[q_idx], &txq_meta);
2143d02f734cSMaciej Fijalkowski 
2144d02f734cSMaciej Fijalkowski 		if (status)
2145d02f734cSMaciej Fijalkowski 			return status;
2146bb87ee0eSAnirudh Venkataramanan 	}
214772adf242SAnirudh Venkataramanan 
2148d02f734cSMaciej Fijalkowski 	return 0;
214972adf242SAnirudh Venkataramanan }
21505153a18eSAnirudh Venkataramanan 
21515153a18eSAnirudh Venkataramanan /**
215203f7a986SAnirudh Venkataramanan  * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
215303f7a986SAnirudh Venkataramanan  * @vsi: the VSI being configured
215403f7a986SAnirudh Venkataramanan  * @rst_src: reset source
2155f9867df6SAnirudh Venkataramanan  * @rel_vmvf_num: Relative ID of VF/VM
215603f7a986SAnirudh Venkataramanan  */
2157c8b7abddSBruce Allan int
2158c8b7abddSBruce Allan ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2159c8b7abddSBruce Allan 			  u16 rel_vmvf_num)
216003f7a986SAnirudh Venkataramanan {
21612e84f6b3SMaciej Fijalkowski 	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
216203f7a986SAnirudh Venkataramanan }
216303f7a986SAnirudh Venkataramanan 
216403f7a986SAnirudh Venkataramanan /**
2165efc2214bSMaciej Fijalkowski  * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2166efc2214bSMaciej Fijalkowski  * @vsi: the VSI being configured
2167efc2214bSMaciej Fijalkowski  */
2168efc2214bSMaciej Fijalkowski int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2169efc2214bSMaciej Fijalkowski {
21702e84f6b3SMaciej Fijalkowski 	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2171efc2214bSMaciej Fijalkowski }
2172efc2214bSMaciej Fijalkowski 
2173efc2214bSMaciej Fijalkowski /**
2174f23df522SNorbert Zulinski  * ice_vsi_is_rx_queue_active - check if any of the VSI's Rx queues are active
2175f23df522SNorbert Zulinski  * @vsi: the VSI being configured
2176f23df522SNorbert Zulinski  *
2177f23df522SNorbert Zulinski  * Return true if at least one queue is active.
2178f23df522SNorbert Zulinski  */
2179f23df522SNorbert Zulinski bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
2180f23df522SNorbert Zulinski {
2181f23df522SNorbert Zulinski 	struct ice_pf *pf = vsi->back;
2182f23df522SNorbert Zulinski 	struct ice_hw *hw = &pf->hw;
2183f23df522SNorbert Zulinski 	int i;
2184f23df522SNorbert Zulinski 
2185f23df522SNorbert Zulinski 	ice_for_each_rxq(vsi, i) {
2186f23df522SNorbert Zulinski 		u32 rx_reg;
2187f23df522SNorbert Zulinski 		int pf_q;
2188f23df522SNorbert Zulinski 
2189f23df522SNorbert Zulinski 		pf_q = vsi->rxq_map[i];
2190f23df522SNorbert Zulinski 		rx_reg = rd32(hw, QRX_CTRL(pf_q));
2191f23df522SNorbert Zulinski 		if (rx_reg & QRX_CTRL_QENA_STAT_M)
2192f23df522SNorbert Zulinski 			return true;
2193f23df522SNorbert Zulinski 	}
2194f23df522SNorbert Zulinski 
2195f23df522SNorbert Zulinski 	return false;
2196f23df522SNorbert Zulinski }
2197f23df522SNorbert Zulinski 
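/**
 * ice_vsi_set_tc_cfg - set the VSI's TC configuration
 * @vsi: the VSI being configured
 *
 * Use the single default traffic class when DCB is not enabled, otherwise
 * derive the TC configuration from the current DCB configuration.
 */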
21987b9ffc76SAnirudh Venkataramanan static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
21997b9ffc76SAnirudh Venkataramanan {
22000754d65bSKiran Patil 	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
22010754d65bSKiran Patil 		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
22020754d65bSKiran Patil 		vsi->tc_cfg.numtc = 1;
22030754d65bSKiran Patil 		return;
22040754d65bSKiran Patil 	}
22057b9ffc76SAnirudh Venkataramanan 
22060754d65bSKiran Patil 	/* set VSI TC information based on DCB config */
22070754d65bSKiran Patil 	ice_vsi_set_dcb_tc_cfg(vsi);
22087b9ffc76SAnirudh Venkataramanan }
22097b9ffc76SAnirudh Venkataramanan 
22105153a18eSAnirudh Venkataramanan /**
22112e0e6228SDave Ertman  * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
22122e0e6228SDave Ertman  * @vsi: the VSI being configured
22132e0e6228SDave Ertman  * @tx: bool to determine whether a Tx or an Rx rule is configured
22142e0e6228SDave Ertman  * @create: bool to determine whether to create or remove the rule
22152e0e6228SDave Ertman  */
22162e0e6228SDave Ertman void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
22172e0e6228SDave Ertman {
22185e24d598STony Nguyen 	int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
22191b8f15b6SMichal Swiatkowski 			enum ice_sw_fwd_act_type act);
22202e0e6228SDave Ertman 	struct ice_pf *pf = vsi->back;
22214015d11eSBrett Creeley 	struct device *dev;
22225518ac2aSTony Nguyen 	int status;
22232e0e6228SDave Ertman 
22244015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
22251b8f15b6SMichal Swiatkowski 	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
22262e0e6228SDave Ertman 
222734295a36SDave Ertman 	if (tx) {
22281b8f15b6SMichal Swiatkowski 		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
22291b8f15b6SMichal Swiatkowski 				  ICE_DROP_PACKET);
223034295a36SDave Ertman 	} else {
223134295a36SDave Ertman 		if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
223234295a36SDave Ertman 			status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
223334295a36SDave Ertman 							  create);
223434295a36SDave Ertman 		} else {
223534295a36SDave Ertman 			status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
223634295a36SDave Ertman 					  ICE_FWD_TO_VSI);
223734295a36SDave Ertman 		}
223834295a36SDave Ertman 	}
22392e0e6228SDave Ertman 
22402e0e6228SDave Ertman 	if (status)
22415f87ec48STony Nguyen 		dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
22422e0e6228SDave Ertman 			create ? "adding" : "removing", tx ? "TX" : "RX",
22435f87ec48STony Nguyen 			vsi->vsi_num, status);
22442e0e6228SDave Ertman }
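
/* Usage within this file: ice_vsi_setup() calls ice_cfg_sw_lldp(vsi, true, true)
 * to add the Tx LLDP drop rule for the PF VSI, and ice_vsi_decfg() calls
 * ice_cfg_sw_lldp(vsi, false, false) to remove the Rx rule when the FW LLDP
 * agent is not running.
 */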
22452e0e6228SDave Ertman 
2246d95276ceSAkeem G Abodunrin /**
2247b126bd6bSKiran Patil  * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2248b126bd6bSKiran Patil  * @vsi: pointer to the VSI
2249b126bd6bSKiran Patil  *
2250b126bd6bSKiran Patil  * This function will allocate a new scheduler aggregator node if needed and
2251b126bd6bSKiran Patil  * will move the specified VSI into it.
2252b126bd6bSKiran Patil  */
2253b126bd6bSKiran Patil static void ice_set_agg_vsi(struct ice_vsi *vsi)
2254b126bd6bSKiran Patil {
2255b126bd6bSKiran Patil 	struct device *dev = ice_pf_to_dev(vsi->back);
2256b126bd6bSKiran Patil 	struct ice_agg_node *agg_node_iter = NULL;
2257b126bd6bSKiran Patil 	u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2258b126bd6bSKiran Patil 	struct ice_agg_node *agg_node = NULL;
2259b126bd6bSKiran Patil 	int node_offset, max_agg_nodes = 0;
2260b126bd6bSKiran Patil 	struct ice_port_info *port_info;
2261b126bd6bSKiran Patil 	struct ice_pf *pf = vsi->back;
2262b126bd6bSKiran Patil 	u32 agg_node_id_start = 0;
22635e24d598STony Nguyen 	int status;
2264b126bd6bSKiran Patil 
2265b126bd6bSKiran Patil 	/* create (as needed) scheduler aggregator node and move VSI into
2266b126bd6bSKiran Patil 	 * corresponding aggregator node
2267b126bd6bSKiran Patil 	 * - the PF aggregator node will contain VSIs of type _PF and _CTRL
2268b126bd6bSKiran Patil 	 * - VF aggregator nodes will contain VF VSIs
2269b126bd6bSKiran Patil 	 */
2270b126bd6bSKiran Patil 	port_info = pf->hw.port_info;
2271b126bd6bSKiran Patil 	if (!port_info)
2272b126bd6bSKiran Patil 		return;
2273b126bd6bSKiran Patil 
2274b126bd6bSKiran Patil 	switch (vsi->type) {
2275b126bd6bSKiran Patil 	case ICE_VSI_CTRL:
22760754d65bSKiran Patil 	case ICE_VSI_CHNL:
2277b126bd6bSKiran Patil 	case ICE_VSI_LB:
2278b126bd6bSKiran Patil 	case ICE_VSI_PF:
2279f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
2280b126bd6bSKiran Patil 		max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2281b126bd6bSKiran Patil 		agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2282b126bd6bSKiran Patil 		agg_node_iter = &pf->pf_agg_node[0];
2283b126bd6bSKiran Patil 		break;
2284b126bd6bSKiran Patil 	case ICE_VSI_VF:
2285b126bd6bSKiran Patil 		/* a user can create 'n' VFs on a given PF, but the max number
2286b126bd6bSKiran Patil 		 * of children per aggregator node is only 64. The following
2287b126bd6bSKiran Patil 		 * code handles aggregator(s) for VF VSIs: either select an
2288b126bd6bSKiran Patil 		 * agg_node that was already created, provided its num_vsis < 64,
2289b126bd6bSKiran Patil 		 * or otherwise select the next available node, which will be created
2290b126bd6bSKiran Patil 		 */
2291b126bd6bSKiran Patil 		max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2292b126bd6bSKiran Patil 		agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2293b126bd6bSKiran Patil 		agg_node_iter = &pf->vf_agg_node[0];
2294b126bd6bSKiran Patil 		break;
2295b126bd6bSKiran Patil 	default:
2296b126bd6bSKiran Patil 		/* other VSI type, handle later if needed */
2297b126bd6bSKiran Patil 		dev_dbg(dev, "unexpected VSI type %s\n",
2298b126bd6bSKiran Patil 			ice_vsi_type_str(vsi->type));
2299b126bd6bSKiran Patil 		return;
2300b126bd6bSKiran Patil 	}
2301b126bd6bSKiran Patil 
2302b126bd6bSKiran Patil 	/* find the appropriate aggregator node */
2303b126bd6bSKiran Patil 	for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2304b126bd6bSKiran Patil 		/* see if we can find space in previously created
2305b126bd6bSKiran Patil 		 * node if num_vsis < 64, otherwise skip
2306b126bd6bSKiran Patil 		 */
2307b126bd6bSKiran Patil 		if (agg_node_iter->num_vsis &&
2308b126bd6bSKiran Patil 		    agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2309b126bd6bSKiran Patil 			agg_node_iter++;
2310b126bd6bSKiran Patil 			continue;
2311b126bd6bSKiran Patil 		}
2312b126bd6bSKiran Patil 
2313b126bd6bSKiran Patil 		if (agg_node_iter->valid &&
2314b126bd6bSKiran Patil 		    agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2315b126bd6bSKiran Patil 			agg_id = agg_node_iter->agg_id;
2316b126bd6bSKiran Patil 			agg_node = agg_node_iter;
2317b126bd6bSKiran Patil 			break;
2318b126bd6bSKiran Patil 		}
2319b126bd6bSKiran Patil 
2320b126bd6bSKiran Patil 		/* find unclaimed agg_id */
2321b126bd6bSKiran Patil 		if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2322b126bd6bSKiran Patil 			agg_id = node_offset + agg_node_id_start;
2323b126bd6bSKiran Patil 			agg_node = agg_node_iter;
2324b126bd6bSKiran Patil 			break;
2325b126bd6bSKiran Patil 		}
2326b126bd6bSKiran Patil 		/* move to next agg_node */
2327b126bd6bSKiran Patil 		agg_node_iter++;
2328b126bd6bSKiran Patil 	}
2329b126bd6bSKiran Patil 
2330b126bd6bSKiran Patil 	if (!agg_node)
2331b126bd6bSKiran Patil 		return;
2332b126bd6bSKiran Patil 
2333b126bd6bSKiran Patil 	/* if selected aggregator node was not created, create it */
2334b126bd6bSKiran Patil 	if (!agg_node->valid) {
2335b126bd6bSKiran Patil 		status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2336b126bd6bSKiran Patil 				     (u8)vsi->tc_cfg.ena_tc);
2337b126bd6bSKiran Patil 		if (status) {
2338b126bd6bSKiran Patil 			dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2339b126bd6bSKiran Patil 				agg_id);
2340b126bd6bSKiran Patil 			return;
2341b126bd6bSKiran Patil 		}
2342138f9f50SJulia Lawall 		/* aggregator node is created, store the needed info */
2343b126bd6bSKiran Patil 		agg_node->valid = true;
2344b126bd6bSKiran Patil 		agg_node->agg_id = agg_id;
2345b126bd6bSKiran Patil 	}
2346b126bd6bSKiran Patil 
2347b126bd6bSKiran Patil 	/* move VSI to corresponding aggregator node */
2348b126bd6bSKiran Patil 	status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2349b126bd6bSKiran Patil 				     (u8)vsi->tc_cfg.ena_tc);
2350b126bd6bSKiran Patil 	if (status) {
2351b126bd6bSKiran Patil 		dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
2352b126bd6bSKiran Patil 			vsi->idx, agg_id);
2353b126bd6bSKiran Patil 		return;
2354b126bd6bSKiran Patil 	}
2355b126bd6bSKiran Patil 
2356b126bd6bSKiran Patil 	/* keep active children count for aggregator node */
2357b126bd6bSKiran Patil 	agg_node->num_vsis++;
2358b126bd6bSKiran Patil 
2359b126bd6bSKiran Patil 	/* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
2360b126bd6bSKiran Patil 	 * to aggregator node
2361b126bd6bSKiran Patil 	 */
2362b126bd6bSKiran Patil 	vsi->agg_node = agg_node;
2363b126bd6bSKiran Patil 	dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
2364b126bd6bSKiran Patil 		vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2365b126bd6bSKiran Patil 		vsi->agg_node->num_vsis);
2366b126bd6bSKiran Patil }
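
/* Illustrative walk-through for VF VSIs: the first VF VSI finds no valid node,
 * claims agg_id ICE_VF_AGG_NODE_ID_START at node_offset 0 and creates it.
 * Subsequent VF VSIs reuse that node until its num_vsis reaches
 * ICE_MAX_VSIS_IN_AGG_NODE, after which the search moves on and the next
 * agg_id is claimed and created.
 */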
2367b126bd6bSKiran Patil 
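/**
 * ice_vsi_cfg_tc_lan - configure the LAN Tx scheduler for the VSI's TCs
 * @pf: board private structure
 * @vsi: the VSI being configured
 *
 * Build the per-TC max Tx queue array from the VSI's queue allocation and
 * program the LAN Tx scheduler with it. Returns 0 on success or the error
 * reported by ice_cfg_vsi_lan() otherwise.
 */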
23686624e780SMichal Swiatkowski static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
236937bb8390SAnirudh Venkataramanan {
237037bb8390SAnirudh Venkataramanan 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
23714015d11eSBrett Creeley 	struct device *dev = ice_pf_to_dev(pf);
237237bb8390SAnirudh Venkataramanan 	int ret, i;
237337bb8390SAnirudh Venkataramanan 
23746624e780SMichal Swiatkowski 	/* configure VSI nodes based on the number of queues and TCs */
23756624e780SMichal Swiatkowski 	ice_for_each_traffic_class(i) {
23766624e780SMichal Swiatkowski 		if (!(vsi->tc_cfg.ena_tc & BIT(i)))
23776624e780SMichal Swiatkowski 			continue;
23785743020dSAkeem G Abodunrin 
23796624e780SMichal Swiatkowski 		if (vsi->type == ICE_VSI_CHNL) {
23806624e780SMichal Swiatkowski 			if (!vsi->alloc_txq && vsi->num_txq)
23816624e780SMichal Swiatkowski 				max_txqs[i] = vsi->num_txq;
23826624e780SMichal Swiatkowski 			else
23836624e780SMichal Swiatkowski 				max_txqs[i] = pf->num_lan_tx;
23846624e780SMichal Swiatkowski 		} else {
23856624e780SMichal Swiatkowski 			max_txqs[i] = vsi->alloc_txq;
23866624e780SMichal Swiatkowski 		}
238779733dfcSLarysa Zaremba 
238879733dfcSLarysa Zaremba 		if (vsi->type == ICE_VSI_PF)
238979733dfcSLarysa Zaremba 			max_txqs[i] += vsi->num_xdp_txq;
239037bb8390SAnirudh Venkataramanan 	}
239137bb8390SAnirudh Venkataramanan 
23926624e780SMichal Swiatkowski 	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
23936624e780SMichal Swiatkowski 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
23946624e780SMichal Swiatkowski 			      max_txqs);
23956624e780SMichal Swiatkowski 	if (ret) {
23966624e780SMichal Swiatkowski 		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
23976624e780SMichal Swiatkowski 			vsi->vsi_num, ret);
23986624e780SMichal Swiatkowski 		return ret;
23996624e780SMichal Swiatkowski 	}
24006624e780SMichal Swiatkowski 
24016624e780SMichal Swiatkowski 	return 0;
24026624e780SMichal Swiatkowski }
24036624e780SMichal Swiatkowski 
24046624e780SMichal Swiatkowski /**
24056624e780SMichal Swiatkowski  * ice_vsi_cfg_def - configure default VSI based on the type
24066624e780SMichal Swiatkowski  * @vsi: pointer to VSI
24075e509ab2SJacob Keller  * @params: the parameters to configure this VSI with
24086624e780SMichal Swiatkowski  */
24096624e780SMichal Swiatkowski static int
24105e509ab2SJacob Keller ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
24116624e780SMichal Swiatkowski {
24126624e780SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(vsi->back);
24136624e780SMichal Swiatkowski 	struct ice_pf *pf = vsi->back;
24146624e780SMichal Swiatkowski 	int ret;
24156624e780SMichal Swiatkowski 
241637bb8390SAnirudh Venkataramanan 	vsi->vsw = pf->first_sw;
2417d95276ceSAkeem G Abodunrin 
24185e509ab2SJacob Keller 	ret = ice_vsi_alloc_def(vsi, params->ch);
24196624e780SMichal Swiatkowski 	if (ret)
24206624e780SMichal Swiatkowski 		return ret;
24216624e780SMichal Swiatkowski 
24226624e780SMichal Swiatkowski 	/* allocate memory for Tx/Rx ring stat pointers */
2423c4a9c8e7SMichal Swiatkowski 	ret = ice_vsi_alloc_stat_arrays(vsi);
2424c4a9c8e7SMichal Swiatkowski 	if (ret)
24256624e780SMichal Swiatkowski 		goto unroll_vsi_alloc;
24266624e780SMichal Swiatkowski 
2427148beb61SHenry Tieman 	ice_alloc_fd_res(vsi);
2428148beb61SHenry Tieman 
2429c4a9c8e7SMichal Swiatkowski 	ret = ice_vsi_get_qs(vsi);
2430c4a9c8e7SMichal Swiatkowski 	if (ret) {
243137bb8390SAnirudh Venkataramanan 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
243237bb8390SAnirudh Venkataramanan 			vsi->idx);
24336624e780SMichal Swiatkowski 		goto unroll_vsi_alloc_stat;
243437bb8390SAnirudh Venkataramanan 	}
243537bb8390SAnirudh Venkataramanan 
243637bb8390SAnirudh Venkataramanan 	/* set RSS capabilities */
243737bb8390SAnirudh Venkataramanan 	ice_vsi_set_rss_params(vsi);
243837bb8390SAnirudh Venkataramanan 
2439f9867df6SAnirudh Venkataramanan 	/* set TC configuration */
2440c5a2a4a3SUsha Ketineni 	ice_vsi_set_tc_cfg(vsi);
2441c5a2a4a3SUsha Ketineni 
244237bb8390SAnirudh Venkataramanan 	/* create the VSI */
24435e509ab2SJacob Keller 	ret = ice_vsi_init(vsi, params->flags);
244437bb8390SAnirudh Venkataramanan 	if (ret)
244537bb8390SAnirudh Venkataramanan 		goto unroll_get_qs;
244637bb8390SAnirudh Venkataramanan 
2447bc42afa9SBrett Creeley 	ice_vsi_init_vlan_ops(vsi);
2448bc42afa9SBrett Creeley 
244937bb8390SAnirudh Venkataramanan 	switch (vsi->type) {
2450148beb61SHenry Tieman 	case ICE_VSI_CTRL:
2451f66756e0SGrzegorz Nitka 	case ICE_VSI_SWITCHDEV_CTRL:
245237bb8390SAnirudh Venkataramanan 	case ICE_VSI_PF:
245337bb8390SAnirudh Venkataramanan 		ret = ice_vsi_alloc_q_vectors(vsi);
245437bb8390SAnirudh Venkataramanan 		if (ret)
245537bb8390SAnirudh Venkataramanan 			goto unroll_vsi_init;
245637bb8390SAnirudh Venkataramanan 
245737bb8390SAnirudh Venkataramanan 		ret = ice_vsi_alloc_rings(vsi);
245837bb8390SAnirudh Venkataramanan 		if (ret)
245937bb8390SAnirudh Venkataramanan 			goto unroll_vector_base;
246037bb8390SAnirudh Venkataramanan 
2461288ecf49SBenjamin Mikailenko 		ret = ice_vsi_alloc_ring_stats(vsi);
2462288ecf49SBenjamin Mikailenko 		if (ret)
2463288ecf49SBenjamin Mikailenko 			goto unroll_vector_base;
2464288ecf49SBenjamin Mikailenko 
246537bb8390SAnirudh Venkataramanan 		ice_vsi_map_rings_to_vectors(vsi);
2466ab7470bcSAhmed Zaki 		vsi->stat_offsets_loaded = false;
2467ab7470bcSAhmed Zaki 
24686624e780SMichal Swiatkowski 		if (ice_is_xdp_ena_vsi(vsi)) {
24696624e780SMichal Swiatkowski 			ret = ice_vsi_determine_xdp_res(vsi);
24706624e780SMichal Swiatkowski 			if (ret)
24716624e780SMichal Swiatkowski 				goto unroll_vector_base;
24726624e780SMichal Swiatkowski 			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
24736624e780SMichal Swiatkowski 			if (ret)
24746624e780SMichal Swiatkowski 				goto unroll_vector_base;
24756624e780SMichal Swiatkowski 		}
247637bb8390SAnirudh Venkataramanan 
2477148beb61SHenry Tieman 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
2478148beb61SHenry Tieman 		if (vsi->type != ICE_VSI_CTRL)
2479148beb61SHenry Tieman 			/* Do not exit if configuring RSS had an issue; traffic
2480148beb61SHenry Tieman 			 * can at least be received on the first queue. Hence
2481148beb61SHenry Tieman 			 * there is no need to capture the return value
248237bb8390SAnirudh Venkataramanan 			 */
2483c90ed40cSTony Nguyen 			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
248437bb8390SAnirudh Venkataramanan 				ice_vsi_cfg_rss_lut_key(vsi);
2485c90ed40cSTony Nguyen 				ice_vsi_set_rss_flow_fld(vsi);
2486c90ed40cSTony Nguyen 			}
248728bf2672SBrett Creeley 		ice_init_arfs(vsi);
248837bb8390SAnirudh Venkataramanan 		break;
24890754d65bSKiran Patil 	case ICE_VSI_CHNL:
24900754d65bSKiran Patil 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
24910754d65bSKiran Patil 			ice_vsi_cfg_rss_lut_key(vsi);
24920754d65bSKiran Patil 			ice_vsi_set_rss_flow_fld(vsi);
24930754d65bSKiran Patil 		}
24940754d65bSKiran Patil 		break;
24958ede0178SAnirudh Venkataramanan 	case ICE_VSI_VF:
24968ede0178SAnirudh Venkataramanan 		/* VF driver will take care of creating netdev for this type and
24978ede0178SAnirudh Venkataramanan 		 * map queues to vectors through Virtchnl, PF driver only
24988ede0178SAnirudh Venkataramanan 		 * creates a VSI and corresponding structures for bookkeeping
24998ede0178SAnirudh Venkataramanan 		 * purpose
25008ede0178SAnirudh Venkataramanan 		 */
25018ede0178SAnirudh Venkataramanan 		ret = ice_vsi_alloc_q_vectors(vsi);
25028ede0178SAnirudh Venkataramanan 		if (ret)
25038ede0178SAnirudh Venkataramanan 			goto unroll_vsi_init;
25048ede0178SAnirudh Venkataramanan 
25058ede0178SAnirudh Venkataramanan 		ret = ice_vsi_alloc_rings(vsi);
25068ede0178SAnirudh Venkataramanan 		if (ret)
25078ede0178SAnirudh Venkataramanan 			goto unroll_alloc_q_vector;
25088ede0178SAnirudh Venkataramanan 
2509288ecf49SBenjamin Mikailenko 		ret = ice_vsi_alloc_ring_stats(vsi);
2510288ecf49SBenjamin Mikailenko 		if (ret)
2511288ecf49SBenjamin Mikailenko 			goto unroll_vector_base;
2512ab7470bcSAhmed Zaki 
2513ab7470bcSAhmed Zaki 		vsi->stat_offsets_loaded = false;
2514ab7470bcSAhmed Zaki 
25153a9e32bbSMd Fahad Iqbal Polash 		/* Do not exit if configuring RSS had an issue; traffic can
25163a9e32bbSMd Fahad Iqbal Polash 		 * at least be received on the first queue. Hence there is
25173a9e32bbSMd Fahad Iqbal Polash 		 * no need to capture the return value
25183a9e32bbSMd Fahad Iqbal Polash 		 */
25191c01c8c6SMd Fahad Iqbal Polash 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
25203a9e32bbSMd Fahad Iqbal Polash 			ice_vsi_cfg_rss_lut_key(vsi);
25211c01c8c6SMd Fahad Iqbal Polash 			ice_vsi_set_vf_rss_flow_fld(vsi);
25221c01c8c6SMd Fahad Iqbal Polash 		}
25238ede0178SAnirudh Venkataramanan 		break;
25240e674aebSAnirudh Venkataramanan 	case ICE_VSI_LB:
25250e674aebSAnirudh Venkataramanan 		ret = ice_vsi_alloc_rings(vsi);
25260e674aebSAnirudh Venkataramanan 		if (ret)
25270e674aebSAnirudh Venkataramanan 			goto unroll_vsi_init;
2528288ecf49SBenjamin Mikailenko 
2529288ecf49SBenjamin Mikailenko 		ret = ice_vsi_alloc_ring_stats(vsi);
2530288ecf49SBenjamin Mikailenko 		if (ret)
2531288ecf49SBenjamin Mikailenko 			goto unroll_vector_base;
2532288ecf49SBenjamin Mikailenko 
25330e674aebSAnirudh Venkataramanan 		break;
253437bb8390SAnirudh Venkataramanan 	default:
2535df17b7e0SAnirudh Venkataramanan 		/* clean up the resources and exit */
2536c4a9c8e7SMichal Swiatkowski 		ret = -EINVAL;
253737bb8390SAnirudh Venkataramanan 		goto unroll_vsi_init;
253837bb8390SAnirudh Venkataramanan 	}
253937bb8390SAnirudh Venkataramanan 
25406624e780SMichal Swiatkowski 	return 0;
254137bb8390SAnirudh Venkataramanan 
25426624e780SMichal Swiatkowski unroll_vector_base:
25436624e780SMichal Swiatkowski 	/* reclaim SW interrupts back to the common pool */
25446624e780SMichal Swiatkowski unroll_alloc_q_vector:
25456624e780SMichal Swiatkowski 	ice_vsi_free_q_vectors(vsi);
25466624e780SMichal Swiatkowski unroll_vsi_init:
2547227bf450SMichal Swiatkowski 	ice_vsi_delete_from_hw(vsi);
25486624e780SMichal Swiatkowski unroll_get_qs:
25496624e780SMichal Swiatkowski 	ice_vsi_put_qs(vsi);
25506624e780SMichal Swiatkowski unroll_vsi_alloc_stat:
25516624e780SMichal Swiatkowski 	ice_vsi_free_stats(vsi);
25526624e780SMichal Swiatkowski unroll_vsi_alloc:
25536624e780SMichal Swiatkowski 	ice_vsi_free_arrays(vsi);
25546624e780SMichal Swiatkowski 	return ret;
25556624e780SMichal Swiatkowski }
25566624e780SMichal Swiatkowski 
25576624e780SMichal Swiatkowski /**
2558e1588197SJacob Keller  * ice_vsi_cfg - configure a previously allocated VSI
25596624e780SMichal Swiatkowski  * @vsi: pointer to VSI
25605e509ab2SJacob Keller  * @params: parameters used to configure this VSI
25616624e780SMichal Swiatkowski  */
25625e509ab2SJacob Keller int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
25636624e780SMichal Swiatkowski {
2564e1588197SJacob Keller 	struct ice_pf *pf = vsi->back;
25656624e780SMichal Swiatkowski 	int ret;
25666624e780SMichal Swiatkowski 
2567e1588197SJacob Keller 	if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
2568e1588197SJacob Keller 		return -EINVAL;
2569e1588197SJacob Keller 
2570e1588197SJacob Keller 	vsi->type = params->type;
2571e1588197SJacob Keller 	vsi->port_info = params->pi;
2572e1588197SJacob Keller 
2573e1588197SJacob Keller 	/* For VSIs which don't have a connected VF, this will be NULL */
2574e1588197SJacob Keller 	vsi->vf = params->vf;
2575e1588197SJacob Keller 
25765e509ab2SJacob Keller 	ret = ice_vsi_cfg_def(vsi, params);
25776624e780SMichal Swiatkowski 	if (ret)
25786624e780SMichal Swiatkowski 		return ret;
25796624e780SMichal Swiatkowski 
25806624e780SMichal Swiatkowski 	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
25816624e780SMichal Swiatkowski 	if (ret)
25826624e780SMichal Swiatkowski 		ice_vsi_decfg(vsi);
25836624e780SMichal Swiatkowski 
2584e1588197SJacob Keller 	if (vsi->type == ICE_VSI_CTRL) {
2585e1588197SJacob Keller 		if (vsi->vf) {
2586e1588197SJacob Keller 			WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2587e1588197SJacob Keller 			vsi->vf->ctrl_vsi_idx = vsi->idx;
2588e1588197SJacob Keller 		} else {
2589e1588197SJacob Keller 			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
2590e1588197SJacob Keller 			pf->ctrl_vsi_idx = vsi->idx;
2591e1588197SJacob Keller 		}
2592e1588197SJacob Keller 	}
2593e1588197SJacob Keller 
25946624e780SMichal Swiatkowski 	return ret;
25956624e780SMichal Swiatkowski }
25966624e780SMichal Swiatkowski 
25976624e780SMichal Swiatkowski /**
25986624e780SMichal Swiatkowski  * ice_vsi_decfg - remove all VSI configuration
25996624e780SMichal Swiatkowski  * @vsi: pointer to VSI
26006624e780SMichal Swiatkowski  */
26016624e780SMichal Swiatkowski void ice_vsi_decfg(struct ice_vsi *vsi)
26026624e780SMichal Swiatkowski {
26036624e780SMichal Swiatkowski 	struct ice_pf *pf = vsi->back;
26046624e780SMichal Swiatkowski 	int err;
26056624e780SMichal Swiatkowski 
26066624e780SMichal Swiatkowski 	/* The Rx LLDP rule only exists, and thus only needs to be removed,
26076624e780SMichal Swiatkowski 	 * if the LLDP FW engine is currently stopped
26086624e780SMichal Swiatkowski 	 */
26096624e780SMichal Swiatkowski 	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
26106624e780SMichal Swiatkowski 	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
26116624e780SMichal Swiatkowski 		ice_cfg_sw_lldp(vsi, false, false);
26126624e780SMichal Swiatkowski 
26136624e780SMichal Swiatkowski 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
26146624e780SMichal Swiatkowski 	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
26156624e780SMichal Swiatkowski 	if (err)
26166624e780SMichal Swiatkowski 		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
26176624e780SMichal Swiatkowski 			vsi->vsi_num, err);
26186624e780SMichal Swiatkowski 
26196624e780SMichal Swiatkowski 	if (ice_is_xdp_ena_vsi(vsi))
26206624e780SMichal Swiatkowski 		/* the return value check can be skipped here; it always returns
26216624e780SMichal Swiatkowski 		 * 0 if reset is in progress
26226624e780SMichal Swiatkowski 		 */
26236624e780SMichal Swiatkowski 		ice_destroy_xdp_rings(vsi);
26246624e780SMichal Swiatkowski 
26256624e780SMichal Swiatkowski 	ice_vsi_clear_rings(vsi);
26266624e780SMichal Swiatkowski 	ice_vsi_free_q_vectors(vsi);
26276624e780SMichal Swiatkowski 	ice_vsi_put_qs(vsi);
26286624e780SMichal Swiatkowski 	ice_vsi_free_arrays(vsi);
26296624e780SMichal Swiatkowski 
26306624e780SMichal Swiatkowski 	/* SR-IOV determines the needed MSIX resources all at once instead of
26316624e780SMichal Swiatkowski 	 * per VSI, since when VFs are spawned we know how many VFs there are and how
26326624e780SMichal Swiatkowski 	 * many interrupts each VF needs. SR-IOV MSIX resources are also
26336624e780SMichal Swiatkowski 	 * cleared in the same manner.
26346624e780SMichal Swiatkowski 	 */
26356624e780SMichal Swiatkowski 
26366624e780SMichal Swiatkowski 	if (vsi->type == ICE_VSI_VF &&
26376624e780SMichal Swiatkowski 	    vsi->agg_node && vsi->agg_node->valid)
26386624e780SMichal Swiatkowski 		vsi->agg_node->num_vsis--;
26390754d65bSKiran Patil }
26400754d65bSKiran Patil 
26416624e780SMichal Swiatkowski /**
26426624e780SMichal Swiatkowski  * ice_vsi_setup - Set up a VSI by a given type
26436624e780SMichal Swiatkowski  * @pf: board private structure
26445e509ab2SJacob Keller  * @params: parameters to use when creating the VSI
26456624e780SMichal Swiatkowski  *
26466624e780SMichal Swiatkowski  * This allocates the sw VSI structure and its queue resources.
26476624e780SMichal Swiatkowski  *
26486624e780SMichal Swiatkowski  * Returns pointer to the successfully allocated and configured VSI sw struct on
26496624e780SMichal Swiatkowski  * success, NULL on failure.
26506624e780SMichal Swiatkowski  */
26516624e780SMichal Swiatkowski struct ice_vsi *
26525e509ab2SJacob Keller ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
26536624e780SMichal Swiatkowski {
26546624e780SMichal Swiatkowski 	struct device *dev = ice_pf_to_dev(pf);
26556624e780SMichal Swiatkowski 	struct ice_vsi *vsi;
26566624e780SMichal Swiatkowski 	int ret;
26576624e780SMichal Swiatkowski 
26585e509ab2SJacob Keller 	/* ice_vsi_setup can only initialize a new VSI, and we must have
26595e509ab2SJacob Keller 	 * a port_info structure for it.
26605e509ab2SJacob Keller 	 */
26615e509ab2SJacob Keller 	if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
26625e509ab2SJacob Keller 	    WARN_ON(!params->pi))
26635e509ab2SJacob Keller 		return NULL;
26645e509ab2SJacob Keller 
2665e1588197SJacob Keller 	vsi = ice_vsi_alloc(pf);
26666624e780SMichal Swiatkowski 	if (!vsi) {
26676624e780SMichal Swiatkowski 		dev_err(dev, "could not allocate VSI\n");
26686624e780SMichal Swiatkowski 		return NULL;
266937bb8390SAnirudh Venkataramanan 	}
267037bb8390SAnirudh Venkataramanan 
26715e509ab2SJacob Keller 	ret = ice_vsi_cfg(vsi, params);
26726624e780SMichal Swiatkowski 	if (ret)
26736624e780SMichal Swiatkowski 		goto err_vsi_cfg;
26746624e780SMichal Swiatkowski 
2675d95276ceSAkeem G Abodunrin 	/* Add a switch rule of lookup type ETHERTYPE to drop all Tx Flow
2676d95276ceSAkeem G Abodunrin 	 * Control Frames from VSIs and to restrict a malicious VF from sending
2677d95276ceSAkeem G Abodunrin 	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
2678d95276ceSAkeem G Abodunrin 	 * The rule is added once for the PF VSI in order to create the
2679d95276ceSAkeem G Abodunrin 	 * appropriate recipe, since the VSI/VSI list is ignored with the drop
2680241c8cf0SPaul Greenwalt 	 * action. Also add rules to handle LLDP Tx packets. Tx LLDP packets
2681241c8cf0SPaul Greenwalt 	 * need to be dropped so that VFs cannot send LLDP packets to reconfigure
2682241c8cf0SPaul Greenwalt 	 * DCB settings in the HW.
2683d95276ceSAkeem G Abodunrin 	 */
26846624e780SMichal Swiatkowski 	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
26851b8f15b6SMichal Swiatkowski 		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
26861b8f15b6SMichal Swiatkowski 				 ICE_DROP_PACKET);
26872e0e6228SDave Ertman 		ice_cfg_sw_lldp(vsi, true, true);
2688462acf6aSTony Nguyen 	}
26892e0e6228SDave Ertman 
2690b126bd6bSKiran Patil 	if (!vsi->agg_node)
2691b126bd6bSKiran Patil 		ice_set_agg_vsi(vsi);
26926624e780SMichal Swiatkowski 
269337bb8390SAnirudh Venkataramanan 	return vsi;
269437bb8390SAnirudh Venkataramanan 
26956624e780SMichal Swiatkowski err_vsi_cfg:
26960db66d20SMichal Swiatkowski 	ice_vsi_free(vsi);
269737bb8390SAnirudh Venkataramanan 
269837bb8390SAnirudh Venkataramanan 	return NULL;
269937bb8390SAnirudh Venkataramanan }
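
/* Minimal usage sketch (illustrative only; real callers build the params from
 * their own context and handle errors as appropriate):
 *
 *	struct ice_vsi_cfg_params params = {};
 *	struct ice_vsi *vsi;
 *
 *	params.type = ICE_VSI_PF;
 *	params.pi = pf->hw.port_info;
 *	params.flags = ICE_VSI_FLAG_INIT;
 *	vsi = ice_vsi_setup(pf, &params);
 *	if (!vsi)
 *		return NULL;
 */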
270037bb8390SAnirudh Venkataramanan 
270137bb8390SAnirudh Venkataramanan /**
27025153a18eSAnirudh Venkataramanan  * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
27035153a18eSAnirudh Venkataramanan  * @vsi: the VSI being cleaned up
27045153a18eSAnirudh Venkataramanan  */
27055153a18eSAnirudh Venkataramanan static void ice_vsi_release_msix(struct ice_vsi *vsi)
27065153a18eSAnirudh Venkataramanan {
27075153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
27085153a18eSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
27095153a18eSAnirudh Venkataramanan 	u32 txq = 0;
27105153a18eSAnirudh Venkataramanan 	u32 rxq = 0;
27115153a18eSAnirudh Venkataramanan 	int i, q;
27125153a18eSAnirudh Venkataramanan 
27132faf63b6SMaciej Fijalkowski 	ice_for_each_q_vector(vsi, i) {
27145153a18eSAnirudh Venkataramanan 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
27155153a18eSAnirudh Venkataramanan 
2716b8b47723SJesse Brandeburg 		ice_write_intrl(q_vector, 0);
27175153a18eSAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_tx; q++) {
2718b8b47723SJesse Brandeburg 			ice_write_itr(&q_vector->tx, 0);
27195153a18eSAnirudh Venkataramanan 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2720efc2214bSMaciej Fijalkowski 			if (ice_is_xdp_ena_vsi(vsi)) {
2721efc2214bSMaciej Fijalkowski 				u32 xdp_txq = txq + vsi->num_xdp_txq;
2722efc2214bSMaciej Fijalkowski 
2723efc2214bSMaciej Fijalkowski 				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2724efc2214bSMaciej Fijalkowski 			}
27255153a18eSAnirudh Venkataramanan 			txq++;
27265153a18eSAnirudh Venkataramanan 		}
27275153a18eSAnirudh Venkataramanan 
27285153a18eSAnirudh Venkataramanan 		for (q = 0; q < q_vector->num_ring_rx; q++) {
2729b8b47723SJesse Brandeburg 			ice_write_itr(&q_vector->rx, 0);
27305153a18eSAnirudh Venkataramanan 			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
27315153a18eSAnirudh Venkataramanan 			rxq++;
27325153a18eSAnirudh Venkataramanan 		}
27335153a18eSAnirudh Venkataramanan 	}
27345153a18eSAnirudh Venkataramanan 
27355153a18eSAnirudh Venkataramanan 	ice_flush(hw);
27365153a18eSAnirudh Venkataramanan }
27375153a18eSAnirudh Venkataramanan 
27385153a18eSAnirudh Venkataramanan /**
27395153a18eSAnirudh Venkataramanan  * ice_vsi_free_irq - Free the IRQ association with the OS
27405153a18eSAnirudh Venkataramanan  * @vsi: the VSI being configured
27415153a18eSAnirudh Venkataramanan  */
27425153a18eSAnirudh Venkataramanan void ice_vsi_free_irq(struct ice_vsi *vsi)
27435153a18eSAnirudh Venkataramanan {
27445153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
27455153a18eSAnirudh Venkataramanan 	int i;
27465153a18eSAnirudh Venkataramanan 
27475153a18eSAnirudh Venkataramanan 	if (!vsi->q_vectors || !vsi->irqs_ready)
27485153a18eSAnirudh Venkataramanan 		return;
27495153a18eSAnirudh Venkataramanan 
2750eb0208ecSPreethi Banala 	ice_vsi_release_msix(vsi);
27518ede0178SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_VF)
27528ede0178SAnirudh Venkataramanan 		return;
2753eb0208ecSPreethi Banala 
27545153a18eSAnirudh Venkataramanan 	vsi->irqs_ready = false;
2755d7442f51SAlexander Lobakin 	ice_free_cpu_rx_rmap(vsi);
2756d7442f51SAlexander Lobakin 
27570c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, i) {
27585153a18eSAnirudh Venkataramanan 		int irq_num;
27595153a18eSAnirudh Venkataramanan 
27604aad5335SPiotr Raczynski 		irq_num = vsi->q_vectors[i]->irq.virq;
27615153a18eSAnirudh Venkataramanan 
27625153a18eSAnirudh Venkataramanan 		/* free only the irqs that were actually requested */
27635153a18eSAnirudh Venkataramanan 		if (!vsi->q_vectors[i] ||
27645153a18eSAnirudh Venkataramanan 		    !(vsi->q_vectors[i]->num_ring_tx ||
27655153a18eSAnirudh Venkataramanan 		      vsi->q_vectors[i]->num_ring_rx))
27665153a18eSAnirudh Venkataramanan 			continue;
27675153a18eSAnirudh Venkataramanan 
27685153a18eSAnirudh Venkataramanan 		/* clear the affinity notifier in the IRQ descriptor */
2769d7442f51SAlexander Lobakin 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
27705153a18eSAnirudh Venkataramanan 			irq_set_affinity_notifier(irq_num, NULL);
27715153a18eSAnirudh Venkataramanan 
27725153a18eSAnirudh Venkataramanan 		/* clear the affinity_mask in the IRQ descriptor */
27735153a18eSAnirudh Venkataramanan 		irq_set_affinity_hint(irq_num, NULL);
27745153a18eSAnirudh Venkataramanan 		synchronize_irq(irq_num);
27754015d11eSBrett Creeley 		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
27765153a18eSAnirudh Venkataramanan 	}
27775153a18eSAnirudh Venkataramanan }
27785153a18eSAnirudh Venkataramanan 
27795153a18eSAnirudh Venkataramanan /**
27805153a18eSAnirudh Venkataramanan  * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
27815153a18eSAnirudh Venkataramanan  * @vsi: the VSI having resources freed
27825153a18eSAnirudh Venkataramanan  */
27835153a18eSAnirudh Venkataramanan void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
27845153a18eSAnirudh Venkataramanan {
27855153a18eSAnirudh Venkataramanan 	int i;
27865153a18eSAnirudh Venkataramanan 
27875153a18eSAnirudh Venkataramanan 	if (!vsi->tx_rings)
27885153a18eSAnirudh Venkataramanan 		return;
27895153a18eSAnirudh Venkataramanan 
27905153a18eSAnirudh Venkataramanan 	ice_for_each_txq(vsi, i)
27915153a18eSAnirudh Venkataramanan 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
27925153a18eSAnirudh Venkataramanan 			ice_free_tx_ring(vsi->tx_rings[i]);
27935153a18eSAnirudh Venkataramanan }
27945153a18eSAnirudh Venkataramanan 
27955153a18eSAnirudh Venkataramanan /**
27965153a18eSAnirudh Venkataramanan  * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
27975153a18eSAnirudh Venkataramanan  * @vsi: the VSI having resources freed
27985153a18eSAnirudh Venkataramanan  */
27995153a18eSAnirudh Venkataramanan void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
28005153a18eSAnirudh Venkataramanan {
28015153a18eSAnirudh Venkataramanan 	int i;
28025153a18eSAnirudh Venkataramanan 
28035153a18eSAnirudh Venkataramanan 	if (!vsi->rx_rings)
28045153a18eSAnirudh Venkataramanan 		return;
28055153a18eSAnirudh Venkataramanan 
28065153a18eSAnirudh Venkataramanan 	ice_for_each_rxq(vsi, i)
28075153a18eSAnirudh Venkataramanan 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
28085153a18eSAnirudh Venkataramanan 			ice_free_rx_ring(vsi->rx_rings[i]);
28095153a18eSAnirudh Venkataramanan }
28105153a18eSAnirudh Venkataramanan 
28115153a18eSAnirudh Venkataramanan /**
281207309a0eSAnirudh Venkataramanan  * ice_vsi_close - Shut down a VSI
281307309a0eSAnirudh Venkataramanan  * @vsi: the VSI being shut down
281407309a0eSAnirudh Venkataramanan  */
281507309a0eSAnirudh Venkataramanan void ice_vsi_close(struct ice_vsi *vsi)
281607309a0eSAnirudh Venkataramanan {
2817e97fb1aeSAnirudh Venkataramanan 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
281807309a0eSAnirudh Venkataramanan 		ice_down(vsi);
281907309a0eSAnirudh Venkataramanan 
282007309a0eSAnirudh Venkataramanan 	ice_vsi_free_irq(vsi);
282107309a0eSAnirudh Venkataramanan 	ice_vsi_free_tx_rings(vsi);
282207309a0eSAnirudh Venkataramanan 	ice_vsi_free_rx_rings(vsi);
282307309a0eSAnirudh Venkataramanan }
282407309a0eSAnirudh Venkataramanan 
282507309a0eSAnirudh Venkataramanan /**
28269d614b64SAnirudh Venkataramanan  * ice_ena_vsi - resume a VSI
28279d614b64SAnirudh Venkataramanan  * @vsi: the VSI being resumed
28289d614b64SAnirudh Venkataramanan  * @locked: is the rtnl_lock already held
28299d614b64SAnirudh Venkataramanan  */
28309d614b64SAnirudh Venkataramanan int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
28319d614b64SAnirudh Venkataramanan {
28329d614b64SAnirudh Venkataramanan 	int err = 0;
28339d614b64SAnirudh Venkataramanan 
2834e97fb1aeSAnirudh Venkataramanan 	if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
28359d614b64SAnirudh Venkataramanan 		return 0;
28369d614b64SAnirudh Venkataramanan 
2837e97fb1aeSAnirudh Venkataramanan 	clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
28389d614b64SAnirudh Venkataramanan 
28399d614b64SAnirudh Venkataramanan 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
28409d614b64SAnirudh Venkataramanan 		if (netif_running(vsi->netdev)) {
28419d614b64SAnirudh Venkataramanan 			if (!locked)
28429d614b64SAnirudh Venkataramanan 				rtnl_lock();
28439d614b64SAnirudh Venkataramanan 
2844e95fc857SKrzysztof Goreczny 			err = ice_open_internal(vsi->netdev);
28459d614b64SAnirudh Venkataramanan 
28469d614b64SAnirudh Venkataramanan 			if (!locked)
28479d614b64SAnirudh Venkataramanan 				rtnl_unlock();
28489d614b64SAnirudh Venkataramanan 		}
2849148beb61SHenry Tieman 	} else if (vsi->type == ICE_VSI_CTRL) {
2850148beb61SHenry Tieman 		err = ice_vsi_open_ctrl(vsi);
28519d614b64SAnirudh Venkataramanan 	}
28529d614b64SAnirudh Venkataramanan 
28539d614b64SAnirudh Venkataramanan 	return err;
28549d614b64SAnirudh Venkataramanan }
28559d614b64SAnirudh Venkataramanan 
28569d614b64SAnirudh Venkataramanan /**
28579d614b64SAnirudh Venkataramanan  * ice_dis_vsi - pause a VSI
28589d614b64SAnirudh Venkataramanan  * @vsi: the VSI being paused
28599d614b64SAnirudh Venkataramanan  * @locked: is the rtnl_lock already held
28609d614b64SAnirudh Venkataramanan  */
28619d614b64SAnirudh Venkataramanan void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
28629d614b64SAnirudh Venkataramanan {
2863e97fb1aeSAnirudh Venkataramanan 	if (test_bit(ICE_VSI_DOWN, vsi->state))
28649d614b64SAnirudh Venkataramanan 		return;
28659d614b64SAnirudh Venkataramanan 
2866e97fb1aeSAnirudh Venkataramanan 	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
28679d614b64SAnirudh Venkataramanan 
28689d614b64SAnirudh Venkataramanan 	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
28699d614b64SAnirudh Venkataramanan 		if (netif_running(vsi->netdev)) {
28709d614b64SAnirudh Venkataramanan 			if (!locked)
28719d614b64SAnirudh Venkataramanan 				rtnl_lock();
28729d614b64SAnirudh Venkataramanan 
2873e95fc857SKrzysztof Goreczny 			ice_vsi_close(vsi);
28749d614b64SAnirudh Venkataramanan 
28759d614b64SAnirudh Venkataramanan 			if (!locked)
28769d614b64SAnirudh Venkataramanan 				rtnl_unlock();
28779d614b64SAnirudh Venkataramanan 		} else {
28789d614b64SAnirudh Venkataramanan 			ice_vsi_close(vsi);
28799d614b64SAnirudh Venkataramanan 		}
2880f66756e0SGrzegorz Nitka 	} else if (vsi->type == ICE_VSI_CTRL ||
2881f66756e0SGrzegorz Nitka 		   vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
2882148beb61SHenry Tieman 		ice_vsi_close(vsi);
28839d614b64SAnirudh Venkataramanan 	}
28849d614b64SAnirudh Venkataramanan }
28859d614b64SAnirudh Venkataramanan 
28869d614b64SAnirudh Venkataramanan /**
28875153a18eSAnirudh Venkataramanan  * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
28885153a18eSAnirudh Venkataramanan  * @vsi: the VSI being un-configured
28895153a18eSAnirudh Venkataramanan  */
28905153a18eSAnirudh Venkataramanan void ice_vsi_dis_irq(struct ice_vsi *vsi)
28915153a18eSAnirudh Venkataramanan {
28925153a18eSAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
28935153a18eSAnirudh Venkataramanan 	struct ice_hw *hw = &pf->hw;
28945153a18eSAnirudh Venkataramanan 	u32 val;
28955153a18eSAnirudh Venkataramanan 	int i;
28965153a18eSAnirudh Venkataramanan 
28975153a18eSAnirudh Venkataramanan 	/* disable interrupt causation from each queue */
28985153a18eSAnirudh Venkataramanan 	if (vsi->tx_rings) {
28995153a18eSAnirudh Venkataramanan 		ice_for_each_txq(vsi, i) {
29005153a18eSAnirudh Venkataramanan 			if (vsi->tx_rings[i]) {
29015153a18eSAnirudh Venkataramanan 				u16 reg;
29025153a18eSAnirudh Venkataramanan 
29035153a18eSAnirudh Venkataramanan 				reg = vsi->tx_rings[i]->reg_idx;
29045153a18eSAnirudh Venkataramanan 				val = rd32(hw, QINT_TQCTL(reg));
29055153a18eSAnirudh Venkataramanan 				val &= ~QINT_TQCTL_CAUSE_ENA_M;
29065153a18eSAnirudh Venkataramanan 				wr32(hw, QINT_TQCTL(reg), val);
29075153a18eSAnirudh Venkataramanan 			}
29085153a18eSAnirudh Venkataramanan 		}
29095153a18eSAnirudh Venkataramanan 	}
29105153a18eSAnirudh Venkataramanan 
29115153a18eSAnirudh Venkataramanan 	if (vsi->rx_rings) {
29125153a18eSAnirudh Venkataramanan 		ice_for_each_rxq(vsi, i) {
29135153a18eSAnirudh Venkataramanan 			if (vsi->rx_rings[i]) {
29145153a18eSAnirudh Venkataramanan 				u16 reg;
29155153a18eSAnirudh Venkataramanan 
29165153a18eSAnirudh Venkataramanan 				reg = vsi->rx_rings[i]->reg_idx;
29175153a18eSAnirudh Venkataramanan 				val = rd32(hw, QINT_RQCTL(reg));
29185153a18eSAnirudh Venkataramanan 				val &= ~QINT_RQCTL_CAUSE_ENA_M;
29195153a18eSAnirudh Venkataramanan 				wr32(hw, QINT_RQCTL(reg), val);
29205153a18eSAnirudh Venkataramanan 			}
29215153a18eSAnirudh Venkataramanan 		}
29225153a18eSAnirudh Venkataramanan 	}
29235153a18eSAnirudh Venkataramanan 
29245153a18eSAnirudh Venkataramanan 	/* disable each interrupt */
2925462acf6aSTony Nguyen 	ice_for_each_q_vector(vsi, i) {
2926462acf6aSTony Nguyen 		if (!vsi->q_vectors[i])
2927462acf6aSTony Nguyen 			continue;
2928b07833a0SBrett Creeley 		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
2929462acf6aSTony Nguyen 	}
29305153a18eSAnirudh Venkataramanan 
29315153a18eSAnirudh Venkataramanan 	ice_flush(hw);
2932b07833a0SBrett Creeley 
2933da4a9e73SBrett Creeley 	/* don't call synchronize_irq() for VFs from the host */
2934da4a9e73SBrett Creeley 	if (vsi->type == ICE_VSI_VF)
2935da4a9e73SBrett Creeley 		return;
2936da4a9e73SBrett Creeley 
29370c2561c8SBrett Creeley 	ice_for_each_q_vector(vsi, i)
29384aad5335SPiotr Raczynski 		synchronize_irq(vsi->q_vectors[i]->irq.virq);
29395153a18eSAnirudh Venkataramanan }
29405153a18eSAnirudh Venkataramanan 
29415153a18eSAnirudh Venkataramanan /**
2942df0f8479SAnirudh Venkataramanan  * ice_vsi_release - Delete a VSI and free its resources
2943df0f8479SAnirudh Venkataramanan  * @vsi: the VSI being removed
2944df0f8479SAnirudh Venkataramanan  *
2945df0f8479SAnirudh Venkataramanan  * Returns 0 on success or < 0 on error
2946df0f8479SAnirudh Venkataramanan  */
2947df0f8479SAnirudh Venkataramanan int ice_vsi_release(struct ice_vsi *vsi)
2948df0f8479SAnirudh Venkataramanan {
2949df0f8479SAnirudh Venkataramanan 	struct ice_pf *pf;
2950df0f8479SAnirudh Venkataramanan 
2951df0f8479SAnirudh Venkataramanan 	if (!vsi->back)
2952df0f8479SAnirudh Venkataramanan 		return -ENODEV;
2953df0f8479SAnirudh Venkataramanan 	pf = vsi->back;
2954b751930cSBrett Creeley 
2955df0f8479SAnirudh Venkataramanan 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2956df0f8479SAnirudh Venkataramanan 		ice_rss_clean(vsi);
2957df0f8479SAnirudh Venkataramanan 
2958df0f8479SAnirudh Venkataramanan 	ice_vsi_close(vsi);
29596624e780SMichal Swiatkowski 	ice_vsi_decfg(vsi);
2960aa6ccf3fSBrett Creeley 
2961df0f8479SAnirudh Venkataramanan 	/* retain the SW VSI data structure since it is needed to unregister
2962df0f8479SAnirudh Venkataramanan 	 * and free the VSI netdev when the PF is not in a reset recovery
2963df0f8479SAnirudh Venkataramanan 	 * pending state, for example during rmmod.
2964df0f8479SAnirudh Venkataramanan 	 */
29655df7e45dSDave Ertman 	if (!ice_is_reset_in_progress(pf->state))
2966227bf450SMichal Swiatkowski 		ice_vsi_delete(vsi);
2967df0f8479SAnirudh Venkataramanan 
2968df0f8479SAnirudh Venkataramanan 	return 0;
2969df0f8479SAnirudh Venkataramanan }
2970df0f8479SAnirudh Venkataramanan 
2971df0f8479SAnirudh Venkataramanan /**
297261dc79ceSMichal Swiatkowski  * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
297361dc79ceSMichal Swiatkowski  * @vsi: VSI connected with q_vectors
297461dc79ceSMichal Swiatkowski  * @coalesce: array of struct with stored coalesce
297561dc79ceSMichal Swiatkowski  *
297661dc79ceSMichal Swiatkowski  * Returns array size.
297761dc79ceSMichal Swiatkowski  */
297861dc79ceSMichal Swiatkowski static int
297961dc79ceSMichal Swiatkowski ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
298061dc79ceSMichal Swiatkowski 			     struct ice_coalesce_stored *coalesce)
298161dc79ceSMichal Swiatkowski {
298261dc79ceSMichal Swiatkowski 	int i;
298361dc79ceSMichal Swiatkowski 
298461dc79ceSMichal Swiatkowski 	ice_for_each_q_vector(vsi, i) {
298561dc79ceSMichal Swiatkowski 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
298661dc79ceSMichal Swiatkowski 
2987bf13502eSMichal Wilczynski 		coalesce[i].itr_tx = q_vector->tx.itr_settings;
2988bf13502eSMichal Wilczynski 		coalesce[i].itr_rx = q_vector->rx.itr_settings;
298961dc79ceSMichal Swiatkowski 		coalesce[i].intrl = q_vector->intrl;
29902ec56385SPaul M Stillwell Jr 
29912ec56385SPaul M Stillwell Jr 		if (i < vsi->num_txq)
29922ec56385SPaul M Stillwell Jr 			coalesce[i].tx_valid = true;
29932ec56385SPaul M Stillwell Jr 		if (i < vsi->num_rxq)
29942ec56385SPaul M Stillwell Jr 			coalesce[i].rx_valid = true;
299561dc79ceSMichal Swiatkowski 	}
299661dc79ceSMichal Swiatkowski 
299761dc79ceSMichal Swiatkowski 	return vsi->num_q_vectors;
299861dc79ceSMichal Swiatkowski }
299961dc79ceSMichal Swiatkowski 
300061dc79ceSMichal Swiatkowski /**
300161dc79ceSMichal Swiatkowski  * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
300261dc79ceSMichal Swiatkowski  * @vsi: VSI connected with q_vectors
300361dc79ceSMichal Swiatkowski  * @coalesce: pointer to array of struct with stored coalesce
300461dc79ceSMichal Swiatkowski  * @size: size of coalesce array
300561dc79ceSMichal Swiatkowski  *
300661dc79ceSMichal Swiatkowski  * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
300761dc79ceSMichal Swiatkowski  * ITR params in arrays. If size is 0 or coalesce wasn't stored, set coalesce
300861dc79ceSMichal Swiatkowski  * to the default value.
300961dc79ceSMichal Swiatkowski  */
301061dc79ceSMichal Swiatkowski static void
301161dc79ceSMichal Swiatkowski ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
301261dc79ceSMichal Swiatkowski 			     struct ice_coalesce_stored *coalesce, int size)
301361dc79ceSMichal Swiatkowski {
3014b8b47723SJesse Brandeburg 	struct ice_ring_container *rc;
301561dc79ceSMichal Swiatkowski 	int i;
301661dc79ceSMichal Swiatkowski 
301761dc79ceSMichal Swiatkowski 	if ((size && !coalesce) || !vsi)
301861dc79ceSMichal Swiatkowski 		return;
301961dc79ceSMichal Swiatkowski 
30202ec56385SPaul M Stillwell Jr 	/* There are a couple of cases that have to be handled here:
30212ec56385SPaul M Stillwell Jr 	 *   1. The case where the number of queue vectors stays the same, but
30222ec56385SPaul M Stillwell Jr 	 *      the number of Tx or Rx rings changes (the first for loop)
30232ec56385SPaul M Stillwell Jr 	 *   2. The case where the number of queue vectors increased (the
30242ec56385SPaul M Stillwell Jr 	 *      second for loop)
3025a039f6fcSBrett Creeley 	 */
30262ec56385SPaul M Stillwell Jr 	for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
30272ec56385SPaul M Stillwell Jr 		/* There are 2 cases to handle here and they are the same for
30282ec56385SPaul M Stillwell Jr 		 * both Tx and Rx:
30292ec56385SPaul M Stillwell Jr 		 *   if the entry was valid previously (coalesce[i].[tr]x_valid)
30302ec56385SPaul M Stillwell Jr 		 *   and the loop variable is less than the number of rings
30312ec56385SPaul M Stillwell Jr 		 *   allocated, then write the previous values
30322ec56385SPaul M Stillwell Jr 		 *
30332ec56385SPaul M Stillwell Jr 		 *   if the entry was not valid previously, but the index is
30342ec56385SPaul M Stillwell Jr 		 *   still less than the number of rings now allocated (meaning
30352ec56385SPaul M Stillwell Jr 		 *   the ring count increased), then write out the values from
30362ec56385SPaul M Stillwell Jr 		 *   the first element
3037b8b47723SJesse Brandeburg 		 *
3038b8b47723SJesse Brandeburg 		 *   Also, always write the ITR, even if ITR_IS_DYNAMIC is set,
3039b8b47723SJesse Brandeburg 		 *   as there is no harm because the dynamic algorithm
3040b8b47723SJesse Brandeburg 		 *   will just overwrite it.
30412ec56385SPaul M Stillwell Jr 		 */
3042b8b47723SJesse Brandeburg 		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
3043b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->rx;
3044bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[i].itr_rx;
3045b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3046b8b47723SJesse Brandeburg 		} else if (i < vsi->alloc_rxq) {
3047b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->rx;
3048bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[0].itr_rx;
3049b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3050b8b47723SJesse Brandeburg 		}
30512ec56385SPaul M Stillwell Jr 
3052b8b47723SJesse Brandeburg 		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
3053b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->tx;
3054bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[i].itr_tx;
3055b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3056b8b47723SJesse Brandeburg 		} else if (i < vsi->alloc_txq) {
3057b8b47723SJesse Brandeburg 			rc = &vsi->q_vectors[i]->tx;
3058bf13502eSMichal Wilczynski 			rc->itr_settings = coalesce[0].itr_tx;
3059b8b47723SJesse Brandeburg 			ice_write_itr(rc, rc->itr_setting);
3060b8b47723SJesse Brandeburg 		}
30612ec56385SPaul M Stillwell Jr 
3062b8b47723SJesse Brandeburg 		vsi->q_vectors[i]->intrl = coalesce[i].intrl;
3063d16a4f45SJesse Brandeburg 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
30642ec56385SPaul M Stillwell Jr 	}
30652ec56385SPaul M Stillwell Jr 
30662ec56385SPaul M Stillwell Jr 	/* the number of queue vectors increased so write whatever is in
30672ec56385SPaul M Stillwell Jr 	 * the first element
30682ec56385SPaul M Stillwell Jr 	 */
30692ec56385SPaul M Stillwell Jr 	for (; i < vsi->num_q_vectors; i++) {
3070b8b47723SJesse Brandeburg 		/* transmit */
3071b8b47723SJesse Brandeburg 		rc = &vsi->q_vectors[i]->tx;
3072bf13502eSMichal Wilczynski 		rc->itr_settings = coalesce[0].itr_tx;
3073b8b47723SJesse Brandeburg 		ice_write_itr(rc, rc->itr_setting);
3074b8b47723SJesse Brandeburg 
3075b8b47723SJesse Brandeburg 		/* receive */
3076b8b47723SJesse Brandeburg 		rc = &vsi->q_vectors[i]->rx;
3077bf13502eSMichal Wilczynski 		rc->itr_settings = coalesce[0].itr_rx;
3078b8b47723SJesse Brandeburg 		ice_write_itr(rc, rc->itr_setting);
3079b8b47723SJesse Brandeburg 
3080b8b47723SJesse Brandeburg 		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
3081d16a4f45SJesse Brandeburg 		ice_set_q_vector_intrl(vsi->q_vectors[i]);
30822ec56385SPaul M Stillwell Jr 	}
308361dc79ceSMichal Swiatkowski }
308461dc79ceSMichal Swiatkowski 
308561dc79ceSMichal Swiatkowski /**
3086288ecf49SBenjamin Mikailenko  * ice_vsi_realloc_stat_arrays - Frees unused stat structures
3087288ecf49SBenjamin Mikailenko  * @vsi: VSI pointer
3088288ecf49SBenjamin Mikailenko  * @prev_txq: Number of Tx rings before ring reallocation
3089288ecf49SBenjamin Mikailenko  * @prev_rxq: Number of Rx rings before ring reallocation
3090288ecf49SBenjamin Mikailenko  */
3091d8a23ff6STony Nguyen static void
3092288ecf49SBenjamin Mikailenko ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
3093288ecf49SBenjamin Mikailenko {
3094288ecf49SBenjamin Mikailenko 	struct ice_vsi_stats *vsi_stat;
3095288ecf49SBenjamin Mikailenko 	struct ice_pf *pf = vsi->back;
3096288ecf49SBenjamin Mikailenko 	int i;
3097288ecf49SBenjamin Mikailenko 
3098288ecf49SBenjamin Mikailenko 	if (!prev_txq || !prev_rxq)
3099d8a23ff6STony Nguyen 		return;
3100288ecf49SBenjamin Mikailenko 	if (vsi->type == ICE_VSI_CHNL)
3101d8a23ff6STony Nguyen 		return;
3102288ecf49SBenjamin Mikailenko 
3103288ecf49SBenjamin Mikailenko 	vsi_stat = pf->vsi_stats[vsi->idx];
3104288ecf49SBenjamin Mikailenko 
3105288ecf49SBenjamin Mikailenko 	if (vsi->num_txq < prev_txq) {
3106288ecf49SBenjamin Mikailenko 		for (i = vsi->num_txq; i < prev_txq; i++) {
3107288ecf49SBenjamin Mikailenko 			if (vsi_stat->tx_ring_stats[i]) {
3108288ecf49SBenjamin Mikailenko 				kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
3109288ecf49SBenjamin Mikailenko 				WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
3110288ecf49SBenjamin Mikailenko 			}
3111288ecf49SBenjamin Mikailenko 		}
3112288ecf49SBenjamin Mikailenko 	}
3113288ecf49SBenjamin Mikailenko 
3114288ecf49SBenjamin Mikailenko 	if (vsi->num_rxq < prev_rxq) {
3115288ecf49SBenjamin Mikailenko 		for (i = vsi->num_rxq; i < prev_rxq; i++) {
3116288ecf49SBenjamin Mikailenko 			if (vsi_stat->rx_ring_stats[i]) {
3117288ecf49SBenjamin Mikailenko 				kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
3118288ecf49SBenjamin Mikailenko 				WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
3119288ecf49SBenjamin Mikailenko 			}
3120288ecf49SBenjamin Mikailenko 		}
3121288ecf49SBenjamin Mikailenko 	}
3122288ecf49SBenjamin Mikailenko }
3123288ecf49SBenjamin Mikailenko 
3124288ecf49SBenjamin Mikailenko /**
3125df0f8479SAnirudh Venkataramanan  * ice_vsi_rebuild - Rebuild VSI after reset
3126df0f8479SAnirudh Venkataramanan  * @vsi: VSI to be rebuilt
31275e509ab2SJacob Keller  * @vsi_flags: flags used for VSI rebuild flow
31285e509ab2SJacob Keller  *
31295e509ab2SJacob Keller  * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
31305e509ab2SJacob Keller  * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
3131df0f8479SAnirudh Venkataramanan  *
3132df0f8479SAnirudh Venkataramanan  * Returns 0 on success and negative value on failure
3133df0f8479SAnirudh Venkataramanan  */
31345e509ab2SJacob Keller int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
3135df0f8479SAnirudh Venkataramanan {
31365e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
313761dc79ceSMichal Swiatkowski 	struct ice_coalesce_stored *coalesce;
31386624e780SMichal Swiatkowski 	int ret, prev_txq, prev_rxq;
313961dc79ceSMichal Swiatkowski 	int prev_num_q_vectors = 0;
3140c5a2a4a3SUsha Ketineni 	struct ice_pf *pf;
3141df0f8479SAnirudh Venkataramanan 
3142df0f8479SAnirudh Venkataramanan 	if (!vsi)
3143df0f8479SAnirudh Venkataramanan 		return -EINVAL;
3144df0f8479SAnirudh Venkataramanan 
31455e509ab2SJacob Keller 	params = ice_vsi_to_params(vsi);
31465e509ab2SJacob Keller 	params.flags = vsi_flags;
31475e509ab2SJacob Keller 
3148c5a2a4a3SUsha Ketineni 	pf = vsi->back;
31496624e780SMichal Swiatkowski 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3150b03d519dSJacob Keller 		return -EINVAL;
31517eeac889SAkeem G Abodunrin 
315261dc79ceSMichal Swiatkowski 	coalesce = kcalloc(vsi->num_q_vectors,
315361dc79ceSMichal Swiatkowski 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
31542ec56385SPaul M Stillwell Jr 	if (!coalesce)
31552ec56385SPaul M Stillwell Jr 		return -ENOMEM;
31562ec56385SPaul M Stillwell Jr 
31572ec56385SPaul M Stillwell Jr 	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
31582ec56385SPaul M Stillwell Jr 
3159288ecf49SBenjamin Mikailenko 	prev_txq = vsi->num_txq;
3160288ecf49SBenjamin Mikailenko 	prev_rxq = vsi->num_rxq;
3161288ecf49SBenjamin Mikailenko 
31626624e780SMichal Swiatkowski 	ice_vsi_decfg(vsi);
31635e509ab2SJacob Keller 	ret = ice_vsi_cfg_def(vsi, &params);
3164ff7e9321SBrett Creeley 	if (ret)
31656624e780SMichal Swiatkowski 		goto err_vsi_cfg;
31667eeac889SAkeem G Abodunrin 
31676624e780SMichal Swiatkowski 	ret = ice_vsi_cfg_tc_lan(pf, vsi);
31682ccc1c1cSTony Nguyen 	if (ret) {
31695e509ab2SJacob Keller 		if (vsi_flags & ICE_VSI_FLAG_INIT) {
317087324e74SHenry Tieman 			ret = -EIO;
31716624e780SMichal Swiatkowski 			goto err_vsi_cfg_tc_lan;
3172c4a9c8e7SMichal Swiatkowski 		}
3173c4a9c8e7SMichal Swiatkowski 
31740db66d20SMichal Swiatkowski 		kfree(coalesce);
317587324e74SHenry Tieman 		return ice_schedule_reset(pf, ICE_RESET_PFR);
317687324e74SHenry Tieman 	}
3177288ecf49SBenjamin Mikailenko 
3178d8a23ff6STony Nguyen 	ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
3179288ecf49SBenjamin Mikailenko 
318061dc79ceSMichal Swiatkowski 	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
318161dc79ceSMichal Swiatkowski 	kfree(coalesce);
318261dc79ceSMichal Swiatkowski 
3183df0f8479SAnirudh Venkataramanan 	return 0;
3184df0f8479SAnirudh Venkataramanan 
31856624e780SMichal Swiatkowski err_vsi_cfg_tc_lan:
31866624e780SMichal Swiatkowski 	ice_vsi_decfg(vsi);
31876624e780SMichal Swiatkowski err_vsi_cfg:
318861dc79ceSMichal Swiatkowski 	kfree(coalesce);
3189df0f8479SAnirudh Venkataramanan 	return ret;
3190df0f8479SAnirudh Venkataramanan }
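
/* Usage sketch (illustrative only, not part of the driver): a caller that has
 * quiesced a VSI could rebuild it in place, printing an error on failure.
 * The error handling shown is an assumption for illustration.
 *
 *	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
 *	if (err)
 *		dev_err(ice_pf_to_dev(vsi->back),
 *			"VSI %d rebuild failed: %d\n", vsi->vsi_num, err);
 */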
3191df0f8479SAnirudh Venkataramanan 
3192df0f8479SAnirudh Venkataramanan /**
31935df7e45dSDave Ertman  * ice_is_reset_in_progress - check for a reset in progress
31942f2da36eSAnirudh Venkataramanan  * @state: PF state field
31955153a18eSAnirudh Venkataramanan  */
31965df7e45dSDave Ertman bool ice_is_reset_in_progress(unsigned long *state)
31975153a18eSAnirudh Venkataramanan {
31987e408e07SAnirudh Venkataramanan 	return test_bit(ICE_RESET_OICR_RECV, state) ||
31997e408e07SAnirudh Venkataramanan 	       test_bit(ICE_PFR_REQ, state) ||
32007e408e07SAnirudh Venkataramanan 	       test_bit(ICE_CORER_REQ, state) ||
32017e408e07SAnirudh Venkataramanan 	       test_bit(ICE_GLOBR_REQ, state);
32025153a18eSAnirudh Venkataramanan }
32037b9ffc76SAnirudh Venkataramanan 
32041c08052eSJacob Keller /**
32051c08052eSJacob Keller  * ice_wait_for_reset - Wait for driver to finish reset and rebuild
32061c08052eSJacob Keller  * @pf: pointer to the PF structure
32071c08052eSJacob Keller  * @timeout: length of time to wait, in jiffies
32081c08052eSJacob Keller  *
32091c08052eSJacob Keller  * Wait (sleep) for a short time until the driver finishes cleaning up from
32101c08052eSJacob Keller  * a device reset. The caller must be able to sleep. Use this to delay
32111c08052eSJacob Keller  * operations that could fail while the driver is cleaning up after a device
32121c08052eSJacob Keller  * reset.
32131c08052eSJacob Keller  *
32141c08052eSJacob Keller  * Returns 0 on success, -EBUSY if the reset is not finished within the
32151c08052eSJacob Keller  * timeout, and -ERESTARTSYS if the thread was interrupted.
32161c08052eSJacob Keller  */
32171c08052eSJacob Keller int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
32181c08052eSJacob Keller {
32191c08052eSJacob Keller 	long ret;
32201c08052eSJacob Keller 
32211c08052eSJacob Keller 	ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
32221c08052eSJacob Keller 					       !ice_is_reset_in_progress(pf->state),
32231c08052eSJacob Keller 					       timeout);
32241c08052eSJacob Keller 	if (ret < 0)
32251c08052eSJacob Keller 		return ret;
32261c08052eSJacob Keller 	else if (!ret)
32271c08052eSJacob Keller 		return -EBUSY;
32281c08052eSJacob Keller 	else
32291c08052eSJacob Keller 		return 0;
32301c08052eSJacob Keller }
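
/* Usage sketch (illustrative only): delaying a configuration operation until
 * a pending reset completes, as described above. The 10 second timeout is an
 * arbitrary example value.
 *
 *	err = ice_wait_for_reset(pf, msecs_to_jiffies(10000));
 *	if (err) {
 *		dev_warn(ice_pf_to_dev(pf), "reset did not finish: %d\n", err);
 *		return err;
 *	}
 */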
32311c08052eSJacob Keller 
32327b9ffc76SAnirudh Venkataramanan /**
32337b9ffc76SAnirudh Venkataramanan  * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
32347b9ffc76SAnirudh Venkataramanan  * @vsi: VSI being configured
32357b9ffc76SAnirudh Venkataramanan  * @ctx: the context buffer returned from AQ VSI update command
32367b9ffc76SAnirudh Venkataramanan  */
32377b9ffc76SAnirudh Venkataramanan static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
32387b9ffc76SAnirudh Venkataramanan {
32397b9ffc76SAnirudh Venkataramanan 	vsi->info.mapping_flags = ctx->info.mapping_flags;
32407b9ffc76SAnirudh Venkataramanan 	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
32417b9ffc76SAnirudh Venkataramanan 	       sizeof(vsi->info.q_mapping));
32427b9ffc76SAnirudh Venkataramanan 	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
32437b9ffc76SAnirudh Venkataramanan 	       sizeof(vsi->info.tc_mapping));
32447b9ffc76SAnirudh Venkataramanan }
32457b9ffc76SAnirudh Venkataramanan 
32467b9ffc76SAnirudh Venkataramanan /**
32470754d65bSKiran Patil  * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
32480754d65bSKiran Patil  * @vsi: the VSI being configured
32490754d65bSKiran Patil  * @ena_tc: TC map to be enabled
32500754d65bSKiran Patil  */
32510754d65bSKiran Patil void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
32520754d65bSKiran Patil {
32530754d65bSKiran Patil 	struct net_device *netdev = vsi->netdev;
32540754d65bSKiran Patil 	struct ice_pf *pf = vsi->back;
32550754d65bSKiran Patil 	int numtc = vsi->tc_cfg.numtc;
32560754d65bSKiran Patil 	struct ice_dcbx_cfg *dcbcfg;
32570754d65bSKiran Patil 	u8 netdev_tc;
32580754d65bSKiran Patil 	int i;
32590754d65bSKiran Patil 
32600754d65bSKiran Patil 	if (!netdev)
32610754d65bSKiran Patil 		return;
32620754d65bSKiran Patil 
32630754d65bSKiran Patil 	/* CHNL VSI doesn't have its own netdev, hence no netdev_tc */
32640754d65bSKiran Patil 	if (vsi->type == ICE_VSI_CHNL)
32650754d65bSKiran Patil 		return;
32660754d65bSKiran Patil 
32670754d65bSKiran Patil 	if (!ena_tc) {
32680754d65bSKiran Patil 		netdev_reset_tc(netdev);
32690754d65bSKiran Patil 		return;
32700754d65bSKiran Patil 	}
32710754d65bSKiran Patil 
32720754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
32730754d65bSKiran Patil 		numtc = vsi->all_numtc;
32740754d65bSKiran Patil 
32750754d65bSKiran Patil 	if (netdev_set_num_tc(netdev, numtc))
32760754d65bSKiran Patil 		return;
32770754d65bSKiran Patil 
32780754d65bSKiran Patil 	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
32790754d65bSKiran Patil 
32800754d65bSKiran Patil 	ice_for_each_traffic_class(i)
32810754d65bSKiran Patil 		if (vsi->tc_cfg.ena_tc & BIT(i))
32820754d65bSKiran Patil 			netdev_set_tc_queue(netdev,
32830754d65bSKiran Patil 					    vsi->tc_cfg.tc_info[i].netdev_tc,
32840754d65bSKiran Patil 					    vsi->tc_cfg.tc_info[i].qcount_tx,
32850754d65bSKiran Patil 					    vsi->tc_cfg.tc_info[i].qoffset);
32860754d65bSKiran Patil 	/* setup TC queue map for CHNL TCs */
32870754d65bSKiran Patil 	ice_for_each_chnl_tc(i) {
32880754d65bSKiran Patil 		if (!(vsi->all_enatc & BIT(i)))
32890754d65bSKiran Patil 			break;
32900754d65bSKiran Patil 		if (!vsi->mqprio_qopt.qopt.count[i])
32910754d65bSKiran Patil 			break;
32920754d65bSKiran Patil 		netdev_set_tc_queue(netdev, i,
32930754d65bSKiran Patil 				    vsi->mqprio_qopt.qopt.count[i],
32940754d65bSKiran Patil 				    vsi->mqprio_qopt.qopt.offset[i]);
32950754d65bSKiran Patil 	}
32960754d65bSKiran Patil 
32970754d65bSKiran Patil 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
32980754d65bSKiran Patil 		return;
32990754d65bSKiran Patil 
33000754d65bSKiran Patil 	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
33010754d65bSKiran Patil 		u8 ets_tc = dcbcfg->etscfg.prio_table[i];
33020754d65bSKiran Patil 
33030754d65bSKiran Patil 		/* Get the mapped netdev TC# for the UP */
33040754d65bSKiran Patil 		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
33050754d65bSKiran Patil 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
33060754d65bSKiran Patil 	}
33070754d65bSKiran Patil }
33080754d65bSKiran Patil 
33090754d65bSKiran Patil /**
33100754d65bSKiran Patil  * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
33110754d65bSKiran Patil  * @vsi: the VSI being configured
33120754d65bSKiran Patil  * @ctxt: VSI context structure
33130754d65bSKiran Patil  * @ena_tc: TC map of the traffic classes to enable
33140754d65bSKiran Patil  *
33150754d65bSKiran Patil  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
33160754d65bSKiran Patil  */
3317a632b2a4SAnatolii Gerasymenko static int
33180754d65bSKiran Patil ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
33190754d65bSKiran Patil 			   u8 ena_tc)
33200754d65bSKiran Patil {
33210754d65bSKiran Patil 	u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
33220754d65bSKiran Patil 	u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
33230754d65bSKiran Patil 	int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3324a509702cSDing Hui 	u16 new_txq, new_rxq;
33250754d65bSKiran Patil 	u8 netdev_tc = 0;
33260754d65bSKiran Patil 	int i;
33270754d65bSKiran Patil 
33280754d65bSKiran Patil 	vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
33290754d65bSKiran Patil 
33300754d65bSKiran Patil 	pow = order_base_2(tc0_qcount);
33310754d65bSKiran Patil 	qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
33320754d65bSKiran Patil 		ICE_AQ_VSI_TC_Q_OFFSET_M) |
33330754d65bSKiran Patil 		((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
33340754d65bSKiran Patil 
33350754d65bSKiran Patil 	ice_for_each_traffic_class(i) {
33360754d65bSKiran Patil 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
33370754d65bSKiran Patil 			/* TC is not enabled */
33380754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].qoffset = 0;
33390754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
33400754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
33410754d65bSKiran Patil 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
33420754d65bSKiran Patil 			ctxt->info.tc_mapping[i] = 0;
33430754d65bSKiran Patil 			continue;
33440754d65bSKiran Patil 		}
33450754d65bSKiran Patil 
33460754d65bSKiran Patil 		offset = vsi->mqprio_qopt.qopt.offset[i];
33470754d65bSKiran Patil 		qcount_rx = vsi->mqprio_qopt.qopt.count[i];
33480754d65bSKiran Patil 		qcount_tx = vsi->mqprio_qopt.qopt.count[i];
33490754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].qoffset = offset;
33500754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
33510754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
33520754d65bSKiran Patil 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
33530754d65bSKiran Patil 	}
33540754d65bSKiran Patil 
33550754d65bSKiran Patil 	if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
33560754d65bSKiran Patil 		ice_for_each_chnl_tc(i) {
33570754d65bSKiran Patil 			if (!(vsi->all_enatc & BIT(i)))
33580754d65bSKiran Patil 				continue;
33590754d65bSKiran Patil 			offset = vsi->mqprio_qopt.qopt.offset[i];
33600754d65bSKiran Patil 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
33610754d65bSKiran Patil 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
33620754d65bSKiran Patil 		}
33630754d65bSKiran Patil 	}
33640754d65bSKiran Patil 
3365a509702cSDing Hui 	new_txq = offset + qcount_tx;
3366a509702cSDing Hui 	if (new_txq > vsi->alloc_txq) {
3367a632b2a4SAnatolii Gerasymenko 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
3368a509702cSDing Hui 			new_txq, vsi->alloc_txq);
3369a632b2a4SAnatolii Gerasymenko 		return -EINVAL;
3370a632b2a4SAnatolii Gerasymenko 	}
3371a632b2a4SAnatolii Gerasymenko 
3372a509702cSDing Hui 	new_rxq = offset + qcount_rx;
3373a509702cSDing Hui 	if (new_rxq > vsi->alloc_rxq) {
3374a632b2a4SAnatolii Gerasymenko 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
3375a509702cSDing Hui 			new_rxq, vsi->alloc_rxq);
3376a632b2a4SAnatolii Gerasymenko 		return -EINVAL;
3377a632b2a4SAnatolii Gerasymenko 	}
33780754d65bSKiran Patil 
3379a509702cSDing Hui 	/* Set actual Tx/Rx queue pairs */
3380a509702cSDing Hui 	vsi->num_txq = new_txq;
3381a509702cSDing Hui 	vsi->num_rxq = new_rxq;
3382a509702cSDing Hui 
33830754d65bSKiran Patil 	/* Setup queue TC[0].qmap for given VSI context */
33840754d65bSKiran Patil 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
33850754d65bSKiran Patil 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
33860754d65bSKiran Patil 	ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
33870754d65bSKiran Patil 
33880754d65bSKiran Patil 	/* Find the queue count available for channel VSIs and the starting
33890754d65bSKiran Patil 	 * queue offset for them
33900754d65bSKiran Patil 	 */
33910754d65bSKiran Patil 	if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
33920754d65bSKiran Patil 		vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
33930754d65bSKiran Patil 		vsi->next_base_q = tc0_qcount;
33940754d65bSKiran Patil 	}
33950754d65bSKiran Patil 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n",  vsi->num_txq);
33960754d65bSKiran Patil 	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n",  vsi->num_rxq);
33970754d65bSKiran Patil 	dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
33980754d65bSKiran Patil 		vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3399a632b2a4SAnatolii Gerasymenko 
3400a632b2a4SAnatolii Gerasymenko 	return 0;
34010754d65bSKiran Patil }
34020754d65bSKiran Patil 
34030754d65bSKiran Patil /**
34047b9ffc76SAnirudh Venkataramanan  * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
34057b9ffc76SAnirudh Venkataramanan  * @vsi: VSI to be configured
34067b9ffc76SAnirudh Venkataramanan  * @ena_tc: TC bitmap
34077b9ffc76SAnirudh Venkataramanan  *
34087b9ffc76SAnirudh Venkataramanan  * VSI queues are expected to be quiesced before calling this function
34097b9ffc76SAnirudh Venkataramanan  */
34107b9ffc76SAnirudh Venkataramanan int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
34117b9ffc76SAnirudh Venkataramanan {
34127b9ffc76SAnirudh Venkataramanan 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
34137b9ffc76SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
3414a509702cSDing Hui 	struct ice_tc_cfg old_tc_cfg;
34154ee656bbSTony Nguyen 	struct ice_vsi_ctx *ctx;
34164015d11eSBrett Creeley 	struct device *dev;
34177b9ffc76SAnirudh Venkataramanan 	int i, ret = 0;
34187b9ffc76SAnirudh Venkataramanan 	u8 num_tc = 0;
34197b9ffc76SAnirudh Venkataramanan 
34204015d11eSBrett Creeley 	dev = ice_pf_to_dev(pf);
34210754d65bSKiran Patil 	if (vsi->tc_cfg.ena_tc == ena_tc &&
34220754d65bSKiran Patil 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3423c4a9c8e7SMichal Swiatkowski 		return 0;
34244015d11eSBrett Creeley 
34257b9ffc76SAnirudh Venkataramanan 	ice_for_each_traffic_class(i) {
34267b9ffc76SAnirudh Venkataramanan 		/* build bitmap of enabled TCs */
34277b9ffc76SAnirudh Venkataramanan 		if (ena_tc & BIT(i))
34287b9ffc76SAnirudh Venkataramanan 			num_tc++;
34297b9ffc76SAnirudh Venkataramanan 		/* populate max_txqs per TC */
3430d5a46359SAkeem G Abodunrin 		max_txqs[i] = vsi->alloc_txq;
34310754d65bSKiran Patil 		/* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are
34320754d65bSKiran Patil 		 * zero for CHNL VSI, hence use num_txq instead as max_txqs
34330754d65bSKiran Patil 		 */
34340754d65bSKiran Patil 		if (vsi->type == ICE_VSI_CHNL &&
34350754d65bSKiran Patil 		    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
34360754d65bSKiran Patil 			max_txqs[i] = vsi->num_txq;
34377b9ffc76SAnirudh Venkataramanan 	}
34387b9ffc76SAnirudh Venkataramanan 
3439a509702cSDing Hui 	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
34407b9ffc76SAnirudh Venkataramanan 	vsi->tc_cfg.ena_tc = ena_tc;
34417b9ffc76SAnirudh Venkataramanan 	vsi->tc_cfg.numtc = num_tc;
34427b9ffc76SAnirudh Venkataramanan 
34439efe35d0STony Nguyen 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
34447b9ffc76SAnirudh Venkataramanan 	if (!ctx)
34457b9ffc76SAnirudh Venkataramanan 		return -ENOMEM;
34467b9ffc76SAnirudh Venkataramanan 
34477b9ffc76SAnirudh Venkataramanan 	ctx->vf_num = 0;
34487b9ffc76SAnirudh Venkataramanan 	ctx->info = vsi->info;
34497b9ffc76SAnirudh Venkataramanan 
34500754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF &&
34510754d65bSKiran Patil 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3452a632b2a4SAnatolii Gerasymenko 		ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
34530754d65bSKiran Patil 	else
3454a632b2a4SAnatolii Gerasymenko 		ret = ice_vsi_setup_q_map(vsi, ctx);
3455a632b2a4SAnatolii Gerasymenko 
3456a509702cSDing Hui 	if (ret) {
3457a509702cSDing Hui 		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3458a632b2a4SAnatolii Gerasymenko 		goto out;
3459a509702cSDing Hui 	}
34607b9ffc76SAnirudh Venkataramanan 
34617b9ffc76SAnirudh Venkataramanan 	/* must indicate which sections of the VSI context are being modified */
34627b9ffc76SAnirudh Venkataramanan 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
34632ccc1c1cSTony Nguyen 	ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
34642ccc1c1cSTony Nguyen 	if (ret) {
34654015d11eSBrett Creeley 		dev_info(dev, "Failed VSI Update\n");
34667b9ffc76SAnirudh Venkataramanan 		goto out;
34677b9ffc76SAnirudh Venkataramanan 	}
34687b9ffc76SAnirudh Venkataramanan 
34690754d65bSKiran Patil 	if (vsi->type == ICE_VSI_PF &&
34700754d65bSKiran Patil 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
34712ccc1c1cSTony Nguyen 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
34720754d65bSKiran Patil 	else
34732ccc1c1cSTony Nguyen 		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
34740754d65bSKiran Patil 				      vsi->tc_cfg.ena_tc, max_txqs);
34757b9ffc76SAnirudh Venkataramanan 
34762ccc1c1cSTony Nguyen 	if (ret) {
34775f87ec48STony Nguyen 		dev_err(dev, "VSI %d failed TC config, error %d\n",
34782ccc1c1cSTony Nguyen 			vsi->vsi_num, ret);
34797b9ffc76SAnirudh Venkataramanan 		goto out;
34807b9ffc76SAnirudh Venkataramanan 	}
34817b9ffc76SAnirudh Venkataramanan 	ice_vsi_update_q_map(vsi, ctx);
34827b9ffc76SAnirudh Venkataramanan 	vsi->info.valid_sections = 0;
34837b9ffc76SAnirudh Venkataramanan 
34847b9ffc76SAnirudh Venkataramanan 	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
34857b9ffc76SAnirudh Venkataramanan out:
34869efe35d0STony Nguyen 	kfree(ctx);
34877b9ffc76SAnirudh Venkataramanan 	return ret;
34887b9ffc76SAnirudh Venkataramanan }
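
/* Usage sketch (illustrative only): enabling TC 0 and TC 1 on a quiesced VSI
 * by passing a TC bitmap; the bitmap value is an example, not a recommended
 * configuration.
 *
 *	u8 ena_tc = BIT(0) | BIT(1);
 *
 *	err = ice_vsi_cfg_tc(vsi, ena_tc);
 *	if (err)
 *		dev_err(ice_pf_to_dev(vsi->back),
 *			"TC config failed for VSI %d: %d\n", vsi->vsi_num, err);
 */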
3489bbb968e8SAkeem G Abodunrin 
3490bbb968e8SAkeem G Abodunrin /**
34912d4238f5SKrzysztof Kazimierczak  * ice_update_ring_stats - Update ring statistics
3492e72bba21SMaciej Fijalkowski  * @stats: stats to be updated
34932d4238f5SKrzysztof Kazimierczak  * @pkts: number of processed packets
34942d4238f5SKrzysztof Kazimierczak  * @bytes: number of processed bytes
34952d4238f5SKrzysztof Kazimierczak  *
34962d4238f5SKrzysztof Kazimierczak  * This function assumes that the caller has acquired the u64_stats_sync lock.
34972d4238f5SKrzysztof Kazimierczak  */
3498e72bba21SMaciej Fijalkowski static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
34992d4238f5SKrzysztof Kazimierczak {
3500e72bba21SMaciej Fijalkowski 	stats->bytes += bytes;
3501e72bba21SMaciej Fijalkowski 	stats->pkts += pkts;
35022d4238f5SKrzysztof Kazimierczak }
35032d4238f5SKrzysztof Kazimierczak 
35042d4238f5SKrzysztof Kazimierczak /**
35052d4238f5SKrzysztof Kazimierczak  * ice_update_tx_ring_stats - Update Tx ring specific counters
35062d4238f5SKrzysztof Kazimierczak  * @tx_ring: ring to update
35072d4238f5SKrzysztof Kazimierczak  * @pkts: number of processed packets
35082d4238f5SKrzysztof Kazimierczak  * @bytes: number of processed bytes
35092d4238f5SKrzysztof Kazimierczak  */
3510e72bba21SMaciej Fijalkowski void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
35112d4238f5SKrzysztof Kazimierczak {
3512288ecf49SBenjamin Mikailenko 	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
3513288ecf49SBenjamin Mikailenko 	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
3514288ecf49SBenjamin Mikailenko 	u64_stats_update_end(&tx_ring->ring_stats->syncp);
35152d4238f5SKrzysztof Kazimierczak }
35162d4238f5SKrzysztof Kazimierczak 
35172d4238f5SKrzysztof Kazimierczak /**
35182d4238f5SKrzysztof Kazimierczak  * ice_update_rx_ring_stats - Update Rx ring specific counters
35192d4238f5SKrzysztof Kazimierczak  * @rx_ring: ring to update
35202d4238f5SKrzysztof Kazimierczak  * @pkts: number of processed packets
35212d4238f5SKrzysztof Kazimierczak  * @bytes: number of processed bytes
35222d4238f5SKrzysztof Kazimierczak  */
3523e72bba21SMaciej Fijalkowski void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
35242d4238f5SKrzysztof Kazimierczak {
3525288ecf49SBenjamin Mikailenko 	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
3526288ecf49SBenjamin Mikailenko 	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
3527288ecf49SBenjamin Mikailenko 	u64_stats_update_end(&rx_ring->ring_stats->syncp);
35282d4238f5SKrzysztof Kazimierczak }
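
/* Usage sketch (illustrative only): a completion/poll routine would
 * accumulate totals locally and publish them once per invocation;
 * total_pkts and total_bytes are hypothetical locals.
 *
 *	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
 *	ice_update_rx_ring_stats(rx_ring, total_pkts, total_bytes);
 */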
35292d4238f5SKrzysztof Kazimierczak 
35302d4238f5SKrzysztof Kazimierczak /**
3531fc0f39bcSBrett Creeley  * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3532d7393425SMichal Wilczynski  * @pi: port info of the switch with default VSI
3533fc0f39bcSBrett Creeley  *
3534d7393425SMichal Wilczynski  * Return true if there is a single VSI in the default forwarding VSI list
3535fc0f39bcSBrett Creeley  */
3536d7393425SMichal Wilczynski bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3537fc0f39bcSBrett Creeley {
3538d7393425SMichal Wilczynski 	bool exists = false;
3539d7393425SMichal Wilczynski 
3540d7393425SMichal Wilczynski 	ice_check_if_dflt_vsi(pi, 0, &exists);
3541d7393425SMichal Wilczynski 	return exists;
3542fc0f39bcSBrett Creeley }
3543fc0f39bcSBrett Creeley 
3544fc0f39bcSBrett Creeley /**
3545fc0f39bcSBrett Creeley  * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3546fc0f39bcSBrett Creeley  * @vsi: VSI to compare against default forwarding VSI
3547fc0f39bcSBrett Creeley  *
3548fc0f39bcSBrett Creeley  * If the VSI passed in is the default forwarding VSI then return true, else
3549fc0f39bcSBrett Creeley  * return false.
3550fc0f39bcSBrett Creeley  */
3551d7393425SMichal Wilczynski bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3552fc0f39bcSBrett Creeley {
3553d7393425SMichal Wilczynski 	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3554fc0f39bcSBrett Creeley }
3555fc0f39bcSBrett Creeley 
3556fc0f39bcSBrett Creeley /**
3557fc0f39bcSBrett Creeley  * ice_set_dflt_vsi - set the default forwarding VSI
3558fc0f39bcSBrett Creeley  * @vsi: VSI getting set as the default forwarding VSI on the switch
3559fc0f39bcSBrett Creeley  *
3560fc0f39bcSBrett Creeley  * If the VSI passed in is already the default VSI and it's enabled just return
3561fc0f39bcSBrett Creeley  * success.
3562fc0f39bcSBrett Creeley  *
3563fc0f39bcSBrett Creeley  * Otherwise try to set the VSI passed in as the switch's default VSI and
3564fc0f39bcSBrett Creeley  * return the result.
3565fc0f39bcSBrett Creeley  */
3566d7393425SMichal Wilczynski int ice_set_dflt_vsi(struct ice_vsi *vsi)
3567fc0f39bcSBrett Creeley {
3568fc0f39bcSBrett Creeley 	struct device *dev;
35695518ac2aSTony Nguyen 	int status;
3570fc0f39bcSBrett Creeley 
3571d7393425SMichal Wilczynski 	if (!vsi)
3572fc0f39bcSBrett Creeley 		return -EINVAL;
3573fc0f39bcSBrett Creeley 
3574fc0f39bcSBrett Creeley 	dev = ice_pf_to_dev(vsi->back);
3575fc0f39bcSBrett Creeley 
3576776fe199SMichal Swiatkowski 	if (ice_lag_is_switchdev_running(vsi->back)) {
3577776fe199SMichal Swiatkowski 		dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
3578776fe199SMichal Swiatkowski 			vsi->vsi_num);
3579776fe199SMichal Swiatkowski 		return 0;
3580776fe199SMichal Swiatkowski 	}
3581776fe199SMichal Swiatkowski 
3582fc0f39bcSBrett Creeley 	/* the VSI passed in is already the default VSI */
3583d7393425SMichal Wilczynski 	if (ice_is_vsi_dflt_vsi(vsi)) {
3584fc0f39bcSBrett Creeley 		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3585fc0f39bcSBrett Creeley 			vsi->vsi_num);
3586fc0f39bcSBrett Creeley 		return 0;
3587fc0f39bcSBrett Creeley 	}
3588fc0f39bcSBrett Creeley 
3589d7393425SMichal Wilczynski 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3590fc0f39bcSBrett Creeley 	if (status) {
35915f87ec48STony Nguyen 		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
35925f87ec48STony Nguyen 			vsi->vsi_num, status);
3593c1484691STony Nguyen 		return status;
3594fc0f39bcSBrett Creeley 	}
3595fc0f39bcSBrett Creeley 
3596fc0f39bcSBrett Creeley 	return 0;
3597fc0f39bcSBrett Creeley }
3598fc0f39bcSBrett Creeley 
3599fc0f39bcSBrett Creeley /**
3600fc0f39bcSBrett Creeley  * ice_clear_dflt_vsi - clear the default forwarding VSI
3601d7393425SMichal Wilczynski  * @vsi: VSI to remove from filter list
3602fc0f39bcSBrett Creeley  *
3603fc0f39bcSBrett Creeley  * If the switch has no default VSI or it's not enabled then return error.
3604fc0f39bcSBrett Creeley  *
3605fc0f39bcSBrett Creeley  * Otherwise try to clear the default VSI and return the result.
3606fc0f39bcSBrett Creeley  */
3607d7393425SMichal Wilczynski int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3608fc0f39bcSBrett Creeley {
3609fc0f39bcSBrett Creeley 	struct device *dev;
36105518ac2aSTony Nguyen 	int status;
3611fc0f39bcSBrett Creeley 
3612d7393425SMichal Wilczynski 	if (!vsi)
3613fc0f39bcSBrett Creeley 		return -EINVAL;
3614fc0f39bcSBrett Creeley 
3615d7393425SMichal Wilczynski 	dev = ice_pf_to_dev(vsi->back);
3616fc0f39bcSBrett Creeley 
3617fc0f39bcSBrett Creeley 	/* there is no default VSI configured */
3618d7393425SMichal Wilczynski 	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3619fc0f39bcSBrett Creeley 		return -ENODEV;
3620fc0f39bcSBrett Creeley 
3621d7393425SMichal Wilczynski 	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3622fc0f39bcSBrett Creeley 				  ICE_FLTR_RX);
3623fc0f39bcSBrett Creeley 	if (status) {
36245f87ec48STony Nguyen 		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
3625d7393425SMichal Wilczynski 			vsi->vsi_num, status);
3626fc0f39bcSBrett Creeley 		return -EIO;
3627fc0f39bcSBrett Creeley 	}
3628fc0f39bcSBrett Creeley 
3629fc0f39bcSBrett Creeley 	return 0;
3630fc0f39bcSBrett Creeley }
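
/* Usage sketch (illustrative only): pairing the default-VSI helpers around a
 * promiscuous configuration change; the surrounding flow is an assumption.
 *
 *	if (!ice_is_vsi_dflt_vsi(vsi))
 *		err = ice_set_dflt_vsi(vsi);
 *	...
 *	if (ice_is_vsi_dflt_vsi(vsi))
 *		err = ice_clear_dflt_vsi(vsi);
 */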
3631d348d517SAnirudh Venkataramanan 
3632d348d517SAnirudh Venkataramanan /**
36334ecc8633SBrett Creeley  * ice_get_link_speed_mbps - get link speed in Mbps
36344ecc8633SBrett Creeley  * @vsi: the VSI whose link speed is being queried
36354ecc8633SBrett Creeley  *
36364ecc8633SBrett Creeley  * Return the current VSI link speed, or 0 if the speed is unknown.
36374ecc8633SBrett Creeley  */
36384ecc8633SBrett Creeley int ice_get_link_speed_mbps(struct ice_vsi *vsi)
36394ecc8633SBrett Creeley {
36401d0e28a9SBrett Creeley 	unsigned int link_speed;
36411d0e28a9SBrett Creeley 
36421d0e28a9SBrett Creeley 	link_speed = vsi->port_info->phy.link_info.link_speed;
36431d0e28a9SBrett Creeley 
36441d0e28a9SBrett Creeley 	return (int)ice_get_link_speed(fls(link_speed) - 1);
36454ecc8633SBrett Creeley }
36464ecc8633SBrett Creeley 
36474ecc8633SBrett Creeley /**
36484ecc8633SBrett Creeley  * ice_get_link_speed_kbps - get link speed in Kbps
36494ecc8633SBrett Creeley  * @vsi: the VSI whose link speed is being queried
36504ecc8633SBrett Creeley  *
36514ecc8633SBrett Creeley  * Return the current VSI link speed, or 0 if the speed is unknown.
36524ecc8633SBrett Creeley  */
3653fbc7b27aSKiran Patil int ice_get_link_speed_kbps(struct ice_vsi *vsi)
36544ecc8633SBrett Creeley {
36554ecc8633SBrett Creeley 	int speed_mbps;
36564ecc8633SBrett Creeley 
36574ecc8633SBrett Creeley 	speed_mbps = ice_get_link_speed_mbps(vsi);
36584ecc8633SBrett Creeley 
36594ecc8633SBrett Creeley 	return speed_mbps * 1000;
36604ecc8633SBrett Creeley }
36614ecc8633SBrett Creeley 
36624ecc8633SBrett Creeley /**
36634ecc8633SBrett Creeley  * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
36644ecc8633SBrett Creeley  * @vsi: VSI to be configured
36654ecc8633SBrett Creeley  * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
36664ecc8633SBrett Creeley  *
36674ecc8633SBrett Creeley  * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit
36684ecc8633SBrett Creeley  * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
36694ecc8633SBrett Creeley  * on TC 0.
36704ecc8633SBrett Creeley  */
36714ecc8633SBrett Creeley int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
36724ecc8633SBrett Creeley {
36734ecc8633SBrett Creeley 	struct ice_pf *pf = vsi->back;
36744ecc8633SBrett Creeley 	struct device *dev;
36755518ac2aSTony Nguyen 	int status;
36764ecc8633SBrett Creeley 	int speed;
36774ecc8633SBrett Creeley 
36784ecc8633SBrett Creeley 	dev = ice_pf_to_dev(pf);
36794ecc8633SBrett Creeley 	if (!vsi->port_info) {
36804ecc8633SBrett Creeley 		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
36814ecc8633SBrett Creeley 			vsi->idx, vsi->type);
36824ecc8633SBrett Creeley 		return -EINVAL;
36834ecc8633SBrett Creeley 	}
36844ecc8633SBrett Creeley 
36854ecc8633SBrett Creeley 	speed = ice_get_link_speed_kbps(vsi);
36864ecc8633SBrett Creeley 	if (min_tx_rate > (u64)speed) {
36874ecc8633SBrett Creeley 		dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
36884ecc8633SBrett Creeley 			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
36894ecc8633SBrett Creeley 			speed);
36904ecc8633SBrett Creeley 		return -EINVAL;
36914ecc8633SBrett Creeley 	}
36924ecc8633SBrett Creeley 
36934ecc8633SBrett Creeley 	/* Configure min BW for VSI limit */
36944ecc8633SBrett Creeley 	if (min_tx_rate) {
36954ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
36964ecc8633SBrett Creeley 						   ICE_MIN_BW, min_tx_rate);
36974ecc8633SBrett Creeley 		if (status) {
36984ecc8633SBrett Creeley 			dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
36994ecc8633SBrett Creeley 				min_tx_rate, ice_vsi_type_str(vsi->type),
37004ecc8633SBrett Creeley 				vsi->idx);
3701c1484691STony Nguyen 			return status;
37024ecc8633SBrett Creeley 		}
37034ecc8633SBrett Creeley 
37044ecc8633SBrett Creeley 		dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
37054ecc8633SBrett Creeley 			min_tx_rate, ice_vsi_type_str(vsi->type));
37064ecc8633SBrett Creeley 	} else {
37074ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
37084ecc8633SBrett Creeley 							vsi->idx, 0,
37094ecc8633SBrett Creeley 							ICE_MIN_BW);
37104ecc8633SBrett Creeley 		if (status) {
37114ecc8633SBrett Creeley 			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
37124ecc8633SBrett Creeley 				ice_vsi_type_str(vsi->type), vsi->idx);
3713c1484691STony Nguyen 			return status;
37144ecc8633SBrett Creeley 		}
37154ecc8633SBrett Creeley 
37164ecc8633SBrett Creeley 		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
37174ecc8633SBrett Creeley 			ice_vsi_type_str(vsi->type), vsi->idx);
37184ecc8633SBrett Creeley 	}
37194ecc8633SBrett Creeley 
37204ecc8633SBrett Creeley 	return 0;
37214ecc8633SBrett Creeley }
37224ecc8633SBrett Creeley 
37234ecc8633SBrett Creeley /**
37244ecc8633SBrett Creeley  * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
37254ecc8633SBrett Creeley  * @vsi: VSI to be configured
37264ecc8633SBrett Creeley  * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
37274ecc8633SBrett Creeley  *
37284ecc8633SBrett Creeley  * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit
37294ecc8633SBrett Creeley  * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
37304ecc8633SBrett Creeley  * on TC 0.
37314ecc8633SBrett Creeley  */
37324ecc8633SBrett Creeley int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
37334ecc8633SBrett Creeley {
37344ecc8633SBrett Creeley 	struct ice_pf *pf = vsi->back;
37354ecc8633SBrett Creeley 	struct device *dev;
37365518ac2aSTony Nguyen 	int status;
37374ecc8633SBrett Creeley 	int speed;
37384ecc8633SBrett Creeley 
37394ecc8633SBrett Creeley 	dev = ice_pf_to_dev(pf);
37404ecc8633SBrett Creeley 	if (!vsi->port_info) {
37414ecc8633SBrett Creeley 		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
37424ecc8633SBrett Creeley 			vsi->idx, vsi->type);
37434ecc8633SBrett Creeley 		return -EINVAL;
37444ecc8633SBrett Creeley 	}
37454ecc8633SBrett Creeley 
37464ecc8633SBrett Creeley 	speed = ice_get_link_speed_kbps(vsi);
37474ecc8633SBrett Creeley 	if (max_tx_rate > (u64)speed) {
37484ecc8633SBrett Creeley 		dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
37494ecc8633SBrett Creeley 			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
37504ecc8633SBrett Creeley 			speed);
37514ecc8633SBrett Creeley 		return -EINVAL;
37524ecc8633SBrett Creeley 	}
37534ecc8633SBrett Creeley 
37544ecc8633SBrett Creeley 	/* Configure max BW for VSI limit */
37554ecc8633SBrett Creeley 	if (max_tx_rate) {
37564ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
37574ecc8633SBrett Creeley 						   ICE_MAX_BW, max_tx_rate);
37584ecc8633SBrett Creeley 		if (status) {
37594ecc8633SBrett Creeley 			dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
37604ecc8633SBrett Creeley 				max_tx_rate, ice_vsi_type_str(vsi->type),
37614ecc8633SBrett Creeley 				vsi->idx);
3762c1484691STony Nguyen 			return status;
37634ecc8633SBrett Creeley 		}
37644ecc8633SBrett Creeley 
37654ecc8633SBrett Creeley 		dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
37664ecc8633SBrett Creeley 			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
37674ecc8633SBrett Creeley 	} else {
37684ecc8633SBrett Creeley 		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
37694ecc8633SBrett Creeley 							vsi->idx, 0,
37704ecc8633SBrett Creeley 							ICE_MAX_BW);
37714ecc8633SBrett Creeley 		if (status) {
37724ecc8633SBrett Creeley 			dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
37734ecc8633SBrett Creeley 				ice_vsi_type_str(vsi->type), vsi->idx);
3774c1484691STony Nguyen 			return status;
37754ecc8633SBrett Creeley 		}
37764ecc8633SBrett Creeley 
37774ecc8633SBrett Creeley 		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
37784ecc8633SBrett Creeley 			ice_vsi_type_str(vsi->type), vsi->idx);
37794ecc8633SBrett Creeley 	}
37804ecc8633SBrett Creeley 
37814ecc8633SBrett Creeley 	return 0;
37824ecc8633SBrett Creeley }
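
/* Usage sketch (illustrative only): applying and later clearing Tx rate
 * limits on TC 0. The 100000/50000 Kbps values are arbitrary examples;
 * passing 0 clears the corresponding limit, as documented above.
 *
 *	err = ice_set_max_bw_limit(vsi, 100000);
 *	if (!err)
 *		err = ice_set_min_bw_limit(vsi, 50000);
 *	...
 *	ice_set_min_bw_limit(vsi, 0);
 *	ice_set_max_bw_limit(vsi, 0);
 */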
37834ecc8633SBrett Creeley 
37844ecc8633SBrett Creeley /**
3785d348d517SAnirudh Venkataramanan  * ice_set_link - turn on/off physical link
3786d348d517SAnirudh Venkataramanan  * @vsi: VSI to modify physical link on
3787d348d517SAnirudh Venkataramanan  * @ena: turn on/off physical link
3788d348d517SAnirudh Venkataramanan  */
3789d348d517SAnirudh Venkataramanan int ice_set_link(struct ice_vsi *vsi, bool ena)
3790d348d517SAnirudh Venkataramanan {
3791d348d517SAnirudh Venkataramanan 	struct device *dev = ice_pf_to_dev(vsi->back);
3792d348d517SAnirudh Venkataramanan 	struct ice_port_info *pi = vsi->port_info;
3793d348d517SAnirudh Venkataramanan 	struct ice_hw *hw = pi->hw;
37945e24d598STony Nguyen 	int status;
3795d348d517SAnirudh Venkataramanan 
3796d348d517SAnirudh Venkataramanan 	if (vsi->type != ICE_VSI_PF)
3797d348d517SAnirudh Venkataramanan 		return -EINVAL;
3798d348d517SAnirudh Venkataramanan 
3799d348d517SAnirudh Venkataramanan 	status = ice_aq_set_link_restart_an(pi, ena, NULL);
3800d348d517SAnirudh Venkataramanan 
3801d348d517SAnirudh Venkataramanan 	/* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
3802d348d517SAnirudh Venkataramanan 	 * This is not a fatal error, so print a debug message and return
3803d348d517SAnirudh Venkataramanan 	 * a success code. Return an error if FW returns an error code other
3804d348d517SAnirudh Venkataramanan 	 * than ICE_AQ_RC_EMODE
3805d348d517SAnirudh Venkataramanan 	 */
3806d54699e2STony Nguyen 	if (status == -EIO) {
3807d348d517SAnirudh Venkataramanan 		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3808ad24d9ebSJonathan Toppins 			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
38095f87ec48STony Nguyen 				(ena ? "ON" : "OFF"), status,
3810d348d517SAnirudh Venkataramanan 				ice_aq_str(hw->adminq.sq_last_status));
3811d348d517SAnirudh Venkataramanan 	} else if (status) {
38125f87ec48STony Nguyen 		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
38135f87ec48STony Nguyen 			(ena ? "ON" : "OFF"), status,
3814d348d517SAnirudh Venkataramanan 			ice_aq_str(hw->adminq.sq_last_status));
3815c1484691STony Nguyen 		return status;
3816d348d517SAnirudh Venkataramanan 	}
3817d348d517SAnirudh Venkataramanan 
3818d348d517SAnirudh Venkataramanan 	return 0;
3819d348d517SAnirudh Venkataramanan }
382040b24760SAnirudh Venkataramanan 
382140b24760SAnirudh Venkataramanan /**
38223e0b5971SBrett Creeley  * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
38233e0b5971SBrett Creeley  * @vsi: VSI used to add VLAN filters
3824c31af68aSBrett Creeley  *
3825c31af68aSBrett Creeley  * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
3826c31af68aSBrett Creeley  * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
3827c31af68aSBrett Creeley  * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3828c31af68aSBrett Creeley  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3829c31af68aSBrett Creeley  *
3830c31af68aSBrett Creeley  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3831c31af68aSBrett Creeley  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3832c31af68aSBrett Creeley  * traffic in SVM, since the VLAN TPID isn't part of filtering.
3833c31af68aSBrett Creeley  *
3834c31af68aSBrett Creeley  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3835c31af68aSBrett Creeley  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3836c31af68aSBrett Creeley  * part of filtering.
38373e0b5971SBrett Creeley  */
38383e0b5971SBrett Creeley int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
38393e0b5971SBrett Creeley {
3840c31af68aSBrett Creeley 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3841fb05ba12SBrett Creeley 	struct ice_vlan vlan;
3842c31af68aSBrett Creeley 	int err;
3843fb05ba12SBrett Creeley 
38442bfefa2dSBrett Creeley 	vlan = ICE_VLAN(0, 0, 0);
3845c31af68aSBrett Creeley 	err = vlan_ops->add_vlan(vsi, &vlan);
3846c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3847c31af68aSBrett Creeley 		return err;
3848c31af68aSBrett Creeley 
3849c31af68aSBrett Creeley 	/* in SVM both VLAN 0 filters are identical */
3850c31af68aSBrett Creeley 	if (!ice_is_dvm_ena(&vsi->back->hw))
3851c31af68aSBrett Creeley 		return 0;
3852c31af68aSBrett Creeley 
3853c31af68aSBrett Creeley 	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3854c31af68aSBrett Creeley 	err = vlan_ops->add_vlan(vsi, &vlan);
3855c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3856c31af68aSBrett Creeley 		return err;
3857c31af68aSBrett Creeley 
3858c31af68aSBrett Creeley 	return 0;
3859c31af68aSBrett Creeley }
3860c31af68aSBrett Creeley 
3861c31af68aSBrett Creeley /**
3862c31af68aSBrett Creeley  * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
3863c31af68aSBrett Creeley  * @vsi: VSI used to add VLAN filters
3864c31af68aSBrett Creeley  *
3865c31af68aSBrett Creeley  * Delete the VLAN 0 filters in the same manner that they were added in
3866c31af68aSBrett Creeley  * ice_vsi_add_vlan_zero.
3867c31af68aSBrett Creeley  */
3868c31af68aSBrett Creeley int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3869c31af68aSBrett Creeley {
3870c31af68aSBrett Creeley 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3871c31af68aSBrett Creeley 	struct ice_vlan vlan;
3872c31af68aSBrett Creeley 	int err;
3873c31af68aSBrett Creeley 
3874c31af68aSBrett Creeley 	vlan = ICE_VLAN(0, 0, 0);
3875c31af68aSBrett Creeley 	err = vlan_ops->del_vlan(vsi, &vlan);
3876c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3877c31af68aSBrett Creeley 		return err;
3878c31af68aSBrett Creeley 
3879c31af68aSBrett Creeley 	/* in SVM both VLAN 0 filters are identical */
3880c31af68aSBrett Creeley 	if (!ice_is_dvm_ena(&vsi->back->hw))
3881c31af68aSBrett Creeley 		return 0;
3882c31af68aSBrett Creeley 
3883c31af68aSBrett Creeley 	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3884c31af68aSBrett Creeley 	err = vlan_ops->del_vlan(vsi, &vlan);
3885c31af68aSBrett Creeley 	if (err && err != -EEXIST)
3886c31af68aSBrett Creeley 		return err;
3887c31af68aSBrett Creeley 
3888abddafd4SGrzegorz Siwik 	/* when deleting the last VLAN filter, make sure to disable the VLAN
3889abddafd4SGrzegorz Siwik 	 * promisc mode so it isn't left enabled by accident
3890abddafd4SGrzegorz Siwik 	 */
3891abddafd4SGrzegorz Siwik 	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3892abddafd4SGrzegorz Siwik 				    ICE_MCAST_VLAN_PROMISC_BITS, 0);
3893c31af68aSBrett Creeley }
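
/* Usage sketch (illustrative only): pairing the VLAN 0 helpers during VSI
 * setup and teardown; both tolerate -EEXIST internally, so callers only
 * need to check for real failures.
 *
 *	err = ice_vsi_add_vlan_zero(vsi);
 *	if (err)
 *		return err;
 *	...
 *	err = ice_vsi_del_vlan_zero(vsi);
 */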
3894c31af68aSBrett Creeley 
3895c31af68aSBrett Creeley /**
3896c31af68aSBrett Creeley  * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
3897c31af68aSBrett Creeley  * @vsi: VSI used to get the VLAN mode
3898c31af68aSBrett Creeley  *
3899c31af68aSBrett Creeley  * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
3900c31af68aSBrett Creeley  * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
3901c31af68aSBrett Creeley  */
3902c31af68aSBrett Creeley static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
3903c31af68aSBrett Creeley {
3904c31af68aSBrett Creeley #define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
3905c31af68aSBrett Creeley #define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
3906c31af68aSBrett Creeley 	/* no VLAN 0 filter is created when a port VLAN is active */
3907b03d519dSJacob Keller 	if (vsi->type == ICE_VSI_VF) {
3908b03d519dSJacob Keller 		if (WARN_ON(!vsi->vf))
3909c31af68aSBrett Creeley 			return 0;
3910b03d519dSJacob Keller 
3911b03d519dSJacob Keller 		if (ice_vf_is_port_vlan_ena(vsi->vf))
3912b03d519dSJacob Keller 			return 0;
3913b03d519dSJacob Keller 	}
3914b03d519dSJacob Keller 
3915c31af68aSBrett Creeley 	if (ice_is_dvm_ena(&vsi->back->hw))
3916c31af68aSBrett Creeley 		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
3917c31af68aSBrett Creeley 	else
3918c31af68aSBrett Creeley 		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
3919c31af68aSBrett Creeley }
3920c31af68aSBrett Creeley 
3921c31af68aSBrett Creeley /**
3922c31af68aSBrett Creeley  * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
3923c31af68aSBrett Creeley  * @vsi: VSI used to determine if any non-zero VLANs have been added
3924c31af68aSBrett Creeley  */
3925c31af68aSBrett Creeley bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
3926c31af68aSBrett Creeley {
3927c31af68aSBrett Creeley 	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
3928c31af68aSBrett Creeley }
3929c31af68aSBrett Creeley 
3930c31af68aSBrett Creeley /**
3931c31af68aSBrett Creeley  * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
3932c31af68aSBrett Creeley  * @vsi: VSI used to get the number of non-zero VLANs added
3933c31af68aSBrett Creeley  */
3934c31af68aSBrett Creeley u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
3935c31af68aSBrett Creeley {
3936c31af68aSBrett Creeley 	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
39373e0b5971SBrett Creeley }
39383e0b5971SBrett Creeley 
39393e0b5971SBrett Creeley /**
394040b24760SAnirudh Venkataramanan  * ice_is_feature_supported - check if a feature is supported
394140b24760SAnirudh Venkataramanan  * @pf: pointer to the struct ice_pf instance
394240b24760SAnirudh Venkataramanan  * @f: feature enum to be checked
394340b24760SAnirudh Venkataramanan  *
394440b24760SAnirudh Venkataramanan  * Returns true if the feature is supported, false otherwise
394540b24760SAnirudh Venkataramanan  */
394640b24760SAnirudh Venkataramanan bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
394740b24760SAnirudh Venkataramanan {
394840b24760SAnirudh Venkataramanan 	if (f < 0 || f >= ICE_F_MAX)
394940b24760SAnirudh Venkataramanan 		return false;
395040b24760SAnirudh Venkataramanan 
395140b24760SAnirudh Venkataramanan 	return test_bit(f, pf->features);
395240b24760SAnirudh Venkataramanan }
395340b24760SAnirudh Venkataramanan 
395440b24760SAnirudh Venkataramanan /**
395540b24760SAnirudh Venkataramanan  * ice_set_feature_support - mark a feature as supported
395640b24760SAnirudh Venkataramanan  * @pf: pointer to the struct ice_pf instance
395740b24760SAnirudh Venkataramanan  * @f: feature enum to set
395840b24760SAnirudh Venkataramanan  */
3959bb52f42aSDave Ertman void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
396040b24760SAnirudh Venkataramanan {
396140b24760SAnirudh Venkataramanan 	if (f < 0 || f >= ICE_F_MAX)
396240b24760SAnirudh Venkataramanan 		return;
396340b24760SAnirudh Venkataramanan 
396440b24760SAnirudh Venkataramanan 	set_bit(f, pf->features);
396540b24760SAnirudh Venkataramanan }
396640b24760SAnirudh Venkataramanan 
396740b24760SAnirudh Venkataramanan /**
3968325b2064SMaciej Machnikowski  * ice_clear_feature_support - mark a feature as not supported
3969325b2064SMaciej Machnikowski  * @pf: pointer to the struct ice_pf instance
3970325b2064SMaciej Machnikowski  * @f: feature enum to clear
3971325b2064SMaciej Machnikowski  */
3972325b2064SMaciej Machnikowski void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
3973325b2064SMaciej Machnikowski {
3974325b2064SMaciej Machnikowski 	if (f < 0 || f >= ICE_F_MAX)
3975325b2064SMaciej Machnikowski 		return;
3976325b2064SMaciej Machnikowski 
3977325b2064SMaciej Machnikowski 	clear_bit(f, pf->features);
3978325b2064SMaciej Machnikowski }
3979325b2064SMaciej Machnikowski 
3980325b2064SMaciej Machnikowski /**
398140b24760SAnirudh Venkataramanan  * ice_init_feature_support - initialize the set of supported features
398240b24760SAnirudh Venkataramanan  * @pf: pointer to the struct ice_pf instance
398340b24760SAnirudh Venkataramanan  *
398440b24760SAnirudh Venkataramanan  * Called during init to set up the supported features.
398540b24760SAnirudh Venkataramanan  */
398640b24760SAnirudh Venkataramanan void ice_init_feature_support(struct ice_pf *pf)
398740b24760SAnirudh Venkataramanan {
398840b24760SAnirudh Venkataramanan 	switch (pf->hw.device_id) {
398940b24760SAnirudh Venkataramanan 	case ICE_DEV_ID_E810C_BACKPLANE:
399040b24760SAnirudh Venkataramanan 	case ICE_DEV_ID_E810C_QSFP:
399140b24760SAnirudh Venkataramanan 	case ICE_DEV_ID_E810C_SFP:
399240b24760SAnirudh Venkataramanan 		ice_set_feature_support(pf, ICE_F_DSCP);
3993896a55aaSAnirudh Venkataramanan 		ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
399443113ff7SKarol Kolacinski 		if (ice_is_e810t(&pf->hw)) {
3995325b2064SMaciej Machnikowski 			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
399643113ff7SKarol Kolacinski 			if (ice_gnss_is_gps_present(&pf->hw))
399743113ff7SKarol Kolacinski 				ice_set_feature_support(pf, ICE_F_GNSS);
399843113ff7SKarol Kolacinski 		}
399940b24760SAnirudh Venkataramanan 		break;
400040b24760SAnirudh Venkataramanan 	default:
400140b24760SAnirudh Venkataramanan 		break;
400240b24760SAnirudh Venkataramanan 	}
400340b24760SAnirudh Venkataramanan }
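
/* Usage sketch (illustrative only): gating an optional capability on the
 * feature bitmap initialized above.
 *
 *	if (ice_is_feature_supported(pf, ICE_F_GNSS))
 *		setup_gnss(pf);
 *
 * where setup_gnss() stands for a hypothetical consumer of the feature.
 */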
4004ff5411efSMichal Swiatkowski 
4005ff5411efSMichal Swiatkowski /**
4006ff5411efSMichal Swiatkowski  * ice_vsi_update_security - update security block in VSI
4007ff5411efSMichal Swiatkowski  * @vsi: pointer to VSI structure
4008ff5411efSMichal Swiatkowski  * @fill: function pointer to fill ctx
4009ff5411efSMichal Swiatkowski  */
4010ff5411efSMichal Swiatkowski int
4011ff5411efSMichal Swiatkowski ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
4012ff5411efSMichal Swiatkowski {
4013ff5411efSMichal Swiatkowski 	struct ice_vsi_ctx ctx = { 0 };
4014ff5411efSMichal Swiatkowski 
4015ff5411efSMichal Swiatkowski 	ctx.info = vsi->info;
4016ff5411efSMichal Swiatkowski 	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
4017ff5411efSMichal Swiatkowski 	fill(&ctx);
4018ff5411efSMichal Swiatkowski 
4019ff5411efSMichal Swiatkowski 	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4020ff5411efSMichal Swiatkowski 		return -ENODEV;
4021ff5411efSMichal Swiatkowski 
4022ff5411efSMichal Swiatkowski 	vsi->info = ctx.info;
4023ff5411efSMichal Swiatkowski 	return 0;
4024ff5411efSMichal Swiatkowski }
4025ff5411efSMichal Swiatkowski 
4026ff5411efSMichal Swiatkowski /**
4027ff5411efSMichal Swiatkowski  * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
4028ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4029ff5411efSMichal Swiatkowski  */
4030ff5411efSMichal Swiatkowski void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
4031ff5411efSMichal Swiatkowski {
4032ff5411efSMichal Swiatkowski 	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
4033ff5411efSMichal Swiatkowski 			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4034ff5411efSMichal Swiatkowski 				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4035ff5411efSMichal Swiatkowski }
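
/* Usage sketch (illustrative only): the fill-callback pattern expected by
 * ice_vsi_update_security(), here applying the antispoof setter above.
 *
 *	err = ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 *	if (err)
 *		dev_err(ice_pf_to_dev(vsi->back),
 *			"failed to enable antispoof on VSI %d\n", vsi->vsi_num);
 */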
4036ff5411efSMichal Swiatkowski 
4037ff5411efSMichal Swiatkowski /**
4038ff5411efSMichal Swiatkowski  * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
4039ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4040ff5411efSMichal Swiatkowski  */
4041ff5411efSMichal Swiatkowski void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
4042ff5411efSMichal Swiatkowski {
4043ff5411efSMichal Swiatkowski 	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
4044ff5411efSMichal Swiatkowski 			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4045ff5411efSMichal Swiatkowski 				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4046ff5411efSMichal Swiatkowski }
4047ff5411efSMichal Swiatkowski 
4048ff5411efSMichal Swiatkowski /**
4049ff5411efSMichal Swiatkowski  * ice_vsi_ctx_set_allow_override - allow destination override on VSI
4050ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4051ff5411efSMichal Swiatkowski  */
4052ff5411efSMichal Swiatkowski void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
4053ff5411efSMichal Swiatkowski {
4054ff5411efSMichal Swiatkowski 	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4055ff5411efSMichal Swiatkowski }
4056ff5411efSMichal Swiatkowski 
4057ff5411efSMichal Swiatkowski /**
4058ff5411efSMichal Swiatkowski  * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
4059ff5411efSMichal Swiatkowski  * @ctx: pointer to VSI ctx structure
4060ff5411efSMichal Swiatkowski  */
4061ff5411efSMichal Swiatkowski void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
4062ff5411efSMichal Swiatkowski {
4063ff5411efSMichal Swiatkowski 	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4064ff5411efSMichal Swiatkowski }
40656c0f4441SWojciech Drewek 
40666c0f4441SWojciech Drewek /**
40676c0f4441SWojciech Drewek  * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
40686c0f4441SWojciech Drewek  * @vsi: pointer to VSI structure
40696c0f4441SWojciech Drewek  * @set: set or unset the bit
40706c0f4441SWojciech Drewek  */
40716c0f4441SWojciech Drewek int
40726c0f4441SWojciech Drewek ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
40736c0f4441SWojciech Drewek {
40746c0f4441SWojciech Drewek 	struct ice_vsi_ctx ctx = {
40756c0f4441SWojciech Drewek 		.info	= vsi->info,
40766c0f4441SWojciech Drewek 	};
40776c0f4441SWojciech Drewek 
40786c0f4441SWojciech Drewek 	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
40796c0f4441SWojciech Drewek 	if (set)
40806c0f4441SWojciech Drewek 		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
40816c0f4441SWojciech Drewek 	else
40826c0f4441SWojciech Drewek 		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
40836c0f4441SWojciech Drewek 
40846c0f4441SWojciech Drewek 	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
40856c0f4441SWojciech Drewek 		return -ENODEV;
40866c0f4441SWojciech Drewek 
40876c0f4441SWojciech Drewek 	vsi->info = ctx.info;
40886c0f4441SWojciech Drewek 	return 0;
40896c0f4441SWojciech Drewek }
4090