13ea9bd5dSMichal Swiatkowski // SPDX-License-Identifier: GPL-2.0
23ea9bd5dSMichal Swiatkowski /* Copyright (C) 2019-2021, Intel Corporation. */
33ea9bd5dSMichal Swiatkowski 
43ea9bd5dSMichal Swiatkowski #include "ice.h"
51a1c40dfSGrzegorz Nitka #include "ice_lib.h"
63ea9bd5dSMichal Swiatkowski #include "ice_eswitch.h"
7*f6e8fb55SWojciech Drewek #include "ice_eswitch_br.h"
81a1c40dfSGrzegorz Nitka #include "ice_fltr.h"
91a1c40dfSGrzegorz Nitka #include "ice_repr.h"
103ea9bd5dSMichal Swiatkowski #include "ice_devlink.h"
117fde6d8bSMichal Swiatkowski #include "ice_tc_lib.h"
123ea9bd5dSMichal Swiatkowski 
133ea9bd5dSMichal Swiatkowski /**
140ef4479dSMichal Swiatkowski  * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
15c1e5da5dSWojciech Drewek  * @pf: pointer to PF struct
16c1e5da5dSWojciech Drewek  * @vf: pointer to VF struct
17c1e5da5dSWojciech Drewek  *
18c1e5da5dSWojciech Drewek  * This function adds advanced rule that forwards packets with
190ef4479dSMichal Swiatkowski  * VF's VSI index to the corresponding switchdev ctrl VSI queue.
20c1e5da5dSWojciech Drewek  */
210ef4479dSMichal Swiatkowski static int
ice_eswitch_add_vf_sp_rule(struct ice_pf * pf,struct ice_vf * vf)220ef4479dSMichal Swiatkowski ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
23c1e5da5dSWojciech Drewek {
24c1e5da5dSWojciech Drewek 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
25c1e5da5dSWojciech Drewek 	struct ice_adv_rule_info rule_info = { 0 };
26c1e5da5dSWojciech Drewek 	struct ice_adv_lkup_elem *list;
27c1e5da5dSWojciech Drewek 	struct ice_hw *hw = &pf->hw;
28c1e5da5dSWojciech Drewek 	const u16 lkups_cnt = 1;
29c1e5da5dSWojciech Drewek 	int err;
30c1e5da5dSWojciech Drewek 
31c1e5da5dSWojciech Drewek 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
32c1e5da5dSWojciech Drewek 	if (!list)
33c1e5da5dSWojciech Drewek 		return -ENOMEM;
34c1e5da5dSWojciech Drewek 
350ef4479dSMichal Swiatkowski 	ice_rule_add_src_vsi_metadata(list);
36c1e5da5dSWojciech Drewek 
370ef4479dSMichal Swiatkowski 	rule_info.sw_act.flag = ICE_FLTR_TX;
38c1e5da5dSWojciech Drewek 	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
39c1e5da5dSWojciech Drewek 	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
40c1e5da5dSWojciech Drewek 	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
41c1e5da5dSWojciech Drewek 				       ctrl_vsi->rxq_map[vf->vf_id];
42c1e5da5dSWojciech Drewek 	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
43c1e5da5dSWojciech Drewek 	rule_info.flags_info.act_valid = true;
44b70bc066SWojciech Drewek 	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
450ef4479dSMichal Swiatkowski 	rule_info.src_vsi = vf->lan_vsi_idx;
46c1e5da5dSWojciech Drewek 
47c1e5da5dSWojciech Drewek 	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
480ef4479dSMichal Swiatkowski 			       &vf->repr->sp_rule);
49c1e5da5dSWojciech Drewek 	if (err)
500ef4479dSMichal Swiatkowski 		dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
51c1e5da5dSWojciech Drewek 			vf->vf_id);
52c1e5da5dSWojciech Drewek 
53c1e5da5dSWojciech Drewek 	kfree(list);
54c1e5da5dSWojciech Drewek 	return err;
55c1e5da5dSWojciech Drewek }
56c1e5da5dSWojciech Drewek 
57c1e5da5dSWojciech Drewek /**
580ef4479dSMichal Swiatkowski  * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
59c1e5da5dSWojciech Drewek  * @vf: pointer to the VF struct
60c1e5da5dSWojciech Drewek  *
610ef4479dSMichal Swiatkowski  * Delete the advanced rule that was used to forward packets with the VF's VSI
620ef4479dSMichal Swiatkowski  * index to the corresponding switchdev ctrl VSI queue.
63c1e5da5dSWojciech Drewek  */
ice_eswitch_del_vf_sp_rule(struct ice_vf * vf)640ef4479dSMichal Swiatkowski static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
65c1e5da5dSWojciech Drewek {
660ef4479dSMichal Swiatkowski 	if (!vf->repr)
67c1e5da5dSWojciech Drewek 		return;
68c1e5da5dSWojciech Drewek 
690ef4479dSMichal Swiatkowski 	ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
70c1e5da5dSWojciech Drewek }
71c1e5da5dSWojciech Drewek 
72c1e5da5dSWojciech Drewek /**
731a1c40dfSGrzegorz Nitka  * ice_eswitch_setup_env - configure switchdev HW filters
741a1c40dfSGrzegorz Nitka  * @pf: pointer to PF struct
751a1c40dfSGrzegorz Nitka  *
761a1c40dfSGrzegorz Nitka  * This function adds HW filters configuration specific for switchdev
771a1c40dfSGrzegorz Nitka  * mode.
781a1c40dfSGrzegorz Nitka  */
ice_eswitch_setup_env(struct ice_pf * pf)791a1c40dfSGrzegorz Nitka static int ice_eswitch_setup_env(struct ice_pf *pf)
801a1c40dfSGrzegorz Nitka {
811a1c40dfSGrzegorz Nitka 	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
82c79bb28eSMarcin Szycik 	struct net_device *uplink_netdev = uplink_vsi->netdev;
831a1c40dfSGrzegorz Nitka 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
84c31af68aSBrett Creeley 	struct ice_vsi_vlan_ops *vlan_ops;
851a1c40dfSGrzegorz Nitka 	bool rule_added = false;
861a1c40dfSGrzegorz Nitka 
871a1c40dfSGrzegorz Nitka 	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
881a1c40dfSGrzegorz Nitka 
89c79bb28eSMarcin Szycik 	netif_addr_lock_bh(uplink_netdev);
90c79bb28eSMarcin Szycik 	__dev_uc_unsync(uplink_netdev, NULL);
91c79bb28eSMarcin Szycik 	__dev_mc_unsync(uplink_netdev, NULL);
92c79bb28eSMarcin Szycik 	netif_addr_unlock_bh(uplink_netdev);
93c79bb28eSMarcin Szycik 
943e0b5971SBrett Creeley 	if (ice_vsi_add_vlan_zero(uplink_vsi))
951a1c40dfSGrzegorz Nitka 		goto err_def_rx;
961a1c40dfSGrzegorz Nitka 
97d7393425SMichal Wilczynski 	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
98d7393425SMichal Wilczynski 		if (ice_set_dflt_vsi(uplink_vsi))
991a1c40dfSGrzegorz Nitka 			goto err_def_rx;
1001a1c40dfSGrzegorz Nitka 		rule_added = true;
1011a1c40dfSGrzegorz Nitka 	}
1021a1c40dfSGrzegorz Nitka 
1036ab11557SWojciech Drewek 	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
1046ab11557SWojciech Drewek 	if (vlan_ops->dis_rx_filtering(uplink_vsi))
1056ab11557SWojciech Drewek 		goto err_dis_rx;
1066ab11557SWojciech Drewek 
1071a1c40dfSGrzegorz Nitka 	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
1081a1c40dfSGrzegorz Nitka 		goto err_override_uplink;
1091a1c40dfSGrzegorz Nitka 
1101a1c40dfSGrzegorz Nitka 	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
1111a1c40dfSGrzegorz Nitka 		goto err_override_control;
1121a1c40dfSGrzegorz Nitka 
1136c0f4441SWojciech Drewek 	if (ice_vsi_update_local_lb(uplink_vsi, true))
1146c0f4441SWojciech Drewek 		goto err_override_local_lb;
1156c0f4441SWojciech Drewek 
1161a1c40dfSGrzegorz Nitka 	return 0;
1171a1c40dfSGrzegorz Nitka 
1186c0f4441SWojciech Drewek err_override_local_lb:
1196c0f4441SWojciech Drewek 	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
1201a1c40dfSGrzegorz Nitka err_override_control:
1211a1c40dfSGrzegorz Nitka 	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
1221a1c40dfSGrzegorz Nitka err_override_uplink:
1236ab11557SWojciech Drewek 	vlan_ops->ena_rx_filtering(uplink_vsi);
1246ab11557SWojciech Drewek err_dis_rx:
1251a1c40dfSGrzegorz Nitka 	if (rule_added)
126d7393425SMichal Wilczynski 		ice_clear_dflt_vsi(uplink_vsi);
1271a1c40dfSGrzegorz Nitka err_def_rx:
1281a1c40dfSGrzegorz Nitka 	ice_fltr_add_mac_and_broadcast(uplink_vsi,
1291a1c40dfSGrzegorz Nitka 				       uplink_vsi->port_info->mac.perm_addr,
1301a1c40dfSGrzegorz Nitka 				       ICE_FWD_TO_VSI);
1311a1c40dfSGrzegorz Nitka 	return -ENODEV;
1321a1c40dfSGrzegorz Nitka }
1331a1c40dfSGrzegorz Nitka 
/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev number of allocated Tx/Rx rings is equal.
 *
 * This function fills q_vectors structures associated with representor and
 * move each ring pairs to port representor netdevs. Each port representor
 * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
 * number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		/* Ring index maps 1:1 to VF ID; a missing VF is unexpected
		 * (WARN) and that ring pair is simply skipped.
		 */
		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		/* Attach the Tx ring to the representor's vector/netdev */
		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		/* Attach the Rx ring likewise */
		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		/* Drop the reference taken by ice_get_vf_by_id() */
		ice_put_vf(vf);
	}
}
1881a1c40dfSGrzegorz Nitka 
1891a1c40dfSGrzegorz Nitka /**
190df830543SJacob Keller  * ice_eswitch_release_reprs - clear PR VSIs configuration
191df830543SJacob Keller  * @pf: poiner to PF struct
192df830543SJacob Keller  * @ctrl_vsi: pointer to switchdev control VSI
193df830543SJacob Keller  */
194df830543SJacob Keller static void
ice_eswitch_release_reprs(struct ice_pf * pf,struct ice_vsi * ctrl_vsi)195df830543SJacob Keller ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
196df830543SJacob Keller {
197c4c2c7dbSJacob Keller 	struct ice_vf *vf;
198c4c2c7dbSJacob Keller 	unsigned int bkt;
199df830543SJacob Keller 
2003d5985a1SJacob Keller 	lockdep_assert_held(&pf->vfs.table_lock);
2013d5985a1SJacob Keller 
202c4c2c7dbSJacob Keller 	ice_for_each_vf(pf, bkt, vf) {
203c4c2c7dbSJacob Keller 		struct ice_vsi *vsi = vf->repr->src_vsi;
204df830543SJacob Keller 
205df830543SJacob Keller 		/* Skip VFs that aren't configured */
206df830543SJacob Keller 		if (!vf->repr->dst)
207df830543SJacob Keller 			continue;
208df830543SJacob Keller 
209df830543SJacob Keller 		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
210df830543SJacob Keller 		metadata_dst_free(vf->repr->dst);
211df830543SJacob Keller 		vf->repr->dst = NULL;
2120ef4479dSMichal Swiatkowski 		ice_eswitch_del_vf_sp_rule(vf);
213e0645311SJacob Keller 		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
214df830543SJacob Keller 					       ICE_FWD_TO_VSI);
215df830543SJacob Keller 
216df830543SJacob Keller 		netif_napi_del(&vf->repr->q_vector->napi);
217df830543SJacob Keller 	}
218df830543SJacob Keller }
219df830543SJacob Keller 
220df830543SJacob Keller /**
2211a1c40dfSGrzegorz Nitka  * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
2221a1c40dfSGrzegorz Nitka  * @pf: pointer to PF struct
2231a1c40dfSGrzegorz Nitka  */
ice_eswitch_setup_reprs(struct ice_pf * pf)2241a1c40dfSGrzegorz Nitka static int ice_eswitch_setup_reprs(struct ice_pf *pf)
2251a1c40dfSGrzegorz Nitka {
2261a1c40dfSGrzegorz Nitka 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
2271a1c40dfSGrzegorz Nitka 	int max_vsi_num = 0;
228c4c2c7dbSJacob Keller 	struct ice_vf *vf;
229c4c2c7dbSJacob Keller 	unsigned int bkt;
2301a1c40dfSGrzegorz Nitka 
2313d5985a1SJacob Keller 	lockdep_assert_held(&pf->vfs.table_lock);
2323d5985a1SJacob Keller 
233c4c2c7dbSJacob Keller 	ice_for_each_vf(pf, bkt, vf) {
234c4c2c7dbSJacob Keller 		struct ice_vsi *vsi = vf->repr->src_vsi;
2351a1c40dfSGrzegorz Nitka 
2361a1c40dfSGrzegorz Nitka 		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
2371a1c40dfSGrzegorz Nitka 		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
2381a1c40dfSGrzegorz Nitka 						   GFP_KERNEL);
2391a1c40dfSGrzegorz Nitka 		if (!vf->repr->dst) {
2400ef4479dSMichal Swiatkowski 			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
2410ef4479dSMichal Swiatkowski 						       ICE_FWD_TO_VSI);
2420ef4479dSMichal Swiatkowski 			goto err;
2430ef4479dSMichal Swiatkowski 		}
2440ef4479dSMichal Swiatkowski 
2450ef4479dSMichal Swiatkowski 		if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
2460ef4479dSMichal Swiatkowski 			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
2471a1c40dfSGrzegorz Nitka 						       ICE_FWD_TO_VSI);
2481a1c40dfSGrzegorz Nitka 			goto err;
2491a1c40dfSGrzegorz Nitka 		}
2501a1c40dfSGrzegorz Nitka 
2511a1c40dfSGrzegorz Nitka 		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
2520ef4479dSMichal Swiatkowski 			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
2531a1c40dfSGrzegorz Nitka 						       ICE_FWD_TO_VSI);
2540ef4479dSMichal Swiatkowski 			ice_eswitch_del_vf_sp_rule(vf);
2551a1c40dfSGrzegorz Nitka 			metadata_dst_free(vf->repr->dst);
256df830543SJacob Keller 			vf->repr->dst = NULL;
2571a1c40dfSGrzegorz Nitka 			goto err;
2581a1c40dfSGrzegorz Nitka 		}
2591a1c40dfSGrzegorz Nitka 
2603e0b5971SBrett Creeley 		if (ice_vsi_add_vlan_zero(vsi)) {
2610ef4479dSMichal Swiatkowski 			ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
2621a1c40dfSGrzegorz Nitka 						       ICE_FWD_TO_VSI);
2630ef4479dSMichal Swiatkowski 			ice_eswitch_del_vf_sp_rule(vf);
2641a1c40dfSGrzegorz Nitka 			metadata_dst_free(vf->repr->dst);
265df830543SJacob Keller 			vf->repr->dst = NULL;
2661a1c40dfSGrzegorz Nitka 			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
2671a1c40dfSGrzegorz Nitka 			goto err;
2681a1c40dfSGrzegorz Nitka 		}
2691a1c40dfSGrzegorz Nitka 
2701a1c40dfSGrzegorz Nitka 		if (max_vsi_num < vsi->vsi_num)
2711a1c40dfSGrzegorz Nitka 			max_vsi_num = vsi->vsi_num;
2721a1c40dfSGrzegorz Nitka 
273b48b89f9SJakub Kicinski 		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
274b48b89f9SJakub Kicinski 			       ice_napi_poll);
2751a1c40dfSGrzegorz Nitka 
2761a1c40dfSGrzegorz Nitka 		netif_keep_dst(vf->repr->netdev);
2771a1c40dfSGrzegorz Nitka 	}
2781a1c40dfSGrzegorz Nitka 
279c4c2c7dbSJacob Keller 	ice_for_each_vf(pf, bkt, vf) {
280c4c2c7dbSJacob Keller 		struct ice_repr *repr = vf->repr;
2811a1c40dfSGrzegorz Nitka 		struct ice_vsi *vsi = repr->src_vsi;
2821a1c40dfSGrzegorz Nitka 		struct metadata_dst *dst;
2831a1c40dfSGrzegorz Nitka 
2841a1c40dfSGrzegorz Nitka 		dst = repr->dst;
2851a1c40dfSGrzegorz Nitka 		dst->u.port_info.port_id = vsi->vsi_num;
2861a1c40dfSGrzegorz Nitka 		dst->u.port_info.lower_dev = repr->netdev;
2871a1c40dfSGrzegorz Nitka 		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
2881a1c40dfSGrzegorz Nitka 	}
2891a1c40dfSGrzegorz Nitka 
2901a1c40dfSGrzegorz Nitka 	return 0;
2911a1c40dfSGrzegorz Nitka 
2921a1c40dfSGrzegorz Nitka err:
293df830543SJacob Keller 	ice_eswitch_release_reprs(pf, ctrl_vsi);
2941a1c40dfSGrzegorz Nitka 
2951a1c40dfSGrzegorz Nitka 	return -ENODEV;
2961a1c40dfSGrzegorz Nitka }
2971a1c40dfSGrzegorz Nitka 
2981a1c40dfSGrzegorz Nitka /**
2991c54c839SGrzegorz Nitka  * ice_eswitch_update_repr - reconfigure VF port representor
3001c54c839SGrzegorz Nitka  * @vsi: VF VSI for which port representor is configured
3011c54c839SGrzegorz Nitka  */
ice_eswitch_update_repr(struct ice_vsi * vsi)3021c54c839SGrzegorz Nitka void ice_eswitch_update_repr(struct ice_vsi *vsi)
3031c54c839SGrzegorz Nitka {
3041c54c839SGrzegorz Nitka 	struct ice_pf *pf = vsi->back;
3051c54c839SGrzegorz Nitka 	struct ice_repr *repr;
3061c54c839SGrzegorz Nitka 	struct ice_vf *vf;
3071c54c839SGrzegorz Nitka 	int ret;
3081c54c839SGrzegorz Nitka 
3091c54c839SGrzegorz Nitka 	if (!ice_is_switchdev_running(pf))
3101c54c839SGrzegorz Nitka 		return;
3111c54c839SGrzegorz Nitka 
312b03d519dSJacob Keller 	vf = vsi->vf;
3131c54c839SGrzegorz Nitka 	repr = vf->repr;
3141c54c839SGrzegorz Nitka 	repr->src_vsi = vsi;
3151c54c839SGrzegorz Nitka 	repr->dst->u.port_info.port_id = vsi->vsi_num;
3161c54c839SGrzegorz Nitka 
317*f6e8fb55SWojciech Drewek 	if (repr->br_port)
318*f6e8fb55SWojciech Drewek 		repr->br_port->vsi = vsi;
319*f6e8fb55SWojciech Drewek 
3201c54c839SGrzegorz Nitka 	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
3211c54c839SGrzegorz Nitka 	if (ret) {
322e0645311SJacob Keller 		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
323b03d519dSJacob Keller 		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
324b03d519dSJacob Keller 			vsi->vf->vf_id);
3251c54c839SGrzegorz Nitka 	}
3261c54c839SGrzegorz Nitka }
3271c54c839SGrzegorz Nitka 
3281c54c839SGrzegorz Nitka /**
329f5396b8aSGrzegorz Nitka  * ice_eswitch_port_start_xmit - callback for packets transmit
330f5396b8aSGrzegorz Nitka  * @skb: send buffer
331f5396b8aSGrzegorz Nitka  * @netdev: network interface device structure
332f5396b8aSGrzegorz Nitka  *
333f5396b8aSGrzegorz Nitka  * Returns NETDEV_TX_OK if sent, else an error code
334f5396b8aSGrzegorz Nitka  */
335f5396b8aSGrzegorz Nitka netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff * skb,struct net_device * netdev)336f5396b8aSGrzegorz Nitka ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
337f5396b8aSGrzegorz Nitka {
338f5396b8aSGrzegorz Nitka 	struct ice_netdev_priv *np;
339f5396b8aSGrzegorz Nitka 	struct ice_repr *repr;
340f5396b8aSGrzegorz Nitka 	struct ice_vsi *vsi;
341f5396b8aSGrzegorz Nitka 
342f5396b8aSGrzegorz Nitka 	np = netdev_priv(netdev);
343f5396b8aSGrzegorz Nitka 	vsi = np->vsi;
344f5396b8aSGrzegorz Nitka 
3457aa529a6SWojciech Drewek 	if (!vsi || !ice_is_switchdev_running(vsi->back))
3467aa529a6SWojciech Drewek 		return NETDEV_TX_BUSY;
3477aa529a6SWojciech Drewek 
348d2016651SWojciech Drewek 	if (ice_is_reset_in_progress(vsi->back->state) ||
349d2016651SWojciech Drewek 	    test_bit(ICE_VF_DIS, vsi->back->state))
350f5396b8aSGrzegorz Nitka 		return NETDEV_TX_BUSY;
351f5396b8aSGrzegorz Nitka 
352f5396b8aSGrzegorz Nitka 	repr = ice_netdev_to_repr(netdev);
353f5396b8aSGrzegorz Nitka 	skb_dst_drop(skb);
354f5396b8aSGrzegorz Nitka 	dst_hold((struct dst_entry *)repr->dst);
355f5396b8aSGrzegorz Nitka 	skb_dst_set(skb, (struct dst_entry *)repr->dst);
356f5396b8aSGrzegorz Nitka 	skb->queue_mapping = repr->vf->vf_id;
357f5396b8aSGrzegorz Nitka 
358f5396b8aSGrzegorz Nitka 	return ice_start_xmit(skb, netdev);
359f5396b8aSGrzegorz Nitka }
360f5396b8aSGrzegorz Nitka 
361f5396b8aSGrzegorz Nitka /**
362f5396b8aSGrzegorz Nitka  * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
363f5396b8aSGrzegorz Nitka  * @skb: pointer to send buffer
364f5396b8aSGrzegorz Nitka  * @off: pointer to offload struct
365f5396b8aSGrzegorz Nitka  */
366f5396b8aSGrzegorz Nitka void
ice_eswitch_set_target_vsi(struct sk_buff * skb,struct ice_tx_offload_params * off)367f5396b8aSGrzegorz Nitka ice_eswitch_set_target_vsi(struct sk_buff *skb,
368f5396b8aSGrzegorz Nitka 			   struct ice_tx_offload_params *off)
369f5396b8aSGrzegorz Nitka {
370f5396b8aSGrzegorz Nitka 	struct metadata_dst *dst = skb_metadata_dst(skb);
371f5396b8aSGrzegorz Nitka 	u64 cd_cmd, dst_vsi;
372f5396b8aSGrzegorz Nitka 
373f5396b8aSGrzegorz Nitka 	if (!dst) {
374f5396b8aSGrzegorz Nitka 		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
375f5396b8aSGrzegorz Nitka 		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
376f5396b8aSGrzegorz Nitka 	} else {
377f5396b8aSGrzegorz Nitka 		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
378f5396b8aSGrzegorz Nitka 		dst_vsi = ((u64)dst->u.port_info.port_id <<
379f5396b8aSGrzegorz Nitka 			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
380f5396b8aSGrzegorz Nitka 		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
381f5396b8aSGrzegorz Nitka 	}
382f5396b8aSGrzegorz Nitka }
383f5396b8aSGrzegorz Nitka 
384f5396b8aSGrzegorz Nitka /**
3851a1c40dfSGrzegorz Nitka  * ice_eswitch_release_env - clear switchdev HW filters
3861a1c40dfSGrzegorz Nitka  * @pf: pointer to PF struct
3871a1c40dfSGrzegorz Nitka  *
3881a1c40dfSGrzegorz Nitka  * This function removes HW filters configuration specific for switchdev
3891a1c40dfSGrzegorz Nitka  * mode and restores default legacy mode settings.
3901a1c40dfSGrzegorz Nitka  */
ice_eswitch_release_env(struct ice_pf * pf)3911a1c40dfSGrzegorz Nitka static void ice_eswitch_release_env(struct ice_pf *pf)
3921a1c40dfSGrzegorz Nitka {
3931a1c40dfSGrzegorz Nitka 	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
3941a1c40dfSGrzegorz Nitka 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
3956ab11557SWojciech Drewek 	struct ice_vsi_vlan_ops *vlan_ops;
3966ab11557SWojciech Drewek 
3976ab11557SWojciech Drewek 	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
3981a1c40dfSGrzegorz Nitka 
3996c0f4441SWojciech Drewek 	ice_vsi_update_local_lb(uplink_vsi, false);
4001a1c40dfSGrzegorz Nitka 	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
4011a1c40dfSGrzegorz Nitka 	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
4026ab11557SWojciech Drewek 	vlan_ops->ena_rx_filtering(uplink_vsi);
403d7393425SMichal Wilczynski 	ice_clear_dflt_vsi(uplink_vsi);
4041a1c40dfSGrzegorz Nitka 	ice_fltr_add_mac_and_broadcast(uplink_vsi,
4051a1c40dfSGrzegorz Nitka 				       uplink_vsi->port_info->mac.perm_addr,
4061a1c40dfSGrzegorz Nitka 				       ICE_FWD_TO_VSI);
4071a1c40dfSGrzegorz Nitka }
4081a1c40dfSGrzegorz Nitka 
4091a1c40dfSGrzegorz Nitka /**
4101a1c40dfSGrzegorz Nitka  * ice_eswitch_vsi_setup - configure switchdev control VSI
4111a1c40dfSGrzegorz Nitka  * @pf: pointer to PF structure
4121a1c40dfSGrzegorz Nitka  * @pi: pointer to port_info structure
4131a1c40dfSGrzegorz Nitka  */
4141a1c40dfSGrzegorz Nitka static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)4151a1c40dfSGrzegorz Nitka ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
4161a1c40dfSGrzegorz Nitka {
4175e509ab2SJacob Keller 	struct ice_vsi_cfg_params params = {};
4185e509ab2SJacob Keller 
4195e509ab2SJacob Keller 	params.type = ICE_VSI_SWITCHDEV_CTRL;
4205e509ab2SJacob Keller 	params.pi = pi;
4215e509ab2SJacob Keller 	params.flags = ICE_VSI_FLAG_INIT;
4225e509ab2SJacob Keller 
4235e509ab2SJacob Keller 	return ice_vsi_setup(pf, &params);
4241a1c40dfSGrzegorz Nitka }
4251a1c40dfSGrzegorz Nitka 
4261a1c40dfSGrzegorz Nitka /**
427b3be918dSGrzegorz Nitka  * ice_eswitch_napi_del - remove NAPI handle for all port representors
428b3be918dSGrzegorz Nitka  * @pf: pointer to PF structure
429b3be918dSGrzegorz Nitka  */
ice_eswitch_napi_del(struct ice_pf * pf)430b3be918dSGrzegorz Nitka static void ice_eswitch_napi_del(struct ice_pf *pf)
431b3be918dSGrzegorz Nitka {
432c4c2c7dbSJacob Keller 	struct ice_vf *vf;
433c4c2c7dbSJacob Keller 	unsigned int bkt;
434b3be918dSGrzegorz Nitka 
4353d5985a1SJacob Keller 	lockdep_assert_held(&pf->vfs.table_lock);
4363d5985a1SJacob Keller 
437c4c2c7dbSJacob Keller 	ice_for_each_vf(pf, bkt, vf)
438c4c2c7dbSJacob Keller 		netif_napi_del(&vf->repr->q_vector->napi);
439b3be918dSGrzegorz Nitka }
440b3be918dSGrzegorz Nitka 
441b3be918dSGrzegorz Nitka /**
4421a1c40dfSGrzegorz Nitka  * ice_eswitch_napi_enable - enable NAPI for all port representors
4431a1c40dfSGrzegorz Nitka  * @pf: pointer to PF structure
4441a1c40dfSGrzegorz Nitka  */
ice_eswitch_napi_enable(struct ice_pf * pf)4451a1c40dfSGrzegorz Nitka static void ice_eswitch_napi_enable(struct ice_pf *pf)
4461a1c40dfSGrzegorz Nitka {
447c4c2c7dbSJacob Keller 	struct ice_vf *vf;
448c4c2c7dbSJacob Keller 	unsigned int bkt;
4491a1c40dfSGrzegorz Nitka 
4503d5985a1SJacob Keller 	lockdep_assert_held(&pf->vfs.table_lock);
4513d5985a1SJacob Keller 
452c4c2c7dbSJacob Keller 	ice_for_each_vf(pf, bkt, vf)
453c4c2c7dbSJacob Keller 		napi_enable(&vf->repr->q_vector->napi);
4541a1c40dfSGrzegorz Nitka }
4551a1c40dfSGrzegorz Nitka 
4561a1c40dfSGrzegorz Nitka /**
4571a1c40dfSGrzegorz Nitka  * ice_eswitch_napi_disable - disable NAPI for all port representors
4581a1c40dfSGrzegorz Nitka  * @pf: pointer to PF structure
4591a1c40dfSGrzegorz Nitka  */
ice_eswitch_napi_disable(struct ice_pf * pf)4601a1c40dfSGrzegorz Nitka static void ice_eswitch_napi_disable(struct ice_pf *pf)
4611a1c40dfSGrzegorz Nitka {
462c4c2c7dbSJacob Keller 	struct ice_vf *vf;
463c4c2c7dbSJacob Keller 	unsigned int bkt;
4641a1c40dfSGrzegorz Nitka 
4653d5985a1SJacob Keller 	lockdep_assert_held(&pf->vfs.table_lock);
4663d5985a1SJacob Keller 
467c4c2c7dbSJacob Keller 	ice_for_each_vf(pf, bkt, vf)
468c4c2c7dbSJacob Keller 		napi_disable(&vf->repr->q_vector->napi);
4691a1c40dfSGrzegorz Nitka }
4701a1c40dfSGrzegorz Nitka 
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 *
 * Create the control VSI, set up the switchdev environment, the per-VF
 * representors and bridge offloads, then enable NAPI. Failures unwind in
 * reverse order via the goto labels.
 *
 * Return: 0 on success, -ENODEV or -EINVAL on failure.
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi, *uplink_vsi;

	uplink_vsi = ice_get_main_vsi(pf);
	if (!uplink_vsi)
		return -ENODEV;

	/* A bridged uplink would conflict with eswitch bridge offload */
	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
		dev_err(ice_pf_to_dev(pf),
			"Uplink port cannot be a bridge port\n");
		return -EINVAL;
	}

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = uplink_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	if (ice_eswitch_br_offloads_init(pf))
		goto err_br_offloads;

	ice_eswitch_napi_enable(pf);

	return 0;

err_br_offloads:
	ice_vsi_close(ctrl_vsi);
err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}
5271a1c40dfSGrzegorz Nitka 
/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 *
 * Tear down switchdev mode in the reverse order of
 * ice_eswitch_enable_switchdev().
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_br_offloads_deinit(pf);
	ice_eswitch_release_env(pf);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}
5431a1c40dfSGrzegorz Nitka 
5441a1c40dfSGrzegorz Nitka /**
5453ea9bd5dSMichal Swiatkowski  * ice_eswitch_mode_set - set new eswitch mode
5463ea9bd5dSMichal Swiatkowski  * @devlink: pointer to devlink structure
5473ea9bd5dSMichal Swiatkowski  * @mode: eswitch mode to switch to
5483ea9bd5dSMichal Swiatkowski  * @extack: pointer to extack structure
5493ea9bd5dSMichal Swiatkowski  */
5503ea9bd5dSMichal Swiatkowski int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);

	/* Requested mode already active - nothing to do. */
	if (mode == pf->eswitch_mode)
		return 0;

	/* Mode transitions are only permitted while no VFs exist. */
	if (ice_has_vfs(pf)) {
		dev_info(dev, "Changing eswitch mode is allowed only if there is no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
		return -EOPNOTSUPP;
	}

	if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
		dev_info(dev, "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
	} else if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		/* ADQ and switchdev are mutually exclusive. */
		if (ice_is_adq_active(pf)) {
			dev_err(dev, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			return -EOPNOTSUPP;
		}

		dev_info(dev, "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}
5923ea9bd5dSMichal Swiatkowski 
5933ea9bd5dSMichal Swiatkowski /**
5943ea9bd5dSMichal Swiatkowski  * ice_eswitch_mode_get - get current eswitch mode
5953ea9bd5dSMichal Swiatkowski  * @devlink: pointer to devlink structure
5963ea9bd5dSMichal Swiatkowski  * @mode: output parameter for current eswitch mode
5973ea9bd5dSMichal Swiatkowski  */
ice_eswitch_mode_get(struct devlink * devlink,u16 * mode)5983ea9bd5dSMichal Swiatkowski int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
5991a1c40dfSGrzegorz Nitka {
6001a1c40dfSGrzegorz Nitka 	struct ice_pf *pf = devlink_priv(devlink);
6011c54c839SGrzegorz Nitka 
6021c54c839SGrzegorz Nitka 	*mode = pf->eswitch_mode;
6031c54c839SGrzegorz Nitka 	return 0;
6041c54c839SGrzegorz Nitka }
6051c54c839SGrzegorz Nitka 
6061c54c839SGrzegorz Nitka /**
6071c54c839SGrzegorz Nitka  * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
6081c54c839SGrzegorz Nitka  * @pf: pointer to PF structure
6091c54c839SGrzegorz Nitka  *
6101c54c839SGrzegorz Nitka  * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
6111c54c839SGrzegorz Nitka  * false otherwise.
6121c54c839SGrzegorz Nitka  */
ice_is_eswitch_mode_switchdev(struct ice_pf * pf)6131a1c40dfSGrzegorz Nitka bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
6141a1c40dfSGrzegorz Nitka {
6151a1c40dfSGrzegorz Nitka 	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
6161a1c40dfSGrzegorz Nitka }
6171a1c40dfSGrzegorz Nitka 
6181a1c40dfSGrzegorz Nitka /**
6191a1c40dfSGrzegorz Nitka  * ice_eswitch_release - cleanup eswitch
6201a1c40dfSGrzegorz Nitka  * @pf: pointer to PF structure
6211a1c40dfSGrzegorz Nitka  */
ice_eswitch_release(struct ice_pf * pf)6221a1c40dfSGrzegorz Nitka void ice_eswitch_release(struct ice_pf *pf)
6231a1c40dfSGrzegorz Nitka {
6241a1c40dfSGrzegorz Nitka 	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
6251a1c40dfSGrzegorz Nitka 		return;
6261a1c40dfSGrzegorz Nitka 
6271a1c40dfSGrzegorz Nitka 	ice_eswitch_disable_switchdev(pf);
6281a1c40dfSGrzegorz Nitka 	pf->switchdev.is_running = false;
6291a1c40dfSGrzegorz Nitka }
6301a1c40dfSGrzegorz Nitka 
6311a1c40dfSGrzegorz Nitka /**
6321a1c40dfSGrzegorz Nitka  * ice_eswitch_configure - configure eswitch
6331a1c40dfSGrzegorz Nitka  * @pf: pointer to PF structure
6341a1c40dfSGrzegorz Nitka  */
ice_eswitch_configure(struct ice_pf * pf)6351a1c40dfSGrzegorz Nitka int ice_eswitch_configure(struct ice_pf *pf)
6361a1c40dfSGrzegorz Nitka {
6371a1c40dfSGrzegorz Nitka 	int status;
6381a1c40dfSGrzegorz Nitka 
6391a1c40dfSGrzegorz Nitka 	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
6401a1c40dfSGrzegorz Nitka 		return 0;
6411a1c40dfSGrzegorz Nitka 
6421a1c40dfSGrzegorz Nitka 	status = ice_eswitch_enable_switchdev(pf);
643b3be918dSGrzegorz Nitka 	if (status)
644b3be918dSGrzegorz Nitka 		return status;
645b3be918dSGrzegorz Nitka 
646b3be918dSGrzegorz Nitka 	pf->switchdev.is_running = true;
647b3be918dSGrzegorz Nitka 	return 0;
648b3be918dSGrzegorz Nitka }
649b3be918dSGrzegorz Nitka 
650c4c2c7dbSJacob Keller /**
651c4c2c7dbSJacob Keller  * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
652b3be918dSGrzegorz Nitka  * @pf: pointer to PF structure
6533d5985a1SJacob Keller  */
ice_eswitch_start_all_tx_queues(struct ice_pf * pf)6543d5985a1SJacob Keller static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
655b3be918dSGrzegorz Nitka {
656b3be918dSGrzegorz Nitka 	struct ice_vf *vf;
657b3be918dSGrzegorz Nitka 	unsigned int bkt;
658c4c2c7dbSJacob Keller 
659c4c2c7dbSJacob Keller 	lockdep_assert_held(&pf->vfs.table_lock);
660c4c2c7dbSJacob Keller 
661b3be918dSGrzegorz Nitka 	if (test_bit(ICE_DOWN, pf->state))
662b3be918dSGrzegorz Nitka 		return;
663b3be918dSGrzegorz Nitka 
664b3be918dSGrzegorz Nitka 	ice_for_each_vf(pf, bkt, vf) {
665b3be918dSGrzegorz Nitka 		if (vf->repr)
666b3be918dSGrzegorz Nitka 			ice_repr_start_tx_queues(vf->repr);
667b3be918dSGrzegorz Nitka 	}
668b3be918dSGrzegorz Nitka }
669b3be918dSGrzegorz Nitka 
670c4c2c7dbSJacob Keller /**
671c4c2c7dbSJacob Keller  * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
672b3be918dSGrzegorz Nitka  * @pf: pointer to PF structure
6733d5985a1SJacob Keller  */
ice_eswitch_stop_all_tx_queues(struct ice_pf * pf)6743d5985a1SJacob Keller void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
675b3be918dSGrzegorz Nitka {
676b3be918dSGrzegorz Nitka 	struct ice_vf *vf;
677b3be918dSGrzegorz Nitka 	unsigned int bkt;
678c4c2c7dbSJacob Keller 
679c4c2c7dbSJacob Keller 	lockdep_assert_held(&pf->vfs.table_lock);
680c4c2c7dbSJacob Keller 
681b3be918dSGrzegorz Nitka 	if (test_bit(ICE_DOWN, pf->state))
682b3be918dSGrzegorz Nitka 		return;
683b3be918dSGrzegorz Nitka 
684b3be918dSGrzegorz Nitka 	ice_for_each_vf(pf, bkt, vf) {
685b3be918dSGrzegorz Nitka 		if (vf->repr)
686b3be918dSGrzegorz Nitka 			ice_repr_stop_tx_queues(vf->repr);
687b3be918dSGrzegorz Nitka 	}
688b3be918dSGrzegorz Nitka }
689b3be918dSGrzegorz Nitka 
690b3be918dSGrzegorz Nitka /**
691b3be918dSGrzegorz Nitka  * ice_eswitch_rebuild - rebuild eswitch
692b3be918dSGrzegorz Nitka  * @pf: pointer to PF structure
693b3be918dSGrzegorz Nitka  */
ice_eswitch_rebuild(struct ice_pf * pf)694b3be918dSGrzegorz Nitka int ice_eswitch_rebuild(struct ice_pf *pf)
695b3be918dSGrzegorz Nitka {
696b3be918dSGrzegorz Nitka 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
697b3be918dSGrzegorz Nitka 	int status;
698b3be918dSGrzegorz Nitka 
699b3be918dSGrzegorz Nitka 	ice_eswitch_napi_disable(pf);
700b3be918dSGrzegorz Nitka 	ice_eswitch_napi_del(pf);
701b3be918dSGrzegorz Nitka 
702b3be918dSGrzegorz Nitka 	status = ice_eswitch_setup_env(pf);
703b3be918dSGrzegorz Nitka 	if (status)
704b3be918dSGrzegorz Nitka 		return status;
705b3be918dSGrzegorz Nitka 
7067fde6d8bSMichal Swiatkowski 	status = ice_eswitch_setup_reprs(pf);
7077fde6d8bSMichal Swiatkowski 	if (status)
708b3be918dSGrzegorz Nitka 		return status;
709b3be918dSGrzegorz Nitka 
710b3be918dSGrzegorz Nitka 	ice_eswitch_remap_rings_to_vectors(pf);
711b3be918dSGrzegorz Nitka 
712b3be918dSGrzegorz Nitka 	ice_replay_tc_fltrs(pf);
713b3be918dSGrzegorz Nitka 
714b3be918dSGrzegorz Nitka 	status = ice_vsi_open(ctrl_vsi);
715b3be918dSGrzegorz Nitka 	if (status)
716b3be918dSGrzegorz Nitka 		return status;
717 
718 	ice_eswitch_napi_enable(pf);
719 	ice_eswitch_start_all_tx_queues(pf);
720 
721 	return 0;
722 }
723