// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev
 * mode.
 *
 * On failure all changes are unwound in reverse order via the goto
 * labels below and -ENODEV is returned.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_port_info *pi = pf->hw.port_info;
	/* tracks whether we installed the default Rx rule ourselves, so the
	 * error path only clears it when this function added it
	 */
	bool rule_added = false;

	ice_vsi_manage_vlan_stripping(ctrl_vsi, false);

	/* drop the uplink's legacy-mode filters; they are replaced by the
	 * default VSI rules below (and restored on the error path)
	 */
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
		goto err_def_rx;

	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
		goto err_def_tx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	/* enable loopback on the default Tx rule so traffic sent from the
	 * control VSI can reach VF VSIs on the same port
	 */
	if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
					    ICE_FLTR_TX,
					    ICE_SINGLE_ACT_LB_ENABLE))
		goto err_update_action;

	return 0;

err_update_action:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
err_def_tx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	/* restore the uplink MAC/broadcast filters removed above so legacy
	 * mode keeps working
	 */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev the number of allocated Tx/Rx rings is equal.
 *
 * This function fills the q_vector structures associated with each
 * representor and moves each ring pair to the port representor netdevs.
 * Each port representor gets one dedicated Tx/Rx ring pair, so the number
 * of ring pairs is equal to the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	/* queue q_id of the control VSI is paired 1:1 with VF q_id */
	ice_for_each_txq(vsi, q_id) {
		struct ice_repr *repr = pf->vf[q_id].repr;
		struct ice_q_vector *q_vector = repr->q_vector;
		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		/* all representor vectors share the control VSI's first
		 * register index
		 */
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 *
 * For each VF: remove legacy filters from its VSI, allocate a metadata
 * dst used to steer representor Tx, disable antispoof, and add the
 * default VLAN. Then build the control VSI's VSI-number -> representor
 * netdev lookup table used on the Rx path. Any failure unwinds the VFs
 * already configured and returns -ENODEV.
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			/* restore the VF's legacy filters before unwinding */
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			goto err;
		}

		if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		/* track the largest VSI number to size target_netdevs[] */
		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

	/* rebuild the VSI-number-indexed lookup table from scratch */
	kfree(ctrl_vsi->target_netdevs);

	ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
					   sizeof(*ctrl_vsi->target_netdevs),
					   GFP_KERNEL);
	if (!ctrl_vsi->target_netdevs)
		goto err;

	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;

		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	/* unwind only the VFs configured before the failing iteration;
	 * the failing VF itself was cleaned up inline above
	 */
	for (i = i - 1; i >= 0; i--) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);
	}

	return -ENODEV;
}

/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 *
 * Reverses ice_eswitch_setup_reprs(): restores antispoof and legacy
 * filters on each VF VSI, frees per-representor metadata dsts, deletes
 * the representor NAPI instances and the lookup table.
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	int i;

	kfree(ctrl_vsi->target_netdevs);
	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 *
 * Re-attaches the representor to a (possibly rebuilt) VF VSI and
 * re-disables antispoof on it. No-op unless switchdev is running.
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = &pf->vf[vsi->vf_id];
	repr = vf->repr;
	repr->src_vsi = vsi;
	/* VSI number may have changed across a rebuild; refresh the dst */
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		/* fall back to legacy filters so the VF stays reachable */
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packets transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Attaches the representor's metadata dst to the skb (so the Tx path can
 * steer it to the correct VF VSI) and hands it to the regular xmit path.
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
	skb_dst_drop(skb);
	/* hold a reference for the lifetime of the skb */
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 *
 * Without a metadata dst the frame is switched to the uplink; with one,
 * it is switched to the VSI whose number is carried in the dst port_id.
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings. Mirrors the error
 * unwind of ice_eswitch_setup_env() in reverse setup order.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 *
 * Returns the newly created control VSI, or NULL on failure (per
 * ice_vsi_setup() semantics).
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		netif_napi_del(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_enable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		napi_disable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
 * @vsi: VSI to setup rxdid on
 * @rxdid: flex descriptor id
 */
static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
{
	struct ice_hw *hw = &vsi->back->hw;
	int i;

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *ring = vsi->rx_rings[i];
		u16 pf_q = vsi->rxq_map[ring->q_index];

		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	}
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 *
 * Creates the control VSI, sets up the switchdev environment and the
 * port representors, opens the control VSI and enables its NAPI.
 * Returns 0 on success, -ENODEV on any failure (with full unwind).
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	/* control VSI needs the NIC-2 flex descriptor to read src_vsi on Rx */
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 *
 * Only records the requested mode; the actual switchdev resources are
 * created later in ice_eswitch_configure() when VFs are created.
 * Changing mode is rejected while any VF exists.
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (pf->num_alloc_vfs) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_get_target_netdev - return port representor netdev
 * @rx_ring: pointer to Rx ring
 * @rx_desc: pointer to Rx descriptor
 *
 * When working in switchdev mode context (when control VSI is used), this
 * function returns netdev of appropriate port representor. For non-switchdev
 * context, regular netdev associated with Rx ring is returned.
 */
struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_32b_rx_flex_desc_nic_2 *desc;
	struct ice_vsi *vsi = rx_ring->vsi;
	struct ice_vsi *control_vsi;
	u16 target_vsi_id;

	control_vsi = vsi->back->switchdev.control_vsi;
	if (vsi != control_vsi)
		return rx_ring->netdev;

	/* NIC-2 flex descriptor (set via ice_eswitch_set_rxdid()) carries
	 * the source VSI number used to index target_netdevs[]
	 */
	desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
	target_vsi_id = le16_to_cpu(desc->src_vsi);

	return vsi->target_netdevs[target_vsi_id];
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 *
 * Brings up switchdev resources if the devlink mode requests it and they
 * are not already running. Returns 0 on success or when nothing to do.
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_start_tx_queues(repr);
	}
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_stop_tx_queues(repr);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 *
 * Re-runs the switchdev setup sequence after a reset: environment,
 * representors, ring remap, TC filter replay, then reopens the control
 * VSI and restarts NAPI and representor Tx queues.
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}